xref: /titanic_52/usr/src/uts/common/io/igb/igb_gld.c (revision e373b6e444a487df89a5dc6a4a4030b5b2aa87f9)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright(c) 2007-2010 Intel Corporation. All rights reserved.
24  */
25 
26 /*
27  * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
28  * Copyright 2013, Nexenta Systems, Inc. All rights reserved.
29  * Copyright 2014 Pluribus Networks Inc.
30  * Copyright 2016 OmniTI Computer Consulting, Inc. All rights reserved.
31  * Copyright (c) 2017, Joyent, Inc.
32  */
33 
34 #include "igb_sw.h"
35 
/*
 * igb_m_stat - GLDv3 callback to retrieve a single statistic.
 *
 * Most hardware statistics registers on this device are clear-on-read,
 * so each read is accumulated into a running software counter in the
 * igb_t before the total is returned through 'val'.
 *
 * Returns 0 on success, ECANCELED if the device is suspended, ENOTSUP
 * for an unrecognized statistic, and EIO if the FMA access check on the
 * register handle fails after reading.
 */
int
igb_m_stat(void *arg, uint_t stat, uint64_t *val)
{
	igb_t *igb = (igb_t *)arg;
	struct e1000_hw *hw = &igb->hw;
	igb_stat_t *igb_ks;
	uint32_t low_val, high_val;

	igb_ks = (igb_stat_t *)igb->igb_ks->ks_data;

	mutex_enter(&igb->gen_lock);

	/* No register access while suspended */
	if (igb->igb_state & IGB_SUSPENDED) {
		mutex_exit(&igb->gen_lock);
		return (ECANCELED);
	}

	switch (stat) {
	case MAC_STAT_IFSPEED:
		/* link_speed is kept in Mbps; report bits per second */
		*val = igb->link_speed * 1000000ull;
		break;

	case MAC_STAT_MULTIRCV:
		igb->stat_mprc += E1000_READ_REG(hw, E1000_MPRC);
		*val = igb->stat_mprc;
		break;

	case MAC_STAT_BRDCSTRCV:
		igb->stat_bprc += E1000_READ_REG(hw, E1000_BPRC);
		*val = igb->stat_bprc;
		break;

	case MAC_STAT_MULTIXMT:
		igb->stat_mptc += E1000_READ_REG(hw, E1000_MPTC);
		*val = igb->stat_mptc;
		break;

	case MAC_STAT_BRDCSTXMT:
		igb->stat_bptc += E1000_READ_REG(hw, E1000_BPTC);
		*val = igb->stat_bptc;
		break;

	case MAC_STAT_NORCVBUF:
		igb->stat_rnbc += E1000_READ_REG(hw, E1000_RNBC);
		*val = igb->stat_rnbc;
		break;

	case MAC_STAT_IERRORS:
		/*
		 * Input errors are the sum of the individual receive
		 * error counters: rx errors, alignment errors, receive
		 * length errors, CRC errors, and carrier extension errors.
		 */
		igb->stat_rxerrc += E1000_READ_REG(hw, E1000_RXERRC);
		igb->stat_algnerrc += E1000_READ_REG(hw, E1000_ALGNERRC);
		igb_ks->rlec.value.ui64 +=
		    E1000_READ_REG(hw, E1000_RLEC);
		igb->stat_crcerrs += E1000_READ_REG(hw, E1000_CRCERRS);
		igb->stat_cexterr += E1000_READ_REG(hw, E1000_CEXTERR);
		*val = igb->stat_rxerrc +
		    igb->stat_algnerrc +
		    igb_ks->rlec.value.ui64 +
		    igb->stat_crcerrs +
		    igb->stat_cexterr;
		break;

	case MAC_STAT_NOXMTBUF:
		*val = 0;
		break;

	case MAC_STAT_OERRORS:
		igb->stat_ecol += E1000_READ_REG(hw, E1000_ECOL);
		*val = igb->stat_ecol;
		break;

	case MAC_STAT_COLLISIONS:
		igb->stat_colc += E1000_READ_REG(hw, E1000_COLC);
		*val = igb->stat_colc;
		break;

	case MAC_STAT_RBYTES:
		/*
		 * The 64-bit register will reset whenever the upper
		 * 32 bits are read. So we need to read the lower
		 * 32 bits first, then read the upper 32 bits.
		 */
		low_val = E1000_READ_REG(hw, E1000_TORL);
		high_val = E1000_READ_REG(hw, E1000_TORH);
		igb->stat_tor += (uint64_t)high_val << 32 | (uint64_t)low_val;
		*val = igb->stat_tor;
		break;

	case MAC_STAT_IPACKETS:
		igb->stat_tpr += E1000_READ_REG(hw, E1000_TPR);
		*val = igb->stat_tpr;
		break;

	case MAC_STAT_OBYTES:
		/*
		 * The 64-bit register will reset whenever the upper
		 * 32 bits are read. So we need to read the lower
		 * 32 bits first, then read the upper 32 bits.
		 */
		low_val = E1000_READ_REG(hw, E1000_TOTL);
		high_val = E1000_READ_REG(hw, E1000_TOTH);
		igb->stat_tot += (uint64_t)high_val << 32 | (uint64_t)low_val;
		*val = igb->stat_tot;
		break;

	case MAC_STAT_OPACKETS:
		igb->stat_tpt += E1000_READ_REG(hw, E1000_TPT);
		*val = igb->stat_tpt;
		break;

	/* RFC 1643 stats */
	case ETHER_STAT_ALIGN_ERRORS:
		igb->stat_algnerrc += E1000_READ_REG(hw, E1000_ALGNERRC);
		*val = igb->stat_algnerrc;
		break;

	case ETHER_STAT_FCS_ERRORS:
		igb->stat_crcerrs += E1000_READ_REG(hw, E1000_CRCERRS);
		*val = igb->stat_crcerrs;
		break;

	case ETHER_STAT_FIRST_COLLISIONS:
		igb->stat_scc += E1000_READ_REG(hw, E1000_SCC);
		*val = igb->stat_scc;
		break;

	case ETHER_STAT_MULTI_COLLISIONS:
		igb->stat_mcc += E1000_READ_REG(hw, E1000_MCC);
		*val = igb->stat_mcc;
		break;

	case ETHER_STAT_SQE_ERRORS:
		igb->stat_sec += E1000_READ_REG(hw, E1000_SEC);
		*val = igb->stat_sec;
		break;

	case ETHER_STAT_DEFER_XMTS:
		igb->stat_dc += E1000_READ_REG(hw, E1000_DC);
		*val = igb->stat_dc;
		break;

	case ETHER_STAT_TX_LATE_COLLISIONS:
		igb->stat_latecol += E1000_READ_REG(hw, E1000_LATECOL);
		*val = igb->stat_latecol;
		break;

	case ETHER_STAT_EX_COLLISIONS:
		igb->stat_ecol += E1000_READ_REG(hw, E1000_ECOL);
		*val = igb->stat_ecol;
		break;

	case ETHER_STAT_MACXMT_ERRORS:
		/* Transmit errors are reported as excessive collisions */
		igb->stat_ecol += E1000_READ_REG(hw, E1000_ECOL);
		*val = igb->stat_ecol;
		break;

	case ETHER_STAT_CARRIER_ERRORS:
		igb->stat_cexterr += E1000_READ_REG(hw, E1000_CEXTERR);
		*val = igb->stat_cexterr;
		break;

	case ETHER_STAT_TOOLONG_ERRORS:
		igb->stat_roc += E1000_READ_REG(hw, E1000_ROC);
		*val = igb->stat_roc;
		break;

	case ETHER_STAT_MACRCV_ERRORS:
		igb->stat_rxerrc += E1000_READ_REG(hw, E1000_RXERRC);
		*val = igb->stat_rxerrc;
		break;

	/* MII/GMII stats */
	case ETHER_STAT_XCVR_ADDR:
		/* The Internal PHY's MDI address for each MAC is 1 */
		*val = 1;
		break;

	case ETHER_STAT_XCVR_ID:
		/* Combined PHY identifier and revision */
		*val = hw->phy.id | hw->phy.revision;
		break;

	case ETHER_STAT_XCVR_INUSE:
		/* Map current link speed and media type to an XCVR code */
		switch (igb->link_speed) {
		case SPEED_1000:
			*val =
			    (hw->phy.media_type == e1000_media_type_copper) ?
			    XCVR_1000T : XCVR_1000X;
			break;
		case SPEED_100:
			*val =
			    (hw->phy.media_type == e1000_media_type_copper) ?
			    (igb->param_100t4_cap == 1) ?
			    XCVR_100T4 : XCVR_100T2 : XCVR_100X;
			break;
		case SPEED_10:
			*val = XCVR_10;
			break;
		default:
			*val = XCVR_NONE;
			break;
		}
		break;

	case ETHER_STAT_CAP_1000FDX:
		*val = igb->param_1000fdx_cap;
		break;

	case ETHER_STAT_CAP_1000HDX:
		*val = igb->param_1000hdx_cap;
		break;

	case ETHER_STAT_CAP_100FDX:
		*val = igb->param_100fdx_cap;
		break;

	case ETHER_STAT_CAP_100HDX:
		*val = igb->param_100hdx_cap;
		break;

	case ETHER_STAT_CAP_10FDX:
		*val = igb->param_10fdx_cap;
		break;

	case ETHER_STAT_CAP_10HDX:
		*val = igb->param_10hdx_cap;
		break;

	case ETHER_STAT_CAP_ASMPAUSE:
		*val = igb->param_asym_pause_cap;
		break;

	case ETHER_STAT_CAP_PAUSE:
		*val = igb->param_pause_cap;
		break;

	case ETHER_STAT_CAP_AUTONEG:
		*val = igb->param_autoneg_cap;
		break;

	case ETHER_STAT_ADV_CAP_1000FDX:
		*val = igb->param_adv_1000fdx_cap;
		break;

	case ETHER_STAT_ADV_CAP_1000HDX:
		*val = igb->param_adv_1000hdx_cap;
		break;

	case ETHER_STAT_ADV_CAP_100FDX:
		*val = igb->param_adv_100fdx_cap;
		break;

	case ETHER_STAT_ADV_CAP_100HDX:
		*val = igb->param_adv_100hdx_cap;
		break;

	case ETHER_STAT_ADV_CAP_10FDX:
		*val = igb->param_adv_10fdx_cap;
		break;

	case ETHER_STAT_ADV_CAP_10HDX:
		*val = igb->param_adv_10hdx_cap;
		break;

	case ETHER_STAT_ADV_CAP_ASMPAUSE:
		*val = igb->param_adv_asym_pause_cap;
		break;

	case ETHER_STAT_ADV_CAP_PAUSE:
		*val = igb->param_adv_pause_cap;
		break;

	case ETHER_STAT_ADV_CAP_AUTONEG:
		*val = hw->mac.autoneg;
		break;

	case ETHER_STAT_LP_CAP_1000FDX:
		*val = igb->param_lp_1000fdx_cap;
		break;

	case ETHER_STAT_LP_CAP_1000HDX:
		*val = igb->param_lp_1000hdx_cap;
		break;

	case ETHER_STAT_LP_CAP_100FDX:
		*val = igb->param_lp_100fdx_cap;
		break;

	case ETHER_STAT_LP_CAP_100HDX:
		*val = igb->param_lp_100hdx_cap;
		break;

	case ETHER_STAT_LP_CAP_10FDX:
		*val = igb->param_lp_10fdx_cap;
		break;

	case ETHER_STAT_LP_CAP_10HDX:
		*val = igb->param_lp_10hdx_cap;
		break;

	case ETHER_STAT_LP_CAP_ASMPAUSE:
		*val = igb->param_lp_asym_pause_cap;
		break;

	case ETHER_STAT_LP_CAP_PAUSE:
		*val = igb->param_lp_pause_cap;
		break;

	case ETHER_STAT_LP_CAP_AUTONEG:
		*val = igb->param_lp_autoneg_cap;
		break;

	case ETHER_STAT_LINK_ASMPAUSE:
		*val = igb->param_asym_pause_cap;
		break;

	case ETHER_STAT_LINK_PAUSE:
		*val = igb->param_pause_cap;
		break;

	case ETHER_STAT_LINK_AUTONEG:
		*val = hw->mac.autoneg;
		break;

	case ETHER_STAT_LINK_DUPLEX:
		*val = (igb->link_duplex == FULL_DUPLEX) ?
		    LINK_DUPLEX_FULL : LINK_DUPLEX_HALF;
		break;

	case ETHER_STAT_TOOSHORT_ERRORS:
		igb->stat_ruc += E1000_READ_REG(hw, E1000_RUC);
		*val = igb->stat_ruc;
		break;

	case ETHER_STAT_CAP_REMFAULT:
		*val = igb->param_rem_fault;
		break;

	case ETHER_STAT_ADV_REMFAULT:
		*val = igb->param_adv_rem_fault;
		break;

	case ETHER_STAT_LP_REMFAULT:
		*val = igb->param_lp_rem_fault;
		break;

	case ETHER_STAT_JABBER_ERRORS:
		igb->stat_rjc += E1000_READ_REG(hw, E1000_RJC);
		*val = igb->stat_rjc;
		break;

	case ETHER_STAT_CAP_100T4:
		*val = igb->param_100t4_cap;
		break;

	case ETHER_STAT_ADV_CAP_100T4:
		*val = igb->param_adv_100t4_cap;
		break;

	case ETHER_STAT_LP_CAP_100T4:
		*val = igb->param_lp_100t4_cap;
		break;

	default:
		mutex_exit(&igb->gen_lock);
		return (ENOTSUP);
	}

	mutex_exit(&igb->gen_lock);

	/* Verify the register reads above didn't trip an FMA error */
	if (igb_check_acc_handle(igb->osdep.reg_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(igb->dip, DDI_SERVICE_DEGRADED);
		return (EIO);
	}

	return (0);
}
411 
412 /*
413  * Bring the device out of the reset/quiesced state that it
414  * was in when the interface was registered.
415  */
416 int
417 igb_m_start(void *arg)
418 {
419 	igb_t *igb = (igb_t *)arg;
420 
421 	mutex_enter(&igb->gen_lock);
422 
423 	if (igb->igb_state & IGB_SUSPENDED) {
424 		mutex_exit(&igb->gen_lock);
425 		return (ECANCELED);
426 	}
427 
428 	if (igb_start(igb, B_TRUE) != IGB_SUCCESS) {
429 		mutex_exit(&igb->gen_lock);
430 		return (EIO);
431 	}
432 
433 	atomic_or_32(&igb->igb_state, IGB_STARTED);
434 
435 	mutex_exit(&igb->gen_lock);
436 
437 	/*
438 	 * Enable and start the watchdog timer
439 	 */
440 	igb_enable_watchdog_timer(igb);
441 
442 	return (0);
443 }
444 
445 /*
446  * Stop the device and put it in a reset/quiesced state such
447  * that the interface can be unregistered.
448  */
449 void
450 igb_m_stop(void *arg)
451 {
452 	igb_t *igb = (igb_t *)arg;
453 
454 	mutex_enter(&igb->gen_lock);
455 
456 	if (igb->igb_state & IGB_SUSPENDED) {
457 		mutex_exit(&igb->gen_lock);
458 		return;
459 	}
460 
461 	atomic_and_32(&igb->igb_state, ~IGB_STARTED);
462 
463 	igb_stop(igb, B_TRUE);
464 
465 	mutex_exit(&igb->gen_lock);
466 
467 	/*
468 	 * Disable and stop the watchdog timer
469 	 */
470 	igb_disable_watchdog_timer(igb);
471 }
472 
473 /*
474  * Set the promiscuity of the device.
475  */
476 int
477 igb_m_promisc(void *arg, boolean_t on)
478 {
479 	igb_t *igb = (igb_t *)arg;
480 	uint32_t reg_val;
481 
482 	mutex_enter(&igb->gen_lock);
483 
484 	if (igb->igb_state & IGB_SUSPENDED) {
485 		mutex_exit(&igb->gen_lock);
486 		return (ECANCELED);
487 	}
488 
489 	reg_val = E1000_READ_REG(&igb->hw, E1000_RCTL);
490 
491 	if (on)
492 		reg_val |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
493 	else
494 		reg_val &= (~(E1000_RCTL_UPE | E1000_RCTL_MPE));
495 
496 	E1000_WRITE_REG(&igb->hw, E1000_RCTL, reg_val);
497 
498 	mutex_exit(&igb->gen_lock);
499 
500 	if (igb_check_acc_handle(igb->osdep.reg_handle) != DDI_FM_OK) {
501 		ddi_fm_service_impact(igb->dip, DDI_SERVICE_DEGRADED);
502 		return (EIO);
503 	}
504 
505 	return (0);
506 }
507 
508 /*
509  * Add/remove the addresses to/from the set of multicast
510  * addresses for which the device will receive packets.
511  */
512 int
513 igb_m_multicst(void *arg, boolean_t add, const uint8_t *mcst_addr)
514 {
515 	igb_t *igb = (igb_t *)arg;
516 	int result;
517 
518 	mutex_enter(&igb->gen_lock);
519 
520 	if (igb->igb_state & IGB_SUSPENDED) {
521 		mutex_exit(&igb->gen_lock);
522 		return (ECANCELED);
523 	}
524 
525 	result = (add) ? igb_multicst_add(igb, mcst_addr)
526 	    : igb_multicst_remove(igb, mcst_addr);
527 
528 	mutex_exit(&igb->gen_lock);
529 
530 	return (result);
531 }
532 
533 /*
534  * Pass on M_IOCTL messages passed to the DLD, and support
535  * private IOCTLs for debugging and ndd.
536  */
void
igb_m_ioctl(void *arg, queue_t *q, mblk_t *mp)
{
	igb_t *igb = (igb_t *)arg;
	struct iocblk *iocp;
	enum ioc_reply status;

	/* The ioctl control block sits at the head of the M_IOCTL mblk */
	iocp = (struct iocblk *)(uintptr_t)mp->b_rptr;
	iocp->ioc_error = 0;

	/* NAK all ioctls while the device is suspended */
	mutex_enter(&igb->gen_lock);
	if (igb->igb_state & IGB_SUSPENDED) {
		mutex_exit(&igb->gen_lock);
		miocnak(q, mp, 0, EINVAL);
		return;
	}
	mutex_exit(&igb->gen_lock);

	switch (iocp->ioc_cmd) {
	case LB_GET_INFO_SIZE:
	case LB_GET_INFO:
	case LB_GET_MODE:
	case LB_SET_MODE:
		/* Loopback ioctls are handled by the loopback support code */
		status = igb_loopback_ioctl(igb, iocp, mp);
		break;

	default:
		status = IOC_INVAL;
		break;
	}

	/*
	 * Decide how to reply
	 */
	switch (status) {
	default:
	case IOC_INVAL:
		/*
		 * Error, reply with a NAK and EINVAL or the specified error
		 */
		miocnak(q, mp, 0, iocp->ioc_error == 0 ?
		    EINVAL : iocp->ioc_error);
		break;

	case IOC_DONE:
		/*
		 * OK, reply already sent
		 */
		break;

	case IOC_ACK:
		/*
		 * OK, reply with an ACK
		 */
		miocack(q, mp, 0, 0);
		break;

	case IOC_REPLY:
		/*
		 * OK, send prepared reply as ACK or NAK
		 */
		mp->b_datap->db_type = iocp->ioc_error == 0 ?
		    M_IOCACK : M_IOCNAK;
		qreply(q, mp);
		break;
	}
}
604 
605 /*
606  * Add a MAC address to the target RX group.
607  */
/*
 * igb_addmac - install a unicast MAC address into a hardware RAR slot
 * on behalf of the given RX group.
 *
 * Returns 0 on success, ECANCELED if the device is suspended, and
 * ENOSPC if no receive address slot is available.
 */
static int
igb_addmac(void *arg, const uint8_t *mac_addr)
{
	igb_rx_group_t *rx_group = (igb_rx_group_t *)arg;
	igb_t *igb = rx_group->igb;
	struct e1000_hw *hw = &igb->hw;
	int i, slot;

	mutex_enter(&igb->gen_lock);

	if (igb->igb_state & IGB_SUSPENDED) {
		mutex_exit(&igb->gen_lock);
		return (ECANCELED);
	}

	if (igb->unicst_avail == 0) {
		/* no slots available */
		mutex_exit(&igb->gen_lock);
		return (ENOSPC);
	}

	/*
	 * The slots from 0 to igb->num_rx_groups are reserved slots which
	 * are 1 to 1 mapped with group index directly. The other slots are
	 * shared between the all of groups. While adding a MAC address,
	 * it will try to set the reserved slots first, then the shared slots.
	 */
	slot = -1;
	if (igb->unicst_addr[rx_group->index].mac.set == 1) {
		/*
		 * The reserved slot for current group is used, find the free
		 * slots in the shared slots.
		 */
		for (i = igb->num_rx_groups; i < igb->unicst_total; i++) {
			if (igb->unicst_addr[i].mac.set == 0) {
				slot = i;
				break;
			}
		}
	} else
		slot = rx_group->index;

	if (slot == -1) {
		/* no slots available in the shared slots */
		mutex_exit(&igb->gen_lock);
		return (ENOSPC);
	}

	/* Set VMDq according to the mode supported by hardware. */
	e1000_rar_set_vmdq(hw, mac_addr, slot, igb->vmdq_mode, rx_group->index);

	/* Record the address in the soft state and consume the slot */
	bcopy(mac_addr, igb->unicst_addr[slot].mac.addr, ETHERADDRL);
	igb->unicst_addr[slot].mac.group_index = rx_group->index;
	igb->unicst_addr[slot].mac.set = 1;
	igb->unicst_avail--;

	mutex_exit(&igb->gen_lock);

	return (0);
}
668 
669 /*
670  * Remove a MAC address from the specified RX group.
671  */
/*
 * igb_remmac - remove a previously installed unicast MAC address and
 * release its receive address (RAR) slot.
 *
 * Returns 0 on success, ECANCELED if suspended, and EINVAL if the
 * address is not found or its slot is not marked in use.
 */
static int
igb_remmac(void *arg, const uint8_t *mac_addr)
{
	igb_rx_group_t *rx_group = (igb_rx_group_t *)arg;
	igb_t *igb = rx_group->igb;
	struct e1000_hw *hw = &igb->hw;
	int slot;

	mutex_enter(&igb->gen_lock);

	if (igb->igb_state & IGB_SUSPENDED) {
		mutex_exit(&igb->gen_lock);
		return (ECANCELED);
	}

	/* Locate the slot holding this address */
	slot = igb_unicst_find(igb, mac_addr);
	if (slot == -1) {
		mutex_exit(&igb->gen_lock);
		return (EINVAL);
	}

	if (igb->unicst_addr[slot].mac.set == 0) {
		mutex_exit(&igb->gen_lock);
		return (EINVAL);
	}

	/* Clear the MAC address in the slot */
	e1000_rar_clear(hw, slot);
	igb->unicst_addr[slot].mac.set = 0;
	igb->unicst_avail++;

	mutex_exit(&igb->gen_lock);

	return (0);
}
707 
708 /*
709  * Enable interrupt on the specificed rx ring.
710  */
711 int
712 igb_rx_ring_intr_enable(mac_intr_handle_t intrh)
713 {
714 	igb_rx_ring_t *rx_ring = (igb_rx_ring_t *)intrh;
715 	igb_t *igb = rx_ring->igb;
716 	struct e1000_hw *hw = &igb->hw;
717 	uint32_t index = rx_ring->index;
718 
719 	if (igb->intr_type == DDI_INTR_TYPE_MSIX) {
720 		/* Interrupt enabling for MSI-X */
721 		igb->eims_mask |= (E1000_EICR_RX_QUEUE0 << index);
722 		E1000_WRITE_REG(hw, E1000_EIMS, igb->eims_mask);
723 		E1000_WRITE_REG(hw, E1000_EIAC, igb->eims_mask);
724 	} else {
725 		ASSERT(index == 0);
726 		/* Interrupt enabling for MSI and legacy */
727 		igb->ims_mask |= E1000_IMS_RXT0;
728 		E1000_WRITE_REG(hw, E1000_IMS, igb->ims_mask);
729 	}
730 
731 	E1000_WRITE_FLUSH(hw);
732 
733 	return (0);
734 }
735 
736 /*
737  * Disable interrupt on the specificed rx ring.
738  */
739 int
740 igb_rx_ring_intr_disable(mac_intr_handle_t intrh)
741 {
742 	igb_rx_ring_t *rx_ring = (igb_rx_ring_t *)intrh;
743 	igb_t *igb = rx_ring->igb;
744 	struct e1000_hw *hw = &igb->hw;
745 	uint32_t index = rx_ring->index;
746 
747 	if (igb->intr_type == DDI_INTR_TYPE_MSIX) {
748 		/* Interrupt disabling for MSI-X */
749 		igb->eims_mask &= ~(E1000_EICR_RX_QUEUE0 << index);
750 		E1000_WRITE_REG(hw, E1000_EIMC,
751 		    (E1000_EICR_RX_QUEUE0 << index));
752 		E1000_WRITE_REG(hw, E1000_EIAC, igb->eims_mask);
753 	} else {
754 		ASSERT(index == 0);
755 		/* Interrupt disabling for MSI and legacy */
756 		igb->ims_mask &= ~E1000_IMS_RXT0;
757 		E1000_WRITE_REG(hw, E1000_IMC, E1000_IMS_RXT0);
758 	}
759 
760 	E1000_WRITE_FLUSH(hw);
761 
762 	return (0);
763 }
764 
765 /*
766  * Get the global ring index by a ring index within a group.
767  */
768 int
769 igb_get_rx_ring_index(igb_t *igb, int gindex, int rindex)
770 {
771 	igb_rx_ring_t *rx_ring;
772 	int i;
773 
774 	for (i = 0; i < igb->num_rx_rings; i++) {
775 		rx_ring = &igb->rx_rings[i];
776 		if (rx_ring->group_index == gindex)
777 			rindex--;
778 		if (rindex < 0)
779 			return (i);
780 	}
781 
782 	return (-1);
783 }
784 
785 static int
786 igb_ring_start(mac_ring_driver_t rh, uint64_t mr_gen_num)
787 {
788 	igb_rx_ring_t *rx_ring = (igb_rx_ring_t *)rh;
789 
790 	mutex_enter(&rx_ring->rx_lock);
791 	rx_ring->ring_gen_num = mr_gen_num;
792 	mutex_exit(&rx_ring->rx_lock);
793 	return (0);
794 }
795 
796 /*
797  * Callback funtion for MAC layer to register all rings.
798  */
799 /* ARGSUSED */
void
igb_fill_ring(void *arg, mac_ring_type_t rtype, const int rg_index,
    const int index, mac_ring_info_t *infop, mac_ring_handle_t rh)
{
	igb_t *igb = (igb_t *)arg;
	mac_intr_t *mintr = &infop->mri_intr;

	switch (rtype) {
	case MAC_RING_TYPE_RX: {
		igb_rx_ring_t *rx_ring;
		int global_index;

		/*
		 * 'index' is the ring index within the group.
		 * We need the global ring index by searching in group.
		 */
		global_index = igb_get_rx_ring_index(igb, rg_index, index);

		ASSERT(global_index >= 0);

		rx_ring = &igb->rx_rings[global_index];
		rx_ring->ring_handle = rh;

		/* Register the RX ring's driver handle and callbacks */
		infop->mri_driver = (mac_ring_driver_t)rx_ring;
		infop->mri_start = igb_ring_start;
		infop->mri_stop = NULL;
		infop->mri_poll = (mac_ring_poll_t)igb_rx_ring_poll;
		infop->mri_stat = igb_rx_ring_stat;

		/* Per-ring interrupt enable/disable hooks */
		mintr->mi_handle = (mac_intr_handle_t)rx_ring;
		mintr->mi_enable = igb_rx_ring_intr_enable;
		mintr->mi_disable = igb_rx_ring_intr_disable;
		if (igb->intr_type & (DDI_INTR_TYPE_MSIX | DDI_INTR_TYPE_MSI)) {
			mintr->mi_ddi_handle =
			    igb->htable[rx_ring->intr_vector];
		}
		break;
	}
	case MAC_RING_TYPE_TX: {
		ASSERT(index < igb->num_tx_rings);

		/* TX rings are not grouped; 'index' is already global */
		igb_tx_ring_t *tx_ring = &igb->tx_rings[index];
		tx_ring->ring_handle = rh;

		infop->mri_driver = (mac_ring_driver_t)tx_ring;
		infop->mri_start = NULL;
		infop->mri_stop = NULL;
		infop->mri_tx = igb_tx_ring_send;
		infop->mri_stat = igb_tx_ring_stat;
		if (igb->intr_type & (DDI_INTR_TYPE_MSIX | DDI_INTR_TYPE_MSI)) {
			mintr->mi_ddi_handle =
			    igb->htable[tx_ring->intr_vector];
		}
		break;
	}
	default:
		break;
	}
}
859 
/*
 * igb_fill_group - callback for the MAC layer to register ring groups.
 *
 * Only RX groups exist on this driver; each group exposes the
 * addmac/remmac callbacks and an equal share of the RX rings.
 */
void
igb_fill_group(void *arg, mac_ring_type_t rtype, const int index,
    mac_group_info_t *infop, mac_group_handle_t gh)
{
	igb_t *igb = (igb_t *)arg;

	switch (rtype) {
	case MAC_RING_TYPE_RX: {
		igb_rx_group_t *rx_group;

		ASSERT((index >= 0) && (index < igb->num_rx_groups));

		rx_group = &igb->rx_groups[index];
		rx_group->group_handle = gh;

		infop->mgi_driver = (mac_group_driver_t)rx_group;
		infop->mgi_start = NULL;
		infop->mgi_stop = NULL;
		infop->mgi_addmac = igb_addmac;
		infop->mgi_remmac = igb_remmac;
		/* Rings are divided evenly among the RX groups */
		infop->mgi_count = (igb->num_rx_rings / igb->num_rx_groups);

		break;
	}
	case MAC_RING_TYPE_TX:
		/* No TX groups */
		break;
	default:
		break;
	}
}
890 
891 static int
892 igb_led_set(void *arg, mac_led_mode_t mode, uint_t flags)
893 {
894 	igb_t *igb = arg;
895 
896 	if (flags != 0)
897 		return (EINVAL);
898 
899 	if (mode != MAC_LED_DEFAULT &&
900 	    mode != MAC_LED_IDENT &&
901 	    mode != MAC_LED_OFF &&
902 	    mode != MAC_LED_ON)
903 		return (ENOTSUP);
904 
905 	if (mode != MAC_LED_DEFAULT && !igb->igb_led_setup) {
906 		if (e1000_setup_led(&igb->hw) != E1000_SUCCESS)
907 			return (EIO);
908 
909 		igb->igb_led_setup = B_TRUE;
910 	}
911 
912 	switch (mode) {
913 	case MAC_LED_DEFAULT:
914 		if (igb->igb_led_setup) {
915 			if (e1000_cleanup_led(&igb->hw) != E1000_SUCCESS)
916 				return (EIO);
917 			igb->igb_led_setup = B_FALSE;
918 		}
919 		break;
920 	case MAC_LED_IDENT:
921 		if (e1000_blink_led(&igb->hw) != E1000_SUCCESS)
922 			return (EIO);
923 		break;
924 	case MAC_LED_OFF:
925 		if (e1000_led_off(&igb->hw) != E1000_SUCCESS)
926 			return (EIO);
927 		break;
928 	case MAC_LED_ON:
929 		if (e1000_led_on(&igb->hw) != E1000_SUCCESS)
930 			return (EIO);
931 		break;
932 	default:
933 		return (ENOTSUP);
934 	}
935 
936 	return (0);
937 }
938 
939 /*
940  * Obtain the MAC's capabilities and associated data from
941  * the driver.
942  */
boolean_t
igb_m_getcapab(void *arg, mac_capab_t cap, void *cap_data)
{
	igb_t *igb = (igb_t *)arg;

	switch (cap) {
	case MAC_CAPAB_HCKSUM: {
		uint32_t *tx_hcksum_flags = cap_data;

		/*
		 * We advertise our capabilities only if tx hcksum offload is
		 * enabled.  On receive, the stack will accept checksummed
		 * packets anyway, even if we haven't said we can deliver
		 * them.
		 */
		if (!igb->tx_hcksum_enable)
			return (B_FALSE);

		*tx_hcksum_flags = HCKSUM_INET_PARTIAL | HCKSUM_IPHDRCKSUM;
		break;
	}
	case MAC_CAPAB_LSO: {
		mac_capab_lso_t *cap_lso = cap_data;

		/* LSO is only advertised when enabled by configuration */
		if (igb->lso_enable) {
			cap_lso->lso_flags = LSO_TX_BASIC_TCP_IPV4;
			cap_lso->lso_basic_tcp_ipv4.lso_max = IGB_LSO_MAXLEN;
			break;
		} else {
			return (B_FALSE);
		}
	}
	case MAC_CAPAB_RINGS: {
		mac_capab_rings_t *cap_rings = cap_data;

		/* Describe our RX/TX ring and group layout to the stack */
		switch (cap_rings->mr_type) {
		case MAC_RING_TYPE_RX:
			cap_rings->mr_group_type = MAC_GROUP_TYPE_STATIC;
			cap_rings->mr_rnum = igb->num_rx_rings;
			cap_rings->mr_gnum = igb->num_rx_groups;
			cap_rings->mr_rget = igb_fill_ring;
			cap_rings->mr_gget = igb_fill_group;
			cap_rings->mr_gaddring = NULL;
			cap_rings->mr_gremring = NULL;

			break;
		case MAC_RING_TYPE_TX:
			cap_rings->mr_group_type = MAC_GROUP_TYPE_STATIC;
			cap_rings->mr_rnum = igb->num_tx_rings;
			cap_rings->mr_gnum = 0;
			cap_rings->mr_rget = igb_fill_ring;
			cap_rings->mr_gget = NULL;

			break;
		default:
			break;
		}
		break;
	}

	case MAC_CAPAB_LED: {
		mac_capab_led_t *cap_led = cap_data;

		/*
		 * Advertise only the LED modes the shared code actually
		 * implements for this MAC (non-NULL, non-stub ops).
		 */
		cap_led->mcl_flags = 0;
		cap_led->mcl_modes = MAC_LED_DEFAULT;
		if (igb->hw.mac.ops.blink_led != NULL &&
		    igb->hw.mac.ops.blink_led != e1000_null_ops_generic) {
			cap_led->mcl_modes |= MAC_LED_IDENT;
		}
		if (igb->hw.mac.ops.led_off != NULL &&
		    igb->hw.mac.ops.led_off != e1000_null_ops_generic) {
			cap_led->mcl_modes |= MAC_LED_OFF;
		}
		if (igb->hw.mac.ops.led_on != NULL &&
		    igb->hw.mac.ops.led_on != e1000_null_ops_generic) {
			cap_led->mcl_modes |= MAC_LED_ON;
		}
		cap_led->mcl_set = igb_led_set;
		break;
	}

	default:
		return (B_FALSE);
	}
	return (B_TRUE);
}
1029 
/*
 * igb_m_setprop - GLDv3 mc_setprop(9E) entry point.
 *
 * Sets a driver property under gen_lock.  The speed/duplex/autoneg
 * cases fall into the shared 'setup_link' label, which re-runs link
 * setup after the advertised capabilities change.  Returns ECANCELED
 * if suspended, EBUSY if the property is locked by loopback mode or
 * (for MTU) the device is started, ENOTSUP for read-only or
 * unsupported properties, EINVAL for bad values, and EIO if the FMA
 * access check fails.
 */
int
igb_m_setprop(void *arg, const char *pr_name, mac_prop_id_t pr_num,
    uint_t pr_valsize, const void *pr_val)
{
	igb_t *igb = (igb_t *)arg;
	struct e1000_hw *hw = &igb->hw;
	int err = 0;
	uint32_t flow_control;
	uint32_t cur_mtu, new_mtu;
	uint32_t rx_size;
	uint32_t tx_size;

	mutex_enter(&igb->gen_lock);
	if (igb->igb_state & IGB_SUSPENDED) {
		mutex_exit(&igb->gen_lock);
		return (ECANCELED);
	}

	if (igb->loopback_mode != IGB_LB_NONE && igb_param_locked(pr_num)) {
		/*
		 * All en_* parameters are locked (read-only)
		 * while the device is in any sort of loopback mode.
		 */
		mutex_exit(&igb->gen_lock);
		return (EBUSY);
	}

	switch (pr_num) {
	case MAC_PROP_EN_1000FDX_CAP:
		/* read/write on copper, read-only on serdes */
		if (hw->phy.media_type != e1000_media_type_copper) {
			err = ENOTSUP;
			break;
		}
		igb->param_en_1000fdx_cap = *(uint8_t *)pr_val;
		igb->param_adv_1000fdx_cap = *(uint8_t *)pr_val;
		goto setup_link;
	case MAC_PROP_EN_100FDX_CAP:
		if (hw->phy.media_type != e1000_media_type_copper) {
			err = ENOTSUP;
			break;
		}
		igb->param_en_100fdx_cap = *(uint8_t *)pr_val;
		igb->param_adv_100fdx_cap = *(uint8_t *)pr_val;
		goto setup_link;
	case MAC_PROP_EN_100HDX_CAP:
		if (hw->phy.media_type != e1000_media_type_copper) {
			err = ENOTSUP;
			break;
		}
		igb->param_en_100hdx_cap = *(uint8_t *)pr_val;
		igb->param_adv_100hdx_cap = *(uint8_t *)pr_val;
		goto setup_link;
	case MAC_PROP_EN_10FDX_CAP:
		if (hw->phy.media_type != e1000_media_type_copper) {
			err = ENOTSUP;
			break;
		}
		igb->param_en_10fdx_cap = *(uint8_t *)pr_val;
		igb->param_adv_10fdx_cap = *(uint8_t *)pr_val;
		goto setup_link;
	case MAC_PROP_EN_10HDX_CAP:
		if (hw->phy.media_type != e1000_media_type_copper) {
			err = ENOTSUP;
			break;
		}
		igb->param_en_10hdx_cap = *(uint8_t *)pr_val;
		igb->param_adv_10hdx_cap = *(uint8_t *)pr_val;
		goto setup_link;
	case MAC_PROP_AUTONEG:
		if (hw->phy.media_type != e1000_media_type_copper) {
			err = ENOTSUP;
			break;
		}
		igb->param_adv_autoneg_cap = *(uint8_t *)pr_val;
		goto setup_link;
	case MAC_PROP_FLOWCTRL:
		/* Map the MAC framework's value onto the e1000 fc mode */
		bcopy(pr_val, &flow_control, sizeof (flow_control));

		switch (flow_control) {
		default:
			err = EINVAL;
			break;
		case LINK_FLOWCTRL_NONE:
			hw->fc.requested_mode = e1000_fc_none;
			break;
		case LINK_FLOWCTRL_RX:
			hw->fc.requested_mode = e1000_fc_rx_pause;
			break;
		case LINK_FLOWCTRL_TX:
			hw->fc.requested_mode = e1000_fc_tx_pause;
			break;
		case LINK_FLOWCTRL_BI:
			hw->fc.requested_mode = e1000_fc_full;
			break;
		}
setup_link:
		/* Shared tail for all link-parameter cases above */
		if (err == 0) {
			if (igb_setup_link(igb, B_TRUE) != IGB_SUCCESS)
				err = EINVAL;
		}
		break;
	case MAC_PROP_ADV_1000FDX_CAP:
	case MAC_PROP_ADV_1000HDX_CAP:
	case MAC_PROP_ADV_100T4_CAP:
	case MAC_PROP_ADV_100FDX_CAP:
	case MAC_PROP_ADV_100HDX_CAP:
	case MAC_PROP_ADV_10FDX_CAP:
	case MAC_PROP_ADV_10HDX_CAP:
	case MAC_PROP_EN_1000HDX_CAP:
	case MAC_PROP_EN_100T4_CAP:
	case MAC_PROP_STATUS:
	case MAC_PROP_SPEED:
	case MAC_PROP_DUPLEX:
		err = ENOTSUP; /* read-only prop. Can't set this. */
		break;
	case MAC_PROP_MTU:
		/* adapter must be stopped for an MTU change */
		if (igb->igb_state & IGB_STARTED) {
			err = EBUSY;
			break;
		}

		cur_mtu = igb->default_mtu;
		bcopy(pr_val, &new_mtu, sizeof (new_mtu));
		if (new_mtu == cur_mtu) {
			err = 0;
			break;
		}

		if (new_mtu < MIN_MTU || new_mtu > MAX_MTU) {
			err = EINVAL;
			break;
		}

		err = mac_maxsdu_update(igb->mac_hdl, new_mtu);
		if (err == 0) {
			/* Frame size includes the VLAN header and FCS */
			igb->default_mtu = new_mtu;
			igb->max_frame_size = igb->default_mtu +
			    sizeof (struct ether_vlan_header) + ETHERFCSL;

			/*
			 * Set rx buffer size
			 */
			rx_size = igb->max_frame_size + IPHDR_ALIGN_ROOM;
			igb->rx_buf_size = ((rx_size >> 10) + ((rx_size &
			    (((uint32_t)1 << 10) - 1)) > 0 ? 1 : 0)) << 10;

			/*
			 * Set tx buffer size
			 */
			tx_size = igb->max_frame_size;
			igb->tx_buf_size = ((tx_size >> 10) + ((tx_size &
			    (((uint32_t)1 << 10) - 1)) > 0 ? 1 : 0)) << 10;
		}
		break;
	case MAC_PROP_PRIVATE:
		err = igb_set_priv_prop(igb, pr_name, pr_valsize, pr_val);
		break;
	default:
		err = ENOTSUP;
		break;
	}

	mutex_exit(&igb->gen_lock);

	if (igb_check_acc_handle(igb->osdep.reg_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(igb->dip, DDI_SERVICE_DEGRADED);
		return (EIO);
	}

	return (err);
}
1203 
1204 int
1205 igb_m_getprop(void *arg, const char *pr_name, mac_prop_id_t pr_num,
1206     uint_t pr_valsize, void *pr_val)
1207 {
1208 	igb_t *igb = (igb_t *)arg;
1209 	struct e1000_hw *hw = &igb->hw;
1210 	int err = 0;
1211 	uint32_t flow_control;
1212 	uint64_t tmp = 0;
1213 
1214 	switch (pr_num) {
1215 	case MAC_PROP_DUPLEX:
1216 		ASSERT(pr_valsize >= sizeof (link_duplex_t));
1217 		bcopy(&igb->link_duplex, pr_val, sizeof (link_duplex_t));
1218 		break;
1219 	case MAC_PROP_SPEED:
1220 		ASSERT(pr_valsize >= sizeof (uint64_t));
1221 		tmp = igb->link_speed * 1000000ull;
1222 		bcopy(&tmp, pr_val, sizeof (tmp));
1223 		break;
1224 	case MAC_PROP_AUTONEG:
1225 		ASSERT(pr_valsize >= sizeof (uint8_t));
1226 		*(uint8_t *)pr_val = igb->param_adv_autoneg_cap;
1227 		break;
1228 	case MAC_PROP_FLOWCTRL:
1229 		ASSERT(pr_valsize >= sizeof (uint32_t));
1230 		switch (hw->fc.requested_mode) {
1231 			case e1000_fc_none:
1232 				flow_control = LINK_FLOWCTRL_NONE;
1233 				break;
1234 			case e1000_fc_rx_pause:
1235 				flow_control = LINK_FLOWCTRL_RX;
1236 				break;
1237 			case e1000_fc_tx_pause:
1238 				flow_control = LINK_FLOWCTRL_TX;
1239 				break;
1240 			case e1000_fc_full:
1241 				flow_control = LINK_FLOWCTRL_BI;
1242 				break;
1243 		}
1244 		bcopy(&flow_control, pr_val, sizeof (flow_control));
1245 		break;
1246 	case MAC_PROP_ADV_1000FDX_CAP:
1247 		*(uint8_t *)pr_val = igb->param_adv_1000fdx_cap;
1248 		break;
1249 	case MAC_PROP_EN_1000FDX_CAP:
1250 		*(uint8_t *)pr_val = igb->param_en_1000fdx_cap;
1251 		break;
1252 	case MAC_PROP_ADV_1000HDX_CAP:
1253 		*(uint8_t *)pr_val = igb->param_adv_1000hdx_cap;
1254 		break;
1255 	case MAC_PROP_EN_1000HDX_CAP:
1256 		*(uint8_t *)pr_val = igb->param_en_1000hdx_cap;
1257 		break;
1258 	case MAC_PROP_ADV_100T4_CAP:
1259 		*(uint8_t *)pr_val = igb->param_adv_100t4_cap;
1260 		break;
1261 	case MAC_PROP_EN_100T4_CAP:
1262 		*(uint8_t *)pr_val = igb->param_en_100t4_cap;
1263 		break;
1264 	case MAC_PROP_ADV_100FDX_CAP:
1265 		*(uint8_t *)pr_val = igb->param_adv_100fdx_cap;
1266 		break;
1267 	case MAC_PROP_EN_100FDX_CAP:
1268 		*(uint8_t *)pr_val = igb->param_en_100fdx_cap;
1269 		break;
1270 	case MAC_PROP_ADV_100HDX_CAP:
1271 		*(uint8_t *)pr_val = igb->param_adv_100hdx_cap;
1272 		break;
1273 	case MAC_PROP_EN_100HDX_CAP:
1274 		*(uint8_t *)pr_val = igb->param_en_100hdx_cap;
1275 		break;
1276 	case MAC_PROP_ADV_10FDX_CAP:
1277 		*(uint8_t *)pr_val = igb->param_adv_10fdx_cap;
1278 		break;
1279 	case MAC_PROP_EN_10FDX_CAP:
1280 		*(uint8_t *)pr_val = igb->param_en_10fdx_cap;
1281 		break;
1282 	case MAC_PROP_ADV_10HDX_CAP:
1283 		*(uint8_t *)pr_val = igb->param_adv_10hdx_cap;
1284 		break;
1285 	case MAC_PROP_EN_10HDX_CAP:
1286 		*(uint8_t *)pr_val = igb->param_en_10hdx_cap;
1287 		break;
1288 	case MAC_PROP_PRIVATE:
1289 		err = igb_get_priv_prop(igb, pr_name, pr_valsize, pr_val);
1290 		break;
1291 	default:
1292 		err = ENOTSUP;
1293 		break;
1294 	}
1295 	return (err);
1296 }
1297 
1298 void
1299 igb_m_propinfo(void *arg, const char *pr_name, mac_prop_id_t pr_num,
1300     mac_prop_info_handle_t prh)
1301 {
1302 	igb_t *igb = (igb_t *)arg;
1303 	struct e1000_hw *hw = &igb->hw;
1304 	uint16_t phy_status, phy_ext_status;
1305 
1306 	switch (pr_num) {
1307 	case MAC_PROP_DUPLEX:
1308 	case MAC_PROP_SPEED:
1309 	case MAC_PROP_ADV_1000FDX_CAP:
1310 	case MAC_PROP_ADV_1000HDX_CAP:
1311 	case MAC_PROP_EN_1000HDX_CAP:
1312 	case MAC_PROP_ADV_100T4_CAP:
1313 	case MAC_PROP_EN_100T4_CAP:
1314 		mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
1315 		break;
1316 
1317 	case MAC_PROP_EN_1000FDX_CAP:
1318 		if (hw->phy.media_type != e1000_media_type_copper) {
1319 			mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
1320 		} else {
1321 			(void) e1000_read_phy_reg(hw, PHY_EXT_STATUS,
1322 			    &phy_ext_status);
1323 			mac_prop_info_set_default_uint8(prh,
1324 			    ((phy_ext_status & IEEE_ESR_1000T_FD_CAPS) ||
1325 			    (phy_ext_status & IEEE_ESR_1000X_FD_CAPS)) ? 1 : 0);
1326 		}
1327 		break;
1328 
1329 	case MAC_PROP_ADV_100FDX_CAP:
1330 	case MAC_PROP_EN_100FDX_CAP:
1331 		if (hw->phy.media_type != e1000_media_type_copper) {
1332 			mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
1333 		} else {
1334 			(void) e1000_read_phy_reg(hw, PHY_STATUS, &phy_status);
1335 			mac_prop_info_set_default_uint8(prh,
1336 			    ((phy_status & MII_SR_100X_FD_CAPS) ||
1337 			    (phy_status & MII_SR_100T2_FD_CAPS)) ? 1 : 0);
1338 		}
1339 		break;
1340 
1341 	case MAC_PROP_ADV_100HDX_CAP:
1342 	case MAC_PROP_EN_100HDX_CAP:
1343 		if (hw->phy.media_type != e1000_media_type_copper) {
1344 			mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
1345 		} else {
1346 			(void) e1000_read_phy_reg(hw, PHY_STATUS, &phy_status);
1347 			mac_prop_info_set_default_uint8(prh,
1348 			    ((phy_status & MII_SR_100X_HD_CAPS) ||
1349 			    (phy_status & MII_SR_100T2_HD_CAPS)) ? 1 : 0);
1350 		}
1351 		break;
1352 
1353 	case MAC_PROP_ADV_10FDX_CAP:
1354 	case MAC_PROP_EN_10FDX_CAP:
1355 		if (hw->phy.media_type != e1000_media_type_copper) {
1356 			mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
1357 		} else {
1358 			(void) e1000_read_phy_reg(hw, PHY_STATUS, &phy_status);
1359 			mac_prop_info_set_default_uint8(prh,
1360 			    (phy_status & MII_SR_10T_FD_CAPS) ? 1 : 0);
1361 		}
1362 		break;
1363 
1364 	case MAC_PROP_ADV_10HDX_CAP:
1365 	case MAC_PROP_EN_10HDX_CAP:
1366 		if (hw->phy.media_type != e1000_media_type_copper) {
1367 			mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
1368 		} else {
1369 			(void) e1000_read_phy_reg(hw, PHY_STATUS, &phy_status);
1370 			mac_prop_info_set_default_uint8(prh,
1371 			    (phy_status & MII_SR_10T_HD_CAPS) ? 1 : 0);
1372 		}
1373 		break;
1374 
1375 	case MAC_PROP_AUTONEG:
1376 		if (hw->phy.media_type != e1000_media_type_copper) {
1377 			mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
1378 		} else {
1379 			(void) e1000_read_phy_reg(hw, PHY_STATUS, &phy_status);
1380 			mac_prop_info_set_default_uint8(prh,
1381 			    (phy_status & MII_SR_AUTONEG_CAPS) ? 1 : 0);
1382 		}
1383 		break;
1384 
1385 	case MAC_PROP_FLOWCTRL:
1386 		mac_prop_info_set_default_link_flowctrl(prh, LINK_FLOWCTRL_BI);
1387 		break;
1388 
1389 	case MAC_PROP_MTU:
1390 		mac_prop_info_set_range_uint32(prh, MIN_MTU, MAX_MTU);
1391 		break;
1392 
1393 	case MAC_PROP_PRIVATE:
1394 		igb_priv_prop_info(igb, pr_name, prh);
1395 		break;
1396 	}
1397 
1398 }
1399 
1400 boolean_t
1401 igb_param_locked(mac_prop_id_t pr_num)
1402 {
1403 	/*
1404 	 * All en_* parameters are locked (read-only) while
1405 	 * the device is in any sort of loopback mode ...
1406 	 */
1407 	switch (pr_num) {
1408 		case MAC_PROP_EN_1000FDX_CAP:
1409 		case MAC_PROP_EN_1000HDX_CAP:
1410 		case MAC_PROP_EN_100T4_CAP:
1411 		case MAC_PROP_EN_100FDX_CAP:
1412 		case MAC_PROP_EN_100HDX_CAP:
1413 		case MAC_PROP_EN_10FDX_CAP:
1414 		case MAC_PROP_EN_10HDX_CAP:
1415 		case MAC_PROP_AUTONEG:
1416 		case MAC_PROP_FLOWCTRL:
1417 			return (B_TRUE);
1418 	}
1419 	return (B_FALSE);
1420 }
1421 
/*
 * igb_set_priv_prop - set a driver-private ("_"-prefixed) property.
 *
 * Called from igb_m_setprop() with igb->gen_lock held.  The string value
 * in pr_val is parsed with ddi_strtol() and range-checked against the
 * per-property MIN/MAX limits before being stored.  Returns 0 on success,
 * EINVAL on a missing or out-of-range value, ENOTSUP for an unknown
 * property name, and EIO/ENXIO for EEE hardware failures.
 */
/* ARGSUSED */
int
igb_set_priv_prop(igb_t *igb, const char *pr_name,
    uint_t pr_valsize, const void *pr_val)
{
	int err = 0;
	long result;
	struct e1000_hw *hw = &igb->hw;
	int i;

	/* Energy Efficient Ethernet: 0 = disable, 1 = enable. */
	if (strcmp(pr_name, "_eee_support") == 0) {
		if (pr_val == NULL)
			return (EINVAL);
		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
		switch (result) {
		case 0:
		case 1:
			/*
			 * For now, only supported on I350/I354.
			 * Add new mac.type values (or use < instead)
			 * as new cards offer up EEE.
			 */
			switch (hw->mac.type) {
			case e1000_i350:
				/* Must set this prior to the set call. */
				hw->dev_spec._82575.eee_disable = !result;
				if (e1000_set_eee_i350(hw, result,
				    result) != E1000_SUCCESS)
					err = EIO;
				break;
			case e1000_i354:
				/* Must set this prior to the set call. */
				hw->dev_spec._82575.eee_disable = !result;
				if (e1000_set_eee_i354(hw, result,
				    result) != E1000_SUCCESS)
					err = EIO;
				break;
			default:
				/* No EEE support on this mac type. */
				return (ENXIO);
			}
			break;
		default:
			/* Only 0 and 1 are meaningful values. */
			err = EINVAL;
			/* FALLTHRU */
		}
		return (err);
	}
	/* Packets at or under this size are copied, not bound, on tx. */
	if (strcmp(pr_name, "_tx_copy_thresh") == 0) {
		if (pr_val == NULL) {
			err = EINVAL;
			return (err);
		}
		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
		if (result < MIN_TX_COPY_THRESHOLD ||
		    result > MAX_TX_COPY_THRESHOLD)
			err = EINVAL;
		else {
			igb->tx_copy_thresh = (uint32_t)result;
		}
		return (err);
	}
	/* Free-descriptor level that triggers tx descriptor recycling. */
	if (strcmp(pr_name, "_tx_recycle_thresh") == 0) {
		if (pr_val == NULL) {
			err = EINVAL;
			return (err);
		}
		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
		if (result < MIN_TX_RECYCLE_THRESHOLD ||
		    result > MAX_TX_RECYCLE_THRESHOLD)
			err = EINVAL;
		else {
			igb->tx_recycle_thresh = (uint32_t)result;
		}
		return (err);
	}
	/* Free-descriptor level below which the tx ring is overloaded. */
	if (strcmp(pr_name, "_tx_overload_thresh") == 0) {
		if (pr_val == NULL) {
			err = EINVAL;
			return (err);
		}
		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
		if (result < MIN_TX_OVERLOAD_THRESHOLD ||
		    result > MAX_TX_OVERLOAD_THRESHOLD)
			err = EINVAL;
		else {
			igb->tx_overload_thresh = (uint32_t)result;
		}
		return (err);
	}
	/*
	 * Free-descriptor level at which a stalled tx ring is rescheduled;
	 * also bounded by the configured tx ring size.
	 */
	if (strcmp(pr_name, "_tx_resched_thresh") == 0) {
		if (pr_val == NULL) {
			err = EINVAL;
			return (err);
		}
		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
		if (result < MIN_TX_RESCHED_THRESHOLD ||
		    result > MAX_TX_RESCHED_THRESHOLD ||
		    result > igb->tx_ring_size)
			err = EINVAL;
		else {
			igb->tx_resched_thresh = (uint32_t)result;
		}
		return (err);
	}
	/* Packets at or under this size are copied, not loaned, on rx. */
	if (strcmp(pr_name, "_rx_copy_thresh") == 0) {
		if (pr_val == NULL) {
			err = EINVAL;
			return (err);
		}
		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
		if (result < MIN_RX_COPY_THRESHOLD ||
		    result > MAX_RX_COPY_THRESHOLD)
			err = EINVAL;
		else {
			igb->rx_copy_thresh = (uint32_t)result;
		}
		return (err);
	}
	/* Maximum number of packets handled per rx interrupt. */
	if (strcmp(pr_name, "_rx_limit_per_intr") == 0) {
		if (pr_val == NULL) {
			err = EINVAL;
			return (err);
		}
		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
		if (result < MIN_RX_LIMIT_PER_INTR ||
		    result > MAX_RX_LIMIT_PER_INTR)
			err = EINVAL;
		else {
			igb->rx_limit_per_intr = (uint32_t)result;
		}
		return (err);
	}
	/*
	 * Interrupt throttling rate: stored for every EITR slot and
	 * written to the hardware EITR register of each active vector.
	 */
	if (strcmp(pr_name, "_intr_throttling") == 0) {
		if (pr_val == NULL) {
			err = EINVAL;
			return (err);
		}
		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);

		if (result < igb->capab->min_intr_throttle ||
		    result > igb->capab->max_intr_throttle)
			err = EINVAL;
		else {
			igb->intr_throttling[0] = (uint32_t)result;

			for (i = 0; i < MAX_NUM_EITR; i++)
				igb->intr_throttling[i] =
				    igb->intr_throttling[0];

			/* Set interrupt throttling rate */
			for (i = 0; i < igb->intr_cnt; i++)
				E1000_WRITE_REG(hw, E1000_EITR(i),
				    igb->intr_throttling[i]);
		}
		return (err);
	}
	/* Unrecognized private property name. */
	return (ENOTSUP);
}
1580 
1581 int
1582 igb_get_priv_prop(igb_t *igb, const char *pr_name, uint_t pr_valsize,
1583     void *pr_val)
1584 {
1585 	int value;
1586 
1587 	if (strcmp(pr_name, "_adv_pause_cap") == 0) {
1588 		value = igb->param_adv_pause_cap;
1589 	} else if (strcmp(pr_name, "_adv_asym_pause_cap") == 0) {
1590 		value = igb->param_adv_asym_pause_cap;
1591 	} else if (strcmp(pr_name, "_eee_support") == 0) {
1592 		/*
1593 		 * For now, only supported on I350.  Add new mac.type values
1594 		 * (or use < instead) as new cards offer up EEE.
1595 		 */
1596 		switch (igb->hw.mac.type) {
1597 		case e1000_i350:
1598 		case e1000_i354:
1599 			value = !(igb->hw.dev_spec._82575.eee_disable);
1600 			break;
1601 		default:
1602 			value = 0;
1603 		}
1604 	} else if (strcmp(pr_name, "_tx_copy_thresh") == 0) {
1605 		value = igb->tx_copy_thresh;
1606 	} else if (strcmp(pr_name, "_tx_recycle_thresh") == 0) {
1607 		value = igb->tx_recycle_thresh;
1608 	} else if (strcmp(pr_name, "_tx_overload_thresh") == 0) {
1609 		value = igb->tx_overload_thresh;
1610 	} else if (strcmp(pr_name, "_tx_resched_thresh") == 0) {
1611 		value = igb->tx_resched_thresh;
1612 	} else if (strcmp(pr_name, "_rx_copy_thresh") == 0) {
1613 		value = igb->rx_copy_thresh;
1614 	} else if (strcmp(pr_name, "_rx_limit_per_intr") == 0) {
1615 		value = igb->rx_limit_per_intr;
1616 	} else if (strcmp(pr_name, "_intr_throttling") == 0) {
1617 		value = igb->intr_throttling[0];
1618 	} else {
1619 		return (ENOTSUP);
1620 	}
1621 
1622 	(void) snprintf(pr_val, pr_valsize, "%d", value);
1623 	return (0);
1624 }
1625 
1626 void
1627 igb_priv_prop_info(igb_t *igb, const char *pr_name, mac_prop_info_handle_t prh)
1628 {
1629 	char valstr[64];
1630 	int value;
1631 
1632 	if (strcmp(pr_name, "_adv_pause_cap") == 0 ||
1633 	    strcmp(pr_name, "_adv_asym_pause_cap") == 0) {
1634 		mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
1635 		return;
1636 	} else if (strcmp(pr_name, "_tx_copy_thresh") == 0) {
1637 		value = DEFAULT_TX_COPY_THRESHOLD;
1638 	} else if (strcmp(pr_name, "_tx_recycle_thresh") == 0) {
1639 		value = DEFAULT_TX_RECYCLE_THRESHOLD;
1640 	} else if (strcmp(pr_name, "_tx_overload_thresh") == 0) {
1641 		value = DEFAULT_TX_OVERLOAD_THRESHOLD;
1642 	} else if (strcmp(pr_name, "_tx_resched_thresh") == 0) {
1643 		value = DEFAULT_TX_RESCHED_THRESHOLD;
1644 	} else if (strcmp(pr_name, "_rx_copy_thresh") == 0) {
1645 		value = DEFAULT_RX_COPY_THRESHOLD;
1646 	} else if (strcmp(pr_name, "_rx_limit_per_intr") == 0) {
1647 		value = DEFAULT_RX_LIMIT_PER_INTR;
1648 	} else 	if (strcmp(pr_name, "_intr_throttling") == 0) {
1649 		value = igb->capab->def_intr_throttle;
1650 	} else {
1651 		return;
1652 	}
1653 
1654 	(void) snprintf(valstr, sizeof (valstr), "%d", value);
1655 	mac_prop_info_set_default_str(prh, valstr);
1656 }
1657