/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright(c) 2007-2008 Intel Corporation. All rights reserved.
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include "igb_sw.h"

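/*
 * Retrieve the value of one driver statistic requested by the MAC layer.
 */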
int
igb_m_stat(void *arg, uint_t stat, uint64_t *val)
{
	igb_t *igb = (igb_t *)arg;
	struct e1000_hw *hw = &igb->hw;
	igb_stat_t *igb_ks;
	uint32_t low_val, high_val;

	igb_ks = (igb_stat_t *)igb->igb_ks->ks_data;

	mutex_enter(&igb->gen_lock);

	if (igb->igb_state & IGB_SUSPENDED) {
		mutex_exit(&igb->gen_lock);
		return (ECANCELED);
	}

	switch (stat) {
	case MAC_STAT_IFSPEED:
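		/* link_speed is in Mbps; the MAC layer expects bits/sec */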
		*val = igb->link_speed * 1000000ull;
		break;

	case MAC_STAT_MULTIRCV:
		igb_ks->mprc.value.ui64 +=
		    E1000_READ_REG(hw, E1000_MPRC);
		*val = igb_ks->mprc.value.ui64;
		break;

	case MAC_STAT_BRDCSTRCV:
		igb_ks->bprc.value.ui64 +=
		    E1000_READ_REG(hw, E1000_BPRC);
		*val = igb_ks->bprc.value.ui64;
		break;

	case MAC_STAT_MULTIXMT:
		igb_ks->mptc.value.ui64 +=
		    E1000_READ_REG(hw, E1000_MPTC);
		*val = igb_ks->mptc.value.ui64;
		break;

	case MAC_STAT_BRDCSTXMT:
		igb_ks->bptc.value.ui64 +=
		    E1000_READ_REG(hw, E1000_BPTC);
		*val = igb_ks->bptc.value.ui64;
		break;

	case MAC_STAT_NORCVBUF:
		igb_ks->rnbc.value.ui64 +=
		    E1000_READ_REG(hw, E1000_RNBC);
		*val = igb_ks->rnbc.value.ui64;
		break;

	case MAC_STAT_IERRORS:
		igb_ks->rxerrc.value.ui64 +=
		    E1000_READ_REG(hw, E1000_RXERRC);
		igb_ks->algnerrc.value.ui64 +=
		    E1000_READ_REG(hw, E1000_ALGNERRC);
		igb_ks->rlec.value.ui64 +=
		    E1000_READ_REG(hw, E1000_RLEC);
		igb_ks->crcerrs.value.ui64 +=
		    E1000_READ_REG(hw, E1000_CRCERRS);
		igb_ks->cexterr.value.ui64 +=
		    E1000_READ_REG(hw, E1000_CEXTERR);
		*val = igb_ks->rxerrc.value.ui64 +
		    igb_ks->algnerrc.value.ui64 +
		    igb_ks->rlec.value.ui64 +
		    igb_ks->crcerrs.value.ui64 +
		    igb_ks->cexterr.value.ui64;
		break;

	case MAC_STAT_NOXMTBUF:
		*val = 0;
		break;

	case MAC_STAT_OERRORS:
		igb_ks->ecol.value.ui64 +=
		    E1000_READ_REG(hw, E1000_ECOL);
		*val = igb_ks->ecol.value.ui64;
		break;

	case MAC_STAT_COLLISIONS:
		igb_ks->colc.value.ui64 +=
		    E1000_READ_REG(hw, E1000_COLC);
		*val = igb_ks->colc.value.ui64;
		break;

	case MAC_STAT_RBYTES:
		/*
		 * The 64-bit register will reset whenever the upper
		 * 32 bits are read. So we need to read the lower
		 * 32 bits first, then read the upper 32 bits.
		 */
		low_val = E1000_READ_REG(hw, E1000_TORL);
		high_val = E1000_READ_REG(hw, E1000_TORH);
		igb_ks->tor.value.ui64 +=
		    (uint64_t)high_val << 32 | (uint64_t)low_val;
		*val = igb_ks->tor.value.ui64;
		break;

	case MAC_STAT_IPACKETS:
		igb_ks->tpr.value.ui64 +=
		    E1000_READ_REG(hw, E1000_TPR);
		*val = igb_ks->tpr.value.ui64;
		break;

	case MAC_STAT_OBYTES:
		/*
		 * The 64-bit register will reset whenever the upper
		 * 32 bits are read. So we need to read the lower
		 * 32 bits first, then read the upper 32 bits.
		 */
		low_val = E1000_READ_REG(hw, E1000_TOTL);
		high_val = E1000_READ_REG(hw, E1000_TOTH);
		igb_ks->tot.value.ui64 +=
		    (uint64_t)high_val << 32 | (uint64_t)low_val;
		*val = igb_ks->tot.value.ui64;
		break;

	case MAC_STAT_OPACKETS:
		igb_ks->tpt.value.ui64 +=
		    E1000_READ_REG(hw, E1000_TPT);
		*val = igb_ks->tpt.value.ui64;
		break;

	/* RFC 1643 stats */
	case ETHER_STAT_ALIGN_ERRORS:
		igb_ks->algnerrc.value.ui64 +=
		    E1000_READ_REG(hw, E1000_ALGNERRC);
		*val = igb_ks->algnerrc.value.ui64;
		break;

	case ETHER_STAT_FCS_ERRORS:
		igb_ks->crcerrs.value.ui64 +=
		    E1000_READ_REG(hw, E1000_CRCERRS);
		*val = igb_ks->crcerrs.value.ui64;
		break;

	case ETHER_STAT_FIRST_COLLISIONS:
		igb_ks->scc.value.ui64 +=
		    E1000_READ_REG(hw, E1000_SCC);
		*val = igb_ks->scc.value.ui64;
		break;

	case ETHER_STAT_MULTI_COLLISIONS:
		igb_ks->mcc.value.ui64 +=
		    E1000_READ_REG(hw, E1000_MCC);
		*val = igb_ks->mcc.value.ui64;
		break;

	case ETHER_STAT_SQE_ERRORS:
		igb_ks->sec.value.ui64 +=
		    E1000_READ_REG(hw, E1000_SEC);
		*val = igb_ks->sec.value.ui64;
		break;

	case ETHER_STAT_DEFER_XMTS:
		igb_ks->dc.value.ui64 +=
		    E1000_READ_REG(hw, E1000_DC);
		*val = igb_ks->dc.value.ui64;
		break;

	case ETHER_STAT_TX_LATE_COLLISIONS:
		igb_ks->latecol.value.ui64 +=
		    E1000_READ_REG(hw, E1000_LATECOL);
		*val = igb_ks->latecol.value.ui64;
		break;

	case ETHER_STAT_EX_COLLISIONS:
		igb_ks->ecol.value.ui64 +=
		    E1000_READ_REG(hw, E1000_ECOL);
		*val = igb_ks->ecol.value.ui64;
		break;

	case ETHER_STAT_MACXMT_ERRORS:
		igb_ks->ecol.value.ui64 +=
		    E1000_READ_REG(hw, E1000_ECOL);
		*val = igb_ks->ecol.value.ui64;
		break;

	case ETHER_STAT_CARRIER_ERRORS:
		igb_ks->cexterr.value.ui64 +=
		    E1000_READ_REG(hw, E1000_CEXTERR);
		*val = igb_ks->cexterr.value.ui64;
		break;

	case ETHER_STAT_TOOLONG_ERRORS:
		igb_ks->roc.value.ui64 +=
		    E1000_READ_REG(hw, E1000_ROC);
		*val = igb_ks->roc.value.ui64;
		break;

	case ETHER_STAT_MACRCV_ERRORS:
		igb_ks->rxerrc.value.ui64 +=
		    E1000_READ_REG(hw, E1000_RXERRC);
		*val = igb_ks->rxerrc.value.ui64;
		break;

	/* MII/GMII stats */
	case ETHER_STAT_XCVR_ADDR:
		/* The Internal PHY's MDI address for each MAC is 1 */
		*val = 1;
		break;

	case ETHER_STAT_XCVR_ID:
		*val = hw->phy.id | hw->phy.revision;
		break;

	case ETHER_STAT_XCVR_INUSE:
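		/*
		 * Map the current link speed and media type to the
		 * transceiver type constant the MAC layer expects.
		 */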
		switch (igb->link_speed) {
		case SPEED_1000:
			*val =
			    (hw->phy.media_type == e1000_media_type_copper) ?
			    XCVR_1000T : XCVR_1000X;
			break;
		case SPEED_100:
			*val =
			    (hw->phy.media_type == e1000_media_type_copper) ?
			    (igb->param_100t4_cap == 1) ?
			    XCVR_100T4 : XCVR_100T2 : XCVR_100X;
			break;
		case SPEED_10:
			*val = XCVR_10;
			break;
		default:
			*val = XCVR_NONE;
			break;
		}
		break;

	case ETHER_STAT_CAP_1000FDX:
		*val = igb->param_1000fdx_cap;
		break;

	case ETHER_STAT_CAP_1000HDX:
		*val = igb->param_1000hdx_cap;
		break;

	case ETHER_STAT_CAP_100FDX:
		*val = igb->param_100fdx_cap;
		break;

	case ETHER_STAT_CAP_100HDX:
		*val = igb->param_100hdx_cap;
		break;

	case ETHER_STAT_CAP_10FDX:
		*val = igb->param_10fdx_cap;
		break;

	case ETHER_STAT_CAP_10HDX:
		*val = igb->param_10hdx_cap;
		break;

	case ETHER_STAT_CAP_ASMPAUSE:
		*val = igb->param_asym_pause_cap;
		break;

	case ETHER_STAT_CAP_PAUSE:
		*val = igb->param_pause_cap;
		break;

	case ETHER_STAT_CAP_AUTONEG:
		*val = igb->param_autoneg_cap;
		break;

	case ETHER_STAT_ADV_CAP_1000FDX:
		*val = igb->param_adv_1000fdx_cap;
		break;

	case ETHER_STAT_ADV_CAP_1000HDX:
		*val = igb->param_adv_1000hdx_cap;
		break;

	case ETHER_STAT_ADV_CAP_100FDX:
		*val = igb->param_adv_100fdx_cap;
		break;

	case ETHER_STAT_ADV_CAP_100HDX:
		*val = igb->param_adv_100hdx_cap;
		break;

	case ETHER_STAT_ADV_CAP_10FDX:
		*val = igb->param_adv_10fdx_cap;
		break;

	case ETHER_STAT_ADV_CAP_10HDX:
		*val = igb->param_adv_10hdx_cap;
		break;

	case ETHER_STAT_ADV_CAP_ASMPAUSE:
		*val = igb->param_adv_asym_pause_cap;
		break;

	case ETHER_STAT_ADV_CAP_PAUSE:
		*val = igb->param_adv_pause_cap;
		break;

	case ETHER_STAT_ADV_CAP_AUTONEG:
		*val = hw->mac.autoneg;
		break;

	case ETHER_STAT_LP_CAP_1000FDX:
		*val = igb->param_lp_1000fdx_cap;
		break;

	case ETHER_STAT_LP_CAP_1000HDX:
		*val = igb->param_lp_1000hdx_cap;
		break;

	case ETHER_STAT_LP_CAP_100FDX:
		*val = igb->param_lp_100fdx_cap;
		break;

	case ETHER_STAT_LP_CAP_100HDX:
		*val = igb->param_lp_100hdx_cap;
		break;

	case ETHER_STAT_LP_CAP_10FDX:
		*val = igb->param_lp_10fdx_cap;
		break;

	case ETHER_STAT_LP_CAP_10HDX:
		*val = igb->param_lp_10hdx_cap;
		break;

	case ETHER_STAT_LP_CAP_ASMPAUSE:
		*val = igb->param_lp_asym_pause_cap;
		break;

	case ETHER_STAT_LP_CAP_PAUSE:
		*val = igb->param_lp_pause_cap;
		break;

	case ETHER_STAT_LP_CAP_AUTONEG:
		*val = igb->param_lp_autoneg_cap;
		break;

	case ETHER_STAT_LINK_ASMPAUSE:
		*val = igb->param_asym_pause_cap;
		break;

	case ETHER_STAT_LINK_PAUSE:
		*val = igb->param_pause_cap;
		break;

	case ETHER_STAT_LINK_AUTONEG:
		*val = hw->mac.autoneg;
		break;

	case ETHER_STAT_LINK_DUPLEX:
		*val = (igb->link_duplex == FULL_DUPLEX) ?
		    LINK_DUPLEX_FULL : LINK_DUPLEX_HALF;
		break;

	case ETHER_STAT_TOOSHORT_ERRORS:
		igb_ks->ruc.value.ui64 +=
		    E1000_READ_REG(hw, E1000_RUC);
		*val = igb_ks->ruc.value.ui64;
		break;

	case ETHER_STAT_CAP_REMFAULT:
		*val = igb->param_rem_fault;
		break;

	case ETHER_STAT_ADV_REMFAULT:
		*val = igb->param_adv_rem_fault;
		break;

	case ETHER_STAT_LP_REMFAULT:
		*val = igb->param_lp_rem_fault;
		break;

	case ETHER_STAT_JABBER_ERRORS:
		igb_ks->rjc.value.ui64 +=
		    E1000_READ_REG(hw, E1000_RJC);
		*val = igb_ks->rjc.value.ui64;
		break;

	case ETHER_STAT_CAP_100T4:
		*val = igb->param_100t4_cap;
		break;

	case ETHER_STAT_ADV_CAP_100T4:
		*val = igb->param_adv_100t4_cap;
		break;

	case ETHER_STAT_LP_CAP_100T4:
		*val = igb->param_lp_100t4_cap;
		break;

	default:
		mutex_exit(&igb->gen_lock);
		return (ENOTSUP);
	}

	mutex_exit(&igb->gen_lock);

	if (igb_check_acc_handle(igb->osdep.reg_handle) != DDI_FM_OK)
		ddi_fm_service_impact(igb->dip, DDI_SERVICE_UNAFFECTED);

	return (0);
}

/*
 * Bring the device out of the reset/quiesced state that it
 * was in when the interface was registered.
 */
int
igb_m_start(void *arg)
{
	igb_t *igb = (igb_t *)arg;

	mutex_enter(&igb->gen_lock);

	if (igb->igb_state & IGB_SUSPENDED) {
		mutex_exit(&igb->gen_lock);
		return (ECANCELED);
	}

	if (igb_start(igb) != IGB_SUCCESS) {
		mutex_exit(&igb->gen_lock);
		return (EIO);
	}

	igb->igb_state |= IGB_STARTED;

	mutex_exit(&igb->gen_lock);

	/*
	 * Enable and start the watchdog timer
	 */
	igb_enable_watchdog_timer(igb);

	return (0);
}

/*
 * Stop the device and put it in a reset/quiesced state such
 * that the interface can be unregistered.
 */
void
igb_m_stop(void *arg)
{
	igb_t *igb = (igb_t *)arg;

	mutex_enter(&igb->gen_lock);

	if (igb->igb_state & IGB_SUSPENDED) {
		mutex_exit(&igb->gen_lock);
		return;
	}

	igb->igb_state &= ~IGB_STARTED;

	igb_stop(igb);

	mutex_exit(&igb->gen_lock);

	/*
	 * Disable and stop the watchdog timer
	 */
	igb_disable_watchdog_timer(igb);
}

/*
 * Set the promiscuity of the device.
 */
int
igb_m_promisc(void *arg, boolean_t on)
{
	igb_t *igb = (igb_t *)arg;
	uint32_t reg_val;

	mutex_enter(&igb->gen_lock);

	if (igb->igb_state & IGB_SUSPENDED) {
		mutex_exit(&igb->gen_lock);
		return (ECANCELED);
	}

	reg_val = E1000_READ_REG(&igb->hw, E1000_RCTL);

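	/*
	 * UPE enables unicast promiscuous receive; MPE enables multicast
	 * promiscuous receive.
	 */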
	if (on)
		reg_val |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
	else
		reg_val &= (~(E1000_RCTL_UPE | E1000_RCTL_MPE));

	E1000_WRITE_REG(&igb->hw, E1000_RCTL, reg_val);

	mutex_exit(&igb->gen_lock);

	if (igb_check_acc_handle(igb->osdep.reg_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(igb->dip, DDI_SERVICE_DEGRADED);
		return (EIO);
	}

	return (0);
}

/*
 * Add/remove the addresses to/from the set of multicast
 * addresses for which the device will receive packets.
 */
int
igb_m_multicst(void *arg, boolean_t add, const uint8_t *mcst_addr)
{
	igb_t *igb = (igb_t *)arg;
	int result;

	mutex_enter(&igb->gen_lock);

	if (igb->igb_state & IGB_SUSPENDED) {
		mutex_exit(&igb->gen_lock);
		return (ECANCELED);
	}

	result = (add) ? igb_multicst_add(igb, mcst_addr)
	    : igb_multicst_remove(igb, mcst_addr);

	mutex_exit(&igb->gen_lock);

	return (result);
}

/*
 * Handle the M_IOCTL messages passed down from the DLD, and support
 * the private IOCTLs used for debugging (loopback) and ndd.
 */
void
igb_m_ioctl(void *arg, queue_t *q, mblk_t *mp)
{
	igb_t *igb = (igb_t *)arg;
	struct iocblk *iocp;
	enum ioc_reply status;

	iocp = (struct iocblk *)(uintptr_t)mp->b_rptr;
	iocp->ioc_error = 0;

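	/*
	 * Only the loopback and ndd ioctls are handled here; any other
	 * command is rejected below with a NAK.
	 */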
	switch (iocp->ioc_cmd) {
	case LB_GET_INFO_SIZE:
	case LB_GET_INFO:
	case LB_GET_MODE:
	case LB_SET_MODE:
		status = igb_loopback_ioctl(igb, iocp, mp);
		break;

	case ND_GET:
	case ND_SET:
		status = igb_nd_ioctl(igb, q, mp, iocp);
		break;

	default:
		status = IOC_INVAL;
		break;
	}

	/*
	 * Decide how to reply
	 */
	switch (status) {
	default:
	case IOC_INVAL:
		/*
		 * Error, reply with a NAK and EINVAL or the specified error
		 */
		miocnak(q, mp, 0, iocp->ioc_error == 0 ?
		    EINVAL : iocp->ioc_error);
		break;

	case IOC_DONE:
		/*
		 * OK, reply already sent
		 */
		break;

	case IOC_ACK:
		/*
		 * OK, reply with an ACK
		 */
		miocack(q, mp, 0, 0);
		break;

	case IOC_REPLY:
		/*
		 * OK, send prepared reply as ACK or NAK
		 */
		mp->b_datap->db_type = iocp->ioc_error == 0 ?
		    M_IOCACK : M_IOCNAK;
		qreply(q, mp);
		break;
	}
}

/*
 * Add a MAC address to the target RX group.
 */
static int
igb_addmac(void *arg, const uint8_t *mac_addr)
{
	igb_rx_group_t *rx_group = (igb_rx_group_t *)arg;
	igb_t *igb = rx_group->igb;
	struct e1000_hw *hw = &igb->hw;
	int i, slot;

	mutex_enter(&igb->gen_lock);

	if (igb->igb_state & IGB_SUSPENDED) {
		mutex_exit(&igb->gen_lock);
		return (ECANCELED);
	}

	if (igb->unicst_avail == 0) {
		/* no slots available */
		mutex_exit(&igb->gen_lock);
		return (ENOSPC);
	}

	/*
	 * Slots 0 through igb->num_rx_groups - 1 are reserved and map
	 * one-to-one to the group index. The remaining slots are shared
	 * among all the groups. When adding a MAC address, the group's
	 * reserved slot is tried first, then the shared slots.
	 */
	slot = -1;
	if (igb->unicst_addr[rx_group->index].mac.set == 1) {
		/*
		 * The reserved slot for the current group is already in
		 * use; look for a free slot among the shared slots.
		 */
		for (i = igb->num_rx_groups; i < igb->unicst_total; i++) {
			if (igb->unicst_addr[i].mac.set == 0) {
				slot = i;
				break;
			}
		}
	} else
		slot = rx_group->index;

	if (slot == -1) {
		/* no slots available in the shared slots */
		mutex_exit(&igb->gen_lock);
		return (ENOSPC);
	}

	/* Set VMDq according to the mode supported by hardware. */
	e1000_rar_set_vmdq(hw, mac_addr, slot, igb->vmdq_mode, rx_group->index);

	bcopy(mac_addr, igb->unicst_addr[slot].mac.addr, ETHERADDRL);
	igb->unicst_addr[slot].mac.group_index = rx_group->index;
	igb->unicst_addr[slot].mac.set = 1;
	igb->unicst_avail--;

	mutex_exit(&igb->gen_lock);

	return (0);
}

/*
 * Remove a MAC address from the specified RX group.
 */
static int
igb_remmac(void *arg, const uint8_t *mac_addr)
{
	igb_rx_group_t *rx_group = (igb_rx_group_t *)arg;
	igb_t *igb = rx_group->igb;
	struct e1000_hw *hw = &igb->hw;
	int slot;

	mutex_enter(&igb->gen_lock);

	if (igb->igb_state & IGB_SUSPENDED) {
		mutex_exit(&igb->gen_lock);
		return (ECANCELED);
	}

	slot = igb_unicst_find(igb, mac_addr);
	if (slot == -1) {
		mutex_exit(&igb->gen_lock);
		return (EINVAL);
	}

	if (igb->unicst_addr[slot].mac.set == 0) {
		mutex_exit(&igb->gen_lock);
		return (EINVAL);
	}

	/* Clear the MAC address in the slot */
	e1000_rar_clear(hw, slot);
	igb->unicst_addr[slot].mac.set = 0;
	igb->unicst_avail++;

	mutex_exit(&igb->gen_lock);

	return (0);
}

/*
 * Enable the interrupt on the specified rx ring.
 */
int
igb_rx_ring_intr_enable(mac_intr_handle_t intrh)
{
	igb_rx_ring_t *rx_ring = (igb_rx_ring_t *)intrh;
	igb_t *igb = rx_ring->igb;
	struct e1000_hw *hw = &igb->hw;
	uint32_t index = rx_ring->index;

	if (igb->intr_type == DDI_INTR_TYPE_MSIX) {
		/* Interrupt enabling for MSI-X */
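		/*
		 * Set the queue's bit in EIMS to unmask its interrupt
		 * cause and in EIAC so the cause auto-clears when the
		 * MSI-X vector is delivered.
		 */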
		igb->eims_mask |= (E1000_EICR_RX_QUEUE0 << index);
		E1000_WRITE_REG(hw, E1000_EIMS, igb->eims_mask);
		E1000_WRITE_REG(hw, E1000_EIAC, igb->eims_mask);
	} else {
		ASSERT(index == 0);
		/* Interrupt enabling for MSI and legacy */
		igb->ims_mask |= E1000_IMS_RXT0;
		E1000_WRITE_REG(hw, E1000_IMS, igb->ims_mask);
	}

	E1000_WRITE_FLUSH(hw);

	return (0);
}

/*
 * Disable the interrupt on the specified rx ring.
 */
int
igb_rx_ring_intr_disable(mac_intr_handle_t intrh)
{
	igb_rx_ring_t *rx_ring = (igb_rx_ring_t *)intrh;
	igb_t *igb = rx_ring->igb;
	struct e1000_hw *hw = &igb->hw;
	uint32_t index = rx_ring->index;

	if (igb->intr_type == DDI_INTR_TYPE_MSIX) {
		/* Interrupt disabling for MSI-X */
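		/*
		 * Write the queue's bit to EIMC to mask its interrupt
		 * cause, and rewrite EIAC so the masked cause is no
		 * longer auto-cleared.
		 */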
		igb->eims_mask &= ~(E1000_EICR_RX_QUEUE0 << index);
		E1000_WRITE_REG(hw, E1000_EIMC,
		    (E1000_EICR_RX_QUEUE0 << index));
		E1000_WRITE_REG(hw, E1000_EIAC, igb->eims_mask);
	} else {
		ASSERT(index == 0);
		/* Interrupt disabling for MSI and legacy */
		igb->ims_mask &= ~E1000_IMS_RXT0;
		E1000_WRITE_REG(hw, E1000_IMC, E1000_IMS_RXT0);
	}

	E1000_WRITE_FLUSH(hw);

	return (0);
}

/*
 * Get the global ring index by a ring index within a group.
 */
int
igb_get_rx_ring_index(igb_t *igb, int gindex, int rindex)
{
	igb_rx_ring_t *rx_ring;
	int i;

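	/*
	 * 'rindex' counts rings within group 'gindex'; walk the global
	 * ring array and return the global index of the matching ring.
	 */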
	for (i = 0; i < igb->num_rx_rings; i++) {
		rx_ring = &igb->rx_rings[i];
		if (rx_ring->group_index == gindex)
			rindex--;
		if (rindex < 0)
			return (i);
	}

	return (-1);
}

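/*
 * Ring start callback from the MAC layer: record the generation number
 * that accompanies packets delivered upstream from this rx ring.
 */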
static int
igb_ring_start(mac_ring_driver_t rh, uint64_t mr_gen_num)
{
	igb_rx_ring_t *rx_ring = (igb_rx_ring_t *)rh;

	mutex_enter(&rx_ring->rx_lock);
	rx_ring->ring_gen_num = mr_gen_num;
	mutex_exit(&rx_ring->rx_lock);
	return (0);
}

/*
 * Callback function for the MAC layer to register all rings.
 */
/* ARGSUSED */
void
igb_fill_ring(void *arg, mac_ring_type_t rtype, const int rg_index,
    const int index, mac_ring_info_t *infop, mac_ring_handle_t rh)
{
	igb_t *igb = (igb_t *)arg;
	mac_intr_t *mintr = &infop->mri_intr;

	switch (rtype) {
	case MAC_RING_TYPE_RX: {
		igb_rx_ring_t *rx_ring;
		int global_index;

		/*
		 * 'index' is the ring index within the group; convert it
		 * to the global ring index by searching within the group.
		 */
		global_index = igb_get_rx_ring_index(igb, rg_index, index);

		ASSERT(global_index >= 0);

		rx_ring = &igb->rx_rings[global_index];
		rx_ring->ring_handle = rh;

		infop->mri_driver = (mac_ring_driver_t)rx_ring;
		infop->mri_start = igb_ring_start;
		infop->mri_stop = NULL;
		infop->mri_poll = (mac_ring_poll_t)igb_rx_ring_poll;

		mintr->mi_handle = (mac_intr_handle_t)rx_ring;
		mintr->mi_enable = igb_rx_ring_intr_enable;
		mintr->mi_disable = igb_rx_ring_intr_disable;

		break;
	}
	case MAC_RING_TYPE_TX: {
		ASSERT(index < igb->num_tx_rings);

		igb_tx_ring_t *tx_ring = &igb->tx_rings[index];
		tx_ring->ring_handle = rh;

		infop->mri_driver = (mac_ring_driver_t)tx_ring;
		infop->mri_start = NULL;
		infop->mri_stop = NULL;
		infop->mri_tx = igb_tx_ring_send;

		break;
	}
	default:
		break;
	}
}

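/*
 * Callback function for the MAC layer to register all groups.
 */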
void
igb_fill_group(void *arg, mac_ring_type_t rtype, const int index,
    mac_group_info_t *infop, mac_group_handle_t gh)
{
	igb_t *igb = (igb_t *)arg;

	switch (rtype) {
	case MAC_RING_TYPE_RX: {
		igb_rx_group_t *rx_group;

		ASSERT((index >= 0) && (index < igb->num_rx_groups));

		rx_group = &igb->rx_groups[index];
		rx_group->group_handle = gh;

		infop->mgi_driver = (mac_group_driver_t)rx_group;
		infop->mgi_start = NULL;
		infop->mgi_stop = NULL;
		infop->mgi_addmac = igb_addmac;
		infop->mgi_remmac = igb_remmac;
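		/* The rx rings are evenly divided among the rx groups. */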
		infop->mgi_count = (igb->num_rx_rings / igb->num_rx_groups);

		break;
	}
	case MAC_RING_TYPE_TX:
		break;
	default:
		break;
	}
}

/*
 * Obtain the MAC's capabilities and associated data from
 * the driver.
 */
boolean_t
igb_m_getcapab(void *arg, mac_capab_t cap, void *cap_data)
{
	igb_t *igb = (igb_t *)arg;

	switch (cap) {
	case MAC_CAPAB_HCKSUM: {
		uint32_t *tx_hcksum_flags = cap_data;

		/*
		 * We advertise our capabilities only if tx hcksum offload is
		 * enabled.  On receive, the stack will accept checksummed
		 * packets anyway, even if we haven't said we can deliver
		 * them.
		 */
		if (!igb->tx_hcksum_enable)
			return (B_FALSE);

		*tx_hcksum_flags = HCKSUM_INET_PARTIAL | HCKSUM_IPHDRCKSUM;
		break;
	}
	case MAC_CAPAB_LSO: {
		mac_capab_lso_t *cap_lso = cap_data;

		if (igb->lso_enable) {
			cap_lso->lso_flags = LSO_TX_BASIC_TCP_IPV4;
			cap_lso->lso_basic_tcp_ipv4.lso_max = IGB_LSO_MAXLEN;
			break;
		} else {
			return (B_FALSE);
		}
	}
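	/*
	 * Describe the rx/tx ring and rx group layout so the MAC layer
	 * can bind rings and groups to the fill callbacks above.
	 */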
	case MAC_CAPAB_RINGS: {
		mac_capab_rings_t *cap_rings = cap_data;

		switch (cap_rings->mr_type) {
		case MAC_RING_TYPE_RX:
			cap_rings->mr_group_type = MAC_GROUP_TYPE_STATIC;
			cap_rings->mr_rnum = igb->num_rx_rings;
			cap_rings->mr_gnum = igb->num_rx_groups;
			cap_rings->mr_rget = igb_fill_ring;
			cap_rings->mr_gget = igb_fill_group;
			cap_rings->mr_gaddring = NULL;
			cap_rings->mr_gremring = NULL;

			break;
		case MAC_RING_TYPE_TX:
			cap_rings->mr_group_type = MAC_GROUP_TYPE_STATIC;
			cap_rings->mr_rnum = igb->num_tx_rings;
			cap_rings->mr_gnum = 0;
			cap_rings->mr_rget = igb_fill_ring;
			cap_rings->mr_gget = NULL;

			break;
		default:
			break;
		}
		break;
	}

	default:
		return (B_FALSE);
	}
	return (B_TRUE);
}
973