1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright (c) 2007-2012 Intel Corporation. All rights reserved.
24 */
25
26 /*
27 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
28 * Copyright 2013, Nexenta Systems, Inc. All rights reserved.
29 * Copyright 2016 Joyent, Inc.
30 */
31
32 #include "igb_sw.h"
33
34 static char ident[] = "Intel 1Gb Ethernet";
35 static char igb_version[] = "igb 2.3.8-ish";
36
37 /*
38 * Local function prototypes
39 */
40 static int igb_register_mac(igb_t *);
41 static int igb_identify_hardware(igb_t *);
42 static int igb_regs_map(igb_t *);
43 static void igb_init_properties(igb_t *);
44 static int igb_init_driver_settings(igb_t *);
45 static void igb_init_locks(igb_t *);
46 static void igb_destroy_locks(igb_t *);
47 static int igb_init_mac_address(igb_t *);
48 static int igb_init(igb_t *);
49 static int igb_init_adapter(igb_t *);
50 static void igb_stop_adapter(igb_t *);
51 static int igb_reset(igb_t *);
52 static void igb_tx_clean(igb_t *);
53 static boolean_t igb_tx_drain(igb_t *);
54 static boolean_t igb_rx_drain(igb_t *);
55 static int igb_alloc_rings(igb_t *);
56 static int igb_alloc_rx_data(igb_t *);
57 static void igb_free_rx_data(igb_t *);
58 static void igb_free_rings(igb_t *);
59 static void igb_setup_rings(igb_t *);
60 static void igb_setup_rx(igb_t *);
61 static void igb_setup_tx(igb_t *);
62 static void igb_setup_rx_ring(igb_rx_ring_t *);
63 static void igb_setup_tx_ring(igb_tx_ring_t *);
64 static void igb_setup_rss(igb_t *);
65 static void igb_setup_mac_rss_classify(igb_t *);
66 static void igb_setup_mac_classify(igb_t *);
67 static void igb_init_unicst(igb_t *);
68 static void igb_setup_multicst(igb_t *);
69 static void igb_get_phy_state(igb_t *);
70 static void igb_param_sync(igb_t *);
71 static void igb_get_conf(igb_t *);
72 static int igb_get_prop(igb_t *, char *, int, int, int);
73 static boolean_t igb_is_link_up(igb_t *);
74 static boolean_t igb_link_check(igb_t *);
75 static void igb_local_timer(void *);
76 static void igb_link_timer(void *);
77 static void igb_arm_watchdog_timer(igb_t *);
78 static void igb_start_watchdog_timer(igb_t *);
79 static void igb_restart_watchdog_timer(igb_t *);
80 static void igb_stop_watchdog_timer(igb_t *);
81 static void igb_start_link_timer(igb_t *);
82 static void igb_stop_link_timer(igb_t *);
83 static void igb_disable_adapter_interrupts(igb_t *);
84 static void igb_enable_adapter_interrupts_82575(igb_t *);
85 static void igb_enable_adapter_interrupts_82576(igb_t *);
86 static void igb_enable_adapter_interrupts_82580(igb_t *);
87 static boolean_t is_valid_mac_addr(uint8_t *);
88 static boolean_t igb_stall_check(igb_t *);
89 static boolean_t igb_set_loopback_mode(igb_t *, uint32_t);
90 static void igb_set_external_loopback(igb_t *);
91 static void igb_set_internal_phy_loopback(igb_t *);
92 static void igb_set_internal_serdes_loopback(igb_t *);
93 static boolean_t igb_find_mac_address(igb_t *);
94 static int igb_alloc_intrs(igb_t *);
95 static int igb_alloc_intr_handles(igb_t *, int);
96 static int igb_add_intr_handlers(igb_t *);
97 static void igb_rem_intr_handlers(igb_t *);
98 static void igb_rem_intrs(igb_t *);
99 static int igb_enable_intrs(igb_t *);
100 static int igb_disable_intrs(igb_t *);
101 static void igb_setup_msix_82575(igb_t *);
102 static void igb_setup_msix_82576(igb_t *);
103 static void igb_setup_msix_82580(igb_t *);
104 static uint_t igb_intr_legacy(void *, void *);
105 static uint_t igb_intr_msi(void *, void *);
106 static uint_t igb_intr_rx(void *, void *);
107 static uint_t igb_intr_tx(void *, void *);
108 static uint_t igb_intr_tx_other(void *, void *);
109 static void igb_intr_rx_work(igb_rx_ring_t *);
110 static void igb_intr_tx_work(igb_tx_ring_t *);
111 static void igb_intr_link_work(igb_t *);
112 static void igb_get_driver_control(struct e1000_hw *);
113 static void igb_release_driver_control(struct e1000_hw *);
114
115 static int igb_attach(dev_info_t *, ddi_attach_cmd_t);
116 static int igb_detach(dev_info_t *, ddi_detach_cmd_t);
117 static int igb_resume(dev_info_t *);
118 static int igb_suspend(dev_info_t *);
119 static int igb_quiesce(dev_info_t *);
120 static void igb_unconfigure(dev_info_t *, igb_t *);
121 static int igb_fm_error_cb(dev_info_t *, ddi_fm_error_t *,
122 const void *);
123 static void igb_fm_init(igb_t *);
124 static void igb_fm_fini(igb_t *);
125 static void igb_release_multicast(igb_t *);
126
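/*
 * These driver-private properties (leading '_') are normally viewed
 * and tuned on illumos with dladm(1M) show-linkprop / set-linkprop
 * using these names.
 */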
127 char *igb_priv_props[] = {
128 "_eee_support",
129 "_tx_copy_thresh",
130 "_tx_recycle_thresh",
131 "_tx_overload_thresh",
132 "_tx_resched_thresh",
133 "_rx_copy_thresh",
134 "_rx_limit_per_intr",
135 "_intr_throttling",
136 "_adv_pause_cap",
137 "_adv_asym_pause_cap",
138 NULL
139 };
140
141 static struct cb_ops igb_cb_ops = {
142 nulldev, /* cb_open */
143 nulldev, /* cb_close */
144 nodev, /* cb_strategy */
145 nodev, /* cb_print */
146 nodev, /* cb_dump */
147 nodev, /* cb_read */
148 nodev, /* cb_write */
149 nodev, /* cb_ioctl */
150 nodev, /* cb_devmap */
151 nodev, /* cb_mmap */
152 nodev, /* cb_segmap */
153 nochpoll, /* cb_chpoll */
154 ddi_prop_op, /* cb_prop_op */
155 NULL, /* cb_stream */
156 D_MP | D_HOTPLUG, /* cb_flag */
157 CB_REV, /* cb_rev */
158 nodev, /* cb_aread */
159 nodev /* cb_awrite */
160 };
161
162 static struct dev_ops igb_dev_ops = {
163 DEVO_REV, /* devo_rev */
164 0, /* devo_refcnt */
165 NULL, /* devo_getinfo */
166 nulldev, /* devo_identify */
167 nulldev, /* devo_probe */
168 igb_attach, /* devo_attach */
169 igb_detach, /* devo_detach */
170 nodev, /* devo_reset */
171 &igb_cb_ops, /* devo_cb_ops */
172 NULL, /* devo_bus_ops */
173 ddi_power, /* devo_power */
174 igb_quiesce, /* devo_quiesce */
175 };
176
177 static struct modldrv igb_modldrv = {
178 &mod_driverops, /* Type of module. This one is a driver */
179 ident, /* Description string */
180 &igb_dev_ops, /* driver ops */
181 };
182
183 static struct modlinkage igb_modlinkage = {
184 MODREV_1, &igb_modldrv, NULL
185 };
186
187 /* Access attributes for register mapping */
188 ddi_device_acc_attr_t igb_regs_acc_attr = {
189 DDI_DEVICE_ATTR_V1,
190 DDI_STRUCTURE_LE_ACC,
191 DDI_STRICTORDER_ACC,
192 DDI_FLAGERR_ACC
193 };
194
195 #define IGB_M_CALLBACK_FLAGS \
196 (MC_IOCTL | MC_GETCAPAB | MC_SETPROP | MC_GETPROP | MC_PROPINFO)
197
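/*
 * The MC_* flags above advertise which optional entry points in the
 * callback table below are implemented; optional callbacks without a
 * corresponding flag are left NULL.
 */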
198 static mac_callbacks_t igb_m_callbacks = {
199 IGB_M_CALLBACK_FLAGS,
200 igb_m_stat,
201 igb_m_start,
202 igb_m_stop,
203 igb_m_promisc,
204 igb_m_multicst,
205 NULL,
206 NULL,
207 NULL,
208 igb_m_ioctl,
209 igb_m_getcapab,
210 NULL,
211 NULL,
212 igb_m_setprop,
213 igb_m_getprop,
214 igb_m_propinfo
215 };
216
217 /*
218 * Initialize capabilities of each supported adapter type
219 */
220 static adapter_info_t igb_82575_cap = {
221 /* limits */
222 4, /* maximum number of rx queues */
223 1, /* minimum number of rx queues */
224 4, /* default number of rx queues */
225 4, /* maximum number of tx queues */
226 1, /* minimum number of tx queues */
227 4, /* default number of tx queues */
228 65535, /* maximum interrupt throttle rate */
229 0, /* minimum interrupt throttle rate */
230 200, /* default interrupt throttle rate */
231
232 /* function pointers */
233 igb_enable_adapter_interrupts_82575,
234 igb_setup_msix_82575,
235
236 /* capabilities */
237 (IGB_FLAG_HAS_DCA | /* capability flags */
238 IGB_FLAG_VMDQ_POOL),
239
240 0xffc00000 /* mask for RXDCTL register */
241 };
242
243 static adapter_info_t igb_82576_cap = {
244 /* limits */
245 16, /* maximum number of rx queues */
246 1, /* minimum number of rx queues */
247 4, /* default number of rx queues */
248 16, /* maximum number of tx queues */
249 1, /* minimum number of tx queues */
250 4, /* default number of tx queues */
251 65535, /* maximum interrupt throttle rate */
252 0, /* minimum interrupt throttle rate */
253 200, /* default interrupt throttle rate */
254
255 /* function pointers */
256 igb_enable_adapter_interrupts_82576,
257 igb_setup_msix_82576,
258
259 /* capabilities */
260 (IGB_FLAG_HAS_DCA | /* capability flags */
261 IGB_FLAG_VMDQ_POOL |
262 IGB_FLAG_NEED_CTX_IDX),
263
264 0xffe00000 /* mask for RXDCTL register */
265 };
266
267 static adapter_info_t igb_82580_cap = {
268 /* limits */
269 8, /* maximum number of rx queues */
270 1, /* minimum number of rx queues */
271 4, /* default number of rx queues */
272 8, /* maximum number of tx queues */
273 1, /* minimum number of tx queues */
274 4, /* default number of tx queues */
275 65535, /* maximum interrupt throttle rate */
276 0, /* minimum interrupt throttle rate */
277 200, /* default interrupt throttle rate */
278
279 /* function pointers */
280 igb_enable_adapter_interrupts_82580,
281 igb_setup_msix_82580,
282
283 /* capabilities */
284 (IGB_FLAG_HAS_DCA | /* capability flags */
285 IGB_FLAG_VMDQ_POOL |
286 IGB_FLAG_NEED_CTX_IDX),
287
288 0xffe00000 /* mask for RXDCTL register */
289 };
290
291 static adapter_info_t igb_i350_cap = {
292 /* limits */
293 8, /* maximum number of rx queues */
294 1, /* minimum number of rx queues */
295 4, /* default number of rx queues */
296 8, /* maximum number of tx queues */
297 1, /* minimum number of tx queues */
298 4, /* default number of tx queues */
299 65535, /* maximum interrupt throttle rate */
300 0, /* minimum interrupt throttle rate */
301 200, /* default interrupt throttle rate */
302
303 /* function pointers */
304 igb_enable_adapter_interrupts_82580,
305 igb_setup_msix_82580,
306
307 /* capabilities */
308 (IGB_FLAG_HAS_DCA | /* capability flags */
309 IGB_FLAG_VMDQ_POOL |
310 IGB_FLAG_NEED_CTX_IDX),
311
312 0xffe00000 /* mask for RXDCTL register */
313 };
314
315 static adapter_info_t igb_i210_cap = {
316 /* limits */
317 4, /* maximum number of rx queues */
318 1, /* minimum number of rx queues */
319 4, /* default number of rx queues */
320 4, /* maximum number of tx queues */
321 1, /* minimum number of tx queues */
322 4, /* default number of tx queues */
323 65535, /* maximum interrupt throttle rate */
324 0, /* minimum interrupt throttle rate */
325 200, /* default interrupt throttle rate */
326
327 /* function pointers */
328 igb_enable_adapter_interrupts_82580,
329 igb_setup_msix_82580,
330
331 /* capabilities */
332 (IGB_FLAG_HAS_DCA | /* capability flags */
333 IGB_FLAG_VMDQ_POOL |
334 IGB_FLAG_NEED_CTX_IDX),
335
336 0xfff00000 /* mask for RXDCTL register */
337 };
338
339 static adapter_info_t igb_i354_cap = {
340 /* limits */
341 8, /* maximum number of rx queues */
342 1, /* minimum number of rx queues */
343 4, /* default number of rx queues */
344 8, /* maximum number of tx queues */
345 1, /* minimum number of tx queues */
346 4, /* default number of tx queues */
347 65535, /* maximum interrupt throttle rate */
348 0, /* minimum interrupt throttle rate */
349 200, /* default interrupt throttle rate */
350
351 /* function pointers */
352 igb_enable_adapter_interrupts_82580,
353 igb_setup_msix_82580,
354
355 /* capabilities */
356 (IGB_FLAG_HAS_DCA | /* capability flags */
357 IGB_FLAG_VMDQ_POOL |
358 IGB_FLAG_NEED_CTX_IDX),
359
360 0xfff00000 /* mask for RXDCTL register */
361 };
362
363 /*
364 * Module Initialization Functions
365 */
366
367 int
368 _init(void)
369 {
370 int status;
371
372 mac_init_ops(&igb_dev_ops, MODULE_NAME);
373
374 status = mod_install(&igb_modlinkage);
375
376 if (status != DDI_SUCCESS) {
377 mac_fini_ops(&igb_dev_ops);
378 }
379
380 return (status);
381 }
382
383 int
384 _fini(void)
385 {
386 int status;
387
388 status = mod_remove(&igb_modlinkage);
389
390 if (status == DDI_SUCCESS) {
391 mac_fini_ops(&igb_dev_ops);
392 }
393
394 return (status);
395
396 }
397
398 int
399 _info(struct modinfo *modinfop)
400 {
401 int status;
402
403 status = mod_info(&igb_modlinkage, modinfop);
404
405 return (status);
406 }
407
408 /*
409 * igb_attach - driver attach
410 *
411 * This function is the device specific initialization entry
412 * point. This entry point is required and must be written.
413 * The DDI_ATTACH command must be provided in the attach entry
414 * point. When attach() is called with cmd set to DDI_ATTACH,
415 * all normal kernel services (such as kmem_alloc(9F)) are
416 * available for use by the driver.
417 *
418 * The attach() function will be called once for each instance
419 * of the device on the system with cmd set to DDI_ATTACH.
420 * Until attach() succeeds, the only driver entry points which
421 * may be called are open(9E) and getinfo(9E).
422 */
423 static int
424 igb_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd)
425 {
426 igb_t *igb;
427 struct igb_osdep *osdep;
428 struct e1000_hw *hw;
429 int instance;
430
431 /*
432 * Check the command and perform corresponding operations
433 */
434 switch (cmd) {
435 default:
436 return (DDI_FAILURE);
437
438 case DDI_RESUME:
439 return (igb_resume(devinfo));
440
441 case DDI_ATTACH:
442 break;
443 }
444
445 /* Get the device instance */
446 instance = ddi_get_instance(devinfo);
447
448 /* Allocate memory for the instance data structure */
449 igb = kmem_zalloc(sizeof (igb_t), KM_SLEEP);
450
451 igb->dip = devinfo;
452 igb->instance = instance;
453
454 hw = &igb->hw;
455 osdep = &igb->osdep;
456 hw->back = osdep;
457 osdep->igb = igb;
458
459 /* Attach the instance pointer to the dev_info data structure */
460 ddi_set_driver_private(devinfo, igb);
461
462
463 /* Initialize for fma support */
464 igb->fm_capabilities = igb_get_prop(igb, "fm-capable",
465 0, 0x0f,
466 DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE |
467 DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE);
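/*
 * The default of 0x0f enables all four FMA capabilities OR-ed above
 * (ereports, register access checking, DMA checking and the error
 * callback); a "fm-capable" entry in igb.conf can scale this back.
 */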
468 igb_fm_init(igb);
469 igb->attach_progress |= ATTACH_PROGRESS_FMINIT;
470
471 /*
472 * Map PCI config space registers
473 */
474 if (pci_config_setup(devinfo, &osdep->cfg_handle) != DDI_SUCCESS) {
475 igb_log(igb, IGB_LOG_ERROR, "Failed to map PCI configurations");
476 goto attach_fail;
477 }
478 igb->attach_progress |= ATTACH_PROGRESS_PCI_CONFIG;
479
480 /*
481 * Identify the chipset family
482 */
483 if (igb_identify_hardware(igb) != IGB_SUCCESS) {
484 igb_log(igb, IGB_LOG_ERROR, "Failed to identify hardware");
485 goto attach_fail;
486 }
487
488 /*
489 * Map device registers
490 */
491 if (igb_regs_map(igb) != IGB_SUCCESS) {
492 igb_log(igb, IGB_LOG_ERROR, "Failed to map device registers");
493 goto attach_fail;
494 }
495 igb->attach_progress |= ATTACH_PROGRESS_REGS_MAP;
496
497 /*
498 * Initialize driver parameters
499 */
500 igb_init_properties(igb);
501 igb->attach_progress |= ATTACH_PROGRESS_PROPS;
502
503 /*
504 * Allocate interrupts
505 */
506 if (igb_alloc_intrs(igb) != IGB_SUCCESS) {
507 igb_log(igb, IGB_LOG_ERROR, "Failed to allocate interrupts");
508 goto attach_fail;
509 }
510 igb->attach_progress |= ATTACH_PROGRESS_ALLOC_INTR;
511
512 /*
513 * Allocate rx/tx rings based on the ring numbers.
514 * The actual numbers of rx/tx rings are decided by the number of
515 * allocated interrupt vectors, so we should allocate the rings after
516 * interrupts are allocated.
517 */
518 if (igb_alloc_rings(igb) != IGB_SUCCESS) {
519 igb_log(igb, IGB_LOG_ERROR,
520 "Failed to allocate rx/tx rings or groups");
521 goto attach_fail;
522 }
523 igb->attach_progress |= ATTACH_PROGRESS_ALLOC_RINGS;
524
525 /*
526 * Add interrupt handlers
527 */
528 if (igb_add_intr_handlers(igb) != IGB_SUCCESS) {
529 igb_log(igb, IGB_LOG_ERROR, "Failed to add interrupt handlers");
530 goto attach_fail;
531 }
532 igb->attach_progress |= ATTACH_PROGRESS_ADD_INTR;
533
534 /*
535 * Initialize driver parameters
536 */
537 if (igb_init_driver_settings(igb) != IGB_SUCCESS) {
538 igb_log(igb, IGB_LOG_ERROR,
539 "Failed to initialize driver settings");
540 goto attach_fail;
541 }
542
543 if (igb_check_acc_handle(igb->osdep.cfg_handle) != DDI_FM_OK) {
544 ddi_fm_service_impact(igb->dip, DDI_SERVICE_LOST);
545 goto attach_fail;
546 }
547
548 /*
549 * Initialize mutexes for this device.
550 * Do this before enabling the interrupt handler and
551 * registering the softint, to avoid the condition where the
552 * interrupt handler could try to use an uninitialized mutex.
553 */
554 igb_init_locks(igb);
555 igb->attach_progress |= ATTACH_PROGRESS_LOCKS;
556
557 /*
558 * Initialize the adapter
559 */
560 if (igb_init(igb) != IGB_SUCCESS) {
561 igb_log(igb, IGB_LOG_ERROR, "Failed to initialize adapter");
562 goto attach_fail;
563 }
564 igb->attach_progress |= ATTACH_PROGRESS_INIT_ADAPTER;
565
566 /*
567 * Initialize statistics
568 */
569 if (igb_init_stats(igb) != IGB_SUCCESS) {
570 igb_log(igb, IGB_LOG_ERROR, "Failed to initialize statistics");
571 goto attach_fail;
572 }
573 igb->attach_progress |= ATTACH_PROGRESS_STATS;
574
575 /*
576 * Register the driver to the MAC
577 */
578 if (igb_register_mac(igb) != IGB_SUCCESS) {
579 igb_log(igb, IGB_LOG_ERROR, "Failed to register MAC");
580 goto attach_fail;
581 }
582 igb->attach_progress |= ATTACH_PROGRESS_MAC;
583
584 /*
585 * Now that mutex locks are initialized, and the chip is also
586 * initialized, enable interrupts.
587 */
588 if (igb_enable_intrs(igb) != IGB_SUCCESS) {
589 igb_log(igb, IGB_LOG_ERROR, "Failed to enable DDI interrupts");
590 goto attach_fail;
591 }
592 igb->attach_progress |= ATTACH_PROGRESS_ENABLE_INTR;
593
594 igb_log(igb, IGB_LOG_INFO, "%s", igb_version);
595 atomic_or_32(&igb->igb_state, IGB_INITIALIZED);
596
597 /*
598 * Newer models have Energy Efficient Ethernet (EEE); disable it by
599 * default.
600 */
601 if (igb->hw.mac.type == e1000_i350)
602 (void) e1000_set_eee_i350(&igb->hw, B_FALSE, B_FALSE);
603 else if (igb->hw.mac.type == e1000_i354)
604 (void) e1000_set_eee_i354(&igb->hw, B_FALSE, B_FALSE);
605
606 return (DDI_SUCCESS);
607
608 attach_fail:
609 igb_unconfigure(devinfo, igb);
610 return (DDI_FAILURE);
611 }
612
613 /*
614 * igb_detach - driver detach
615 *
616 * The detach() function is the complement of the attach routine.
617 * If cmd is set to DDI_DETACH, detach() is used to remove the
618 * state associated with a given instance of a device node
619 * prior to the removal of that instance from the system.
620 *
621 * The detach() function will be called once for each instance
622 * of the device for which there has been a successful attach()
623 * once there are no longer any opens on the device.
624 *
625 * Interrupt routines are disabled, and all memory allocated by
626 * this driver is freed.
627 */
628 static int
629 igb_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd)
630 {
631 igb_t *igb;
632
633 /*
634 * Check detach command
635 */
636 switch (cmd) {
637 default:
638 return (DDI_FAILURE);
639
640 case DDI_SUSPEND:
641 return (igb_suspend(devinfo));
642
643 case DDI_DETACH:
644 break;
645 }
646
647
648 /*
649 * Get the pointer to the driver private data structure
650 */
651 igb = (igb_t *)ddi_get_driver_private(devinfo);
652 if (igb == NULL)
653 return (DDI_FAILURE);
654
655 /*
656 * Unregister MAC. If this fails, we have to fail the detach.
657 */
658 if (mac_unregister(igb->mac_hdl) != 0) {
659 igb_log(igb, IGB_LOG_ERROR, "Failed to unregister MAC");
660 return (DDI_FAILURE);
661 }
662 igb->attach_progress &= ~ATTACH_PROGRESS_MAC;
663
664 /*
665 * If the device is still running, it needs to be stopped first.
666 * This check is necessary because under some specific circumstances,
667 * the detach routine can be called without stopping the interface
668 * first.
669 */
670 mutex_enter(&igb->gen_lock);
671 if (igb->igb_state & IGB_STARTED) {
672 atomic_and_32(&igb->igb_state, ~IGB_STARTED);
673 igb_stop(igb, B_TRUE);
674 mutex_exit(&igb->gen_lock);
675 /* Disable and stop the watchdog timer */
676 igb_disable_watchdog_timer(igb);
677 } else
678 mutex_exit(&igb->gen_lock);
679
680 /*
681 * Check if there are still rx buffers held by the upper layer.
682 * If so, fail the detach.
683 */
684 if (!igb_rx_drain(igb))
685 return (DDI_FAILURE);
686
687 /*
688 * Do the remaining unconfigure routines
689 */
690 igb_unconfigure(devinfo, igb);
691
692 return (DDI_SUCCESS);
693 }
694
695 /*
696 * quiesce(9E) entry point.
697 *
698 * This function is called when the system is single-threaded at high
699 * PIL with preemption disabled. Therefore, this function must not be
700 * blocked.
701 *
702 * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
703 * DDI_FAILURE indicates an error condition and should almost never happen.
704 */
705 static int
706 igb_quiesce(dev_info_t *devinfo)
707 {
708 igb_t *igb;
709 struct e1000_hw *hw;
710
711 igb = (igb_t *)ddi_get_driver_private(devinfo);
712
713 if (igb == NULL)
714 return (DDI_FAILURE);
715
716 hw = &igb->hw;
717
718 /*
719 * Disable the adapter interrupts
720 */
721 igb_disable_adapter_interrupts(igb);
722
723 /* Tell firmware driver is no longer in control */
724 igb_release_driver_control(hw);
725
726 /*
727 * Reset the chipset
728 */
729 (void) e1000_reset_hw(hw);
730
731 /*
732 * Reset PHY if possible
733 */
734 if (e1000_check_reset_block(hw) == E1000_SUCCESS)
735 (void) e1000_phy_hw_reset(hw);
736
737 return (DDI_SUCCESS);
738 }
739
740 /*
741 * igb_unconfigure - release all resources held by this instance
742 */
743 static void
744 igb_unconfigure(dev_info_t *devinfo, igb_t *igb)
745 {
746 /*
747 * Disable interrupt
748 */
749 if (igb->attach_progress & ATTACH_PROGRESS_ENABLE_INTR) {
750 (void) igb_disable_intrs(igb);
751 }
752
753 /*
754 * Unregister MAC
755 */
756 if (igb->attach_progress & ATTACH_PROGRESS_MAC) {
757 (void) mac_unregister(igb->mac_hdl);
758 }
759
760 /*
761 * Free statistics
762 */
763 if (igb->attach_progress & ATTACH_PROGRESS_STATS) {
764 kstat_delete((kstat_t *)igb->igb_ks);
765 }
766
767 /*
768 * Remove interrupt handlers
769 */
770 if (igb->attach_progress & ATTACH_PROGRESS_ADD_INTR) {
771 igb_rem_intr_handlers(igb);
772 }
773
774 /*
775 * Remove interrupts
776 */
777 if (igb->attach_progress & ATTACH_PROGRESS_ALLOC_INTR) {
778 igb_rem_intrs(igb);
779 }
780
781 /*
782 * Remove driver properties
783 */
784 if (igb->attach_progress & ATTACH_PROGRESS_PROPS) {
785 (void) ddi_prop_remove_all(devinfo);
786 }
787
788 /*
789 * Stop the adapter
790 */
791 if (igb->attach_progress & ATTACH_PROGRESS_INIT_ADAPTER) {
792 mutex_enter(&igb->gen_lock);
793 igb_stop_adapter(igb);
794 mutex_exit(&igb->gen_lock);
795 if (igb_check_acc_handle(igb->osdep.reg_handle) != DDI_FM_OK)
796 ddi_fm_service_impact(igb->dip, DDI_SERVICE_UNAFFECTED);
797 }
798
799 /*
800 * Free multicast table
801 */
802 igb_release_multicast(igb);
803
804 /*
805 * Free register handle
806 */
807 if (igb->attach_progress & ATTACH_PROGRESS_REGS_MAP) {
808 if (igb->osdep.reg_handle != NULL)
809 ddi_regs_map_free(&igb->osdep.reg_handle);
810 }
811
812 /*
813 * Free PCI config handle
814 */
815 if (igb->attach_progress & ATTACH_PROGRESS_PCI_CONFIG) {
816 if (igb->osdep.cfg_handle != NULL)
817 pci_config_teardown(&igb->osdep.cfg_handle);
818 }
819
820 /*
821 * Free locks
822 */
823 if (igb->attach_progress & ATTACH_PROGRESS_LOCKS) {
824 igb_destroy_locks(igb);
825 }
826
827 /*
828 * Free the rx/tx rings
829 */
830 if (igb->attach_progress & ATTACH_PROGRESS_ALLOC_RINGS) {
831 igb_free_rings(igb);
832 }
833
834 /*
835 * Remove FMA
836 */
837 if (igb->attach_progress & ATTACH_PROGRESS_FMINIT) {
838 igb_fm_fini(igb);
839 }
840
841 /*
842 * Free the driver data structure
843 */
844 kmem_free(igb, sizeof (igb_t));
845
846 ddi_set_driver_private(devinfo, NULL);
847 }
848
849 /*
850 * igb_register_mac - Register the driver and its function pointers with
851 * the GLD interface
852 */
853 static int
854 igb_register_mac(igb_t *igb)
855 {
856 struct e1000_hw *hw = &igb->hw;
857 mac_register_t *mac;
858 int status;
859
860 if ((mac = mac_alloc(MAC_VERSION)) == NULL)
861 return (IGB_FAILURE);
862
863 mac->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
864 mac->m_driver = igb;
865 mac->m_dip = igb->dip;
866 mac->m_src_addr = hw->mac.addr;
867 mac->m_callbacks = &igb_m_callbacks;
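/*
 * Since max_frame_size is derived as MTU + VLAN Ethernet header +
 * FCS, the m_max_sdu computed below works out to the configured MTU;
 * e.g., a 1522-byte max frame yields a 1500-byte SDU.
 */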
868 mac->m_min_sdu = 0;
869 mac->m_max_sdu = igb->max_frame_size -
870 sizeof (struct ether_vlan_header) - ETHERFCSL;
871 mac->m_margin = VLAN_TAGSZ;
872 mac->m_priv_props = igb_priv_props;
873 mac->m_v12n = MAC_VIRT_LEVEL1;
874
875 status = mac_register(mac, &igb->mac_hdl);
876
877 mac_free(mac);
878
879 return ((status == 0) ? IGB_SUCCESS : IGB_FAILURE);
880 }
881
882 /*
883 * igb_identify_hardware - Identify the type of the chipset
884 */
885 static int
886 igb_identify_hardware(igb_t *igb)
887 {
888 struct e1000_hw *hw = &igb->hw;
889 struct igb_osdep *osdep = &igb->osdep;
890
891 /*
892 * Get the device id
893 */
894 hw->vendor_id =
895 pci_config_get16(osdep->cfg_handle, PCI_CONF_VENID);
896 hw->device_id =
897 pci_config_get16(osdep->cfg_handle, PCI_CONF_DEVID);
898 hw->revision_id =
899 pci_config_get8(osdep->cfg_handle, PCI_CONF_REVID);
900 hw->subsystem_device_id =
901 pci_config_get16(osdep->cfg_handle, PCI_CONF_SUBSYSID);
902 hw->subsystem_vendor_id =
903 pci_config_get16(osdep->cfg_handle, PCI_CONF_SUBVENID);
904
905 /*
906 * Set the mac type of the adapter based on the device id
907 */
908 if (e1000_set_mac_type(hw) != E1000_SUCCESS) {
909 return (IGB_FAILURE);
910 }
911
912 /*
913 * Install adapter capabilities based on mac type
914 */
915 switch (hw->mac.type) {
916 case e1000_82575:
917 igb->capab = &igb_82575_cap;
918 break;
919 case e1000_82576:
920 igb->capab = &igb_82576_cap;
921 break;
922 case e1000_82580:
923 igb->capab = &igb_82580_cap;
924 break;
925 case e1000_i350:
926 igb->capab = &igb_i350_cap;
927 break;
928 case e1000_i210:
929 case e1000_i211:
930 igb->capab = &igb_i210_cap;
931 break;
932 case e1000_i354:
933 igb->capab = &igb_i354_cap;
934 break;
935 default:
936 return (IGB_FAILURE);
937 }
938
939 return (IGB_SUCCESS);
940 }
941
942 /*
943 * igb_regs_map - Map the device registers
944 */
945 static int
946 igb_regs_map(igb_t *igb)
947 {
948 dev_info_t *devinfo = igb->dip;
949 struct e1000_hw *hw = &igb->hw;
950 struct igb_osdep *osdep = &igb->osdep;
951 off_t mem_size;
952
953 /*
954 * First get the size of device registers to be mapped.
955 */
956 if (ddi_dev_regsize(devinfo, IGB_ADAPTER_REGSET, &mem_size) !=
957 DDI_SUCCESS) {
958 return (IGB_FAILURE);
959 }
960
961 /*
962 * Call ddi_regs_map_setup() to map registers
963 */
964 if ((ddi_regs_map_setup(devinfo, IGB_ADAPTER_REGSET,
965 (caddr_t *)&hw->hw_addr, 0,
966 mem_size, &igb_regs_acc_attr,
967 &osdep->reg_handle)) != DDI_SUCCESS) {
968 return (IGB_FAILURE);
969 }
970
971 return (IGB_SUCCESS);
972 }
973
974 /*
975 * igb_init_properties - Initialize driver properties
976 */
977 static void
978 igb_init_properties(igb_t *igb)
979 {
980 /*
981 * Get conf file properties, including link settings,
982 * jumbo frames, ring number, descriptor number, etc.
983 */
984 igb_get_conf(igb);
985 }
986
987 /*
988 * igb_init_driver_settings - Initialize driver settings
989 *
990 * The settings include hardware function pointers, bus information,
991 * rx/tx rings settings, link state, and any other parameters that
992 * need to be setup during driver initialization.
993 */
994 static int
995 igb_init_driver_settings(igb_t *igb)
996 {
997 struct e1000_hw *hw = &igb->hw;
998 igb_rx_ring_t *rx_ring;
999 igb_tx_ring_t *tx_ring;
1000 uint32_t rx_size;
1001 uint32_t tx_size;
1002 int i;
1003
1004 /*
1005 * Initialize chipset specific hardware function pointers
1006 */
1007 if (e1000_setup_init_funcs(hw, B_TRUE) != E1000_SUCCESS) {
1008 return (IGB_FAILURE);
1009 }
1010
1011 /*
1012 * Get bus information
1013 */
1014 if (e1000_get_bus_info(hw) != E1000_SUCCESS) {
1015 return (IGB_FAILURE);
1016 }
1017
1018 /*
1019 * Get the system page size
1020 */
1021 igb->page_size = ddi_ptob(igb->dip, (ulong_t)1);
1022
1023 /*
1024 * Set rx buffer size
1025 * The IP header alignment room is counted in the calculation.
1026 * The rx buffer size is in units of 1K, as required by the
1027 * chipset hardware.
1028 */
1029 rx_size = igb->max_frame_size + IPHDR_ALIGN_ROOM;
1030 igb->rx_buf_size = ((rx_size >> 10) +
1031 ((rx_size & (((uint32_t)1 << 10) - 1)) > 0 ? 1 : 0)) << 10;
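/*
 * For example, assuming the usual 2-byte IPHDR_ALIGN_ROOM: a standard
 * 1500-byte MTU gives a 1522-byte max frame, 1524 bytes with the
 * alignment room, which the calculation above rounds up to a 2K
 * receive buffer.
 */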
1032
1033 /*
1034 * Set tx buffer size
1035 */
1036 tx_size = igb->max_frame_size;
1037 igb->tx_buf_size = ((tx_size >> 10) +
1038 ((tx_size & (((uint32_t)1 << 10) - 1)) > 0 ? 1 : 0)) << 10;
1039
1040 /*
1041 * Initialize rx/tx rings parameters
1042 */
1043 for (i = 0; i < igb->num_rx_rings; i++) {
1044 rx_ring = &igb->rx_rings[i];
1045 rx_ring->index = i;
1046 rx_ring->igb = igb;
1047 }
1048
1049 for (i = 0; i < igb->num_tx_rings; i++) {
1050 tx_ring = &igb->tx_rings[i];
1051 tx_ring->index = i;
1052 tx_ring->igb = igb;
1053 if (igb->tx_head_wb_enable)
1054 tx_ring->tx_recycle = igb_tx_recycle_head_wb;
1055 else
1056 tx_ring->tx_recycle = igb_tx_recycle_legacy;
1057
1058 tx_ring->ring_size = igb->tx_ring_size;
1059 tx_ring->free_list_size = igb->tx_ring_size +
1060 (igb->tx_ring_size >> 1);
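/*
 * The free list is sized at 1.5x the ring; e.g., a 512-descriptor
 * ring gets a 768-entry free list.
 */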
1061 }
1062
1063 /*
1064 * Initialize values of interrupt throttling rates
1065 */
1066 for (i = 1; i < MAX_NUM_EITR; i++)
1067 igb->intr_throttling[i] = igb->intr_throttling[0];
1068
1069 /*
1070 * The initial link state should be "unknown"
1071 */
1072 igb->link_state = LINK_STATE_UNKNOWN;
1073
1074 return (IGB_SUCCESS);
1075 }
1076
1077 /*
1078 * igb_init_locks - Initialize locks
1079 */
1080 static void
1081 igb_init_locks(igb_t *igb)
1082 {
1083 igb_rx_ring_t *rx_ring;
1084 igb_tx_ring_t *tx_ring;
1085 int i;
1086
1087 for (i = 0; i < igb->num_rx_rings; i++) {
1088 rx_ring = &igb->rx_rings[i];
1089 mutex_init(&rx_ring->rx_lock, NULL,
1090 MUTEX_DRIVER, DDI_INTR_PRI(igb->intr_pri));
1091 }
1092
1093 for (i = 0; i < igb->num_tx_rings; i++) {
1094 tx_ring = &igb->tx_rings[i];
1095 mutex_init(&tx_ring->tx_lock, NULL,
1096 MUTEX_DRIVER, DDI_INTR_PRI(igb->intr_pri));
1097 mutex_init(&tx_ring->recycle_lock, NULL,
1098 MUTEX_DRIVER, DDI_INTR_PRI(igb->intr_pri));
1099 mutex_init(&tx_ring->tcb_head_lock, NULL,
1100 MUTEX_DRIVER, DDI_INTR_PRI(igb->intr_pri));
1101 mutex_init(&tx_ring->tcb_tail_lock, NULL,
1102 MUTEX_DRIVER, DDI_INTR_PRI(igb->intr_pri));
1103 }
1104
1105 mutex_init(&igb->gen_lock, NULL,
1106 MUTEX_DRIVER, DDI_INTR_PRI(igb->intr_pri));
1107
1108 mutex_init(&igb->watchdog_lock, NULL,
1109 MUTEX_DRIVER, DDI_INTR_PRI(igb->intr_pri));
1110
1111 mutex_init(&igb->link_lock, NULL,
1112 MUTEX_DRIVER, DDI_INTR_PRI(igb->intr_pri));
1113 }
1114
1115 /*
1116 * igb_destroy_locks - Destroy locks
1117 */
1118 static void
1119 igb_destroy_locks(igb_t *igb)
1120 {
1121 igb_rx_ring_t *rx_ring;
1122 igb_tx_ring_t *tx_ring;
1123 int i;
1124
1125 for (i = 0; i < igb->num_rx_rings; i++) {
1126 rx_ring = &igb->rx_rings[i];
1127 mutex_destroy(&rx_ring->rx_lock);
1128 }
1129
1130 for (i = 0; i < igb->num_tx_rings; i++) {
1131 tx_ring = &igb->tx_rings[i];
1132 mutex_destroy(&tx_ring->tx_lock);
1133 mutex_destroy(&tx_ring->recycle_lock);
1134 mutex_destroy(&tx_ring->tcb_head_lock);
1135 mutex_destroy(&tx_ring->tcb_tail_lock);
1136 }
1137
1138 mutex_destroy(&igb->gen_lock);
1139 mutex_destroy(&igb->watchdog_lock);
1140 mutex_destroy(&igb->link_lock);
1141 }
1142
1143 static int
1144 igb_resume(dev_info_t *devinfo)
1145 {
1146 igb_t *igb;
1147
1148 igb = (igb_t *)ddi_get_driver_private(devinfo);
1149 if (igb == NULL)
1150 return (DDI_FAILURE);
1151
1152 mutex_enter(&igb->gen_lock);
1153
1154 /*
1155 * Enable interrupts
1156 */
1157 if (igb->attach_progress & ATTACH_PROGRESS_ENABLE_INTR) {
1158 if (igb_enable_intrs(igb) != IGB_SUCCESS) {
1159 igb_log(igb, IGB_LOG_ERROR,
1160 "Failed to enable DDI interrupts");
1161 mutex_exit(&igb->gen_lock);
1162 return (DDI_FAILURE);
1163 }
1164 }
1165
1166 if (igb->igb_state & IGB_STARTED) {
1167 if (igb_start(igb, B_FALSE) != IGB_SUCCESS) {
1168 mutex_exit(&igb->gen_lock);
1169 return (DDI_FAILURE);
1170 }
1171
1172 /*
1173 * Enable and start the watchdog timer
1174 */
1175 igb_enable_watchdog_timer(igb);
1176 }
1177
1178 atomic_and_32(&igb->igb_state, ~IGB_SUSPENDED);
1179
1180 mutex_exit(&igb->gen_lock);
1181
1182 return (DDI_SUCCESS);
1183 }
1184
1185 static int
1186 igb_suspend(dev_info_t *devinfo)
1187 {
1188 igb_t *igb;
1189
1190 igb = (igb_t *)ddi_get_driver_private(devinfo);
1191 if (igb == NULL)
1192 return (DDI_FAILURE);
1193
1194 mutex_enter(&igb->gen_lock);
1195
1196 atomic_or_32(&igb->igb_state, IGB_SUSPENDED);
1197
1198 /*
1199 * Disable interrupts
1200 */
1201 if (igb->attach_progress & ATTACH_PROGRESS_ENABLE_INTR) {
1202 (void) igb_disable_intrs(igb);
1203 }
1204
1205 if (!(igb->igb_state & IGB_STARTED)) {
1206 mutex_exit(&igb->gen_lock);
1207 return (DDI_SUCCESS);
1208 }
1209
1210 igb_stop(igb, B_FALSE);
1211
1212 mutex_exit(&igb->gen_lock);
1213
1214 /*
1215 * Disable and stop the watchdog timer
1216 */
1217 igb_disable_watchdog_timer(igb);
1218
1219 return (DDI_SUCCESS);
1220 }
1221
1222 static int
1223 igb_init(igb_t *igb)
1224 {
1225 mutex_enter(&igb->gen_lock);
1226
1227 /*
1228 * Initialize the adapter
1229 */
1230 if (igb_init_adapter(igb) != IGB_SUCCESS) {
1231 mutex_exit(&igb->gen_lock);
1232 igb_fm_ereport(igb, DDI_FM_DEVICE_INVAL_STATE);
1233 ddi_fm_service_impact(igb->dip, DDI_SERVICE_LOST);
1234 return (IGB_FAILURE);
1235 }
1236
1237 mutex_exit(&igb->gen_lock);
1238
1239 return (IGB_SUCCESS);
1240 }
1241
1242 /*
1243 * igb_init_mac_address - Initialize the default MAC address
1244 *
1245 * On success, the MAC address is entered in the igb->hw.mac.addr
1246 * and hw->mac.perm_addr fields and the adapter's RAR(0) receive
1247 * address register.
1248 *
1249 * Important side effects:
1250 * 1. adapter is reset - this is required to put it in a known state.
1251 * 2. all of non-volatile memory (NVM) is read & checksummed - NVM is where
1252 * the MAC address and all default settings are stored, so a valid checksum
1253 * is required.
1254 */
1255 static int
1256 igb_init_mac_address(igb_t *igb)
1257 {
1258 struct e1000_hw *hw = &igb->hw;
1259
1260 ASSERT(mutex_owned(&igb->gen_lock));
1261
1262 /*
1263 * Reset chipset to put the hardware in a known state
1264 * before we try to get MAC address from NVM.
1265 */
1266 if (e1000_reset_hw(hw) != E1000_SUCCESS) {
1267 igb_log(igb, IGB_LOG_ERROR, "Adapter reset failed.");
1268 goto init_mac_fail;
1269 }
1270
1271 /*
1272 * NVM validation
1273 */
1274 if (((igb->hw.mac.type != e1000_i210) &&
1275 (igb->hw.mac.type != e1000_i211)) &&
1276 (e1000_validate_nvm_checksum(hw) < 0)) {
1277 /*
1278 * Some PCI-E parts fail the first check due to
1279 * the link being in a sleep state. Call it again;
1280 * if it fails a second time, it is a real issue.
1281 */
1282 if (e1000_validate_nvm_checksum(hw) < 0) {
1283 igb_log(igb, IGB_LOG_ERROR,
1284 "Invalid NVM checksum. Please contact "
1285 "the vendor to update the NVM.");
1286 goto init_mac_fail;
1287 }
1288 }
1289
1290 /*
1291 * Get the mac address
1292 * This function should handle the SPARC case correctly.
1293 */
1294 if (!igb_find_mac_address(igb)) {
1295 igb_log(igb, IGB_LOG_ERROR, "Failed to get the mac address");
1296 goto init_mac_fail;
1297 }
1298
1299 /* Validate mac address */
1300 if (!is_valid_mac_addr(hw->mac.addr)) {
1301 igb_log(igb, IGB_LOG_ERROR, "Invalid mac address");
1302 goto init_mac_fail;
1303 }
1304
1305 return (IGB_SUCCESS);
1306
1307 init_mac_fail:
1308 return (IGB_FAILURE);
1309 }
1310
1311 /*
1312 * igb_init_adapter - Initialize the adapter
1313 */
1314 static int
1315 igb_init_adapter(igb_t *igb)
1316 {
1317 struct e1000_hw *hw = &igb->hw;
1318 uint32_t pba;
1319 int oemid[2];
1320 uint16_t nvmword;
1321 uint32_t hwm;
1322 uint32_t default_mtu;
1323 u8 pbanum[E1000_PBANUM_LENGTH];
1324 char eepromver[5]; /* f.ff */
1325 int i;
1326
1327 ASSERT(mutex_owned(&igb->gen_lock));
1328
1329 /*
1330 * In order to obtain the default MAC address, this will reset the
1331 * adapter and validate the NVM that the address and many other
1332 * default settings come from.
1333 */
1334 if (igb_init_mac_address(igb) != IGB_SUCCESS) {
1335 igb_log(igb, IGB_LOG_ERROR, "Failed to initialize MAC address");
1336 goto init_adapter_fail;
1337 }
1338
1339 /*
1340 * Packet Buffer Allocation (PBA)
1341 * Writing PBA sets the receive portion of the buffer;
1342 * the remainder is used for the transmit buffer.
1343 */
1344 switch (hw->mac.type) {
1345 case e1000_82575:
1346 pba = E1000_PBA_32K;
1347 break;
1348 case e1000_82576:
1349 pba = E1000_READ_REG(hw, E1000_RXPBS);
1350 pba &= E1000_RXPBS_SIZE_MASK_82576;
1351 break;
1352 case e1000_82580:
1353 case e1000_i350:
1354 case e1000_i354:
1355 pba = E1000_READ_REG(hw, E1000_RXPBS);
1356 pba = e1000_rxpbs_adjust_82580(pba);
1357 break;
1358 case e1000_i210:
1359 case e1000_i211:
1360 pba = E1000_PBA_34K;
break;
1361 default:
1362 break;
1363 }
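/*
 * pba is expressed in KB units here; the fixed 82575 value, for
 * instance, reserves 32 KB of packet buffer for receive.
 */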
1364
1365 /* Special needs in case of Jumbo frames */
1366 default_mtu = igb_get_prop(igb, PROP_DEFAULT_MTU,
1367 MIN_MTU, MAX_MTU, DEFAULT_MTU);
1368 if ((hw->mac.type == e1000_82575) && (default_mtu > ETHERMTU)) {
1369 u32 tx_space, min_tx, min_rx;
1370 pba = E1000_READ_REG(hw, E1000_PBA);
1371 tx_space = pba >> 16;
1372 pba &= 0xffff;
1373 min_tx = (igb->max_frame_size +
1374 sizeof (struct e1000_tx_desc) - ETHERNET_FCS_SIZE) * 2;
1375 min_tx = roundup(min_tx, 1024);
1376 min_tx >>= 10;
1377 min_rx = igb->max_frame_size;
1378 min_rx = roundup(min_rx, 1024);
1379 min_rx >>= 10;
1380 if (tx_space < min_tx &&
1381 ((min_tx - tx_space) < pba)) {
1382 pba = pba - (min_tx - tx_space);
1383 /*
1384 * if short on rx space, rx wins
1385 * and must trump tx adjustment
1386 */
1387 if (pba < min_rx)
1388 pba = min_rx;
1389 }
1390 E1000_WRITE_REG(hw, E1000_PBA, pba);
1391 }
1392
1393 DEBUGOUT1("igb_init: pba=%dK", pba);
1394
1395 /*
1396 * These parameters control the automatic generation (Tx) and
1397 * response (Rx) to Ethernet PAUSE frames.
1398 * - High water mark should allow for at least two frames to be
1399 * received after sending an XOFF.
1400 * - Low water mark works best when it is very near the high water mark.
1401 * This allows the receiver to restart by sending XON when it has
1402 * drained a bit.
1403 */
1404 hwm = min(((pba << 10) * 9 / 10),
1405 ((pba << 10) - 2 * igb->max_frame_size));
1406
1407 if (hw->mac.type < e1000_82576) {
1408 hw->fc.high_water = hwm & 0xFFF8; /* 8-byte granularity */
1409 hw->fc.low_water = hw->fc.high_water - 8;
1410 } else {
1411 hw->fc.high_water = hwm & 0xFFF0; /* 16-byte granularity */
1412 hw->fc.low_water = hw->fc.high_water - 16;
1413 }
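/*
 * A worked example: with a 32 KB rx PBA and a 1522-byte max frame,
 * hwm = min(32768 * 9 / 10, 32768 - 2 * 1522) = 29491; masked to
 * 8-byte granularity this yields high_water = 29488 and
 * low_water = 29480.
 */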
1414
1415 hw->fc.pause_time = E1000_FC_PAUSE_TIME;
1416 hw->fc.send_xon = B_TRUE;
1417
1418 (void) e1000_validate_mdi_setting(hw);
1419
1420 /*
1421 * Reset the chipset hardware the second time to put PBA settings
1422 * into effect.
1423 */
1424 if (e1000_reset_hw(hw) != E1000_SUCCESS) {
1425 igb_log(igb, IGB_LOG_ERROR, "Second reset failed");
1426 goto init_adapter_fail;
1427 }
1428
1429 /*
1430 * Don't wait for auto-negotiation to complete
1431 */
1432 hw->phy.autoneg_wait_to_complete = B_FALSE;
1433
1434 /*
1435 * Copper options
1436 */
1437 if (hw->phy.media_type == e1000_media_type_copper) {
1438 hw->phy.mdix = 0; /* AUTO_ALL_MODES */
1439 hw->phy.disable_polarity_correction = B_FALSE;
1440 hw->phy.ms_type = e1000_ms_hw_default; /* E1000_MASTER_SLAVE */
1441 }
1442
1443 /*
1444 * Initialize link settings
1445 */
1446 (void) igb_setup_link(igb, B_FALSE);
1447
1448 /*
1449 * Configure/Initialize hardware
1450 */
1451 if (e1000_init_hw(hw) != E1000_SUCCESS) {
1452 igb_log(igb, IGB_LOG_ERROR, "Failed to initialize hardware");
1453 goto init_adapter_fail;
1454 }
1455
1456 /*
1457 * Start the link setup timer
1458 */
1459 igb_start_link_timer(igb);
1460
1461 /*
1462 * Disable wakeup control by default
1463 */
1464 E1000_WRITE_REG(hw, E1000_WUC, 0);
1465
1466 /*
1467 * Record phy info in hw struct
1468 */
1469 (void) e1000_get_phy_info(hw);
1470
1471 /*
1472 * Make sure driver has control
1473 */
1474 igb_get_driver_control(hw);
1475
1476 /*
1477 * Restore LED settings to the default from EEPROM
1478 * to meet the standard for Sun platforms.
1479 */
1480 (void) e1000_cleanup_led(hw);
1481
1482 /*
1483 * Setup MSI-X interrupts
1484 */
1485 if (igb->intr_type == DDI_INTR_TYPE_MSIX)
1486 igb->capab->setup_msix(igb);
1487
1488 /*
1489 * Initialize unicast addresses.
1490 */
1491 igb_init_unicst(igb);
1492
1493 /*
1494 * Setup and initialize the mctable structures.
1495 */
1496 igb_setup_multicst(igb);
1497
1498 /*
1499 * Set interrupt throttling rate
1500 */
1501 for (i = 0; i < igb->intr_cnt; i++)
1502 E1000_WRITE_REG(hw, E1000_EITR(i), igb->intr_throttling[i]);
1503
1504 /*
1505 * Read identifying information and place in devinfo.
1506 */
1507 nvmword = 0xffff;
1508 (void) e1000_read_nvm(&igb->hw, NVM_OEM_OFFSET_0, 1, &nvmword);
1509 oemid[0] = (int)nvmword;
1510 (void) e1000_read_nvm(&igb->hw, NVM_OEM_OFFSET_1, 1, &nvmword);
1511 oemid[1] = (int)nvmword;
1512 (void) ddi_prop_update_int_array(DDI_DEV_T_NONE, igb->dip,
1513 "oem-identifier", oemid, 2);
1514
1515 pbanum[0] = '\0';
1516 (void) e1000_read_pba_string(&igb->hw, pbanum, sizeof (pbanum));
1517 if (*pbanum != '\0') {
1518 (void) ddi_prop_update_string(DDI_DEV_T_NONE, igb->dip,
1519 "printed-board-assembly", (char *)pbanum);
1520 }
1521
1522 nvmword = 0xffff;
1523 (void) e1000_read_nvm(&igb->hw, NVM_VERSION, 1, &nvmword);
1524 if ((nvmword & 0xf00) == 0) {
1525 (void) snprintf(eepromver, sizeof (eepromver), "%x.%x",
1526 (nvmword & 0xf000) >> 12, (nvmword & 0xff));
1527 (void) ddi_prop_update_string(DDI_DEV_T_NONE, igb->dip,
1528 "nvm-version", eepromver);
1529 }
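/*
 * As parsed above, the version word carries the major revision in
 * bits 15:12 and the minor revision in bits 7:0, both rendered in
 * hex; e.g., a word of 0x2013 is reported as nvm-version "2.13".
 */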
1530
1531 /*
1532 * Save the state of the phy
1533 */
1534 igb_get_phy_state(igb);
1535
1536 igb_param_sync(igb);
1537
1538 return (IGB_SUCCESS);
1539
1540 init_adapter_fail:
1541 /*
1542 * Reset PHY if possible
1543 */
1544 if (e1000_check_reset_block(hw) == E1000_SUCCESS)
1545 (void) e1000_phy_hw_reset(hw);
1546
1547 return (IGB_FAILURE);
1548 }
1549
1550 /*
1551 * igb_stop_adapter - Stop the adapter
1552 */
1553 static void
1554 igb_stop_adapter(igb_t *igb)
1555 {
1556 struct e1000_hw *hw = &igb->hw;
1557
1558 ASSERT(mutex_owned(&igb->gen_lock));
1559
1560 /* Stop the link setup timer */
1561 igb_stop_link_timer(igb);
1562
1563 /* Tell firmware driver is no longer in control */
1564 igb_release_driver_control(hw);
1565
1566 /*
1567 * Reset the chipset
1568 */
1569 if (e1000_reset_hw(hw) != E1000_SUCCESS) {
1570 igb_fm_ereport(igb, DDI_FM_DEVICE_INVAL_STATE);
1571 ddi_fm_service_impact(igb->dip, DDI_SERVICE_LOST);
1572 }
1573
1574 /*
1575 * e1000_phy_hw_reset is not needed here; the MAC reset above is sufficient
1576 */
1577 }
1578
1579 /*
1580 * igb_reset - Reset the chipset and restart the driver.
1581 *
1582 * It involves stopping and re-starting the chipset,
1583 * and re-configuring the rx/tx rings.
1584 */
1585 static int
1586 igb_reset(igb_t *igb)
1587 {
1588 int i;
1589
1590 mutex_enter(&igb->gen_lock);
1591
1592 ASSERT(igb->igb_state & IGB_STARTED);
1593 atomic_and_32(&igb->igb_state, ~IGB_STARTED);
1594
1595 /*
1596 * Disable the adapter interrupts to stop any rx/tx activities
1597 * before draining pending data and resetting hardware.
1598 */
1599 igb_disable_adapter_interrupts(igb);
1600
1601 /*
1602 * Drain the pending transmit packets
1603 */
1604 (void) igb_tx_drain(igb);
1605
1606 for (i = 0; i < igb->num_rx_rings; i++)
1607 mutex_enter(&igb->rx_rings[i].rx_lock);
1608 for (i = 0; i < igb->num_tx_rings; i++)
1609 mutex_enter(&igb->tx_rings[i].tx_lock);
1610
1611 /*
1612 * Stop the adapter
1613 */
1614 igb_stop_adapter(igb);
1615
1616 /*
1617 * Clean the pending tx data/resources
1618 */
1619 igb_tx_clean(igb);
1620
1621 /*
1622 * Start the adapter
1623 */
1624 if (igb_init_adapter(igb) != IGB_SUCCESS) {
1625 igb_fm_ereport(igb, DDI_FM_DEVICE_INVAL_STATE);
1626 goto reset_failure;
1627 }
1628
1629 /*
1630 * Setup the rx/tx rings
1631 */
1632 igb->tx_ring_init = B_FALSE;
1633 igb_setup_rings(igb);
1634
1635 atomic_and_32(&igb->igb_state, ~(IGB_ERROR | IGB_STALL));
1636
1637 /*
1638 * Enable adapter interrupts
1639 * The interrupts must be enabled after the driver state is START
1640 */
1641 igb->capab->enable_intr(igb);
1642
1643 if (igb_check_acc_handle(igb->osdep.cfg_handle) != DDI_FM_OK)
1644 goto reset_failure;
1645
1646 if (igb_check_acc_handle(igb->osdep.reg_handle) != DDI_FM_OK)
1647 goto reset_failure;
1648
1649 for (i = igb->num_tx_rings - 1; i >= 0; i--)
1650 mutex_exit(&igb->tx_rings[i].tx_lock);
1651 for (i = igb->num_rx_rings - 1; i >= 0; i--)
1652 mutex_exit(&igb->rx_rings[i].rx_lock);
1653
1654 atomic_or_32(&igb->igb_state, IGB_STARTED);
1655
1656 mutex_exit(&igb->gen_lock);
1657
1658 return (IGB_SUCCESS);
1659
1660 reset_failure:
1661 for (i = igb->num_tx_rings - 1; i >= 0; i--)
1662 mutex_exit(&igb->tx_rings[i].tx_lock);
1663 for (i = igb->num_rx_rings - 1; i >= 0; i--)
1664 mutex_exit(&igb->rx_rings[i].rx_lock);
1665
1666 mutex_exit(&igb->gen_lock);
1667
1668 ddi_fm_service_impact(igb->dip, DDI_SERVICE_LOST);
1669
1670 return (IGB_FAILURE);
1671 }
1672
1673 /*
1674 * igb_tx_clean - Clean the pending transmit packets and DMA resources
1675 */
1676 static void
1677 igb_tx_clean(igb_t *igb)
1678 {
1679 igb_tx_ring_t *tx_ring;
1680 tx_control_block_t *tcb;
1681 link_list_t pending_list;
1682 uint32_t desc_num;
1683 int i, j;
1684
1685 LINK_LIST_INIT(&pending_list);
1686
1687 for (i = 0; i < igb->num_tx_rings; i++) {
1688 tx_ring = &igb->tx_rings[i];
1689
1690 mutex_enter(&tx_ring->recycle_lock);
1691
1692 /*
1693 * Clean the pending tx data - the pending packets in the
1694 * work_list that have no chance of being transmitted again.
1695 *
1696 * We must ensure the chipset is stopped or the link is down
1697 * before cleaning the transmit packets.
1698 */
1699 desc_num = 0;
1700 for (j = 0; j < tx_ring->ring_size; j++) {
1701 tcb = tx_ring->work_list[j];
1702 if (tcb != NULL) {
1703 desc_num += tcb->desc_num;
1704
1705 tx_ring->work_list[j] = NULL;
1706
1707 igb_free_tcb(tcb);
1708
1709 LIST_PUSH_TAIL(&pending_list, &tcb->link);
1710 }
1711 }
1712
1713 if (desc_num > 0) {
1714 atomic_add_32(&tx_ring->tbd_free, desc_num);
1715 ASSERT(tx_ring->tbd_free == tx_ring->ring_size);
1716
1717 /*
1718 * Reset the head and tail pointers of the tbd ring;
1719 * Reset the head write-back if it is enabled.
1720 */
1721 tx_ring->tbd_head = 0;
1722 tx_ring->tbd_tail = 0;
1723 if (igb->tx_head_wb_enable)
1724 *tx_ring->tbd_head_wb = 0;
1725
1726 E1000_WRITE_REG(&igb->hw, E1000_TDH(tx_ring->index), 0);
1727 E1000_WRITE_REG(&igb->hw, E1000_TDT(tx_ring->index), 0);
1728 }
1729
1730 mutex_exit(&tx_ring->recycle_lock);
1731
1732 /*
1733 * Add the tx control blocks in the pending list to
1734 * the free list.
1735 */
1736 igb_put_free_list(tx_ring, &pending_list);
1737 }
1738 }
1739
1740 /*
1741 * igb_tx_drain - Drain the tx rings to allow pending packets to be transmitted
1742 */
1743 static boolean_t
1744 igb_tx_drain(igb_t *igb)
1745 {
1746 igb_tx_ring_t *tx_ring;
1747 boolean_t done;
1748 int i, j;
1749
1750 /*
1751 * Wait for a specific time to allow pending tx packets
1752 * to be transmitted.
1753 *
1754 * Check the counter tbd_free to see if transmission is done.
1755 * No lock protection is needed here.
1756 *
1757 * Return B_TRUE if all pending packets have been transmitted;
1758 * otherwise return B_FALSE.
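*
* The total bounded wait is TX_DRAIN_TIME milliseconds, since each
* pass below sleeps for 1 ms.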
1759 */
1760 for (i = 0; i < TX_DRAIN_TIME; i++) {
1761
1762 done = B_TRUE;
1763 for (j = 0; j < igb->num_tx_rings; j++) {
1764 tx_ring = &igb->tx_rings[j];
1765 done = done &&
1766 (tx_ring->tbd_free == tx_ring->ring_size);
1767 }
1768
1769 if (done)
1770 break;
1771
1772 msec_delay(1);
1773 }
1774
1775 return (done);
1776 }
1777
1778 /*
1779 * igb_rx_drain - Wait for all rx buffers to be released by upper layer
1780 */
1781 static boolean_t
1782 igb_rx_drain(igb_t *igb)
1783 {
1784 boolean_t done;
1785 int i;
1786
1787 /*
1788 * Poll the rx free list to check whether the rx buffers held by
1789 * the upper layer have been released.
1790 *
1791 * Check the counter rcb_free to see if all pending buffers are
1792 * released. No lock protection is needed here.
1793 *
1794 * Return B_TRUE if all pending buffers have been released;
1795 * otherwise return B_FALSE.
1796 */
1797 for (i = 0; i < RX_DRAIN_TIME; i++) {
1798 done = (igb->rcb_pending == 0);
1799
1800 if (done)
1801 break;
1802
1803 msec_delay(1);
1804 }
1805
1806 return (done);
1807 }
1808
1809 /*
1810 * igb_start - Start the driver/chipset
1811 */
1812 int
1813 igb_start(igb_t *igb, boolean_t alloc_buffer)
1814 {
1815 int i;
1816
1817 ASSERT(mutex_owned(&igb->gen_lock));
1818
1819 if (alloc_buffer) {
1820 if (igb_alloc_rx_data(igb) != IGB_SUCCESS) {
1821 igb_log(igb, IGB_LOG_ERROR,
1822 "Failed to allocate software receive rings");
1823 return (IGB_FAILURE);
1824 }
1825
1826 /* Allocate buffers for all the rx/tx rings */
1827 if (igb_alloc_dma(igb) != IGB_SUCCESS) {
1828 igb_log(igb, IGB_LOG_ERROR,
1829 "Failed to allocate DMA resource");
1830 return (IGB_FAILURE);
1831 }
1832
1833 igb->tx_ring_init = B_TRUE;
1834 } else {
1835 igb->tx_ring_init = B_FALSE;
1836 }
1837
1838 for (i = 0; i < igb->num_rx_rings; i++)
1839 mutex_enter(&igb->rx_rings[i].rx_lock);
1840 for (i = 0; i < igb->num_tx_rings; i++)
1841 mutex_enter(&igb->tx_rings[i].tx_lock);
1842
1843 /*
1844 * Start the adapter
1845 */
1846 if ((igb->attach_progress & ATTACH_PROGRESS_INIT_ADAPTER) == 0) {
1847 if (igb_init_adapter(igb) != IGB_SUCCESS) {
1848 igb_fm_ereport(igb, DDI_FM_DEVICE_INVAL_STATE);
1849 goto start_failure;
1850 }
1851 igb->attach_progress |= ATTACH_PROGRESS_INIT_ADAPTER;
1852 }
1853
1854 /*
1855 * Setup the rx/tx rings
1856 */
1857 igb_setup_rings(igb);
1858
1859 /*
1860 * Enable adapter interrupts
1861 * The interrupts must be enabled after the driver state is START
1862 */
1863 igb->capab->enable_intr(igb);
1864
1865 if (igb_check_acc_handle(igb->osdep.cfg_handle) != DDI_FM_OK)
1866 goto start_failure;
1867
1868 if (igb_check_acc_handle(igb->osdep.reg_handle) != DDI_FM_OK)
1869 goto start_failure;
1870
1871 if (igb->hw.mac.type == e1000_i350)
1872 (void) e1000_set_eee_i350(&igb->hw, B_FALSE, B_FALSE);
1873 else if (igb->hw.mac.type == e1000_i354)
1874 (void) e1000_set_eee_i354(&igb->hw, B_FALSE, B_FALSE);
1875
1876 for (i = igb->num_tx_rings - 1; i >= 0; i--)
1877 mutex_exit(&igb->tx_rings[i].tx_lock);
1878 for (i = igb->num_rx_rings - 1; i >= 0; i--)
1879 mutex_exit(&igb->rx_rings[i].rx_lock);
1880
1881 return (IGB_SUCCESS);
1882
1883 start_failure:
1884 for (i = igb->num_tx_rings - 1; i >= 0; i--)
1885 mutex_exit(&igb->tx_rings[i].tx_lock);
1886 for (i = igb->num_rx_rings - 1; i >= 0; i--)
1887 mutex_exit(&igb->rx_rings[i].rx_lock);
1888
1889 ddi_fm_service_impact(igb->dip, DDI_SERVICE_LOST);
1890
1891 return (IGB_FAILURE);
1892 }
1893
1894 /*
1895 * igb_stop - Stop the driver/chipset
1896 */
1897 void
1898 igb_stop(igb_t *igb, boolean_t free_buffer)
1899 {
1900 int i;
1901
1902 ASSERT(mutex_owned(&igb->gen_lock));
1903
1904 igb->attach_progress &= ~ATTACH_PROGRESS_INIT_ADAPTER;
1905
1906 /*
1907 * Disable the adapter interrupts
1908 */
1909 igb_disable_adapter_interrupts(igb);
1910
1911 /*
1912 * Drain the pending tx packets
1913 */
1914 (void) igb_tx_drain(igb);
1915
1916 for (i = 0; i < igb->num_rx_rings; i++)
1917 mutex_enter(&igb->rx_rings[i].rx_lock);
1918 for (i = 0; i < igb->num_tx_rings; i++)
1919 mutex_enter(&igb->tx_rings[i].tx_lock);
1920
1921 /*
1922 * Stop the adapter
1923 */
1924 igb_stop_adapter(igb);
1925
1926 /*
1927 * Clean the pending tx data/resources
1928 */
1929 igb_tx_clean(igb);
1930
1931 for (i = igb->num_tx_rings - 1; i >= 0; i--)
1932 mutex_exit(&igb->tx_rings[i].tx_lock);
1933 for (i = igb->num_rx_rings - 1; i >= 0; i--)
1934 mutex_exit(&igb->rx_rings[i].rx_lock);
1935
1936 if (igb_check_acc_handle(igb->osdep.reg_handle) != DDI_FM_OK)
1937 ddi_fm_service_impact(igb->dip, DDI_SERVICE_LOST);
1938
1939 if (igb->link_state == LINK_STATE_UP) {
1940 igb->link_state = LINK_STATE_UNKNOWN;
1941 mac_link_update(igb->mac_hdl, igb->link_state);
1942 }
1943
1944 if (free_buffer) {
1945 /*
1946 * Release the DMA/memory resources of rx/tx rings
1947 */
1948 igb_free_dma(igb);
1949 igb_free_rx_data(igb);
1950 }
1951 }
1952
1953 /*
1954 * igb_alloc_rings - Allocate memory space for rx/tx rings
1955 */
1956 static int
1957 igb_alloc_rings(igb_t *igb)
1958 {
1959 /*
1960 * Allocate memory space for rx rings
1961 */
1962 igb->rx_rings = kmem_zalloc(
1963 sizeof (igb_rx_ring_t) * igb->num_rx_rings,
1964 KM_NOSLEEP);
1965
1966 if (igb->rx_rings == NULL) {
1967 return (IGB_FAILURE);
1968 }
1969
1970 /*
1971 * Allocate memory space for tx rings
1972 */
1973 igb->tx_rings = kmem_zalloc(
1974 sizeof (igb_tx_ring_t) * igb->num_tx_rings,
1975 KM_NOSLEEP);
1976
1977 if (igb->tx_rings == NULL) {
1978 kmem_free(igb->rx_rings,
1979 sizeof (igb_rx_ring_t) * igb->num_rx_rings);
1980 igb->rx_rings = NULL;
1981 return (IGB_FAILURE);
1982 }
1983
1984 /*
1985 * Allocate memory space for rx ring groups
1986 */
1987 igb->rx_groups = kmem_zalloc(
1988 sizeof (igb_rx_group_t) * igb->num_rx_groups,
1989 KM_NOSLEEP);
1990
1991 if (igb->rx_groups == NULL) {
1992 kmem_free(igb->rx_rings,
1993 sizeof (igb_rx_ring_t) * igb->num_rx_rings);
1994 kmem_free(igb->tx_rings,
1995 sizeof (igb_tx_ring_t) * igb->num_tx_rings);
1996 igb->rx_rings = NULL;
1997 igb->tx_rings = NULL;
1998 return (IGB_FAILURE);
1999 }
2000
2001 return (IGB_SUCCESS);
2002 }
2003
2004 /*
2005 * igb_free_rings - Free the memory space of rx/tx rings.
2006 */
2007 static void
2008 igb_free_rings(igb_t *igb)
2009 {
2010 if (igb->rx_rings != NULL) {
2011 kmem_free(igb->rx_rings,
2012 sizeof (igb_rx_ring_t) * igb->num_rx_rings);
2013 igb->rx_rings = NULL;
2014 }
2015
2016 if (igb->tx_rings != NULL) {
2017 kmem_free(igb->tx_rings,
2018 sizeof (igb_tx_ring_t) * igb->num_tx_rings);
2019 igb->tx_rings = NULL;
2020 }
2021
2022 if (igb->rx_groups != NULL) {
2023 kmem_free(igb->rx_groups,
2024 sizeof (igb_rx_group_t) * igb->num_rx_groups);
2025 igb->rx_groups = NULL;
2026 }
2027 }
2028
2029 static int
2030 igb_alloc_rx_data(igb_t *igb)
2031 {
2032 igb_rx_ring_t *rx_ring;
2033 int i;
2034
2035 for (i = 0; i < igb->num_rx_rings; i++) {
2036 rx_ring = &igb->rx_rings[i];
2037 if (igb_alloc_rx_ring_data(rx_ring) != IGB_SUCCESS)
2038 goto alloc_rx_rings_failure;
2039 }
2040 return (IGB_SUCCESS);
2041
2042 alloc_rx_rings_failure:
2043 igb_free_rx_data(igb);
2044 return (IGB_FAILURE);
2045 }
2046
2047 static void
2048 igb_free_rx_data(igb_t *igb)
2049 {
2050 igb_rx_ring_t *rx_ring;
2051 igb_rx_data_t *rx_data;
2052 int i;
2053
2054 for (i = 0; i < igb->num_rx_rings; i++) {
2055 rx_ring = &igb->rx_rings[i];
2056
2057 mutex_enter(&igb->rx_pending_lock);
2058 rx_data = rx_ring->rx_data;
2059
2060 if (rx_data != NULL) {
2061 rx_data->flag |= IGB_RX_STOPPED;
2062
2063 if (rx_data->rcb_pending == 0) {
2064 igb_free_rx_ring_data(rx_data);
2065 rx_ring->rx_data = NULL;
2066 }
2067 }
2068
2069 mutex_exit(&igb->rx_pending_lock);
2070 }
2071 }
2072
2073 /*
2074 * igb_setup_rings - Setup rx/tx rings
2075 */
2076 static void
2077 igb_setup_rings(igb_t *igb)
2078 {
2079 /*
2080 * Setup the rx/tx rings, including the following:
2081 *
2082 * 1. Setup the descriptor ring and the control block buffers;
2083 * 2. Initialize necessary registers for receive/transmit;
2084 * 3. Initialize software pointers/parameters for receive/transmit;
2085 */
2086 igb_setup_rx(igb);
2087
2088 igb_setup_tx(igb);
2089 }
2090
2091 static void
2092 igb_setup_rx_ring(igb_rx_ring_t *rx_ring)
2093 {
2094 igb_t *igb = rx_ring->igb;
2095 igb_rx_data_t *rx_data = rx_ring->rx_data;
2096 struct e1000_hw *hw = &igb->hw;
2097 rx_control_block_t *rcb;
2098 union e1000_adv_rx_desc *rbd;
2099 uint32_t size;
2100 uint32_t buf_low;
2101 uint32_t buf_high;
2102 uint32_t rxdctl;
2103 int i;
2104
2105 ASSERT(mutex_owned(&rx_ring->rx_lock));
2106 ASSERT(mutex_owned(&igb->gen_lock));
2107
2108 /*
2109 * Initialize descriptor ring with buffer addresses
2110 */
2111 for (i = 0; i < igb->rx_ring_size; i++) {
2112 rcb = rx_data->work_list[i];
2113 rbd = &rx_data->rbd_ring[i];
2114
2115 rbd->read.pkt_addr = rcb->rx_buf.dma_address;
2116 rbd->read.hdr_addr = NULL;
2117 }
2118
2119 /*
2120 * Initialize the base address registers
2121 */
2122 buf_low = (uint32_t)rx_data->rbd_area.dma_address;
2123 buf_high = (uint32_t)(rx_data->rbd_area.dma_address >> 32);
2124 E1000_WRITE_REG(hw, E1000_RDBAH(rx_ring->index), buf_high);
2125 E1000_WRITE_REG(hw, E1000_RDBAL(rx_ring->index), buf_low);
2126
2127 /*
2128 * Initialize the length register
2129 */
2130 size = rx_data->ring_size * sizeof (union e1000_adv_rx_desc);
2131 E1000_WRITE_REG(hw, E1000_RDLEN(rx_ring->index), size);
2132
2133 /*
2134 * Initialize buffer size & descriptor type
2135 */
2136 E1000_WRITE_REG(hw, E1000_SRRCTL(rx_ring->index),
2137 ((igb->rx_buf_size >> E1000_SRRCTL_BSIZEPKT_SHIFT) |
2138 E1000_SRRCTL_DESCTYPE_ADV_ONEBUF));
2139
2140 /*
2141 * Setup the Receive Descriptor Control Register (RXDCTL)
2142 */
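/*
 * Added note (derived from the shifts below): RXDCTL packs three
 * descriptor thresholds into one register:
 *
 *	bits  4:0	PTHRESH - prefetch threshold	(16 here)
 *	bits 12:8	HTHRESH - host threshold	(8 here)
 *	bits 20:16	WTHRESH - write-back threshold	(1 here)
 *
 * so 16 | (8 << 8) | (1 << 16) == 0x10810 is OR-ed onto the masked
 * register value together with the queue-enable bit.
 */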
2143 rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(rx_ring->index));
2144 rxdctl &= igb->capab->rxdctl_mask;
2145 rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
2146 rxdctl |= 16; /* pthresh */
2147 rxdctl |= 8 << 8; /* hthresh */
2148 rxdctl |= 1 << 16; /* wthresh */
2149 E1000_WRITE_REG(hw, E1000_RXDCTL(rx_ring->index), rxdctl);
2150
2151 rx_data->rbd_next = 0;
2152 }
2153
2154 static void
2155 igb_setup_rx(igb_t *igb)
2156 {
2157 igb_rx_ring_t *rx_ring;
2158 igb_rx_data_t *rx_data;
2159 igb_rx_group_t *rx_group;
2160 struct e1000_hw *hw = &igb->hw;
2161 uint32_t rctl, rxcsum;
2162 uint32_t ring_per_group;
2163 int i;
2164
2165 /*
2166 * Setup the Receive Control Register (RCTL), and enable the
2167 * receiver. The initial configuration is to: enable the receiver,
2168 * accept broadcasts, discard bad packets, accept long packets,
2169 * disable VLAN filter checking, and set receive buffer size to
2170 * 2k. For 82575, also set the receive descriptor minimum
2171 * threshold size to 1/2 the ring.
2172 */
2173 rctl = E1000_READ_REG(hw, E1000_RCTL);
2174
2175 /*
2176 * Clear the field used for wakeup control. This driver doesn't do
2177 * wakeup, but the field is cleared here for completeness.
2178 */
2179 rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
2180 rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC);
2181
2182 rctl |= (E1000_RCTL_EN | /* Enable Receive Unit */
2183 E1000_RCTL_BAM | /* Accept Broadcast Packets */
2184 E1000_RCTL_LPE | /* Large Packet Enable */
2185 /* Multicast filter offset */
2186 (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT) |
2187 E1000_RCTL_RDMTS_HALF | /* rx descriptor threshold */
2188 E1000_RCTL_SECRC); /* Strip Ethernet CRC */
2189
2190 for (i = 0; i < igb->num_rx_groups; i++) {
2191 rx_group = &igb->rx_groups[i];
2192 rx_group->index = i;
2193 rx_group->igb = igb;
2194 }
2195
2196 /*
2197 * Set up all rx descriptor rings - must be done before the receive
2198 * unit is enabled.
2199 */
2200 ring_per_group = igb->num_rx_rings / igb->num_rx_groups;
2201 for (i = 0; i < igb->num_rx_rings; i++) {
2202 rx_ring = &igb->rx_rings[i];
2203 igb_setup_rx_ring(rx_ring);
2204
2205 /*
2206 * Map a ring to a group by assigning a group index
2207 */
2208 rx_ring->group_index = i / ring_per_group;
2209 }
2210
2211 /*
2212 * Setup the Rx Long Packet Max Length register
2213 */
2214 E1000_WRITE_REG(hw, E1000_RLPML, igb->max_frame_size);
2215
2216 /*
2217 * Hardware checksum settings
2218 */
2219 if (igb->rx_hcksum_enable) {
2220 rxcsum =
2221 E1000_RXCSUM_TUOFL | /* TCP/UDP checksum */
2222 E1000_RXCSUM_IPOFL; /* IP checksum */
2223
2224 E1000_WRITE_REG(hw, E1000_RXCSUM, rxcsum);
2225 }
2226
2227 /*
2228 * Setup classify and RSS for multiple receive queues
2229 */
2230 switch (igb->vmdq_mode) {
2231 case E1000_VMDQ_OFF:
2232 /*
2233 * One ring group; RSS is needed only when more than
2234 * one ring is enabled.
2235 */
2236 if (igb->num_rx_rings > 1)
2237 igb_setup_rss(igb);
2238 break;
2239 case E1000_VMDQ_MAC:
2240 /*
2241 * Multiple groups, each group has one ring,
2242 * only the MAC classification is needed.
2243 */
2244 igb_setup_mac_classify(igb);
2245 break;
2246 case E1000_VMDQ_MAC_RSS:
2247 /*
2248 * Multiple groups and multiple rings, both
2249 * MAC classification and RSS are needed.
2250 */
2251 igb_setup_mac_rss_classify(igb);
2252 break;
2253 }
2254
2255 /*
2256 * Enable the receive unit - must be done after all
2257 * the rx setup above.
2258 */
2259 E1000_WRITE_REG(hw, E1000_RCTL, rctl);
2260
2261 /*
2262 * Initialize all adapter ring head & tail pointers - must
2263 * be done after receive unit is enabled
2264 */
2265 for (i = 0; i < igb->num_rx_rings; i++) {
2266 rx_ring = &igb->rx_rings[i];
2267 rx_data = rx_ring->rx_data;
2268 E1000_WRITE_REG(hw, E1000_RDH(i), 0);
2269 E1000_WRITE_REG(hw, E1000_RDT(i), rx_data->ring_size - 1);
2270 }
2271
2272 /*
2273 * 82575 with manageability enabled needs a special flush to make
2274 * sure the fifos start clean.
2275 */
2276 if ((hw->mac.type == e1000_82575) &&
2277 (E1000_READ_REG(hw, E1000_MANC) & E1000_MANC_RCV_TCO_EN)) {
2278 e1000_rx_fifo_flush_82575(hw);
2279 }
2280 }
2281
2282 static void
2283 igb_setup_tx_ring(igb_tx_ring_t *tx_ring)
2284 {
2285 igb_t *igb = tx_ring->igb;
2286 struct e1000_hw *hw = &igb->hw;
2287 uint32_t size;
2288 uint32_t buf_low;
2289 uint32_t buf_high;
2290 uint32_t reg_val;
2291
2292 ASSERT(mutex_owned(&tx_ring->tx_lock));
2293 ASSERT(mutex_owned(&igb->gen_lock));
2294
2295
2296 /*
2297 * Initialize the length register
2298 */
2299 size = tx_ring->ring_size * sizeof (union e1000_adv_tx_desc);
2300 E1000_WRITE_REG(hw, E1000_TDLEN(tx_ring->index), size);
2301
2302 /*
2303 * Initialize the base address registers
2304 */
2305 buf_low = (uint32_t)tx_ring->tbd_area.dma_address;
2306 buf_high = (uint32_t)(tx_ring->tbd_area.dma_address >> 32);
2307 E1000_WRITE_REG(hw, E1000_TDBAL(tx_ring->index), buf_low);
2308 E1000_WRITE_REG(hw, E1000_TDBAH(tx_ring->index), buf_high);
2309
2310 /*
2311 * Setup head & tail pointers
2312 */
2313 E1000_WRITE_REG(hw, E1000_TDH(tx_ring->index), 0);
2314 E1000_WRITE_REG(hw, E1000_TDT(tx_ring->index), 0);
2315
2316 /*
2317 * Setup head write-back
2318 */
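/*
 * Added background sketch: with head write-back enabled, the hardware
 * DMAs the current tx head index into a driver-supplied memory word,
 * so tx recycling can read *tbd_head_wb instead of polling the TDH
 * register. The word lives in the one extra descriptor slot allocated
 * past the tail of the tbd ring, i.e. at tbd_area.address + size, as
 * set up below.
 */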
2319 if (igb->tx_head_wb_enable) {
2320 /*
2321 * The memory of the head write-back is allocated using
2322 * the extra tbd beyond the tail of the tbd ring.
2323 */
2324 tx_ring->tbd_head_wb = (uint32_t *)
2325 ((uintptr_t)tx_ring->tbd_area.address + size);
2326 *tx_ring->tbd_head_wb = 0;
2327
2328 buf_low = (uint32_t)
2329 (tx_ring->tbd_area.dma_address + size);
2330 buf_high = (uint32_t)
2331 ((tx_ring->tbd_area.dma_address + size) >> 32);
2332
2333 /* Set the head write-back enable bit */
2334 buf_low |= E1000_TX_HEAD_WB_ENABLE;
2335
2336 E1000_WRITE_REG(hw, E1000_TDWBAL(tx_ring->index), buf_low);
2337 E1000_WRITE_REG(hw, E1000_TDWBAH(tx_ring->index), buf_high);
2338
2339 /*
2340 * Turn off relaxed ordering for head write back or it will
2341 * cause problems with the tx recycling
2342 */
2343 reg_val = E1000_READ_REG(hw,
2344 E1000_DCA_TXCTRL(tx_ring->index));
2345 reg_val &= ~E1000_DCA_TXCTRL_TX_WB_RO_EN;
2346 E1000_WRITE_REG(hw,
2347 E1000_DCA_TXCTRL(tx_ring->index), reg_val);
2348 } else {
2349 tx_ring->tbd_head_wb = NULL;
2350 }
2351
2352 tx_ring->tbd_head = 0;
2353 tx_ring->tbd_tail = 0;
2354 tx_ring->tbd_free = tx_ring->ring_size;
2355
2356 if (igb->tx_ring_init == B_TRUE) {
2357 tx_ring->tcb_head = 0;
2358 tx_ring->tcb_tail = 0;
2359 tx_ring->tcb_free = tx_ring->free_list_size;
2360 }
2361
2362 /*
2363 * Enable TXDCTL per queue
2364 */
2365 reg_val = E1000_READ_REG(hw, E1000_TXDCTL(tx_ring->index));
2366 reg_val |= E1000_TXDCTL_QUEUE_ENABLE;
2367 E1000_WRITE_REG(hw, E1000_TXDCTL(tx_ring->index), reg_val);
2368
2369 /*
2370 * Initialize hardware checksum offload settings
2371 */
2372 bzero(&tx_ring->tx_context, sizeof (tx_context_t));
2373 }
2374
2375 static void
2376 igb_setup_tx(igb_t *igb)
2377 {
2378 igb_tx_ring_t *tx_ring;
2379 struct e1000_hw *hw = &igb->hw;
2380 uint32_t reg_val;
2381 int i;
2382
2383 for (i = 0; i < igb->num_tx_rings; i++) {
2384 tx_ring = &igb->tx_rings[i];
2385 igb_setup_tx_ring(tx_ring);
2386 }
2387
2388 /*
2389 * Setup the Transmit Control Register (TCTL)
2390 */
2391 reg_val = E1000_READ_REG(hw, E1000_TCTL);
2392 reg_val &= ~E1000_TCTL_CT;
2393 reg_val |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
2394 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
2395
2396 /* Enable transmits */
2397 reg_val |= E1000_TCTL_EN;
2398
2399 E1000_WRITE_REG(hw, E1000_TCTL, reg_val);
2400 }
2401
2402 /*
2403 * igb_setup_rss - Setup receive-side scaling feature
2404 */
2405 static void
2406 igb_setup_rss(igb_t *igb)
2407 {
2408 struct e1000_hw *hw = &igb->hw;
2409 uint32_t i, mrqc, rxcsum;
2410 int shift = 0;
2411 uint32_t random;
2412 union e1000_reta {
2413 uint32_t dword;
2414 uint8_t bytes[4];
2415 } reta;
2416
2417 /* Setup the Redirection Table */
2418 if (hw->mac.type == e1000_82576) {
2419 shift = 3;
2420 } else if (hw->mac.type == e1000_82575) {
2421 shift = 6;
2422 }
2423 for (i = 0; i < (32 * 4); i++) {
2424 reta.bytes[i & 3] = (i % igb->num_rx_rings) << shift;
2425 if ((i & 3) == 3) {
2426 E1000_WRITE_REG(hw,
2427 (E1000_RETA(0) + (i & ~3)), reta.dword);
2428 }
2429 }
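/*
 * Worked example (illustrative, assuming 4 rx rings on an 82575,
 * where shift == 6): the loop writes 128 one-byte RETA entries
 * cycling through the queue indices, each shifted into the high bits
 * of its byte: 0x00, 0x40, 0x80, 0xC0, 0x00, ... packed four at a
 * time into RETA(0) through RETA(31).
 */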
2430
2431 /* Fill out hash function seeds */
2432 for (i = 0; i < 10; i++) {
2433 (void) random_get_pseudo_bytes((uint8_t *)&random,
2434 sizeof (uint32_t));
2435 E1000_WRITE_REG(hw, E1000_RSSRK(i), random);
2436 }
2437
2438 /* Setup the Multiple Receive Queue Control register */
2439 mrqc = E1000_MRQC_ENABLE_RSS_4Q;
2440 mrqc |= (E1000_MRQC_RSS_FIELD_IPV4 |
2441 E1000_MRQC_RSS_FIELD_IPV4_TCP |
2442 E1000_MRQC_RSS_FIELD_IPV6 |
2443 E1000_MRQC_RSS_FIELD_IPV6_TCP |
2444 E1000_MRQC_RSS_FIELD_IPV4_UDP |
2445 E1000_MRQC_RSS_FIELD_IPV6_UDP |
2446 E1000_MRQC_RSS_FIELD_IPV6_UDP_EX |
2447 E1000_MRQC_RSS_FIELD_IPV6_TCP_EX);
2448
2449 E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
2450
2451 /*
2452 * Disable Packet Checksum to enable RSS for multiple receive queues.
2453 *
2454 * The Packet Checksum is not the Ethernet CRC. It is another kind of
2455 * checksum offloading provided by the 82575 chipset besides the IP
2456 * header checksum offloading and the TCP/UDP checksum offloading.
2457 * The Packet Checksum is by default computed over the entire packet
2458 * from the first byte of the DA through the last byte of the CRC,
2459 * including the Ethernet and IP headers.
2460 *
2461 * It is a hardware limitation that Packet Checksum is mutually
2462 * exclusive with RSS.
2463 */
2464 rxcsum = E1000_READ_REG(hw, E1000_RXCSUM);
2465 rxcsum |= E1000_RXCSUM_PCSD;
2466 E1000_WRITE_REG(hw, E1000_RXCSUM, rxcsum);
2467 }
2468
2469 /*
2470 * igb_setup_mac_rss_classify - Setup MAC classification and rss
2471 */
2472 static void
2473 igb_setup_mac_rss_classify(igb_t *igb)
2474 {
2475 struct e1000_hw *hw = &igb->hw;
2476 uint32_t i, mrqc, vmdctl, rxcsum;
2477 uint32_t ring_per_group;
2478 int shift_group0, shift_group1;
2479 uint32_t random;
2480 union e1000_reta {
2481 uint32_t dword;
2482 uint8_t bytes[4];
2483 } reta;
2484
2485 ring_per_group = igb->num_rx_rings / igb->num_rx_groups;
2486
2487 /* Setup the Redirection Table, it is shared between two groups */
2488 shift_group0 = 2;
2489 shift_group1 = 6;
2490 for (i = 0; i < (32 * 4); i++) {
2491 reta.bytes[i & 3] = ((i % ring_per_group) << shift_group0) |
2492 ((ring_per_group + (i % ring_per_group)) << shift_group1);
2493 if ((i & 3) == 3) {
2494 E1000_WRITE_REG(hw,
2495 (E1000_RETA(0) + (i & ~3)), reta.dword);
2496 }
2497 }
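/*
 * Worked example (illustrative, assuming 2 rings per group): each
 * RETA byte carries two queue indices, group 0's at shift 2 and
 * group 1's at shift 6, so the bytes alternate between
 * (0 << 2) | (2 << 6) == 0x80 and (1 << 2) | (3 << 6) == 0xC4.
 */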
2498
2499 /* Fill out hash function seeds */
2500 for (i = 0; i < 10; i++) {
2501 (void) random_get_pseudo_bytes((uint8_t *)&random,
2502 sizeof (uint32_t));
2503 E1000_WRITE_REG(hw, E1000_RSSRK(i), random);
2504 }
2505
2506 /*
2507 * Setup the Multiple Receive Queue Control register,
2508 * enable VMDq based on packet destination MAC address and RSS.
2509 */
2510 mrqc = E1000_MRQC_ENABLE_VMDQ_MAC_RSS_GROUP;
2511 mrqc |= (E1000_MRQC_RSS_FIELD_IPV4 |
2512 E1000_MRQC_RSS_FIELD_IPV4_TCP |
2513 E1000_MRQC_RSS_FIELD_IPV6 |
2514 E1000_MRQC_RSS_FIELD_IPV6_TCP |
2515 E1000_MRQC_RSS_FIELD_IPV4_UDP |
2516 E1000_MRQC_RSS_FIELD_IPV6_UDP |
2517 E1000_MRQC_RSS_FIELD_IPV6_UDP_EX |
2518 E1000_MRQC_RSS_FIELD_IPV6_TCP_EX);
2519
2520 E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
2521
2522
2523 /* Define the default group and default queues */
2524 vmdctl = E1000_VMDQ_MAC_GROUP_DEFAULT_QUEUE;
2525 E1000_WRITE_REG(hw, E1000_VT_CTL, vmdctl);
2526
2527 /*
2528 * Disable Packet Checksum to enable RSS for multiple receive queues.
2529 *
2530 * The Packet Checksum is not the Ethernet CRC. It is another kind of
2531 * checksum offloading provided by the 82575 chipset besides the IP
2532 * header checksum offloading and the TCP/UDP checksum offloading.
2533 * The Packet Checksum is by default computed over the entire packet
2534 * from the first byte of the DA through the last byte of the CRC,
2535 * including the Ethernet and IP headers.
2536 *
2537 * It is a hardware limitation that Packet Checksum is mutually
2538 * exclusive with RSS.
2539 */
2540 rxcsum = E1000_READ_REG(hw, E1000_RXCSUM);
2541 rxcsum |= E1000_RXCSUM_PCSD;
2542 E1000_WRITE_REG(hw, E1000_RXCSUM, rxcsum);
2543 }
2544
2545 /*
2546 * igb_setup_mac_classify - Setup MAC classification feature
2547 */
2548 static void
2549 igb_setup_mac_classify(igb_t *igb)
2550 {
2551 struct e1000_hw *hw = &igb->hw;
2552 uint32_t mrqc, rxcsum;
2553
2554 /*
2555 * Setup the Multiple Receive Queue Control register,
2556 * enable VMDq based on packet destination MAC address.
2557 */
2558 mrqc = E1000_MRQC_ENABLE_VMDQ_MAC_GROUP;
2559 E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
2560
2561 /*
2562 * Disable Packet Checksum to enable RSS for multiple receive queues.
2563 *
2564 * The Packet Checksum is not the Ethernet CRC. It is another kind of
2565 * checksum offloading provided by the 82575 chipset besides the IP
2566 * header checksum offloading and the TCP/UDP checksum offloading.
2567 * The Packet Checksum is by default computed over the entire packet
2568 * from the first byte of the DA through the last byte of the CRC,
2569 * including the Ethernet and IP headers.
2570 *
2571 * It is a hardware limitation that Packet Checksum is mutually
2572 * exclusive with RSS.
2573 */
2574 rxcsum = E1000_READ_REG(hw, E1000_RXCSUM);
2575 rxcsum |= E1000_RXCSUM_PCSD;
2576 E1000_WRITE_REG(hw, E1000_RXCSUM, rxcsum);
2577
2578 }
2579
2580 /*
2581 * igb_init_unicst - Initialize the unicast addresses
2582 */
2583 static void
2584 igb_init_unicst(igb_t *igb)
2585 {
2586 struct e1000_hw *hw = &igb->hw;
2587 int slot;
2588
2589 /*
2590 * Here we should consider two situations:
2591 *
2592 * 1. Chipset is initialized the first time
2593 * Initialize the multiple unicast addresses, and
2594 * save the default MAC address.
2595 *
2596 * 2. Chipset is reset
2597 * Recover the multiple unicast addresses from the
2598 * software data structure to the RAR registers.
2599 */
2600
2601 /*
2602 * Clear the default MAC address in the RAR0 register,
2603 * which is loaded from EEPROM at system boot or chip reset;
2604 * otherwise it causes conflicts with the add_mac/rem_mac entry
2605 * points when VMDq is enabled. For this reason, the RAR0
2606 * must be cleared for both cases mentioned above.
2607 */
2608 e1000_rar_clear(hw, 0);
2609
2610 if (!igb->unicst_init) {
2611
2612 /* Initialize the multiple unicast addresses */
2613 igb->unicst_total = MAX_NUM_UNICAST_ADDRESSES;
2614 igb->unicst_avail = igb->unicst_total;
2615
2616 for (slot = 0; slot < igb->unicst_total; slot++)
2617 igb->unicst_addr[slot].mac.set = 0;
2618
2619 igb->unicst_init = B_TRUE;
2620 } else {
2621 /* Re-configure the RAR registers */
2622 for (slot = 0; slot < igb->unicst_total; slot++) {
2623 (void) e1000_rar_set_vmdq(hw,
2624 igb->unicst_addr[slot].mac.addr,
2625 slot, igb->vmdq_mode,
2626 igb->unicst_addr[slot].mac.group_index);
2627 }
2628 }
2629 }
2630
2631 /*
2632 * igb_unicst_find - Find the slot for the specified unicast address
2633 */
2634 int
2635 igb_unicst_find(igb_t *igb, const uint8_t *mac_addr)
2636 {
2637 int slot;
2638
2639 ASSERT(mutex_owned(&igb->gen_lock));
2640
2641 for (slot = 0; slot < igb->unicst_total; slot++) {
2642 if (bcmp(igb->unicst_addr[slot].mac.addr,
2643 mac_addr, ETHERADDRL) == 0)
2644 return (slot);
2645 }
2646
2647 return (-1);
2648 }
2649
2650 /*
2651 * igb_unicst_set - Set the unicast address to the specified slot
2652 */
2653 int
2654 igb_unicst_set(igb_t *igb, const uint8_t *mac_addr,
2655 int slot)
2656 {
2657 struct e1000_hw *hw = &igb->hw;
2658
2659 ASSERT(mutex_owned(&igb->gen_lock));
2660
2661 /*
2662 * Save the unicast address in the software data structure
2663 */
2664 bcopy(mac_addr, igb->unicst_addr[slot].mac.addr, ETHERADDRL);
2665
2666 /*
2667 * Set the unicast address to the RAR register
2668 */
2669 (void) e1000_rar_set(hw, (uint8_t *)mac_addr, slot);
2670
2671 if (igb_check_acc_handle(igb->osdep.reg_handle) != DDI_FM_OK) {
2672 ddi_fm_service_impact(igb->dip, DDI_SERVICE_DEGRADED);
2673 return (EIO);
2674 }
2675
2676 return (0);
2677 }
2678
2679 /*
2680 * igb_multicst_add - Add a multicst address
2681 */
2682 int
2683 igb_multicst_add(igb_t *igb, const uint8_t *multiaddr)
2684 {
2685 struct ether_addr *new_table;
2686 size_t new_len;
2687 size_t old_len;
2688
2689 ASSERT(mutex_owned(&igb->gen_lock));
2690
2691 if ((multiaddr[0] & 01) == 0) {
2692 igb_log(igb, IGB_LOG_ERROR, "Illegal multicast address");
2693 return (EINVAL);
2694 }
2695
2696 if (igb->mcast_count >= igb->mcast_max_num) {
2697 igb_log(igb, IGB_LOG_ERROR,
2698 "Adapter requested more than %d mcast addresses",
2699 igb->mcast_max_num);
2700 return (ENOENT);
2701 }
2702
2703 if (igb->mcast_count == igb->mcast_alloc_count) {
2704 old_len = igb->mcast_alloc_count *
2705 sizeof (struct ether_addr);
2706 new_len = (igb->mcast_alloc_count + MCAST_ALLOC_COUNT) *
2707 sizeof (struct ether_addr);
2708
2709 new_table = kmem_alloc(new_len, KM_NOSLEEP);
2710 if (new_table == NULL) {
2711 igb_log(igb, IGB_LOG_ERROR,
2712 "Not enough memory to alloc mcast table");
2713 return (ENOMEM);
2714 }
2715
2716 if (igb->mcast_table != NULL) {
2717 bcopy(igb->mcast_table, new_table, old_len);
2718 kmem_free(igb->mcast_table, old_len);
2719 }
2720 igb->mcast_alloc_count += MCAST_ALLOC_COUNT;
2721 igb->mcast_table = new_table;
2722 }
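/*
 * Added sizing note: the table grows in MCAST_ALLOC_COUNT-entry
 * chunks on demand, and igb_multicst_remove() shrinks it by one
 * chunk only after more than a full chunk of slack accumulates,
 * so alternating add/remove calls do not reallocate every time.
 */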
2723
2724 bcopy(multiaddr,
2725 &igb->mcast_table[igb->mcast_count], ETHERADDRL);
2726 igb->mcast_count++;
2727
2728 /*
2729 * Update the multicast table in the hardware
2730 */
2731 igb_setup_multicst(igb);
2732
2733 if (igb_check_acc_handle(igb->osdep.reg_handle) != DDI_FM_OK) {
2734 ddi_fm_service_impact(igb->dip, DDI_SERVICE_DEGRADED);
2735 return (EIO);
2736 }
2737
2738 return (0);
2739 }
2740
2741 /*
2742 * igb_multicst_remove - Remove a multicst address
2743 */
2744 int
2745 igb_multicst_remove(igb_t *igb, const uint8_t *multiaddr)
2746 {
2747 struct ether_addr *new_table;
2748 size_t new_len;
2749 size_t old_len;
2750 int i;
2751
2752 ASSERT(mutex_owned(&igb->gen_lock));
2753
2754 for (i = 0; i < igb->mcast_count; i++) {
2755 if (bcmp(multiaddr, &igb->mcast_table[i],
2756 ETHERADDRL) == 0) {
2757 for (i++; i < igb->mcast_count; i++) {
2758 igb->mcast_table[i - 1] =
2759 igb->mcast_table[i];
2760 }
2761 igb->mcast_count--;
2762 break;
2763 }
2764 }
2765
2766 if ((igb->mcast_alloc_count - igb->mcast_count) >
2767 MCAST_ALLOC_COUNT) {
2768 old_len = igb->mcast_alloc_count *
2769 sizeof (struct ether_addr);
2770 new_len = (igb->mcast_alloc_count - MCAST_ALLOC_COUNT) *
2771 sizeof (struct ether_addr);
2772
2773 new_table = kmem_alloc(new_len, KM_NOSLEEP);
2774 if (new_table != NULL) {
2775 bcopy(igb->mcast_table, new_table, new_len);
2776 kmem_free(igb->mcast_table, old_len);
2777 igb->mcast_alloc_count -= MCAST_ALLOC_COUNT;
2778 igb->mcast_table = new_table;
2779 }
2780 }
2781
2782 /*
2783 * Update the multicast table in the hardware
2784 */
2785 igb_setup_multicst(igb);
2786
2787 if (igb_check_acc_handle(igb->osdep.reg_handle) != DDI_FM_OK) {
2788 ddi_fm_service_impact(igb->dip, DDI_SERVICE_DEGRADED);
2789 return (EIO);
2790 }
2791
2792 return (0);
2793 }
2794
2795 static void
2796 igb_release_multicast(igb_t *igb)
2797 {
2798 if (igb->mcast_table != NULL) {
2799 kmem_free(igb->mcast_table,
2800 igb->mcast_alloc_count * sizeof (struct ether_addr));
2801 igb->mcast_table = NULL;
2802 }
2803 }
2804
2805 /*
2806 * igb_setup_multicst - setup multicast data structures
2807 *
2808 * This routine initializes all of the multicast-related structures
2809 * and saves them in the hardware registers.
2810 */
2811 static void
2812 igb_setup_multicst(igb_t *igb)
2813 {
2814 uint8_t *mc_addr_list;
2815 uint32_t mc_addr_count;
2816 struct e1000_hw *hw = &igb->hw;
2817
2818 ASSERT(mutex_owned(&igb->gen_lock));
2819 ASSERT(igb->mcast_count <= igb->mcast_max_num);
2820
2821 mc_addr_list = (uint8_t *)igb->mcast_table;
2822 mc_addr_count = igb->mcast_count;
2823
2824 /*
2825 * Update the multicast addresses in the MTA registers
2826 */
2827 e1000_update_mc_addr_list(hw, mc_addr_list, mc_addr_count);
2828 }
2829
2830 /*
2831 * igb_get_conf - Get driver configurations set in driver.conf
2832 *
2833 * This routine gets user-configured values out of the configuration
2834 * file igb.conf.
2835 *
2836 * For each configurable value, there is a minimum, a maximum, and a
2837 * default.
2838 * If the user does not configure a value, use the default.
2839 * If the user configures below the minimum, use the minimum.
2840 * If the user configures above the maximum, use the maximum.
2841 */
2842 static void
2843 igb_get_conf(igb_t *igb)
2844 {
2845 struct e1000_hw *hw = &igb->hw;
2846 uint32_t default_mtu;
2847 uint32_t flow_control;
2848 uint32_t ring_per_group;
2849 int i;
2850
2851 /*
2852 * igb driver supports the following user configurations:
2853 *
2854 * Link configurations:
2855 * adv_autoneg_cap
2856 * adv_1000fdx_cap
2857 * adv_100fdx_cap
2858 * adv_100hdx_cap
2859 * adv_10fdx_cap
2860 * adv_10hdx_cap
2861 * Note: 1000hdx is not supported.
2862 *
2863 * Jumbo frame configuration:
2864 * default_mtu
2865 *
2866 * Ethernet flow control configuration:
2867 * flow_control
2868 *
2869 * Multiple rings configurations:
2870 * tx_queue_number
2871 * tx_ring_size
2872 * rx_queue_number
2873 * rx_ring_size
2874 *
2875 * Call igb_get_prop() to get the value for a specific
2876 * configuration parameter.
2877 */
2878
2879 /*
2880 * Link configurations
2881 */
2882 igb->param_adv_autoneg_cap = igb_get_prop(igb,
2883 PROP_ADV_AUTONEG_CAP, 0, 1, 1);
2884 igb->param_adv_1000fdx_cap = igb_get_prop(igb,
2885 PROP_ADV_1000FDX_CAP, 0, 1, 1);
2886 igb->param_adv_100fdx_cap = igb_get_prop(igb,
2887 PROP_ADV_100FDX_CAP, 0, 1, 1);
2888 igb->param_adv_100hdx_cap = igb_get_prop(igb,
2889 PROP_ADV_100HDX_CAP, 0, 1, 1);
2890 igb->param_adv_10fdx_cap = igb_get_prop(igb,
2891 PROP_ADV_10FDX_CAP, 0, 1, 1);
2892 igb->param_adv_10hdx_cap = igb_get_prop(igb,
2893 PROP_ADV_10HDX_CAP, 0, 1, 1);
2894
2895 /*
2896 * Jumbo frame configurations
2897 */
2898 default_mtu = igb_get_prop(igb, PROP_DEFAULT_MTU,
2899 MIN_MTU, MAX_MTU, DEFAULT_MTU);
2900
2901 igb->max_frame_size = default_mtu +
2902 sizeof (struct ether_vlan_header) + ETHERFCSL;
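/*
 * Example (added; assumes the usual 18-byte ether_vlan_header and
 * 4-byte ETHERFCSL): a default MTU of 1500 gives a max_frame_size
 * of 1500 + 18 + 4 = 1522 bytes.
 */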
2903
2904 /*
2905 * Ethernet flow control configuration
2906 */
2907 flow_control = igb_get_prop(igb, PROP_FLOW_CONTROL,
2908 e1000_fc_none, 4, e1000_fc_full);
2909 if (flow_control == 4)
2910 flow_control = e1000_fc_default;
2911
2912 hw->fc.requested_mode = flow_control;
2913
2914 /*
2915 * Multiple rings configurations
2916 */
2917 igb->tx_ring_size = igb_get_prop(igb, PROP_TX_RING_SIZE,
2918 MIN_TX_RING_SIZE, MAX_TX_RING_SIZE, DEFAULT_TX_RING_SIZE);
2919 igb->rx_ring_size = igb_get_prop(igb, PROP_RX_RING_SIZE,
2920 MIN_RX_RING_SIZE, MAX_RX_RING_SIZE, DEFAULT_RX_RING_SIZE);
2921
2922 igb->mr_enable = igb_get_prop(igb, PROP_MR_ENABLE, 0, 1, 0);
2923 igb->num_rx_groups = igb_get_prop(igb, PROP_RX_GROUP_NUM,
2924 MIN_RX_GROUP_NUM, MAX_RX_GROUP_NUM, DEFAULT_RX_GROUP_NUM);
2925 /*
2926 * Currently we do not support VMDq for 82576 and 82580.
2927 * If the chip is 82576 or newer, set num_rx_groups to 1.
2928 */
2929 if (hw->mac.type >= e1000_82576)
2930 igb->num_rx_groups = 1;
2931
2932 if (igb->mr_enable) {
2933 igb->num_tx_rings = igb->capab->def_tx_que_num;
2934 igb->num_rx_rings = igb->capab->def_rx_que_num;
2935 } else {
2936 igb->num_tx_rings = 1;
2937 igb->num_rx_rings = 1;
2938
2939 if (igb->num_rx_groups > 1) {
2940 igb_log(igb, IGB_LOG_ERROR,
2941 "Invalid rx groups number. Please enable multiple "
2942 "rings first");
2943 igb->num_rx_groups = 1;
2944 }
2945 }
2946
2947 /*
2948 * Check the divisibility between rx rings and rx groups.
2949 */
2950 for (i = igb->num_rx_groups; i > 0; i--) {
2951 if ((igb->num_rx_rings % i) == 0)
2952 break;
2953 }
2954 if (i != igb->num_rx_groups) {
2955 igb_log(igb, IGB_LOG_ERROR,
2956 "Invalid rx groups number. Downgrade the rx group "
2957 "number to %d.", i);
2958 igb->num_rx_groups = i;
2959 }
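/*
 * Worked example (added): with num_rx_rings == 4 and a configured
 * num_rx_groups of 3, the loop above finds 2 as the largest group
 * count that divides 4 evenly, so the group number is downgraded
 * from 3 to 2.
 */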
2960
2961 /*
2962 * Get the ring number per group.
2963 */
2964 ring_per_group = igb->num_rx_rings / igb->num_rx_groups;
2965
2966 if (igb->num_rx_groups == 1) {
2967 /*
2968 * One rx ring group, the rx ring number is num_rx_rings.
2969 */
2970 igb->vmdq_mode = E1000_VMDQ_OFF;
2971 } else if (ring_per_group == 1) {
2972 /*
2973 * Multiple rx groups, each group has one rx ring.
2974 */
2975 igb->vmdq_mode = E1000_VMDQ_MAC;
2976 } else {
2977 /*
2978 * Multiple groups and multiple rings.
2979 */
2980 igb->vmdq_mode = E1000_VMDQ_MAC_RSS;
2981 }
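/*
 * Example of the mapping above (added): 8 rx rings spread over 4
 * groups gives ring_per_group == 2, which selects
 * E1000_VMDQ_MAC_RSS; 4 rings in 4 groups would select
 * E1000_VMDQ_MAC, and a single group always selects E1000_VMDQ_OFF.
 */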
2982
2983 /*
2984 * Tunable used to force an interrupt type. Its only use is
2985 * testing of the lesser interrupt types.
2986 * 0 = don't force interrupt type
2987 * 1 = force interrupt type MSIX
2988 * 2 = force interrupt type MSI
2989 * 3 = force interrupt type Legacy
2990 */
2991 igb->intr_force = igb_get_prop(igb, PROP_INTR_FORCE,
2992 IGB_INTR_NONE, IGB_INTR_LEGACY, IGB_INTR_NONE);
2993
2994 igb->tx_hcksum_enable = igb_get_prop(igb, PROP_TX_HCKSUM_ENABLE,
2995 0, 1, 1);
2996 igb->rx_hcksum_enable = igb_get_prop(igb, PROP_RX_HCKSUM_ENABLE,
2997 0, 1, 1);
2998 igb->lso_enable = igb_get_prop(igb, PROP_LSO_ENABLE,
2999 0, 1, 1);
3000 igb->tx_head_wb_enable = igb_get_prop(igb, PROP_TX_HEAD_WB_ENABLE,
3001 0, 1, 1);
3002
3003 /*
3004 * igb LSO needs the tx h/w checksum support.
3005 * Here LSO will be disabled if tx h/w checksum has been disabled.
3006 */
3007 if (igb->tx_hcksum_enable == B_FALSE)
3008 igb->lso_enable = B_FALSE;
3009
3010 igb->tx_copy_thresh = igb_get_prop(igb, PROP_TX_COPY_THRESHOLD,
3011 MIN_TX_COPY_THRESHOLD, MAX_TX_COPY_THRESHOLD,
3012 DEFAULT_TX_COPY_THRESHOLD);
3013 igb->tx_recycle_thresh = igb_get_prop(igb, PROP_TX_RECYCLE_THRESHOLD,
3014 MIN_TX_RECYCLE_THRESHOLD, MAX_TX_RECYCLE_THRESHOLD,
3015 DEFAULT_TX_RECYCLE_THRESHOLD);
3016 igb->tx_overload_thresh = igb_get_prop(igb, PROP_TX_OVERLOAD_THRESHOLD,
3017 MIN_TX_OVERLOAD_THRESHOLD, MAX_TX_OVERLOAD_THRESHOLD,
3018 DEFAULT_TX_OVERLOAD_THRESHOLD);
3019 igb->tx_resched_thresh = igb_get_prop(igb, PROP_TX_RESCHED_THRESHOLD,
3020 MIN_TX_RESCHED_THRESHOLD,
3021 MIN(igb->tx_ring_size, MAX_TX_RESCHED_THRESHOLD),
3022 igb->tx_ring_size > DEFAULT_TX_RESCHED_THRESHOLD ?
3023 DEFAULT_TX_RESCHED_THRESHOLD : DEFAULT_TX_RESCHED_THRESHOLD_LOW);
3024
3025 igb->rx_copy_thresh = igb_get_prop(igb, PROP_RX_COPY_THRESHOLD,
3026 MIN_RX_COPY_THRESHOLD, MAX_RX_COPY_THRESHOLD,
3027 DEFAULT_RX_COPY_THRESHOLD);
3028 igb->rx_limit_per_intr = igb_get_prop(igb, PROP_RX_LIMIT_PER_INTR,
3029 MIN_RX_LIMIT_PER_INTR, MAX_RX_LIMIT_PER_INTR,
3030 DEFAULT_RX_LIMIT_PER_INTR);
3031
3032 igb->intr_throttling[0] = igb_get_prop(igb, PROP_INTR_THROTTLING,
3033 igb->capab->min_intr_throttle,
3034 igb->capab->max_intr_throttle,
3035 igb->capab->def_intr_throttle);
3036
3037 /*
3038 * Max number of multicast addresses
3039 */
3040 igb->mcast_max_num =
3041 igb_get_prop(igb, PROP_MCAST_MAX_NUM,
3042 MIN_MCAST_NUM, MAX_MCAST_NUM, DEFAULT_MCAST_NUM);
3043 }
3044
3045 /*
3046 * igb_get_prop - Get a property value out of the configuration file igb.conf
3047 *
3048 * Caller provides the name of the property, a default value, a minimum
3049 * value, and a maximum value.
3050 *
3051 * Return configured value of the property, with default, minimum and
3052 * maximum properly applied.
3053 */
3054 static int
3055 igb_get_prop(igb_t *igb,
3056 char *propname, /* name of the property */
3057 int minval, /* minimum acceptable value */
3058 int maxval, /* maximum acceptable value */
3059 int defval) /* default value */
3060 {
3061 int value;
3062
3063 /*
3064 * Call ddi_prop_get_int() to read the conf settings
3065 */
3066 value = ddi_prop_get_int(DDI_DEV_T_ANY, igb->dip,
3067 DDI_PROP_DONTPASS, propname, defval);
3068
3069 if (value > maxval)
3070 value = maxval;
3071
3072 if (value < minval)
3073 value = minval;
3074
3075 return (value);
3076 }
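/*
 * Usage sketch (added; the numeric bounds are illustrative, not the
 * driver's actual MIN_MTU/MAX_MTU values):
 *
 *	mtu = igb_get_prop(igb, PROP_DEFAULT_MTU, 1500, 9000, 1500);
 *
 * With no "default_mtu" entry in igb.conf this returns 1500, and a
 * configured value of 12000 would be clamped down to 9000.
 */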
3077
3078 /*
3079 * igb_setup_link - Using the link properties to setup the link
3080 */
3081 int
3082 igb_setup_link(igb_t *igb, boolean_t setup_hw)
3083 {
3084 struct e1000_mac_info *mac;
3085 struct e1000_phy_info *phy;
3086 boolean_t invalid;
3087
3088 mac = &igb->hw.mac;
3089 phy = &igb->hw.phy;
3090 invalid = B_FALSE;
3091
3092 if (igb->param_adv_autoneg_cap == 1) {
3093 mac->autoneg = B_TRUE;
3094 phy->autoneg_advertised = 0;
3095
3096 /*
3097 * 1000hdx is not supported for autonegotiation
3098 */
3099 if (igb->param_adv_1000fdx_cap == 1)
3100 phy->autoneg_advertised |= ADVERTISE_1000_FULL;
3101
3102 if (igb->param_adv_100fdx_cap == 1)
3103 phy->autoneg_advertised |= ADVERTISE_100_FULL;
3104
3105 if (igb->param_adv_100hdx_cap == 1)
3106 phy->autoneg_advertised |= ADVERTISE_100_HALF;
3107
3108 if (igb->param_adv_10fdx_cap == 1)
3109 phy->autoneg_advertised |= ADVERTISE_10_FULL;
3110
3111 if (igb->param_adv_10hdx_cap == 1)
3112 phy->autoneg_advertised |= ADVERTISE_10_HALF;
3113
3114 if (phy->autoneg_advertised == 0)
3115 invalid = B_TRUE;
3116 } else {
3117 mac->autoneg = B_FALSE;
3118
3119 /*
3120 * 1000fdx and 1000hdx are not supported for forced link
3121 */
3122 if (igb->param_adv_100fdx_cap == 1)
3123 mac->forced_speed_duplex = ADVERTISE_100_FULL;
3124 else if (igb->param_adv_100hdx_cap == 1)
3125 mac->forced_speed_duplex = ADVERTISE_100_HALF;
3126 else if (igb->param_adv_10fdx_cap == 1)
3127 mac->forced_speed_duplex = ADVERTISE_10_FULL;
3128 else if (igb->param_adv_10hdx_cap == 1)
3129 mac->forced_speed_duplex = ADVERTISE_10_HALF;
3130 else
3131 invalid = B_TRUE;
3132 }
3133
3134 if (invalid) {
3135 igb_log(igb, IGB_LOG_INFO, "Invalid link settings. Setting "
3136 "link to autonegotiation with full link capabilities.");
3137 mac->autoneg = B_TRUE;
3138 phy->autoneg_advertised = ADVERTISE_1000_FULL |
3139 ADVERTISE_100_FULL | ADVERTISE_100_HALF |
3140 ADVERTISE_10_FULL | ADVERTISE_10_HALF;
3141 }
3142
3143 if (setup_hw) {
3144 if (e1000_setup_link(&igb->hw) != E1000_SUCCESS)
3145 return (IGB_FAILURE);
3146 }
3147
3148 return (IGB_SUCCESS);
3149 }
3150
3151
3152 /*
3153 * igb_is_link_up - Check if the link is up
3154 */
3155 static boolean_t
3156 igb_is_link_up(igb_t *igb)
3157 {
3158 struct e1000_hw *hw = &igb->hw;
3159 boolean_t link_up = B_FALSE;
3160
3161 ASSERT(mutex_owned(&igb->gen_lock));
3162
3163 /*
3164 * get_link_status is set in the interrupt handler on a
3165 * link-status-change or rx sequence error interrupt. For copper
3166 * adapters it stays set until e1000_check_for_link() establishes
3167 * the link again.
3168 */
3169 switch (hw->phy.media_type) {
3170 case e1000_media_type_copper:
3171 if (hw->mac.get_link_status) {
3172 (void) e1000_check_for_link(hw);
3173 link_up = !hw->mac.get_link_status;
3174 } else {
3175 link_up = B_TRUE;
3176 }
3177 break;
3178 case e1000_media_type_fiber:
3179 (void) e1000_check_for_link(hw);
3180 link_up = (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU);
3181 break;
3182 case e1000_media_type_internal_serdes:
3183 (void) e1000_check_for_link(hw);
3184 link_up = hw->mac.serdes_has_link;
3185 break;
3186 }
3187
3188 return (link_up);
3189 }
3190
3191 /*
3192 * igb_link_check - Link status processing
3193 */
3194 static boolean_t
3195 igb_link_check(igb_t *igb)
3196 {
3197 struct e1000_hw *hw = &igb->hw;
3198 uint16_t speed = 0, duplex = 0;
3199 boolean_t link_changed = B_FALSE;
3200
3201 ASSERT(mutex_owned(&igb->gen_lock));
3202
3203 if (igb_is_link_up(igb)) {
3204 /*
3205 * The Link is up, check whether it was marked as down earlier
3206 */
3207 if (igb->link_state != LINK_STATE_UP) {
3208 (void) e1000_get_speed_and_duplex(hw, &speed, &duplex);
3209 igb->link_speed = speed;
3210 igb->link_duplex = duplex;
3211 igb->link_state = LINK_STATE_UP;
3212 link_changed = B_TRUE;
3213 if (!igb->link_complete)
3214 igb_stop_link_timer(igb);
3215 }
3216 } else if (igb->link_complete) {
3217 if (igb->link_state != LINK_STATE_DOWN) {
3218 igb->link_speed = 0;
3219 igb->link_duplex = 0;
3220 igb->link_state = LINK_STATE_DOWN;
3221 link_changed = B_TRUE;
3222 }
3223 }
3224
3225 if (igb_check_acc_handle(igb->osdep.reg_handle) != DDI_FM_OK) {
3226 ddi_fm_service_impact(igb->dip, DDI_SERVICE_DEGRADED);
3227 return (B_FALSE);
3228 }
3229
3230 return (link_changed);
3231 }
3232
3233 /*
3234 * igb_local_timer - driver watchdog function
3235 *
3236 * This function will handle the hardware stall check, link status
3237 * check and other routines.
3238 */
3239 static void
3240 igb_local_timer(void *arg)
3241 {
3242 igb_t *igb = (igb_t *)arg;
3243 boolean_t link_changed = B_FALSE;
3244
3245 if (igb->igb_state & IGB_ERROR) {
3246 igb->reset_count++;
3247 if (igb_reset(igb) == IGB_SUCCESS)
3248 ddi_fm_service_impact(igb->dip, DDI_SERVICE_RESTORED);
3249
3250 igb_restart_watchdog_timer(igb);
3251 return;
3252 }
3253
3254 if (igb_stall_check(igb) || (igb->igb_state & IGB_STALL)) {
3255 igb_fm_ereport(igb, DDI_FM_DEVICE_STALL);
3256 ddi_fm_service_impact(igb->dip, DDI_SERVICE_LOST);
3257 igb->reset_count++;
3258 if (igb_reset(igb) == IGB_SUCCESS)
3259 ddi_fm_service_impact(igb->dip, DDI_SERVICE_RESTORED);
3260
3261 igb_restart_watchdog_timer(igb);
3262 return;
3263 }
3264
3265 mutex_enter(&igb->gen_lock);
3266 if (!(igb->igb_state & IGB_SUSPENDED) && (igb->igb_state & IGB_STARTED))
3267 link_changed = igb_link_check(igb);
3268 mutex_exit(&igb->gen_lock);
3269
3270 if (link_changed)
3271 mac_link_update(igb->mac_hdl, igb->link_state);
3272
3273 igb_restart_watchdog_timer(igb);
3274 }
3275
3276 /*
3277 * igb_link_timer - link setup timer function
3278 *
3279 * It is called when the timer for link setup expires, which indicates
3280 * the completion of the link setup. The link state will not be updated
3281 * until the link setup is completed, and it is not sent
3282 * to the upper layer through mac_link_update() in this function; it will
3283 * be updated in the local timer routine or the interrupt service routine
3284 * after the interface is started (plumbed).
3285 */
3286 static void
3287 igb_link_timer(void *arg)
3288 {
3289 igb_t *igb = (igb_t *)arg;
3290
3291 mutex_enter(&igb->link_lock);
3292 igb->link_complete = B_TRUE;
3293 igb->link_tid = 0;
3294 mutex_exit(&igb->link_lock);
3295 }
3296 /*
3297 * igb_stall_check - check for transmit stall
3298 *
3299 * This function checks if the adapter is stalled (in transmit).
3300 *
3301 * It is called each time the watchdog timeout is invoked.
3302 * If the transmit descriptor reclaim continuously fails,
3303 * the watchdog value will increment by 1. If the watchdog
3304 * value exceeds the threshold, the igb is assumed to
3305 * have stalled and needs to be reset.
3306 */
3307 static boolean_t
3308 igb_stall_check(igb_t *igb)
3309 {
3310 igb_tx_ring_t *tx_ring;
3311 struct e1000_hw *hw = &igb->hw;
3312 boolean_t result;
3313 int i;
3314
3315 if (igb->link_state != LINK_STATE_UP)
3316 return (B_FALSE);
3317
3318 /*
3319 * If any tx ring is stalled, we'll reset the chipset
3320 */
3321 result = B_FALSE;
3322 for (i = 0; i < igb->num_tx_rings; i++) {
3323 tx_ring = &igb->tx_rings[i];
3324
3325 if (tx_ring->recycle_fail > 0)
3326 tx_ring->stall_watchdog++;
3327 else
3328 tx_ring->stall_watchdog = 0;
3329
3330 if (tx_ring->stall_watchdog >= STALL_WATCHDOG_TIMEOUT) {
3331 result = B_TRUE;
3332 if (hw->mac.type == e1000_82580) {
3333 hw->dev_spec._82575.global_device_reset
3334 = B_TRUE;
3335 }
3336 break;
3337 }
3338 }
3339
3340 if (result) {
3341 tx_ring->stall_watchdog = 0;
3342 tx_ring->recycle_fail = 0;
3343 }
3344
3345 return (result);
3346 }
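/*
 * Added timing note: igb_arm_watchdog_timer() fires this check about
 * once per second, and stall_watchdog increments on each tick while
 * descriptor recycling keeps failing, so a STALL_WATCHDOG_TIMEOUT of
 * N corresponds to roughly N seconds of continuous stall before the
 * chip is reset.
 */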
3347
3348
3349 /*
3350 * is_valid_mac_addr - Check if the mac address is valid
3351 */
3352 static boolean_t
3353 is_valid_mac_addr(uint8_t *mac_addr)
3354 {
3355 const uint8_t addr_test1[6] = { 0, 0, 0, 0, 0, 0 };
3356 const uint8_t addr_test2[6] =
3357 { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
3358
3359 if (!(bcmp(addr_test1, mac_addr, ETHERADDRL)) ||
3360 !(bcmp(addr_test2, mac_addr, ETHERADDRL)))
3361 return (B_FALSE);
3362
3363 return (B_TRUE);
3364 }
3365
3366 static boolean_t
3367 igb_find_mac_address(igb_t *igb)
3368 {
3369 struct e1000_hw *hw = &igb->hw;
3370 #ifdef __sparc
3371 uchar_t *bytes;
3372 struct ether_addr sysaddr;
3373 uint_t nelts;
3374 int err;
3375 boolean_t found = B_FALSE;
3376
3377 /*
3378 * The "vendor's factory-set address" may already have
3379 * been extracted from the chip, but if the property
3380 * "local-mac-address" is set we use that instead.
3381 *
3382 * We check whether it looks like an array of 6
3383 * bytes (which it should, if OBP set it). If we can't
3384 * make sense of it this way, we'll ignore it.
3385 */
3386 err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, igb->dip,
3387 DDI_PROP_DONTPASS, "local-mac-address", &bytes, &nelts);
3388 if (err == DDI_PROP_SUCCESS) {
3389 if (nelts == ETHERADDRL) {
3390 while (nelts--)
3391 hw->mac.addr[nelts] = bytes[nelts];
3392 found = B_TRUE;
3393 }
3394 ddi_prop_free(bytes);
3395 }
3396
3397 /*
3398 * Look up the OBP property "local-mac-address?". If the user has set
3399 * 'local-mac-address? = false', use "the system address" instead.
3400 */
3401 if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, igb->dip, 0,
3402 "local-mac-address?", &bytes, &nelts) == DDI_PROP_SUCCESS) {
3403 if (strncmp("false", (caddr_t)bytes, (size_t)nelts) == 0) {
3404 if (localetheraddr(NULL, &sysaddr) != 0) {
3405 bcopy(&sysaddr, hw->mac.addr, ETHERADDRL);
3406 found = B_TRUE;
3407 }
3408 }
3409 ddi_prop_free(bytes);
3410 }
3411
3412 /*
3413 * Finally(!), if there's a valid "mac-address" property (created
3414 * if we netbooted from this interface), we must use this instead
3415 * of any of the above to ensure that the NFS/install server doesn't
3416 * get confused by the address changing as Solaris takes over!
3417 */
3418 err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, igb->dip,
3419 DDI_PROP_DONTPASS, "mac-address", &bytes, &nelts);
3420 if (err == DDI_PROP_SUCCESS) {
3421 if (nelts == ETHERADDRL) {
3422 while (nelts--)
3423 hw->mac.addr[nelts] = bytes[nelts];
3424 found = B_TRUE;
3425 }
3426 ddi_prop_free(bytes);
3427 }
3428
3429 if (found) {
3430 bcopy(hw->mac.addr, hw->mac.perm_addr, ETHERADDRL);
3431 return (B_TRUE);
3432 }
3433 #endif
3434
3435 /*
3436 * Read the device MAC address from the EEPROM
3437 */
3438 if (e1000_read_mac_addr(hw) != E1000_SUCCESS)
3439 return (B_FALSE);
3440
3441 return (B_TRUE);
3442 }
3443
3444 #pragma inline(igb_arm_watchdog_timer)
3445
3446 static void
3447 igb_arm_watchdog_timer(igb_t *igb)
3448 {
3449 /*
3450 * Fire a watchdog timer
3451 */
3452 igb->watchdog_tid =
3453 timeout(igb_local_timer,
3454 (void *)igb, 1 * drv_usectohz(1000000));
3455
3456 }
3457
3458 /*
3459 * igb_enable_watchdog_timer - Enable and start the driver watchdog timer
3460 */
3461 void
3462 igb_enable_watchdog_timer(igb_t *igb)
3463 {
3464 mutex_enter(&igb->watchdog_lock);
3465
3466 if (!igb->watchdog_enable) {
3467 igb->watchdog_enable = B_TRUE;
3468 igb->watchdog_start = B_TRUE;
3469 igb_arm_watchdog_timer(igb);
3470 }
3471
3472 mutex_exit(&igb->watchdog_lock);
3473
3474 }
3475
3476 /*
3477 * igb_disable_watchdog_timer - Disable and stop the driver watchdog timer
3478 */
3479 void
3480 igb_disable_watchdog_timer(igb_t *igb)
3481 {
3482 timeout_id_t tid;
3483
3484 mutex_enter(&igb->watchdog_lock);
3485
3486 igb->watchdog_enable = B_FALSE;
3487 igb->watchdog_start = B_FALSE;
3488 tid = igb->watchdog_tid;
3489 igb->watchdog_tid = 0;
3490
3491 mutex_exit(&igb->watchdog_lock);
3492
3493 if (tid != 0)
3494 (void) untimeout(tid);
3495
3496 }
3497
3498 /*
3499 * igb_start_watchdog_timer - Start the driver watchdog timer
3500 */
3501 static void
3502 igb_start_watchdog_timer(igb_t *igb)
3503 {
3504 mutex_enter(&igb->watchdog_lock);
3505
3506 if (igb->watchdog_enable) {
3507 if (!igb->watchdog_start) {
3508 igb->watchdog_start = B_TRUE;
3509 igb_arm_watchdog_timer(igb);
3510 }
3511 }
3512
3513 mutex_exit(&igb->watchdog_lock);
3514 }
3515
3516 /*
3517 * igb_restart_watchdog_timer - Restart the driver watchdog timer
3518 */
3519 static void
3520 igb_restart_watchdog_timer(igb_t *igb)
3521 {
3522 mutex_enter(&igb->watchdog_lock);
3523
3524 if (igb->watchdog_start)
3525 igb_arm_watchdog_timer(igb);
3526
3527 mutex_exit(&igb->watchdog_lock);
3528 }
3529
3530 /*
3531 * igb_stop_watchdog_timer - Stop the driver watchdog timer
3532 */
3533 static void
3534 igb_stop_watchdog_timer(igb_t *igb)
3535 {
3536 timeout_id_t tid;
3537
3538 mutex_enter(&igb->watchdog_lock);
3539
3540 igb->watchdog_start = B_FALSE;
3541 tid = igb->watchdog_tid;
3542 igb->watchdog_tid = 0;
3543
3544 mutex_exit(&igb->watchdog_lock);
3545
3546 if (tid != 0)
3547 (void) untimeout(tid);
3548 }
3549
3550 /*
3551 * igb_start_link_timer - Start the link setup timer
3552 */
3553 static void
3554 igb_start_link_timer(struct igb *igb)
3555 {
3556 struct e1000_hw *hw = &igb->hw;
3557 clock_t link_timeout;
3558
3559 if (hw->mac.autoneg)
3560 link_timeout = PHY_AUTO_NEG_LIMIT *
3561 drv_usectohz(100000);
3562 else
3563 link_timeout = PHY_FORCE_LIMIT * drv_usectohz(100000);
3564
3565 mutex_enter(&igb->link_lock);
3566 if (hw->phy.autoneg_wait_to_complete) {
3567 igb->link_complete = B_TRUE;
3568 } else {
3569 igb->link_complete = B_FALSE;
3570 igb->link_tid = timeout(igb_link_timer, (void *)igb,
3571 link_timeout);
3572 }
3573 mutex_exit(&igb->link_lock);
3574 }
3575
3576 /*
3577 * igb_stop_link_timer - Stop the link setup timer
3578 */
3579 static void
3580 igb_stop_link_timer(struct igb *igb)
3581 {
3582 timeout_id_t tid;
3583
3584 mutex_enter(&igb->link_lock);
3585 igb->link_complete = B_TRUE;
3586 tid = igb->link_tid;
3587 igb->link_tid = 0;
3588 mutex_exit(&igb->link_lock);
3589
3590 if (tid != 0)
3591 (void) untimeout(tid);
3592 }
3593
3594 /*
3595 * igb_disable_adapter_interrupts - Clear/disable all hardware interrupts
3596 */
3597 static void
3598 igb_disable_adapter_interrupts(igb_t *igb)
3599 {
3600 struct e1000_hw *hw = &igb->hw;
3601
3602 /*
3603 * Set the IMC register to mask all the interrupts,
3604 * including the tx interrupts.
3605 */
3606 E1000_WRITE_REG(hw, E1000_IMC, ~0);
3607 E1000_WRITE_REG(hw, E1000_IAM, 0);
3608
3609 /*
3610 * Additional disabling for MSI-X
3611 */
3612 if (igb->intr_type == DDI_INTR_TYPE_MSIX) {
3613 E1000_WRITE_REG(hw, E1000_EIMC, ~0);
3614 E1000_WRITE_REG(hw, E1000_EIAC, 0);
3615 E1000_WRITE_REG(hw, E1000_EIAM, 0);
3616 }
3617
3618 E1000_WRITE_FLUSH(hw);
3619 }
3620
3621 /*
3622 * igb_enable_adapter_interrupts_82580 - Enable NIC interrupts for 82580
3623 */
3624 static void
3625 igb_enable_adapter_interrupts_82580(igb_t *igb)
3626 {
3627 struct e1000_hw *hw = &igb->hw;
3628
3629 /* Clear any pending interrupts */
3630 (void) E1000_READ_REG(hw, E1000_ICR);
3631 igb->ims_mask |= E1000_IMS_DRSTA;
3632
3633 if (igb->intr_type == DDI_INTR_TYPE_MSIX) {
3634
3635 /* Interrupt enabling for MSI-X */
3636 E1000_WRITE_REG(hw, E1000_EIMS, igb->eims_mask);
3637 E1000_WRITE_REG(hw, E1000_EIAC, igb->eims_mask);
3638 igb->ims_mask = (E1000_IMS_LSC | E1000_IMS_DRSTA);
3639 E1000_WRITE_REG(hw, E1000_IMS, igb->ims_mask);
3640 } else { /* Interrupt enabling for MSI and legacy */
3641 E1000_WRITE_REG(hw, E1000_IVAR0, E1000_IVAR_VALID);
3642 igb->ims_mask = IMS_ENABLE_MASK | E1000_IMS_TXQE;
3643 igb->ims_mask |= E1000_IMS_DRSTA;
3644 E1000_WRITE_REG(hw, E1000_IMS, igb->ims_mask);
3645 }
3646
3647 /* Disable auto-mask for ICR interrupt bits */
3648 E1000_WRITE_REG(hw, E1000_IAM, 0);
3649
3650 E1000_WRITE_FLUSH(hw);
3651 }
3652
3653 /*
3654 * igb_enable_adapter_interrupts_82576 - Enable NIC interrupts for 82576
3655 */
3656 static void
3657 igb_enable_adapter_interrupts_82576(igb_t *igb)
3658 {
3659 struct e1000_hw *hw = &igb->hw;
3660
3661 /* Clear any pending interrupts */
3662 (void) E1000_READ_REG(hw, E1000_ICR);
3663
3664 if (igb->intr_type == DDI_INTR_TYPE_MSIX) {
3665
3666 /* Interrupt enabling for MSI-X */
3667 E1000_WRITE_REG(hw, E1000_EIMS, igb->eims_mask);
3668 E1000_WRITE_REG(hw, E1000_EIAC, igb->eims_mask);
3669 igb->ims_mask = E1000_IMS_LSC;
3670 E1000_WRITE_REG(hw, E1000_IMS, E1000_IMS_LSC);
3671 } else {
3672 /* Interrupt enabling for MSI and legacy */
3673 E1000_WRITE_REG(hw, E1000_IVAR0, E1000_IVAR_VALID);
3674 igb->ims_mask = IMS_ENABLE_MASK | E1000_IMS_TXQE;
3675 E1000_WRITE_REG(hw, E1000_IMS,
3676 (IMS_ENABLE_MASK | E1000_IMS_TXQE));
3677 }
3678
3679 /* Disable auto-mask for ICR interrupt bits */
3680 E1000_WRITE_REG(hw, E1000_IAM, 0);
3681
3682 E1000_WRITE_FLUSH(hw);
3683 }
3684
3685 /*
3686 * igb_enable_adapter_interrupts_82575 - Enable NIC interrupts for 82575
3687 */
3688 static void
3689 igb_enable_adapter_interrupts_82575(igb_t *igb)
3690 {
3691 struct e1000_hw *hw = &igb->hw;
3692 uint32_t reg;
3693
3694 /* Clear any pending interrupts */
3695 (void) E1000_READ_REG(hw, E1000_ICR);
3696
3697 if (igb->intr_type == DDI_INTR_TYPE_MSIX) {
3698 /* Interrupt enabling for MSI-X */
3699 E1000_WRITE_REG(hw, E1000_EIMS, igb->eims_mask);
3700 E1000_WRITE_REG(hw, E1000_EIAC, igb->eims_mask);
3701 igb->ims_mask = E1000_IMS_LSC;
3702 E1000_WRITE_REG(hw, E1000_IMS, E1000_IMS_LSC);
3703
3704 /* Enable MSI-X PBA support */
3705 reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
3706 reg |= E1000_CTRL_EXT_PBA_CLR;
3707
3708 /* Non-selective interrupt clear-on-read */
3709 reg |= E1000_CTRL_EXT_IRCA; /* Called NSICR in the EAS */
3710
3711 E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
3712 } else {
3713 /* Interrupt enabling for MSI and legacy */
3714 igb->ims_mask = IMS_ENABLE_MASK;
3715 E1000_WRITE_REG(hw, E1000_IMS, IMS_ENABLE_MASK);
3716 }
3717
3718 E1000_WRITE_FLUSH(hw);
3719 }
3720
3721 /*
3722 * Loopback Support
3723 */
3724 static lb_property_t lb_normal =
3725 { normal, "normal", IGB_LB_NONE };
3726 static lb_property_t lb_external =
3727 { external, "External", IGB_LB_EXTERNAL };
3728 static lb_property_t lb_phy =
3729 { internal, "PHY", IGB_LB_INTERNAL_PHY };
3730 static lb_property_t lb_serdes =
3731 { internal, "SerDes", IGB_LB_INTERNAL_SERDES };
3732
3733 enum ioc_reply
3734 igb_loopback_ioctl(igb_t *igb, struct iocblk *iocp, mblk_t *mp)
3735 {
3736 lb_info_sz_t *lbsp;
3737 lb_property_t *lbpp;
3738 struct e1000_hw *hw;
3739 uint32_t *lbmp;
3740 uint32_t size;
3741 uint32_t value;
3742
3743 hw = &igb->hw;
3744
3745 if (mp->b_cont == NULL)
3746 return (IOC_INVAL);
3747
3748 switch (iocp->ioc_cmd) {
3749 default:
3750 return (IOC_INVAL);
3751
3752 case LB_GET_INFO_SIZE:
3753 size = sizeof (lb_info_sz_t);
3754 if (iocp->ioc_count != size)
3755 return (IOC_INVAL);
3756
3757 value = sizeof (lb_normal);
3758 if (hw->phy.media_type == e1000_media_type_copper)
3759 value += sizeof (lb_phy);
3760 else
3761 value += sizeof (lb_serdes);
3762 value += sizeof (lb_external);
3763
3764 lbsp = (lb_info_sz_t *)(uintptr_t)mp->b_cont->b_rptr;
3765 *lbsp = value;
3766 break;
3767
3768 case LB_GET_INFO:
3769 value = sizeof (lb_normal);
3770 if (hw->phy.media_type == e1000_media_type_copper)
3771 value += sizeof (lb_phy);
3772 else
3773 value += sizeof (lb_serdes);
3774 value += sizeof (lb_external);
3775
3776 size = value;
3777 if (iocp->ioc_count != size)
3778 return (IOC_INVAL);
3779
3780 value = 0;
3781 lbpp = (lb_property_t *)(uintptr_t)mp->b_cont->b_rptr;
3782
3783 lbpp[value++] = lb_normal;
3784 if (hw->phy.media_type == e1000_media_type_copper)
3785 lbpp[value++] = lb_phy;
3786 else
3787 lbpp[value++] = lb_serdes;
3788 lbpp[value++] = lb_external;
3789 break;
3790
3791 case LB_GET_MODE:
3792 size = sizeof (uint32_t);
3793 if (iocp->ioc_count != size)
3794 return (IOC_INVAL);
3795
3796 lbmp = (uint32_t *)(uintptr_t)mp->b_cont->b_rptr;
3797 *lbmp = igb->loopback_mode;
3798 break;
3799
3800 case LB_SET_MODE:
3801 size = 0;
3802 if (iocp->ioc_count != sizeof (uint32_t))
3803 return (IOC_INVAL);
3804
3805 lbmp = (uint32_t *)(uintptr_t)mp->b_cont->b_rptr;
3806 if (!igb_set_loopback_mode(igb, *lbmp))
3807 return (IOC_INVAL);
3808 break;
3809 }
3810
3811 iocp->ioc_count = size;
3812 iocp->ioc_error = 0;
3813
3814 if (igb_check_acc_handle(igb->osdep.reg_handle) != DDI_FM_OK) {
3815 ddi_fm_service_impact(igb->dip, DDI_SERVICE_DEGRADED);
3816 return (IOC_INVAL);
3817 }
3818
3819 return (IOC_REPLY);
3820 }
3821
3822 /*
3823 * igb_set_loopback_mode - Setup loopback based on the loopback mode
3824 */
3825 static boolean_t
3826 igb_set_loopback_mode(igb_t *igb, uint32_t mode)
3827 {
3828 struct e1000_hw *hw;
3829 int i;
3830
3831 if (mode == igb->loopback_mode)
3832 return (B_TRUE);
3833
3834 hw = &igb->hw;
3835
3836 igb->loopback_mode = mode;
3837
3838 if (mode == IGB_LB_NONE) {
3839 /* Reset the chip */
3840 hw->phy.autoneg_wait_to_complete = B_TRUE;
3841 (void) igb_reset(igb);
3842 hw->phy.autoneg_wait_to_complete = B_FALSE;
3843 return (B_TRUE);
3844 }
3845
3846 mutex_enter(&igb->gen_lock);
3847
3848 switch (mode) {
3849 default:
3850 mutex_exit(&igb->gen_lock);
3851 return (B_FALSE);
3852
3853 case IGB_LB_EXTERNAL:
3854 igb_set_external_loopback(igb);
3855 break;
3856
3857 case IGB_LB_INTERNAL_PHY:
3858 igb_set_internal_phy_loopback(igb);
3859 break;
3860
3861 case IGB_LB_INTERNAL_SERDES:
3862 igb_set_internal_serdes_loopback(igb);
3863 break;
3864 }
3865
3866 mutex_exit(&igb->gen_lock);
3867
3868 /*
3869 * When external loopback is set, wait up to 1000ms to get the link up.
3870 * Testing shows 1000ms is sufficient; the value is empirical.
3871 */
3872 if (mode == IGB_LB_EXTERNAL) {
3873 for (i = 0; i <= 10; i++) {
3874 mutex_enter(&igb->gen_lock);
3875 (void) igb_link_check(igb);
3876 mutex_exit(&igb->gen_lock);
3877
3878 if (igb->link_state == LINK_STATE_UP)
3879 break;
3880
3881 msec_delay(100);
3882 }
3883
3884 if (igb->link_state != LINK_STATE_UP) {
3885 /*
3886 * External loopback is not supported on this link.
3887 * Reset the driver to loopback none.
3888 */
3889 igb->loopback_mode = IGB_LB_NONE;
3890
3891 /* Reset the chip */
3892 hw->phy.autoneg_wait_to_complete = B_TRUE;
3893 (void) igb_reset(igb);
3894 hw->phy.autoneg_wait_to_complete = B_FALSE;
3895
3896 igb_log(igb, IGB_LOG_INFO, "Set external loopback "
3897 "failed, reset to loopback none.");
3898
3899 return (B_FALSE);
3900 }
3901 }
3902
3903 return (B_TRUE);
3904 }
3905
3906 /*
3907 * igb_set_external_loopback - Set the external loopback mode
3908 */
3909 static void
3910 igb_set_external_loopback(igb_t *igb)
3911 {
3912 struct e1000_hw *hw;
3913 uint32_t ctrl_ext;
3914
3915 hw = &igb->hw;
3916
3917 /* Set link mode to PHY (00b) in the Extended Control register */
3918 ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
3919 ctrl_ext &= ~E1000_CTRL_EXT_LINK_MODE_MASK;
3920 E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
3921
3922 (void) e1000_write_phy_reg(hw, 0x0, 0x0140);
3923 (void) e1000_write_phy_reg(hw, 0x9, 0x1a00);
3924 (void) e1000_write_phy_reg(hw, 0x12, 0x1610);
3925 (void) e1000_write_phy_reg(hw, 0x1f37, 0x3f1c);
3926 }
3927
3928 /*
3929 * igb_set_internal_phy_loopback - Set the internal PHY loopback mode
3930 */
3931 static void
3932 igb_set_internal_phy_loopback(igb_t *igb)
3933 {
3934 struct e1000_hw *hw;
3935 uint32_t ctrl_ext;
3936 uint16_t phy_ctrl;
3937 uint16_t phy_pconf;
3938
3939 hw = &igb->hw;
3940
3941 /* Set link mode to PHY (00b) in the Extended Control register */
3942 ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
3943 ctrl_ext &= ~E1000_CTRL_EXT_LINK_MODE_MASK;
3944 E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
3945
3946 /*
3947 * Set PHY control register (0x4140):
3948 * Set full duplex mode
3949 * Set loopback bit
3950 * Clear auto-neg enable bit
3951 * Set PHY speed
3952 */
3953 phy_ctrl = MII_CR_FULL_DUPLEX | MII_CR_SPEED_1000 | MII_CR_LOOPBACK;
3954 (void) e1000_write_phy_reg(hw, PHY_CONTROL, phy_ctrl);
3955
3956 /* Set the link disable bit in the Port Configuration register */
3957 (void) e1000_read_phy_reg(hw, 0x10, &phy_pconf);
3958 phy_pconf |= (uint16_t)1 << 14;
3959 (void) e1000_write_phy_reg(hw, 0x10, phy_pconf);
3960 }
3961
3962 /*
3963 * igb_set_internal_serdes_loopback - Set the internal SerDes loopback mode
3964 */
3965 static void
3966 igb_set_internal_serdes_loopback(igb_t *igb)
3967 {
3968 struct e1000_hw *hw;
3969 uint32_t ctrl_ext;
3970 uint32_t ctrl;
3971 uint32_t pcs_lctl;
3972 uint32_t connsw;
3973
3974 hw = &igb->hw;
3975
3976 /* Set link mode to SerDes (11b) in the Extended Control register */
3977 ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
3978 ctrl_ext |= E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES;
3979 E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
3980
3981 /* Configure the SerDes to loopback */
3982 E1000_WRITE_REG(hw, E1000_SCTL, 0x410);
3983
3984 /* Set Device Control register */
3985 ctrl = E1000_READ_REG(hw, E1000_CTRL);
3986 ctrl |= (E1000_CTRL_FD | /* Force full duplex */
3987 E1000_CTRL_SLU); /* Force link up */
3988 ctrl &= ~(E1000_CTRL_RFCE | /* Disable receive flow control */
3989 E1000_CTRL_TFCE | /* Disable transmit flow control */
3990 E1000_CTRL_LRST); /* Clear link reset */
3991 E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
3992
3993 /* Set PCS Link Control register */
3994 pcs_lctl = E1000_READ_REG(hw, E1000_PCS_LCTL);
3995 pcs_lctl |= (E1000_PCS_LCTL_FORCE_LINK |
3996 E1000_PCS_LCTL_FSD |
3997 E1000_PCS_LCTL_FDV_FULL |
3998 E1000_PCS_LCTL_FLV_LINK_UP);
3999 pcs_lctl &= ~E1000_PCS_LCTL_AN_ENABLE;
4000 E1000_WRITE_REG(hw, E1000_PCS_LCTL, pcs_lctl);
4001
4002 /* Set the Copper/Fiber Switch Control - CONNSW register */
4003 connsw = E1000_READ_REG(hw, E1000_CONNSW);
4004 connsw &= ~E1000_CONNSW_ENRGSRC;
4005 E1000_WRITE_REG(hw, E1000_CONNSW, connsw);
4006 }
4007
4008 #pragma inline(igb_intr_rx_work)
4009 /*
4010 * igb_intr_rx_work - rx processing of ISR
4011 */
4012 static void
4013 igb_intr_rx_work(igb_rx_ring_t *rx_ring)
4014 {
4015 mblk_t *mp;
4016
4017 mutex_enter(&rx_ring->rx_lock);
4018 mp = igb_rx(rx_ring, IGB_NO_POLL);
4019 mutex_exit(&rx_ring->rx_lock);
4020
4021 if (mp != NULL)
4022 mac_rx_ring(rx_ring->igb->mac_hdl, rx_ring->ring_handle, mp,
4023 rx_ring->ring_gen_num);
4024 }
4025
4026 #pragma inline(igb_intr_tx_work)
4027 /*
4028 * igb_intr_tx_work - tx processing of ISR
4029 */
4030 static void
4031 igb_intr_tx_work(igb_tx_ring_t *tx_ring)
4032 {
4033 igb_t *igb = tx_ring->igb;
4034
4035 /* Recycle the tx descriptors */
4036 tx_ring->tx_recycle(tx_ring);
4037
4038 /* Schedule the re-transmit */
4039 if (tx_ring->reschedule &&
4040 (tx_ring->tbd_free >= igb->tx_resched_thresh)) {
4041 tx_ring->reschedule = B_FALSE;
4042 mac_tx_ring_update(tx_ring->igb->mac_hdl, tx_ring->ring_handle);
4043 IGB_DEBUG_STAT(tx_ring->stat_reschedule);
4044 }
4045 }
4046
4047 #pragma inline(igb_intr_link_work)
4048 /*
4049 * igb_intr_link_work - link-status-change processing of ISR
4050 */
4051 static void
4052 igb_intr_link_work(igb_t *igb)
4053 {
4054 boolean_t link_changed;
4055
4056 igb_stop_watchdog_timer(igb);
4057
4058 mutex_enter(&igb->gen_lock);
4059
4060 /*
4061 * Because we got a link-status-change interrupt, force
4062 * e1000_check_for_link() to look at phy
4063 */
4064 igb->hw.mac.get_link_status = B_TRUE;
4065
4066 /* igb_link_check takes care of link status change */
4067 link_changed = igb_link_check(igb);
4068
4069 /* Get new phy state */
4070 igb_get_phy_state(igb);
4071
4072 mutex_exit(&igb->gen_lock);
4073
4074 if (link_changed)
4075 mac_link_update(igb->mac_hdl, igb->link_state);
4076
4077 igb_start_watchdog_timer(igb);
4078 }
4079
4080 /*
4081 * igb_intr_legacy - Interrupt handler for legacy interrupts
4082 */
4083 static uint_t
4084 igb_intr_legacy(void *arg1, void *arg2)
4085 {
4086 igb_t *igb = (igb_t *)arg1;
4087 igb_tx_ring_t *tx_ring;
4088 uint32_t icr;
4089 mblk_t *mp;
4090 boolean_t tx_reschedule;
4091 boolean_t link_changed;
4092 uint_t result;
4093
4094 _NOTE(ARGUNUSED(arg2));
4095
4096 mutex_enter(&igb->gen_lock);
4097
4098 if (igb->igb_state & IGB_SUSPENDED) {
4099 mutex_exit(&igb->gen_lock);
4100 return (DDI_INTR_UNCLAIMED);
4101 }
4102
4103 mp = NULL;
4104 tx_reschedule = B_FALSE;
4105 link_changed = B_FALSE;
4106 icr = E1000_READ_REG(&igb->hw, E1000_ICR);
4107
4108 if (igb_check_acc_handle(igb->osdep.reg_handle) != DDI_FM_OK) {
4109 mutex_exit(&igb->gen_lock);
4110 ddi_fm_service_impact(igb->dip, DDI_SERVICE_DEGRADED);
4111 atomic_or_32(&igb->igb_state, IGB_ERROR);
4112 return (DDI_INTR_UNCLAIMED);
4113 }
4114
4115 if (icr & E1000_ICR_INT_ASSERTED) {
4116 /*
4117 * E1000_ICR_INT_ASSERTED bit was set:
4118 * Read(Clear) the ICR, claim this interrupt,
4119 * look for work to do.
4120 */
4121 ASSERT(igb->num_rx_rings == 1);
4122 ASSERT(igb->num_tx_rings == 1);
4123
4124 /* Make sure all interrupt causes are cleared */
4125 (void) E1000_READ_REG(&igb->hw, E1000_EICR);
4126
4127 if (icr & E1000_ICR_RXT0) {
4128 mp = igb_rx(&igb->rx_rings[0], IGB_NO_POLL);
4129 }
4130
4131 if (icr & E1000_ICR_TXDW) {
4132 tx_ring = &igb->tx_rings[0];
4133
4134 /* Recycle the tx descriptors */
4135 tx_ring->tx_recycle(tx_ring);
4136
4137 /* Schedule the re-transmit */
4138 tx_reschedule = (tx_ring->reschedule &&
4139 (tx_ring->tbd_free >= igb->tx_resched_thresh));
4140 }
4141
4142 if (icr & E1000_ICR_LSC) {
4143 /*
4144 * Because we got a link-status-change interrupt, force
4145 * e1000_check_for_link() to look at phy
4146 */
4147 igb->hw.mac.get_link_status = B_TRUE;
4148
4149 /* igb_link_check takes care of link status change */
4150 link_changed = igb_link_check(igb);
4151
4152 /* Get new phy state */
4153 igb_get_phy_state(igb);
4154 }
4155
4156 if (icr & E1000_ICR_DRSTA) {
4157 /* 82580 Full Device Reset needed */
4158 atomic_or_32(&igb->igb_state, IGB_STALL);
4159 }
4160
4161 result = DDI_INTR_CLAIMED;
4162 } else {
4163 /*
4164 * E1000_ICR_INT_ASSERTED bit was not set:
4165 * Don't claim this interrupt.
4166 */
4167 result = DDI_INTR_UNCLAIMED;
4168 }
4169
4170 mutex_exit(&igb->gen_lock);
4171
4172 /*
4173 * Do the following work outside of the gen_lock
4174 */
4175 if (mp != NULL)
4176 mac_rx(igb->mac_hdl, NULL, mp);
4177
4178 if (tx_reschedule) {
4179 tx_ring->reschedule = B_FALSE;
4180 mac_tx_ring_update(igb->mac_hdl, tx_ring->ring_handle);
4181 IGB_DEBUG_STAT(tx_ring->stat_reschedule);
4182 }
4183
4184 if (link_changed)
4185 mac_link_update(igb->mac_hdl, igb->link_state);
4186
4187 return (result);
4188 }
4189
4190 /*
4191 * igb_intr_msi - Interrupt handler for MSI
4192 */
4193 static uint_t
4194 igb_intr_msi(void *arg1, void *arg2)
4195 {
4196 igb_t *igb = (igb_t *)arg1;
4197 uint32_t icr;
4198
4199 _NOTE(ARGUNUSED(arg2));
4200
4201 icr = E1000_READ_REG(&igb->hw, E1000_ICR);
4202
4203 if (igb_check_acc_handle(igb->osdep.reg_handle) != DDI_FM_OK) {
4204 ddi_fm_service_impact(igb->dip, DDI_SERVICE_DEGRADED);
4205 atomic_or_32(&igb->igb_state, IGB_ERROR);
4206 return (DDI_INTR_CLAIMED);
4207 }
4208
4209 /* Make sure all interrupt causes are cleared */
4210 (void) E1000_READ_REG(&igb->hw, E1000_EICR);
4211
4212 /*
4213 * For MSI interrupt, we have only one vector,
4214 * so we have only one rx ring and one tx ring enabled.
4215 */
4216 ASSERT(igb->num_rx_rings == 1);
4217 ASSERT(igb->num_tx_rings == 1);
4218
4219 if (icr & E1000_ICR_RXT0) {
4220 igb_intr_rx_work(&igb->rx_rings[0]);
4221 }
4222
4223 if (icr & E1000_ICR_TXDW) {
4224 igb_intr_tx_work(&igb->tx_rings[0]);
4225 }
4226
4227 if (icr & E1000_ICR_LSC) {
4228 igb_intr_link_work(igb);
4229 }
4230
4231 if (icr & E1000_ICR_DRSTA) {
4232 /* 82580 Full Device Reset needed */
4233 atomic_or_32(&igb->igb_state, IGB_STALL);
4234 }
4235
4236 return (DDI_INTR_CLAIMED);
4237 }
4238
4239 /*
4240 * igb_intr_rx - Interrupt handler for rx
4241 */
4242 static uint_t
4243 igb_intr_rx(void *arg1, void *arg2)
4244 {
4245 igb_rx_ring_t *rx_ring = (igb_rx_ring_t *)arg1;
4246
4247 _NOTE(ARGUNUSED(arg2));
4248
4249 /*
4250 * Only used via MSI-X vector so don't check cause bits
4251 * and only clean the given ring.
4252 */
4253 igb_intr_rx_work(rx_ring);
4254
4255 return (DDI_INTR_CLAIMED);
4256 }
4257
4258 /*
4259 * igb_intr_tx - Interrupt handler for tx
4260 */
4261 static uint_t
4262 igb_intr_tx(void *arg1, void *arg2)
4263 {
4264 igb_tx_ring_t *tx_ring = (igb_tx_ring_t *)arg1;
4265
4266 _NOTE(ARGUNUSED(arg2));
4267
4268 /*
4269 * Only used via MSI-X vector so don't check cause bits
4270 * and only clean the given ring.
4271 */
4272 igb_intr_tx_work(tx_ring);
4273
4274 return (DDI_INTR_CLAIMED);
4275 }
4276
4277 /*
4278 * igb_intr_tx_other - Interrupt handler for both tx and other
4280 */
4281 static uint_t
4282 igb_intr_tx_other(void *arg1, void *arg2)
4283 {
4284 igb_t *igb = (igb_t *)arg1;
4285 uint32_t icr;
4286
4287 _NOTE(ARGUNUSED(arg2));
4288
4289 icr = E1000_READ_REG(&igb->hw, E1000_ICR);
4290
4291 if (igb_check_acc_handle(igb->osdep.reg_handle) != DDI_FM_OK) {
4292 ddi_fm_service_impact(igb->dip, DDI_SERVICE_DEGRADED);
4293 atomic_or_32(&igb->igb_state, IGB_ERROR);
4294 return (DDI_INTR_CLAIMED);
4295 }
4296
4297 /*
4298 * Look for tx reclaiming work first. Remember that in
4299 * this tx-and-other interrupt sharing case, only tx
4300 * ring 0 is used.
4301 */
4302 igb_intr_tx_work(&igb->tx_rings[0]);
4303
4304 /*
4305 * Check for "other" causes.
4306 */
4307 if (icr & E1000_ICR_LSC) {
4308 igb_intr_link_work(igb);
4309 }
4310
4311 /*
4312 * The DOUTSYNC bit indicates a tx packet was dropped because
4313 * the DMA engine got "out of sync". There isn't a real fix
4314 * for this. The Intel recommendation is to count the number
4315 * of occurrences so the user can detect when it is happening.
4316 * The issue is non-fatal and there's no recovery action
4317 * available.
4318 */
4319 if (icr & E1000_ICR_DOUTSYNC) {
4320 IGB_STAT(igb->dout_sync);
4321 }
4322
4323 if (icr & E1000_ICR_DRSTA) {
4324 /* 82580 Full Device Reset needed */
4325 atomic_or_32(&igb->igb_state, IGB_STALL);
4326 }
4327
4328 return (DDI_INTR_CLAIMED);
4329 }
4330
4331 /*
4332 * igb_alloc_intrs - Allocate interrupts for the driver
4333 *
4334 * The normal sequence is to try MSI-X; if not successful, try MSI;
4335 * if not successful, try Legacy.
4336 * igb->intr_force can be used to force the sequence to start with
4337 * any of the 3 types.
4338 * If MSI-X is not used, number of tx/rx rings is forced to 1.
4339 */
4340 static int
4341 igb_alloc_intrs(igb_t *igb)
4342 {
4343 dev_info_t *devinfo;
4344 int intr_types;
4345 int rc;
4346
4347 devinfo = igb->dip;
4348
4349 /* Get supported interrupt types */
4350 rc = ddi_intr_get_supported_types(devinfo, &intr_types);
4351
4352 if (rc != DDI_SUCCESS) {
4353 igb_log(igb, IGB_LOG_ERROR,
4354 "Get supported interrupt types failed: %d", rc);
4355 return (IGB_FAILURE);
4356 }
4357 igb_log(igb, IGB_LOG_INFO, "Supported interrupt types: %x",
4358 intr_types);
4359
4360 igb->intr_type = 0;
4361
4362 /* Install MSI-X interrupts */
4363 if ((intr_types & DDI_INTR_TYPE_MSIX) &&
4364 (igb->intr_force <= IGB_INTR_MSIX)) {
4365 rc = igb_alloc_intr_handles(igb, DDI_INTR_TYPE_MSIX);
4366
4367 if (rc == IGB_SUCCESS)
4368 return (IGB_SUCCESS);
4369
4370 igb_log(igb, IGB_LOG_INFO,
4371 "Allocate MSI-X failed, trying MSI interrupts...");
4372 }
4373
4374 /* MSI-X not used, force rings to 1 */
4375 igb->num_rx_rings = 1;
4376 igb->num_tx_rings = 1;
4377 igb_log(igb, IGB_LOG_INFO,
4378 "MSI-X not used, force rx and tx queue number to 1");
4379
4380 /* Install MSI interrupts */
4381 if ((intr_types & DDI_INTR_TYPE_MSI) &&
4382 (igb->intr_force <= IGB_INTR_MSI)) {
4383 rc = igb_alloc_intr_handles(igb, DDI_INTR_TYPE_MSI);
4384
4385 if (rc == IGB_SUCCESS)
4386 return (IGB_SUCCESS);
4387
4388 igb_log(igb, IGB_LOG_INFO,
4389 "Allocate MSI failed, trying Legacy interrupts...");
4390 }
4391
4392 /* Install legacy interrupts */
4393 if (intr_types & DDI_INTR_TYPE_FIXED) {
4394 rc = igb_alloc_intr_handles(igb, DDI_INTR_TYPE_FIXED);
4395
4396 if (rc == IGB_SUCCESS)
4397 return (IGB_SUCCESS);
4398
4399 igb_log(igb, IGB_LOG_INFO,
4400 "Allocate Legacy interrupts failed");
4401 }
4402
4403 /* If none of the 3 types succeeded, return failure */
4404 return (IGB_FAILURE);
4405 }
4406
4407 /*
4408 * igb_alloc_intr_handles - Allocate interrupt handles.
4409 *
4410 * For legacy and MSI, only 1 handle is needed. For MSI-X,
4411 * if fewer than 2 handles are available, return failure.
4412 * Upon success, this sets the number of Rx rings to a number that
4413 * matches the handles available for Rx interrupts.
4414 */
4415 static int
4416 igb_alloc_intr_handles(igb_t *igb, int intr_type)
4417 {
4418 dev_info_t *devinfo;
4419 int orig, request, count, avail, actual;
4420 int diff, minimum;
4421 int rc;
4422
4423 devinfo = igb->dip;
4424
4425 switch (intr_type) {
4426 case DDI_INTR_TYPE_FIXED:
4427 request = 1; /* Request 1 legacy interrupt handle */
4428 minimum = 1;
4429 igb_log(igb, IGB_LOG_INFO, "interrupt type: legacy");
4430 break;
4431
4432 case DDI_INTR_TYPE_MSI:
4433 request = 1; /* Request 1 MSI interrupt handle */
4434 minimum = 1;
4435 igb_log(igb, IGB_LOG_INFO, "interrupt type: MSI");
4436 break;
4437
4438 case DDI_INTR_TYPE_MSIX:
4439 /*
4440 * The number of vectors for the adapter is
4441 * # rx rings + # tx rings.
4442 * One of the tx vectors also covers the "other" causes.
4443 */
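/*
 * For example (illustrative): with 4 rx rings and 4 tx
 * rings, request = 8 vectors; vector 0 services tx ring 0
 * plus the "other" causes, vectors 1-4 the rx rings, and
 * vectors 5-7 the remaining tx rings.
 */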
4444 request = igb->num_rx_rings + igb->num_tx_rings;
4445 orig = request;
4446 minimum = 2;
4447 igb_log(igb, IGB_LOG_INFO, "interrupt type: MSI-X");
4448 break;
4449
4450 default:
4451 igb_log(igb, IGB_LOG_INFO,
4452 "invalid call to igb_alloc_intr_handles(): %d\n",
4453 intr_type);
4454 return (IGB_FAILURE);
4455 }
4456 igb_log(igb, IGB_LOG_INFO,
4457 "interrupt handles requested: %d minimum: %d",
4458 request, minimum);
4459
4460 /*
4461 * Get number of supported interrupts
4462 */
4463 rc = ddi_intr_get_nintrs(devinfo, intr_type, &count);
4464 if ((rc != DDI_SUCCESS) || (count < minimum)) {
4465 igb_log(igb, IGB_LOG_INFO,
4466 "Get supported interrupt number failed. "
4467 "Return: %d, count: %d", rc, count);
4468 return (IGB_FAILURE);
4469 }
4470 igb_log(igb, IGB_LOG_INFO, "interrupts supported: %d", count);
4471
4472 /*
4473 * Get number of available interrupts
4474 */
4475 rc = ddi_intr_get_navail(devinfo, intr_type, &avail);
4476 if ((rc != DDI_SUCCESS) || (avail < minimum)) {
4477 igb_log(igb, IGB_LOG_INFO,
4478 "Get available interrupt number failed. "
4479 "Return: %d, available: %d", rc, avail);
4480 return (IGB_FAILURE);
4481 }
4482 igb_log(igb, IGB_LOG_INFO, "interrupts available: %d", avail);
4483
4484 if (avail < request) {
4485 igb_log(igb, IGB_LOG_INFO,
4486 "Request %d handles, %d available",
4487 request, avail);
4488 request = avail;
4489 }
4490
4491 actual = 0;
4492 igb->intr_cnt = 0;
4493
4494 /*
4495 * Allocate an array of interrupt handles
4496 */
4497 igb->intr_size = request * sizeof (ddi_intr_handle_t);
4498 igb->htable = kmem_alloc(igb->intr_size, KM_SLEEP);
4499
4500 rc = ddi_intr_alloc(devinfo, igb->htable, intr_type, 0,
4501 request, &actual, DDI_INTR_ALLOC_NORMAL);
4502 if (rc != DDI_SUCCESS) {
4503 igb_log(igb, IGB_LOG_INFO, "Allocate interrupts failed. "
4504 "return: %d, request: %d, actual: %d",
4505 rc, request, actual);
4506 goto alloc_handle_fail;
4507 }
4508 igb_log(igb, IGB_LOG_INFO, "interrupts actually allocated: %d", actual);
4509
4510 igb->intr_cnt = actual;
4511
4512 if (actual < minimum) {
4513 igb_log(igb, IGB_LOG_INFO,
4514 "Insufficient interrupt handles allocated: %d",
4515 actual);
4516 goto alloc_handle_fail;
4517 }
4518
4519 /*
4520 * For MSI-X, the actual count might force us to reduce the number of tx & rx rings
4521 */
4522 if ((intr_type == DDI_INTR_TYPE_MSIX) && (orig > actual)) {
4523 diff = orig - actual;
4524 if (diff < igb->num_tx_rings) {
4525 igb_log(igb, IGB_LOG_INFO,
4526 "MSI-X vectors force Tx queue number to %d",
4527 igb->num_tx_rings - diff);
4528 igb->num_tx_rings -= diff;
4529 } else {
4530 igb_log(igb, IGB_LOG_INFO,
4531 "MSI-X vectors force Tx queue number to 1");
4532 igb->num_tx_rings = 1;
4533
4534 igb_log(igb, IGB_LOG_INFO,
4535 "MSI-X vectors force Rx queue number to %d",
4536 actual - 1);
4537 igb->num_rx_rings = actual - 1;
4538 }
4539 }
4540
4541 /*
4542 * Get priority for first vector, assume remaining are all the same
4543 */
4544 rc = ddi_intr_get_pri(igb->htable[0], &igb->intr_pri);
4545 if (rc != DDI_SUCCESS) {
4546 igb_log(igb, IGB_LOG_INFO,
4547 "Get interrupt priority failed: %d", rc);
4548 goto alloc_handle_fail;
4549 }
4550
4551 rc = ddi_intr_get_cap(igb->htable[0], &igb->intr_cap);
4552 if (rc != DDI_SUCCESS) {
4553 igb_log(igb, IGB_LOG_INFO,
4554 "Get interrupt cap failed: %d", rc);
4555 goto alloc_handle_fail;
4556 }
4557
4558 igb->intr_type = intr_type;
4559
4560 return (IGB_SUCCESS);
4561
4562 alloc_handle_fail:
4563 igb_rem_intrs(igb);
4564
4565 return (IGB_FAILURE);
4566 }
4567
4568 /*
4569 * igb_add_intr_handlers - Add interrupt handlers based on the interrupt type
4570 *
4571 * Before adding the interrupt handlers, the interrupt vectors have
4572 * been allocated, and the rx/tx rings have also been allocated.
4573 */
4574 static int
4575 igb_add_intr_handlers(igb_t *igb)
4576 {
4577 igb_rx_ring_t *rx_ring;
4578 igb_tx_ring_t *tx_ring;
4579 int vector;
4580 int rc;
4581 int i;
4582
4583 vector = 0;
4584
4585 switch (igb->intr_type) {
4586 case DDI_INTR_TYPE_MSIX:
4587 /* Add interrupt handler for tx + other */
4588 tx_ring = &igb->tx_rings[0];
4589 rc = ddi_intr_add_handler(igb->htable[vector],
4590 (ddi_intr_handler_t *)igb_intr_tx_other,
4591 (void *)igb, NULL);
4592
4593 if (rc != DDI_SUCCESS) {
4594 igb_log(igb, IGB_LOG_INFO,
4595 "Add tx/other interrupt handler failed: %d", rc);
4596 return (IGB_FAILURE);
4597 }
4598 tx_ring->intr_vector = vector;
4599 vector++;
4600
4601 /* Add interrupt handler for each rx ring */
4602 for (i = 0; i < igb->num_rx_rings; i++) {
4603 rx_ring = &igb->rx_rings[i];
4604
4605 rc = ddi_intr_add_handler(igb->htable[vector],
4606 (ddi_intr_handler_t *)igb_intr_rx,
4607 (void *)rx_ring, NULL);
4608
4609 if (rc != DDI_SUCCESS) {
4610 igb_log(igb, IGB_LOG_INFO,
4611 "Add rx interrupt handler failed. "
4612 "return: %d, rx ring: %d", rc, i);
4613 for (vector--; vector >= 0; vector--) {
4614 (void) ddi_intr_remove_handler(
4615 igb->htable[vector]);
4616 }
4617 return (IGB_FAILURE);
4618 }
4619
4620 rx_ring->intr_vector = vector;
4621
4622 vector++;
4623 }
4624
4625 /* Add interrupt handler for each tx ring from 2nd ring */
4626 for (i = 1; i < igb->num_tx_rings; i++) {
4627 tx_ring = &igb->tx_rings[i];
4628
4629 rc = ddi_intr_add_handler(igb->htable[vector],
4630 (ddi_intr_handler_t *)igb_intr_tx,
4631 (void *)tx_ring, NULL);
4632
4633 if (rc != DDI_SUCCESS) {
4634 igb_log(igb, IGB_LOG_INFO,
4635 "Add tx interrupt handler failed. "
4636 "return: %d, tx ring: %d", rc, i);
4637 for (vector--; vector >= 0; vector--) {
4638 (void) ddi_intr_remove_handler(
4639 igb->htable[vector]);
4640 }
4641 return (IGB_FAILURE);
4642 }
4643
4644 tx_ring->intr_vector = vector;
4645
4646 vector++;
4647 }
4648
4649 break;
4650
4651 case DDI_INTR_TYPE_MSI:
4652 /* Add interrupt handlers for the only vector */
4653 rc = ddi_intr_add_handler(igb->htable[vector],
4654 (ddi_intr_handler_t *)igb_intr_msi,
4655 (void *)igb, NULL);
4656
4657 if (rc != DDI_SUCCESS) {
4658 igb_log(igb, IGB_LOG_INFO,
4659 "Add MSI interrupt handler failed: %d", rc);
4660 return (IGB_FAILURE);
4661 }
4662
4663 rx_ring = &igb->rx_rings[0];
4664 rx_ring->intr_vector = vector;
4665
4666 vector++;
4667 break;
4668
4669 case DDI_INTR_TYPE_FIXED:
4670 /* Add interrupt handlers for the only vector */
4671 rc = ddi_intr_add_handler(igb->htable[vector],
4672 (ddi_intr_handler_t *)igb_intr_legacy,
4673 (void *)igb, NULL);
4674
4675 if (rc != DDI_SUCCESS) {
4676 igb_log(igb, IGB_LOG_INFO,
4677 "Add legacy interrupt handler failed: %d", rc);
4678 return (IGB_FAILURE);
4679 }
4680
4681 rx_ring = &igb->rx_rings[0];
4682 rx_ring->intr_vector = vector;
4683
4684 vector++;
4685 break;
4686
4687 default:
4688 return (IGB_FAILURE);
4689 }
4690
4691 ASSERT(vector == igb->intr_cnt);
4692
4693 return (IGB_SUCCESS);
4694 }
4695
4696 /*
4697 * igb_setup_msix_82575 - setup 82575 adapter to use MSI-X interrupts
4698 *
4699 * For each vector enabled on the adapter, set the MSIXBM register accordingly.
4700 */
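/*
 * Illustrative sketch, derived from the code below: with one rx ring
 * and one tx ring there are two vectors, programmed as
 *
 *	MSIXBM(0) = E1000_EICR_TX_QUEUE0 | E1000_EICR_OTHER
 *	MSIXBM(1) = E1000_EICR_RX_QUEUE0
 *
 * igb->eims_mask accumulates all of these cause bits for later use in
 * igb_enable_adapter_interrupts_82575().
 */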
4701 static void
4702 igb_setup_msix_82575(igb_t *igb)
4703 {
4704 uint32_t eims = 0;
4705 int i, vector;
4706 struct e1000_hw *hw = &igb->hw;
4707
4708 /*
4709 * Set vector for tx ring 0 and other causes.
4710 * NOTE assumption that it is vector 0.
4711 */
4712 vector = 0;
4713
4714 igb->eims_mask = E1000_EICR_TX_QUEUE0 | E1000_EICR_OTHER;
4715 E1000_WRITE_REG(hw, E1000_MSIXBM(vector), igb->eims_mask);
4716 vector++;
4717
4718 for (i = 0; i < igb->num_rx_rings; i++) {
4719 /*
4720 * Set vector for each rx ring
4721 */
4722 eims = (E1000_EICR_RX_QUEUE0 << i);
4723 E1000_WRITE_REG(hw, E1000_MSIXBM(vector), eims);
4724
4725 /*
4726 * Accumulate bits to enable in
4727 * igb_enable_adapter_interrupts_82575()
4728 */
4729 igb->eims_mask |= eims;
4730
4731 vector++;
4732 }
4733
4734 for (i = 1; i < igb->num_tx_rings; i++) {
4735 /*
4736 * Set vector for each tx ring from 2nd tx ring
4737 */
4738 eims = (E1000_EICR_TX_QUEUE0 << i);
4739 E1000_WRITE_REG(hw, E1000_MSIXBM(vector), eims);
4740
4741 /*
4742 * Accumulate bits to enable in
4743 * igb_enable_adapter_interrupts_82575()
4744 */
4745 igb->eims_mask |= eims;
4746
4747 vector++;
4748 }
4749
4750 ASSERT(vector == igb->intr_cnt);
4751
4752 /*
4753 * Disable IAM for ICR interrupt bits
4754 */
4755 E1000_WRITE_REG(hw, E1000_IAM, 0);
4756 E1000_WRITE_FLUSH(hw);
4757 }
4758
4759 /*
4760 * igb_setup_msix_82576 - setup 82576 adapter to use MSI-X interrupts
4761 *
4762 * 82576 uses a table based method for assigning vectors. Each queue has a
4763 * single entry in the table to which we write a vector number along with a
4764 * "valid" bit. The entry is a single byte in a 4-byte register. Vectors
4765 * take a different position in the 4-byte register depending on whether
4766 * they are numbered above or below 8.
4767 */
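/*
 * Illustrative sketch of the resulting IVAR0 layout, derived from the
 * assignments in the code below (not quoted from the datasheet):
 *
 *	IVAR0[n] byte 0: vector for rx queue n	    (n = 0..7)
 *	IVAR0[n] byte 1: vector for tx queue n	    (n = 0..7)
 *	IVAR0[n] byte 2: vector for rx queue n + 8
 *	IVAR0[n] byte 3: vector for tx queue n + 8
 *
 * Each byte carries the vector number or-ed with E1000_IVAR_VALID.
 */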
4768 static void
4769 igb_setup_msix_82576(igb_t *igb)
4770 {
4771 struct e1000_hw *hw = &igb->hw;
4772 uint32_t ivar, index, vector;
4773 int i;
4774
4775 /* must enable msi-x capability before IVAR settings */
4776 E1000_WRITE_REG(hw, E1000_GPIE,
4777 (E1000_GPIE_MSIX_MODE | E1000_GPIE_PBA | E1000_GPIE_NSICR));
4778
4779 /*
4780 * Set vector for tx ring 0 and other causes.
4781 * NOTE assumption that it is vector 0.
4782 * This is also interdependent with installation of interrupt service
4783 * routines in igb_add_intr_handlers().
4784 */
4785
4786 /* assign "other" causes to vector 0 */
4787 vector = 0;
4788 ivar = ((vector | E1000_IVAR_VALID) << 8);
4789 E1000_WRITE_REG(hw, E1000_IVAR_MISC, ivar);
4790
4791 /* assign tx ring 0 to vector 0 */
4792 ivar = ((vector | E1000_IVAR_VALID) << 8);
4793 E1000_WRITE_REG(hw, E1000_IVAR0, ivar);
4794
4795 /* prepare to enable tx & other interrupt causes */
4796 igb->eims_mask = (1 << vector);
4797
4798 vector++;
4799 for (i = 0; i < igb->num_rx_rings; i++) {
4800 /*
4801 * Set vector for each rx ring
4802 */
4803 index = (i & 0x7);
4804 ivar = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index);
4805
4806 if (i < 8) {
4807 /* vector goes into low byte of register */
4808 ivar = ivar & 0xFFFFFF00;
4809 ivar |= (vector | E1000_IVAR_VALID);
4810 } else {
4811 /* vector goes into third byte of register */
4812 ivar = ivar & 0xFF00FFFF;
4813 ivar |= ((vector | E1000_IVAR_VALID) << 16);
4814 }
4815 E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, ivar);
4816
4817 /* Accumulate interrupt-cause bits to enable */
4818 igb->eims_mask |= (1 << vector);
4819
4820 vector++;
4821 }
4822
4823 for (i = 1; i < igb->num_tx_rings; i++) {
4824 /*
4825 * Set vector for each tx ring from 2nd tx ring.
4826 * Note assumption that tx vectors numerically follow rx vectors.
4827 */
4828 index = (i & 0x7);
4829 ivar = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index);
4830
4831 if (i < 8) {
4832 /* vector goes into second byte of register */
4833 ivar = ivar & 0xFFFF00FF;
4834 ivar |= ((vector | E1000_IVAR_VALID) << 8);
4835 } else {
4836 /* vector goes into fourth byte of register */
4837 ivar = ivar & 0x00FFFFFF;
4838 ivar |= (vector | E1000_IVAR_VALID) << 24;
4839 }
4840 E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, ivar);
4841
4842 /* Accumulate interrupt-cause bits to enable */
4843 igb->eims_mask |= (1 << vector);
4844
4845 vector++;
4846 }
4847
4848 ASSERT(vector == igb->intr_cnt);
4849 }
4850
4851 /*
4852 * igb_setup_msix_82580 - setup 82580 adapter to use MSI-X interrupts
4853 *
4854 * The 82580 uses the same table-based approach as the 82576 but has
4855 * fewer entries. Each queue has a single entry in the table to which we
4856 * write a vector number along with a "valid" bit. A vector's byte
4857 * position in the register depends on whether the queue number is odd or even.
4858 */
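/*
 * Illustrative sketch of the resulting IVAR0 layout, derived from the
 * assignments in the code below: each 4-byte entry covers a pair of
 * queues.
 *
 *	IVAR0[n] byte 0: vector for rx queue 2n
 *	IVAR0[n] byte 1: vector for tx queue 2n
 *	IVAR0[n] byte 2: vector for rx queue 2n + 1
 *	IVAR0[n] byte 3: vector for tx queue 2n + 1
 */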
4859 static void
4860 igb_setup_msix_82580(igb_t *igb)
4861 {
4862 struct e1000_hw *hw = &igb->hw;
4863 uint32_t ivar, index, vector;
4864 int i;
4865
4866 /* must enable msi-x capability before IVAR settings */
4867 E1000_WRITE_REG(hw, E1000_GPIE, (E1000_GPIE_MSIX_MODE |
4868 E1000_GPIE_PBA | E1000_GPIE_NSICR | E1000_GPIE_EIAME));
4869 /*
4870 * Set vector for tx ring 0 and other causes.
4871 * NOTE assumption that it is vector 0.
4872 * This is also interdependent with installation of interrupt service
4873 * routines in igb_add_intr_handlers().
4874 */
4875
4876 /* assign "other" causes to vector 0 */
4877 vector = 0;
4878 ivar = ((vector | E1000_IVAR_VALID) << 8);
4879 E1000_WRITE_REG(hw, E1000_IVAR_MISC, ivar);
4880
4881 /* assign tx ring 0 to vector 0 */
4882 ivar = ((vector | E1000_IVAR_VALID) << 8);
4883 E1000_WRITE_REG(hw, E1000_IVAR0, ivar);
4884
4885 /* prepare to enable tx & other interrupt causes */
4886 igb->eims_mask = (1 << vector);
4887
4888 vector++;
4889
4890 for (i = 0; i < igb->num_rx_rings; i++) {
4891 /*
4892 * Set vector for each rx ring
4893 */
4894 index = (i >> 1);
4895 ivar = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index);
4896
4897 if (i & 1) {
4898 /* vector goes into third byte of register */
4899 ivar = ivar & 0xFF00FFFF;
4900 ivar |= ((vector | E1000_IVAR_VALID) << 16);
4901 } else {
4902 /* vector goes into low byte of register */
4903 ivar = ivar & 0xFFFFFF00;
4904 ivar |= (vector | E1000_IVAR_VALID);
4905 }
4906 E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, ivar);
4907
4908 /* Accumulate interrupt-cause bits to enable */
4909 igb->eims_mask |= (1 << vector);
4910
4911 vector++;
4912 }
4913
4914 for (i = 1; i < igb->num_tx_rings; i++) {
4915 /*
4916 * Set vector for each tx ring from 2nd tx ring.
4917 * Note assumption that tx vectors numerically follow rx vectors.
4918 */
4919 index = (i >> 1);
4920 ivar = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index);
4921
4922 if (i & 1) {
4923 /* vector goes into high byte of register */
4924 ivar = ivar & 0x00FFFFFF;
4925 ivar |= ((vector | E1000_IVAR_VALID) << 24);
4926 } else {
4927 /* vector goes into second byte of register */
4928 ivar = ivar & 0xFFFF00FF;
4929 ivar |= (vector | E1000_IVAR_VALID) << 8;
4930 }
4931 E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, ivar);
4932
4933 /* Accumulate interrupt-cause bits to enable */
4934 igb->eims_mask |= (1 << vector);
4935
4936 vector++;
4937 }
4938 ASSERT(vector == igb->intr_cnt);
4939 }
4940
4941 /*
4942 * igb_rem_intr_handlers - remove the interrupt handlers
4943 */
4944 static void
4945 igb_rem_intr_handlers(igb_t *igb)
4946 {
4947 int i;
4948 int rc;
4949
4950 for (i = 0; i < igb->intr_cnt; i++) {
4951 rc = ddi_intr_remove_handler(igb->htable[i]);
4952 if (rc != DDI_SUCCESS) {
4953 igb_log(igb, IGB_LOG_INFO,
4954 "Remove intr handler failed: %d", rc);
4955 }
4956 }
4957 }
4958
4959 /*
4960 * igb_rem_intrs - remove the allocated interrupts
4961 */
4962 static void
4963 igb_rem_intrs(igb_t *igb)
4964 {
4965 int i;
4966 int rc;
4967
4968 for (i = 0; i < igb->intr_cnt; i++) {
4969 rc = ddi_intr_free(igb->htable[i]);
4970 if (rc != DDI_SUCCESS) {
4971 igb_log(igb, IGB_LOG_INFO,
4972 "Free intr failed: %d", rc);
4973 }
4974 }
4975
4976 kmem_free(igb->htable, igb->intr_size);
4977 igb->htable = NULL;
4978 }
4979
4980 /*
4981 * igb_enable_intrs - enable all the ddi interrupts
4982 */
4983 static int
4984 igb_enable_intrs(igb_t *igb)
4985 {
4986 int i;
4987 int rc;
4988
4989 /* Enable interrupts */
4990 if (igb->intr_cap & DDI_INTR_FLAG_BLOCK) {
4991 /* Call ddi_intr_block_enable() for MSI */
4992 rc = ddi_intr_block_enable(igb->htable, igb->intr_cnt);
4993 if (rc != DDI_SUCCESS) {
4994 igb_log(igb, IGB_LOG_ERROR,
4995 "Enable block intr failed: %d", rc);
4996 return (IGB_FAILURE);
4997 }
4998 } else {
4999 /* Call ddi_intr_enable() for Legacy/MSI non block enable */
5000 for (i = 0; i < igb->intr_cnt; i++) {
5001 rc = ddi_intr_enable(igb->htable[i]);
5002 if (rc != DDI_SUCCESS) {
5003 igb_log(igb, IGB_LOG_ERROR,
5004 "Enable intr failed: %d", rc);
5005 return (IGB_FAILURE);
5006 }
5007 }
5008 }
5009
5010 return (IGB_SUCCESS);
5011 }
5012
5013 /*
5014 * igb_disable_intrs - disable all the ddi interrupts
5015 */
5016 static int
5017 igb_disable_intrs(igb_t *igb)
5018 {
5019 int i;
5020 int rc;
5021
5022 /* Disable all interrupts */
5023 if (igb->intr_cap & DDI_INTR_FLAG_BLOCK) {
5024 rc = ddi_intr_block_disable(igb->htable, igb->intr_cnt);
5025 if (rc != DDI_SUCCESS) {
5026 igb_log(igb, IGB_LOG_ERROR,
5027 "Disable block intr failed: %d", rc);
5028 return (IGB_FAILURE);
5029 }
5030 } else {
5031 for (i = 0; i < igb->intr_cnt; i++) {
5032 rc = ddi_intr_disable(igb->htable[i]);
5033 if (rc != DDI_SUCCESS) {
5034 igb_log(igb, IGB_LOG_ERROR,
5035 "Disable intr failed: %d", rc);
5036 return (IGB_FAILURE);
5037 }
5038 }
5039 }
5040
5041 return (IGB_SUCCESS);
5042 }
5043
5044 /*
5045 * igb_get_phy_state - Get and save the parameters read from PHY registers
5046 */
5047 static void
5048 igb_get_phy_state(igb_t *igb)
5049 {
5050 struct e1000_hw *hw = &igb->hw;
5051 uint16_t phy_ctrl;
5052 uint16_t phy_status;
5053 uint16_t phy_an_adv;
5054 uint16_t phy_an_exp;
5055 uint16_t phy_ext_status;
5056 uint16_t phy_1000t_ctrl;
5057 uint16_t phy_1000t_status;
5058 uint16_t phy_lp_able;
5059
5060 ASSERT(mutex_owned(&igb->gen_lock));
5061
5062 if (hw->phy.media_type == e1000_media_type_copper) {
5063 (void) e1000_read_phy_reg(hw, PHY_CONTROL, &phy_ctrl);
5064 (void) e1000_read_phy_reg(hw, PHY_STATUS, &phy_status);
5065 (void) e1000_read_phy_reg(hw, PHY_AUTONEG_ADV, &phy_an_adv);
5066 (void) e1000_read_phy_reg(hw, PHY_AUTONEG_EXP, &phy_an_exp);
5067 (void) e1000_read_phy_reg(hw, PHY_EXT_STATUS, &phy_ext_status);
5068 (void) e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_1000t_ctrl);
5069 (void) e1000_read_phy_reg(hw,
5070 PHY_1000T_STATUS, &phy_1000t_status);
5071 (void) e1000_read_phy_reg(hw, PHY_LP_ABILITY, &phy_lp_able);
5072
5073 igb->param_autoneg_cap =
5074 (phy_status & MII_SR_AUTONEG_CAPS) ? 1 : 0;
5075 igb->param_pause_cap =
5076 (phy_an_adv & NWAY_AR_PAUSE) ? 1 : 0;
5077 igb->param_asym_pause_cap =
5078 (phy_an_adv & NWAY_AR_ASM_DIR) ? 1 : 0;
5079 igb->param_1000fdx_cap =
5080 ((phy_ext_status & IEEE_ESR_1000T_FD_CAPS) ||
5081 (phy_ext_status & IEEE_ESR_1000X_FD_CAPS)) ? 1 : 0;
5082 igb->param_1000hdx_cap =
5083 ((phy_ext_status & IEEE_ESR_1000T_HD_CAPS) ||
5084 (phy_ext_status & IEEE_ESR_1000X_HD_CAPS)) ? 1 : 0;
5085 igb->param_100t4_cap =
5086 (phy_status & MII_SR_100T4_CAPS) ? 1 : 0;
5087 igb->param_100fdx_cap = ((phy_status & MII_SR_100X_FD_CAPS) ||
5088 (phy_status & MII_SR_100T2_FD_CAPS)) ? 1 : 0;
5089 igb->param_100hdx_cap = ((phy_status & MII_SR_100X_HD_CAPS) ||
5090 (phy_status & MII_SR_100T2_HD_CAPS)) ? 1 : 0;
5091 igb->param_10fdx_cap =
5092 (phy_status & MII_SR_10T_FD_CAPS) ? 1 : 0;
5093 igb->param_10hdx_cap =
5094 (phy_status & MII_SR_10T_HD_CAPS) ? 1 : 0;
5095 igb->param_rem_fault =
5096 (phy_status & MII_SR_REMOTE_FAULT) ? 1 : 0;
5097
5098 igb->param_adv_autoneg_cap = hw->mac.autoneg;
5099 igb->param_adv_pause_cap =
5100 (phy_an_adv & NWAY_AR_PAUSE) ? 1 : 0;
5101 igb->param_adv_asym_pause_cap =
5102 (phy_an_adv & NWAY_AR_ASM_DIR) ? 1 : 0;
5103 igb->param_adv_1000hdx_cap =
5104 (phy_1000t_ctrl & CR_1000T_HD_CAPS) ? 1 : 0;
5105 igb->param_adv_100t4_cap =
5106 (phy_an_adv & NWAY_AR_100T4_CAPS) ? 1 : 0;
5107 igb->param_adv_rem_fault =
5108 (phy_an_adv & NWAY_AR_REMOTE_FAULT) ? 1 : 0;
5109 if (igb->param_adv_autoneg_cap == 1) {
5110 igb->param_adv_1000fdx_cap =
5111 (phy_1000t_ctrl & CR_1000T_FD_CAPS) ? 1 : 0;
5112 igb->param_adv_100fdx_cap =
5113 (phy_an_adv & NWAY_AR_100TX_FD_CAPS) ? 1 : 0;
5114 igb->param_adv_100hdx_cap =
5115 (phy_an_adv & NWAY_AR_100TX_HD_CAPS) ? 1 : 0;
5116 igb->param_adv_10fdx_cap =
5117 (phy_an_adv & NWAY_AR_10T_FD_CAPS) ? 1 : 0;
5118 igb->param_adv_10hdx_cap =
5119 (phy_an_adv & NWAY_AR_10T_HD_CAPS) ? 1 : 0;
5120 }
5121
5122 igb->param_lp_autoneg_cap =
5123 (phy_an_exp & NWAY_ER_LP_NWAY_CAPS) ? 1 : 0;
5124 igb->param_lp_pause_cap =
5125 (phy_lp_able & NWAY_LPAR_PAUSE) ? 1 : 0;
5126 igb->param_lp_asym_pause_cap =
5127 (phy_lp_able & NWAY_LPAR_ASM_DIR) ? 1 : 0;
5128 igb->param_lp_1000fdx_cap =
5129 (phy_1000t_status & SR_1000T_LP_FD_CAPS) ? 1 : 0;
5130 igb->param_lp_1000hdx_cap =
5131 (phy_1000t_status & SR_1000T_LP_HD_CAPS) ? 1 : 0;
5132 igb->param_lp_100t4_cap =
5133 (phy_lp_able & NWAY_LPAR_100T4_CAPS) ? 1 : 0;
5134 igb->param_lp_100fdx_cap =
5135 (phy_lp_able & NWAY_LPAR_100TX_FD_CAPS) ? 1 : 0;
5136 igb->param_lp_100hdx_cap =
5137 (phy_lp_able & NWAY_LPAR_100TX_HD_CAPS) ? 1 : 0;
5138 igb->param_lp_10fdx_cap =
5139 (phy_lp_able & NWAY_LPAR_10T_FD_CAPS) ? 1 : 0;
5140 igb->param_lp_10hdx_cap =
5141 (phy_lp_able & NWAY_LPAR_10T_HD_CAPS) ? 1 : 0;
5142 igb->param_lp_rem_fault =
5143 (phy_lp_able & NWAY_LPAR_REMOTE_FAULT) ? 1 : 0;
5144 } else {
5145 /*
5146 * 1Gig Fiber adapter only offers 1Gig Full Duplex.
5147 */
5148 igb->param_autoneg_cap = 0;
5149 igb->param_pause_cap = 1;
5150 igb->param_asym_pause_cap = 1;
5151 igb->param_1000fdx_cap = 1;
5152 igb->param_1000hdx_cap = 0;
5153 igb->param_100t4_cap = 0;
5154 igb->param_100fdx_cap = 0;
5155 igb->param_100hdx_cap = 0;
5156 igb->param_10fdx_cap = 0;
5157 igb->param_10hdx_cap = 0;
5158
5159 igb->param_adv_autoneg_cap = 0;
5160 igb->param_adv_pause_cap = 1;
5161 igb->param_adv_asym_pause_cap = 1;
5162 igb->param_adv_1000fdx_cap = 1;
5163 igb->param_adv_1000hdx_cap = 0;
5164 igb->param_adv_100t4_cap = 0;
5165 igb->param_adv_100fdx_cap = 0;
5166 igb->param_adv_100hdx_cap = 0;
5167 igb->param_adv_10fdx_cap = 0;
5168 igb->param_adv_10hdx_cap = 0;
5169
5170 igb->param_lp_autoneg_cap = 0;
5171 igb->param_lp_pause_cap = 0;
5172 igb->param_lp_asym_pause_cap = 0;
5173 igb->param_lp_1000fdx_cap = 0;
5174 igb->param_lp_1000hdx_cap = 0;
5175 igb->param_lp_100t4_cap = 0;
5176 igb->param_lp_100fdx_cap = 0;
5177 igb->param_lp_100hdx_cap = 0;
5178 igb->param_lp_10fdx_cap = 0;
5179 igb->param_lp_10hdx_cap = 0;
5180 igb->param_lp_rem_fault = 0;
5181 }
5182 }
5183
5184 /*
5185 * synchronize the adv* and en* parameters.
5186 *
5187 * See comments in <sys/dld.h> for details of the *_en_*
5188 * parameters. Setting the adv parameters via ndd will
5189 * synchronize all the en parameters with the adv parameters,
5190 * implicitly disabling any settings made via dladm.
5191 */
5192 static void
5193 igb_param_sync(igb_t *igb)
5194 {
5195 igb->param_en_1000fdx_cap = igb->param_adv_1000fdx_cap;
5196 igb->param_en_1000hdx_cap = igb->param_adv_1000hdx_cap;
5197 igb->param_en_100t4_cap = igb->param_adv_100t4_cap;
5198 igb->param_en_100fdx_cap = igb->param_adv_100fdx_cap;
5199 igb->param_en_100hdx_cap = igb->param_adv_100hdx_cap;
5200 igb->param_en_10fdx_cap = igb->param_adv_10fdx_cap;
5201 igb->param_en_10hdx_cap = igb->param_adv_10hdx_cap;
5202 }
5203
5204 /*
5205 * igb_get_driver_control
5206 */
5207 static void
5208 igb_get_driver_control(struct e1000_hw *hw)
5209 {
5210 uint32_t ctrl_ext;
5211
5212 /* Notify firmware that driver is in control of device */
5213 ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
5214 ctrl_ext |= E1000_CTRL_EXT_DRV_LOAD;
5215 E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
5216 }
5217
5218 /*
5219 * igb_release_driver_control
5220 */
5221 static void
5222 igb_release_driver_control(struct e1000_hw *hw)
5223 {
5224 uint32_t ctrl_ext;
5225
5226 /* Notify firmware that driver is no longer in control of device */
5227 ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
5228 ctrl_ext &= ~E1000_CTRL_EXT_DRV_LOAD;
5229 E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
5230 }
5231
5232 /*
5233 * igb_atomic_reserve - Atomically decrement a counter by n; fail if fewer than n remain
5234 */
5235 int
5236 igb_atomic_reserve(uint32_t *count_p, uint32_t n)
5237 {
5238 uint32_t oldval;
5239 uint32_t newval;
5240
5241 /* ATOMICALLY */
5242 do {
5243 oldval = *count_p;
5244 if (oldval < n)
5245 return (-1);
5246 newval = oldval - n;
5247 } while (atomic_cas_32(count_p, oldval, newval) != oldval);
5248
5249 return (newval);
5250 }
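
/*
 * Illustrative usage (a sketch, not a call site quoted from this
 * file): atomically reserve desc_needed free tx descriptors and fail
 * the transmit attempt if too few remain:
 *
 *	if (igb_atomic_reserve(&tx_ring->tbd_free, desc_needed) < 0)
 *		return (B_FALSE);
 */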
5251
5252 /*
5253 * FMA support
5254 */
5255
5256 int
5257 igb_check_acc_handle(ddi_acc_handle_t handle)
5258 {
5259 ddi_fm_error_t de;
5260
5261 ddi_fm_acc_err_get(handle, &de, DDI_FME_VERSION);
5262 ddi_fm_acc_err_clear(handle, DDI_FME_VERSION);
5263 return (de.fme_status);
5264 }
5265
5266 int
5267 igb_check_dma_handle(ddi_dma_handle_t handle)
5268 {
5269 ddi_fm_error_t de;
5270
5271 ddi_fm_dma_err_get(handle, &de, DDI_FME_VERSION);
5272 return (de.fme_status);
5273 }
5274
5275 /*
5276 * The IO fault service error handling callback function
5277 */
5278 /*ARGSUSED*/
5279 static int
5280 igb_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data)
5281 {
5282 /*
5283 * As the driver can always deal with an error in any DMA or
5284 * access handle, we can just return the fme_status value.
5285 */
5286 pci_ereport_post(dip, err, NULL);
5287 return (err->fme_status);
5288 }
5289
5290 static void
5291 igb_fm_init(igb_t *igb)
5292 {
5293 ddi_iblock_cookie_t iblk;
5294 int fma_dma_flag;
5295
5296 /* Only register with IO Fault Services if we have some capability */
5297 if (igb->fm_capabilities & DDI_FM_ACCCHK_CAPABLE) {
5298 igb_regs_acc_attr.devacc_attr_access = DDI_FLAGERR_ACC;
5299 } else {
5300 igb_regs_acc_attr.devacc_attr_access = DDI_DEFAULT_ACC;
5301 }
5302
5303 if (igb->fm_capabilities & DDI_FM_DMACHK_CAPABLE) {
5304 fma_dma_flag = 1;
5305 } else {
5306 fma_dma_flag = 0;
5307 }
5308
5309 (void) igb_set_fma_flags(fma_dma_flag);
5310
5311 if (igb->fm_capabilities) {
5312
5313 /* Register capabilities with IO Fault Services */
5314 ddi_fm_init(igb->dip, &igb->fm_capabilities, &iblk);
5315
5316 /*
5317 * Initialize pci ereport capabilities if ereport capable
5318 */
5319 if (DDI_FM_EREPORT_CAP(igb->fm_capabilities) ||
5320 DDI_FM_ERRCB_CAP(igb->fm_capabilities))
5321 pci_ereport_setup(igb->dip);
5322
5323 /*
5324 * Register error callback if error callback capable
5325 */
5326 if (DDI_FM_ERRCB_CAP(igb->fm_capabilities))
5327 ddi_fm_handler_register(igb->dip,
5328 igb_fm_error_cb, (void*) igb);
5329 }
5330 }
5331
5332 static void
5333 igb_fm_fini(igb_t *igb)
5334 {
5335 /* Only unregister FMA capabilities if we registered some */
5336 if (igb->fm_capabilities) {
5337
5338 /*
5339 * Release any resources allocated by pci_ereport_setup()
5340 */
5341 if (DDI_FM_EREPORT_CAP(igb->fm_capabilities) ||
5342 DDI_FM_ERRCB_CAP(igb->fm_capabilities))
5343 pci_ereport_teardown(igb->dip);
5344
5345 /*
5346 * Un-register error callback if error callback capable
5347 */
5348 if (DDI_FM_ERRCB_CAP(igb->fm_capabilities))
5349 ddi_fm_handler_unregister(igb->dip);
5350
5351 /* Unregister from IO Fault Services */
5352 ddi_fm_fini(igb->dip);
5353 }
5354 }
5355
5356 void
5357 igb_fm_ereport(igb_t *igb, char *detail)
5358 {
5359 uint64_t ena;
5360 char buf[FM_MAX_CLASS];
5361
5362 (void) snprintf(buf, FM_MAX_CLASS, "%s.%s", DDI_FM_DEVICE, detail);
5363 ena = fm_ena_generate(0, FM_ENA_FMT1);
5364 if (DDI_FM_EREPORT_CAP(igb->fm_capabilities)) {
5365 ddi_fm_ereport_post(igb->dip, buf, ena, DDI_NOSLEEP,
5366 FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0, NULL);
5367 }
5368 }
5369