1 /*
2 * This file is provided under a CDDLv1 license. When using or
3 * redistributing this file, you may do so under this license.
4 * In redistributing this file this license must be included
5 * and no other modification of this header file is permitted.
6 *
7 * CDDL LICENSE SUMMARY
8 *
9 * Copyright(c) 1999 - 2009 Intel Corporation. All rights reserved.
10 *
11 * The contents of this file are subject to the terms of Version
12 * 1.0 of the Common Development and Distribution License (the "License").
13 *
14 * You should have received a copy of the License with this software.
15 * You can obtain a copy of the License at
16 * http://www.opensolaris.org/os/licensing.
17 * See the License for the specific language governing permissions
18 * and limitations under the License.
19 */
20
21 /*
22 * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
23 */
24
25 /*
26 * Copyright 2012 DEY Storage Systems, Inc. All rights reserved.
27 * Copyright 2013 Nexenta Systems, Inc. All rights reserved.
28 * Copyright (c) 2017, Joyent, Inc.
29 */
30
31 /*
32 * **********************************************************************
33 * *
34 * Module Name: *
35 * e1000g_main.c *
36 * *
37 * Abstract: *
38 * This file contains the interface routines for the Solaris OS. *
39 * It has all DDI entry point routines and GLD entry point routines. *
40 * *
41 * This file also contains the routines that take care of *
42 * initialization, uninitialization and interrupt handling. *
43 * *
44 * **********************************************************************
45 */
46
47 #include <sys/dlpi.h>
48 #include <sys/mac.h>
49 #include "e1000g_sw.h"
50 #include "e1000g_debug.h"
51
52 static char ident[] = "Intel PRO/1000 Ethernet";
53 /* LINTED E_STATIC_UNUSED */
54 static char e1000g_version[] = "Driver Ver. 5.3.24";
55
56 /*
57 * Prototypes for DDI entry points
58 */
59 static int e1000g_attach(dev_info_t *, ddi_attach_cmd_t);
60 static int e1000g_detach(dev_info_t *, ddi_detach_cmd_t);
61 static int e1000g_quiesce(dev_info_t *);
62
63 /*
64 * Prototypes for the init and interrupt routines
65 */
66 static int e1000g_resume(dev_info_t *);
67 static int e1000g_suspend(dev_info_t *);
68 static uint_t e1000g_intr_pciexpress(caddr_t);
69 static uint_t e1000g_intr(caddr_t);
70 static void e1000g_intr_work(struct e1000g *, uint32_t);
71 #pragma inline(e1000g_intr_work)
72 static int e1000g_init(struct e1000g *);
73 static int e1000g_start(struct e1000g *, boolean_t);
74 static void e1000g_stop(struct e1000g *, boolean_t);
75 static int e1000g_m_start(void *);
76 static void e1000g_m_stop(void *);
77 static int e1000g_m_promisc(void *, boolean_t);
78 static boolean_t e1000g_m_getcapab(void *, mac_capab_t, void *);
79 static int e1000g_m_multicst(void *, boolean_t, const uint8_t *);
80 static void e1000g_m_ioctl(void *, queue_t *, mblk_t *);
81 static int e1000g_m_setprop(void *, const char *, mac_prop_id_t,
82 uint_t, const void *);
83 static int e1000g_m_getprop(void *, const char *, mac_prop_id_t,
84 uint_t, void *);
85 static void e1000g_m_propinfo(void *, const char *, mac_prop_id_t,
86 mac_prop_info_handle_t);
87 static int e1000g_set_priv_prop(struct e1000g *, const char *, uint_t,
88 const void *);
89 static int e1000g_get_priv_prop(struct e1000g *, const char *, uint_t, void *);
90 static void e1000g_init_locks(struct e1000g *);
91 static void e1000g_destroy_locks(struct e1000g *);
92 static int e1000g_identify_hardware(struct e1000g *);
93 static int e1000g_regs_map(struct e1000g *);
94 static int e1000g_set_driver_params(struct e1000g *);
95 static void e1000g_set_bufsize(struct e1000g *);
96 static int e1000g_register_mac(struct e1000g *);
97 static boolean_t e1000g_rx_drain(struct e1000g *);
98 static boolean_t e1000g_tx_drain(struct e1000g *);
99 static void e1000g_init_unicst(struct e1000g *);
100 static int e1000g_unicst_set(struct e1000g *, const uint8_t *, int);
101 static int e1000g_alloc_rx_data(struct e1000g *);
102 static void e1000g_release_multicast(struct e1000g *);
103 static void e1000g_pch_limits(struct e1000g *);
104 static uint32_t e1000g_mtu2maxframe(uint32_t);
105
106 /*
107 * Local routines
108 */
109 static boolean_t e1000g_reset_adapter(struct e1000g *);
110 static void e1000g_tx_clean(struct e1000g *);
111 static void e1000g_rx_clean(struct e1000g *);
112 static void e1000g_link_timer(void *);
113 static void e1000g_local_timer(void *);
114 static boolean_t e1000g_link_check(struct e1000g *);
115 static boolean_t e1000g_stall_check(struct e1000g *);
116 static void e1000g_smartspeed(struct e1000g *);
117 static void e1000g_get_conf(struct e1000g *);
118 static boolean_t e1000g_get_prop(struct e1000g *, char *, int, int, int,
119 int *);
120 static void enable_watchdog_timer(struct e1000g *);
121 static void disable_watchdog_timer(struct e1000g *);
122 static void start_watchdog_timer(struct e1000g *);
123 static void restart_watchdog_timer(struct e1000g *);
124 static void stop_watchdog_timer(struct e1000g *);
125 static void stop_link_timer(struct e1000g *);
126 static void stop_82547_timer(e1000g_tx_ring_t *);
127 static void e1000g_force_speed_duplex(struct e1000g *);
128 static void e1000g_setup_max_mtu(struct e1000g *);
129 static void e1000g_get_max_frame_size(struct e1000g *);
130 static boolean_t is_valid_mac_addr(uint8_t *);
131 static void e1000g_unattach(dev_info_t *, struct e1000g *);
132 static int e1000g_get_bar_info(dev_info_t *, int, bar_info_t *);
133 #ifdef E1000G_DEBUG
134 static void e1000g_ioc_peek_reg(struct e1000g *, e1000g_peekpoke_t *);
135 static void e1000g_ioc_poke_reg(struct e1000g *, e1000g_peekpoke_t *);
136 static void e1000g_ioc_peek_mem(struct e1000g *, e1000g_peekpoke_t *);
137 static void e1000g_ioc_poke_mem(struct e1000g *, e1000g_peekpoke_t *);
138 static enum ioc_reply e1000g_pp_ioctl(struct e1000g *,
139 struct iocblk *, mblk_t *);
140 #endif
141 static enum ioc_reply e1000g_loopback_ioctl(struct e1000g *,
142 struct iocblk *, mblk_t *);
143 static boolean_t e1000g_check_loopback_support(struct e1000_hw *);
144 static boolean_t e1000g_set_loopback_mode(struct e1000g *, uint32_t);
145 static void e1000g_set_internal_loopback(struct e1000g *);
146 static void e1000g_set_external_loopback_1000(struct e1000g *);
147 static void e1000g_set_external_loopback_100(struct e1000g *);
148 static void e1000g_set_external_loopback_10(struct e1000g *);
149 static int e1000g_add_intrs(struct e1000g *);
150 static int e1000g_intr_add(struct e1000g *, int);
151 static int e1000g_rem_intrs(struct e1000g *);
152 static int e1000g_enable_intrs(struct e1000g *);
153 static int e1000g_disable_intrs(struct e1000g *);
154 static boolean_t e1000g_link_up(struct e1000g *);
155 #ifdef __sparc
156 static boolean_t e1000g_find_mac_address(struct e1000g *);
157 #endif
158 static void e1000g_get_phy_state(struct e1000g *);
159 static int e1000g_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err,
160 const void *impl_data);
161 static void e1000g_fm_init(struct e1000g *Adapter);
162 static void e1000g_fm_fini(struct e1000g *Adapter);
163 static void e1000g_param_sync(struct e1000g *);
164 static void e1000g_get_driver_control(struct e1000_hw *);
165 static void e1000g_release_driver_control(struct e1000_hw *);
166 static void e1000g_restore_promisc(struct e1000g *Adapter);
167
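/*
 * Private properties handled by e1000g_set_priv_prop()/e1000g_get_priv_prop().
 * They can typically be tuned at runtime with dladm set-linkprop, e.g.
 * "dladm set-linkprop -p _tx_intr_delay=<usec> <link>" (link name illustrative).
 */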
168 char *e1000g_priv_props[] = {
169 "_tx_bcopy_threshold",
170 "_tx_interrupt_enable",
171 "_tx_intr_delay",
172 "_tx_intr_abs_delay",
173 "_rx_bcopy_threshold",
174 "_max_num_rcv_packets",
175 "_rx_intr_delay",
176 "_rx_intr_abs_delay",
177 "_intr_throttling_rate",
178 "_intr_adaptive",
179 "_adv_pause_cap",
180 "_adv_asym_pause_cap",
181 NULL
182 };
183
184 static struct cb_ops cb_ws_ops = {
185 nulldev, /* cb_open */
186 nulldev, /* cb_close */
187 nodev, /* cb_strategy */
188 nodev, /* cb_print */
189 nodev, /* cb_dump */
190 nodev, /* cb_read */
191 nodev, /* cb_write */
192 nodev, /* cb_ioctl */
193 nodev, /* cb_devmap */
194 nodev, /* cb_mmap */
195 nodev, /* cb_segmap */
196 nochpoll, /* cb_chpoll */
197 ddi_prop_op, /* cb_prop_op */
198 NULL, /* cb_stream */
199 D_MP | D_HOTPLUG, /* cb_flag */
200 CB_REV, /* cb_rev */
201 nodev, /* cb_aread */
202 nodev /* cb_awrite */
203 };
204
205 static struct dev_ops ws_ops = {
206 DEVO_REV, /* devo_rev */
207 0, /* devo_refcnt */
208 NULL, /* devo_getinfo */
209 nulldev, /* devo_identify */
210 nulldev, /* devo_probe */
211 e1000g_attach, /* devo_attach */
212 e1000g_detach, /* devo_detach */
213 nodev, /* devo_reset */
214 &cb_ws_ops, /* devo_cb_ops */
215 NULL, /* devo_bus_ops */
216 ddi_power, /* devo_power */
217 e1000g_quiesce /* devo_quiesce */
218 };
219
220 static struct modldrv modldrv = {
221 &mod_driverops, /* Type of module. This one is a driver */
222 ident, /* Description string */
223 &ws_ops, /* driver ops */
224 };
225
226 static struct modlinkage modlinkage = {
227 MODREV_1, &modldrv, NULL
228 };
229
230 /* Access attributes for register mapping */
231 static ddi_device_acc_attr_t e1000g_regs_acc_attr = {
232 DDI_DEVICE_ATTR_V1,
233 DDI_STRUCTURE_LE_ACC,
234 DDI_STRICTORDER_ACC,
235 DDI_FLAGERR_ACC
236 };
237
238 #define E1000G_M_CALLBACK_FLAGS \
239 (MC_IOCTL | MC_GETCAPAB | MC_SETPROP | MC_GETPROP | MC_PROPINFO)
240
241 static mac_callbacks_t e1000g_m_callbacks = {
242 E1000G_M_CALLBACK_FLAGS,
243 e1000g_m_stat,
244 e1000g_m_start,
245 e1000g_m_stop,
246 e1000g_m_promisc,
247 e1000g_m_multicst,
248 NULL,
249 e1000g_m_tx,
250 NULL,
251 e1000g_m_ioctl,
252 e1000g_m_getcapab,
253 NULL,
254 NULL,
255 e1000g_m_setprop,
256 e1000g_m_getprop,
257 e1000g_m_propinfo
258 };
259
260 /*
261 * Global variables
262 */
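/*
 * e1000g_jumbo_mtu caps the jumbo MTU that e1000g_setup_max_mtu() allows on
 * the 82571/82572/82574/80003es2lan/ich10 families.  e1000g_mblks_pending
 * counts rx buffers still held by upper layers; _fini() refuses to unload
 * the module while it is non-zero.
 */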
263 uint32_t e1000g_jumbo_mtu = MAXIMUM_MTU_9K;
264 uint32_t e1000g_mblks_pending = 0;
265 /*
266 * Workaround for Dynamic Reconfiguration support, for x86 platform only.
267 * Here we maintain a private dev_info list if e1000g_force_detach is
268 * enabled. If we force the driver to detach while there are still some
269 * rx buffers retained in the upper layer, we have to keep a copy of the
270 * dev_info. In some cases (Dynamic Reconfiguration), the dev_info data
271 * structure will be freed after the driver is detached. However when we
272 * finally free those rx buffers released by the upper layer, we need to
273 * refer to the dev_info to free the dma buffers. So we save a copy of
274 * the dev_info for this purpose. On x86 platform, we assume this copy
275 * of dev_info is always valid, but on SPARC platform, it could be invalid
276 * after the system board level DR operation. For this reason, the global
277 * variable e1000g_force_detach must be B_FALSE on SPARC platform.
278 */
279 #ifdef __sparc
280 boolean_t e1000g_force_detach = B_FALSE;
281 #else
282 boolean_t e1000g_force_detach = B_TRUE;
283 #endif
284 private_devi_list_t *e1000g_private_devi_list = NULL;
285
286 /*
287 * The mutex e1000g_rx_detach_lock is defined to protect the processing of
288 * the private dev_info list, and to serialize the processing of rx buffer
289 * freeing and rx buffer recycling.
290 */
291 kmutex_t e1000g_rx_detach_lock;
292 /*
293 * The rwlock e1000g_dma_type_lock is defined to protect the global flag
294 * e1000g_dma_type. For SPARC, the initial value of the flag is "USE_DVMA".
295 * If there are many e1000g instances, the system may run out of DVMA
296 * resources during the initialization of the instances, then the flag will
297 * be changed to "USE_DMA". Because different e1000g instances are initialized
298 * in parallel, we need to use this lock to protect the flag.
299 */
300 krwlock_t e1000g_dma_type_lock;
301
302 /*
303 * The 82546 chipset is a dual-port device, both the ports share one eeprom.
304 * Based on the information from Intel, the 82546 chipset has some hardware
305 * problem. When one port is being reset and the other port is trying to
306 * access the eeprom, it could cause a system hang or panic. To work around this
307 * hardware problem, we use a global mutex to prevent such operations from
308 * happening simultaneously on different instances. This workaround is applied
309 * to all the devices supported by this driver.
310 */
311 kmutex_t e1000g_nvm_lock;
312
313 /*
314 * Loadable module configuration entry points for the driver
315 */
316
317 /*
318 * _init - module initialization
319 */
320 int
321 _init(void)
322 {
323 int status;
324
325 mac_init_ops(&ws_ops, WSNAME);
326 status = mod_install(&modlinkage);
327 if (status != DDI_SUCCESS)
328 mac_fini_ops(&ws_ops);
329 else {
330 mutex_init(&e1000g_rx_detach_lock, NULL, MUTEX_DRIVER, NULL);
331 rw_init(&e1000g_dma_type_lock, NULL, RW_DRIVER, NULL);
332 mutex_init(&e1000g_nvm_lock, NULL, MUTEX_DRIVER, NULL);
333 }
334
335 return (status);
336 }
337
338 /*
339 * _fini - module finalization
340 */
341 int
342 _fini(void)
343 {
344 int status;
345
346 if (e1000g_mblks_pending != 0)
347 return (EBUSY);
348
349 status = mod_remove(&modlinkage);
350 if (status == DDI_SUCCESS) {
351 mac_fini_ops(&ws_ops);
352
353 if (e1000g_force_detach) {
354 private_devi_list_t *devi_node;
355
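/*
 * Free any dev_info copies left behind by instances that were
 * force-detached while rx buffers were still held by upper layers.
 */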
356 mutex_enter(&e1000g_rx_detach_lock);
357 while (e1000g_private_devi_list != NULL) {
358 devi_node = e1000g_private_devi_list;
359 e1000g_private_devi_list =
360 e1000g_private_devi_list->next;
361
362 kmem_free(devi_node->priv_dip,
363 sizeof (struct dev_info));
364 kmem_free(devi_node,
365 sizeof (private_devi_list_t));
366 }
367 mutex_exit(&e1000g_rx_detach_lock);
368 }
369
370 mutex_destroy(&e1000g_rx_detach_lock);
371 rw_destroy(&e1000g_dma_type_lock);
372 mutex_destroy(&e1000g_nvm_lock);
373 }
374
375 return (status);
376 }
377
378 /*
379 * _info - module information
380 */
381 int
382 _info(struct modinfo *modinfop)
383 {
384 return (mod_info(&modlinkage, modinfop));
385 }
386
387 /*
388 * e1000g_attach - driver attach
389 *
390 * This function is the device-specific initialization entry
391 * point. This entry point is required and must be written.
392 * The DDI_ATTACH command must be provided in the attach entry
393 * point. When attach() is called with cmd set to DDI_ATTACH,
394 * all normal kernel services (such as kmem_alloc(9F)) are
395 * available for use by the driver.
396 *
397 * The attach() function will be called once for each instance
398 * of the device on the system with cmd set to DDI_ATTACH.
399 * Until attach() succeeds, the only driver entry points which
400 * may be called are open(9E) and getinfo(9E).
401 */
402 static int
403 e1000g_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd)
404 {
405 struct e1000g *Adapter;
406 struct e1000_hw *hw;
407 struct e1000g_osdep *osdep;
408 int instance;
409
410 switch (cmd) {
411 default:
412 e1000g_log(NULL, CE_WARN,
413 "Unsupported command sent to e1000g_attach... ");
414 return (DDI_FAILURE);
415
416 case DDI_RESUME:
417 return (e1000g_resume(devinfo));
418
419 case DDI_ATTACH:
420 break;
421 }
422
423 /*
424 * get device instance number
425 */
426 instance = ddi_get_instance(devinfo);
427
428 /*
429 * Allocate soft data structure
430 */
431 Adapter =
432 (struct e1000g *)kmem_zalloc(sizeof (*Adapter), KM_SLEEP);
433
434 Adapter->dip = devinfo;
435 Adapter->instance = instance;
436 Adapter->tx_ring->adapter = Adapter;
437 Adapter->rx_ring->adapter = Adapter;
438
439 hw = &Adapter->shared;
440 osdep = &Adapter->osdep;
441 hw->back = osdep;
442 osdep->adapter = Adapter;
443
444 ddi_set_driver_private(devinfo, (caddr_t)Adapter);
445
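/*
 * Each setup step below records a bit in attach_progress so that
 * e1000g_unattach() can undo only the work that actually completed
 * if attach fails part-way through.
 */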
446 /*
447 * Initialize for fma support
448 */
449 (void) e1000g_get_prop(Adapter, "fm-capable",
450 0, 0x0f,
451 DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE |
452 DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE,
453 &Adapter->fm_capabilities);
454 e1000g_fm_init(Adapter);
455 Adapter->attach_progress |= ATTACH_PROGRESS_FMINIT;
456
457 /*
458 * PCI Configure
459 */
460 if (pci_config_setup(devinfo, &osdep->cfg_handle) != DDI_SUCCESS) {
461 e1000g_log(Adapter, CE_WARN, "PCI configuration failed");
462 goto attach_fail;
463 }
464 Adapter->attach_progress |= ATTACH_PROGRESS_PCI_CONFIG;
465
466 /*
467 * Setup hardware
468 */
469 if (e1000g_identify_hardware(Adapter) != DDI_SUCCESS) {
470 e1000g_log(Adapter, CE_WARN, "Identify hardware failed");
471 goto attach_fail;
472 }
473
474 /*
475 * Map in the device registers.
476 */
477 if (e1000g_regs_map(Adapter) != DDI_SUCCESS) {
478 e1000g_log(Adapter, CE_WARN, "Mapping registers failed");
479 goto attach_fail;
480 }
481 Adapter->attach_progress |= ATTACH_PROGRESS_REGS_MAP;
482
483 /*
484 * Initialize driver parameters
485 */
486 if (e1000g_set_driver_params(Adapter) != DDI_SUCCESS) {
487 goto attach_fail;
488 }
489 Adapter->attach_progress |= ATTACH_PROGRESS_SETUP;
490
491 if (e1000g_check_acc_handle(Adapter->osdep.cfg_handle) != DDI_FM_OK) {
492 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_LOST);
493 goto attach_fail;
494 }
495
496 /*
497 * Disable ULP support
498 */
499 (void) e1000_disable_ulp_lpt_lp(hw, TRUE);
500
501 /*
502 * Initialize interrupts
503 */
504 if (e1000g_add_intrs(Adapter) != DDI_SUCCESS) {
505 e1000g_log(Adapter, CE_WARN, "Add interrupts failed");
506 goto attach_fail;
507 }
508 Adapter->attach_progress |= ATTACH_PROGRESS_ADD_INTR;
509
510 /*
511 * Initialize mutexes for this device.
512 * Do this before enabling the interrupt handler and
513 * registering the softint, to avoid the condition where the
514 * interrupt handler could use an uninitialized mutex.
515 */
516 e1000g_init_locks(Adapter);
517 Adapter->attach_progress |= ATTACH_PROGRESS_LOCKS;
518
519 /*
520 * Initialize Driver Counters
521 */
522 if (e1000g_init_stats(Adapter) != DDI_SUCCESS) {
523 e1000g_log(Adapter, CE_WARN, "Init stats failed");
524 goto attach_fail;
525 }
526 Adapter->attach_progress |= ATTACH_PROGRESS_KSTATS;
527
528 /*
529 * Initialize chip hardware and software structures
530 */
531 rw_enter(&Adapter->chip_lock, RW_WRITER);
532 if (e1000g_init(Adapter) != DDI_SUCCESS) {
533 rw_exit(&Adapter->chip_lock);
534 e1000g_log(Adapter, CE_WARN, "Adapter initialization failed");
535 goto attach_fail;
536 }
537 rw_exit(&Adapter->chip_lock);
538 Adapter->attach_progress |= ATTACH_PROGRESS_INIT;
539
540 /*
541 * Register the driver to the MAC
542 */
543 if (e1000g_register_mac(Adapter) != DDI_SUCCESS) {
544 e1000g_log(Adapter, CE_WARN, "Register MAC failed");
545 goto attach_fail;
546 }
547 Adapter->attach_progress |= ATTACH_PROGRESS_MAC;
548
549 /*
550 * Now that mutex locks are initialized, and the chip is also
551 * initialized, enable interrupts.
552 */
553 if (e1000g_enable_intrs(Adapter) != DDI_SUCCESS) {
554 e1000g_log(Adapter, CE_WARN, "Enable DDI interrupts failed");
555 goto attach_fail;
556 }
557 Adapter->attach_progress |= ATTACH_PROGRESS_ENABLE_INTR;
558
559 /*
560 * If e1000g_force_detach is enabled, create a new entry in the global
561 * private dip list; it maintains the priv_dip for DR support after
562 * the driver is detached.
563 */
564 if (e1000g_force_detach) {
565 private_devi_list_t *devi_node;
566
567 Adapter->priv_dip =
568 kmem_zalloc(sizeof (struct dev_info), KM_SLEEP);
569 bcopy(DEVI(devinfo), DEVI(Adapter->priv_dip),
570 sizeof (struct dev_info));
571
572 devi_node =
573 kmem_zalloc(sizeof (private_devi_list_t), KM_SLEEP);
574
575 mutex_enter(&e1000g_rx_detach_lock);
576 devi_node->priv_dip = Adapter->priv_dip;
577 devi_node->flag = E1000G_PRIV_DEVI_ATTACH;
578 devi_node->pending_rx_count = 0;
579
580 Adapter->priv_devi_node = devi_node;
581
582 if (e1000g_private_devi_list == NULL) {
583 devi_node->prev = NULL;
584 devi_node->next = NULL;
585 e1000g_private_devi_list = devi_node;
586 } else {
587 devi_node->prev = NULL;
588 devi_node->next = e1000g_private_devi_list;
589 e1000g_private_devi_list->prev = devi_node;
590 e1000g_private_devi_list = devi_node;
591 }
592 mutex_exit(&e1000g_rx_detach_lock);
593 }
594
595 Adapter->e1000g_state = E1000G_INITIALIZED;
596 return (DDI_SUCCESS);
597
598 attach_fail:
599 e1000g_unattach(devinfo, Adapter);
600 return (DDI_FAILURE);
601 }
602
603 static int
604 e1000g_register_mac(struct e1000g *Adapter)
605 {
606 struct e1000_hw *hw = &Adapter->shared;
607 mac_register_t *mac;
608 int err;
609
610 if ((mac = mac_alloc(MAC_VERSION)) == NULL)
611 return (DDI_FAILURE);
612
613 mac->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
614 mac->m_driver = Adapter;
615 mac->m_dip = Adapter->dip;
616 mac->m_src_addr = hw->mac.addr;
617 mac->m_callbacks = &e1000g_m_callbacks;
618 mac->m_min_sdu = 0;
619 mac->m_max_sdu = Adapter->default_mtu;
620 mac->m_margin = VLAN_TAGSZ;
621 mac->m_priv_props = e1000g_priv_props;
622 mac->m_v12n = MAC_VIRT_LEVEL1;
623
624 err = mac_register(mac, &Adapter->mh);
625 mac_free(mac);
626
627 return (err == 0 ? DDI_SUCCESS : DDI_FAILURE);
628 }
629
630 static int
631 e1000g_identify_hardware(struct e1000g *Adapter)
632 {
633 struct e1000_hw *hw = &Adapter->shared;
634 struct e1000g_osdep *osdep = &Adapter->osdep;
635
636 /* Get the device id */
637 hw->vendor_id =
638 pci_config_get16(osdep->cfg_handle, PCI_CONF_VENID);
639 hw->device_id =
640 pci_config_get16(osdep->cfg_handle, PCI_CONF_DEVID);
641 hw->revision_id =
642 pci_config_get8(osdep->cfg_handle, PCI_CONF_REVID);
643 hw->subsystem_device_id =
644 pci_config_get16(osdep->cfg_handle, PCI_CONF_SUBSYSID);
645 hw->subsystem_vendor_id =
646 pci_config_get16(osdep->cfg_handle, PCI_CONF_SUBVENID);
647
648 if (e1000_set_mac_type(hw) != E1000_SUCCESS) {
649 E1000G_DEBUGLOG_0(Adapter, E1000G_INFO_LEVEL,
650 "MAC type could not be set properly.");
651 return (DDI_FAILURE);
652 }
653
654 return (DDI_SUCCESS);
655 }
656
657 static int
658 e1000g_regs_map(struct e1000g *Adapter)
659 {
660 dev_info_t *devinfo = Adapter->dip;
661 struct e1000_hw *hw = &Adapter->shared;
662 struct e1000g_osdep *osdep = &Adapter->osdep;
663 off_t mem_size;
664 bar_info_t bar_info;
665 int offset, rnumber;
666
667 rnumber = ADAPTER_REG_SET;
668 /* Get size of adapter register memory */
669 if (ddi_dev_regsize(devinfo, rnumber, &mem_size) !=
670 DDI_SUCCESS) {
671 E1000G_DEBUGLOG_0(Adapter, CE_WARN,
672 "ddi_dev_regsize for registers failed");
673 return (DDI_FAILURE);
674 }
675
676 /* Map adapter register memory */
677 if ((ddi_regs_map_setup(devinfo, rnumber,
678 (caddr_t *)&hw->hw_addr, 0, mem_size, &e1000g_regs_acc_attr,
679 &osdep->reg_handle)) != DDI_SUCCESS) {
680 E1000G_DEBUGLOG_0(Adapter, CE_WARN,
681 "ddi_regs_map_setup for registers failed");
682 goto regs_map_fail;
683 }
684
685 /* ICH needs to map flash memory */
686 switch (hw->mac.type) {
687 case e1000_ich8lan:
688 case e1000_ich9lan:
689 case e1000_ich10lan:
690 case e1000_pchlan:
691 case e1000_pch2lan:
692 case e1000_pch_lpt:
693 rnumber = ICH_FLASH_REG_SET;
694
695 /* get flash size */
696 if (ddi_dev_regsize(devinfo, rnumber,
697 &mem_size) != DDI_SUCCESS) {
698 E1000G_DEBUGLOG_0(Adapter, CE_WARN,
699 "ddi_dev_regsize for ICH flash failed");
700 goto regs_map_fail;
701 }
702
703 /* map flash in */
704 if (ddi_regs_map_setup(devinfo, rnumber,
705 (caddr_t *)&hw->flash_address, 0,
706 mem_size, &e1000g_regs_acc_attr,
707 &osdep->ich_flash_handle) != DDI_SUCCESS) {
708 E1000G_DEBUGLOG_0(Adapter, CE_WARN,
709 "ddi_regs_map_setup for ICH flash failed");
710 goto regs_map_fail;
711 }
712 break;
713 case e1000_pch_spt:
714 /*
715 * On the SPT, the device flash is actually in BAR0, not a
716 * separate BAR. Therefore we end up setting the
717 * ich_flash_handle to be the same as the register handle.
718 * We mark the same to reduce the confusion in the other
719 * functions and macros. Though this does make the set up and
720 * tear-down path slightly more complicated.
721 */
722 osdep->ich_flash_handle = osdep->reg_handle;
723 hw->flash_address = hw->hw_addr;
724 default:
725 break;
726 }
727
728 /* map io space */
729 switch (hw->mac.type) {
730 case e1000_82544:
731 case e1000_82540:
732 case e1000_82545:
733 case e1000_82546:
734 case e1000_82541:
735 case e1000_82541_rev_2:
736 /* find the IO bar */
737 rnumber = -1;
738 for (offset = PCI_CONF_BASE1;
739 offset <= PCI_CONF_BASE5; offset += 4) {
740 if (e1000g_get_bar_info(devinfo, offset, &bar_info)
741 != DDI_SUCCESS)
742 continue;
743 if (bar_info.type == E1000G_BAR_IO) {
744 rnumber = bar_info.rnumber;
745 break;
746 }
747 }
748
749 if (rnumber < 0) {
750 E1000G_DEBUGLOG_0(Adapter, CE_WARN,
751 "No io space is found");
752 goto regs_map_fail;
753 }
754
755 /* get io space size */
756 if (ddi_dev_regsize(devinfo, rnumber,
757 &mem_size) != DDI_SUCCESS) {
758 E1000G_DEBUGLOG_0(Adapter, CE_WARN,
759 "ddi_dev_regsize for io space failed");
760 goto regs_map_fail;
761 }
762
763 /* map io space */
764 if ((ddi_regs_map_setup(devinfo, rnumber,
765 (caddr_t *)&hw->io_base, 0, mem_size,
766 &e1000g_regs_acc_attr,
767 &osdep->io_reg_handle)) != DDI_SUCCESS) {
768 E1000G_DEBUGLOG_0(Adapter, CE_WARN,
769 "ddi_regs_map_setup for io space failed");
770 goto regs_map_fail;
771 }
772 break;
773 default:
774 hw->io_base = 0;
775 break;
776 }
777
778 return (DDI_SUCCESS);
779
780 regs_map_fail:
781 if (osdep->reg_handle != NULL)
782 ddi_regs_map_free(&osdep->reg_handle);
783 if (osdep->ich_flash_handle != NULL && hw->mac.type != e1000_pch_spt)
784 ddi_regs_map_free(&osdep->ich_flash_handle);
785 return (DDI_FAILURE);
786 }
787
788 static int
789 e1000g_set_driver_params(struct e1000g *Adapter)
790 {
791 struct e1000_hw *hw;
792
793 hw = &Adapter->shared;
794
795 /* Set MAC type and initialize hardware functions */
796 if (e1000_setup_init_funcs(hw, B_TRUE) != E1000_SUCCESS) {
797 E1000G_DEBUGLOG_0(Adapter, CE_WARN,
798 "Could not setup hardware functions");
799 return (DDI_FAILURE);
800 }
801
802 /* Get bus information */
803 if (e1000_get_bus_info(hw) != E1000_SUCCESS) {
804 E1000G_DEBUGLOG_0(Adapter, CE_WARN,
805 "Could not get bus information");
806 return (DDI_FAILURE);
807 }
808
809 e1000_read_pci_cfg(hw, PCI_COMMAND_REGISTER, &hw->bus.pci_cmd_word);
810
811 hw->mac.autoneg_failed = B_TRUE;
812
813 /* Set the autoneg_wait_to_complete flag to B_FALSE */
814 hw->phy.autoneg_wait_to_complete = B_FALSE;
815
816 /* Adaptive IFS related changes */
817 hw->mac.adaptive_ifs = B_TRUE;
818
819 /* Enable phy init script for IGP phy of 82541/82547 */
820 if ((hw->mac.type == e1000_82547) ||
821 (hw->mac.type == e1000_82541) ||
822 (hw->mac.type == e1000_82547_rev_2) ||
823 (hw->mac.type == e1000_82541_rev_2))
824 e1000_init_script_state_82541(hw, B_TRUE);
825
826 /* Enable the TTL workaround for 82541/82547 */
827 e1000_set_ttl_workaround_state_82541(hw, B_TRUE);
828
829 #ifdef __sparc
830 Adapter->strip_crc = B_TRUE;
831 #else
832 Adapter->strip_crc = B_FALSE;
833 #endif
834
835 /* setup the maximum MTU size of the chip */
836 e1000g_setup_max_mtu(Adapter);
837
838 /* Get speed/duplex settings in conf file */
839 hw->mac.forced_speed_duplex = ADVERTISE_100_FULL;
840 hw->phy.autoneg_advertised = AUTONEG_ADVERTISE_SPEED_DEFAULT;
841 e1000g_force_speed_duplex(Adapter);
842
843 /* Get Jumbo Frames settings in conf file */
844 e1000g_get_max_frame_size(Adapter);
845
846 /* Get conf file properties */
847 e1000g_get_conf(Adapter);
848
849 /* enforce PCH limits */
850 e1000g_pch_limits(Adapter);
851
852 /* Set Rx/Tx buffer size */
853 e1000g_set_bufsize(Adapter);
854
855 /* Master Latency Timer */
856 Adapter->master_latency_timer = DEFAULT_MASTER_LATENCY_TIMER;
857
858 /* copper options */
859 if (hw->phy.media_type == e1000_media_type_copper) {
860 hw->phy.mdix = 0; /* AUTO_ALL_MODES */
861 hw->phy.disable_polarity_correction = B_FALSE;
862 hw->phy.ms_type = e1000_ms_hw_default; /* E1000_MASTER_SLAVE */
863 }
864
865 /* The initial link state should be "unknown" */
866 Adapter->link_state = LINK_STATE_UNKNOWN;
867
868 /* Initialize rx parameters */
869 Adapter->rx_intr_delay = DEFAULT_RX_INTR_DELAY;
870 Adapter->rx_intr_abs_delay = DEFAULT_RX_INTR_ABS_DELAY;
871
872 /* Initialize tx parameters */
873 Adapter->tx_intr_enable = DEFAULT_TX_INTR_ENABLE;
874 Adapter->tx_bcopy_thresh = DEFAULT_TX_BCOPY_THRESHOLD;
875 Adapter->tx_intr_delay = DEFAULT_TX_INTR_DELAY;
876 Adapter->tx_intr_abs_delay = DEFAULT_TX_INTR_ABS_DELAY;
877
878 /* Initialize rx parameters */
879 Adapter->rx_bcopy_thresh = DEFAULT_RX_BCOPY_THRESHOLD;
880
881 return (DDI_SUCCESS);
882 }
883
884 static void
885 e1000g_setup_max_mtu(struct e1000g *Adapter)
886 {
887 struct e1000_mac_info *mac = &Adapter->shared.mac;
888 struct e1000_phy_info *phy = &Adapter->shared.phy;
889
890 switch (mac->type) {
891 /* types that do not support jumbo frames */
892 case e1000_ich8lan:
893 case e1000_82573:
894 case e1000_82583:
895 Adapter->max_mtu = ETHERMTU;
896 break;
897 /* ich9 supports jumbo frames except on one phy type */
898 case e1000_ich9lan:
899 if (phy->type == e1000_phy_ife)
900 Adapter->max_mtu = ETHERMTU;
901 else
902 Adapter->max_mtu = MAXIMUM_MTU_9K;
903 break;
904 /* pch can do jumbo frames up to 4K */
905 case e1000_pchlan:
906 Adapter->max_mtu = MAXIMUM_MTU_4K;
907 break;
908 /* pch2 can do jumbo frames up to 9K */
909 case e1000_pch2lan:
910 case e1000_pch_lpt:
911 case e1000_pch_spt:
912 Adapter->max_mtu = MAXIMUM_MTU_9K;
913 break;
914 /* types with a special limit */
915 case e1000_82571:
916 case e1000_82572:
917 case e1000_82574:
918 case e1000_80003es2lan:
919 case e1000_ich10lan:
920 if (e1000g_jumbo_mtu >= ETHERMTU &&
921 e1000g_jumbo_mtu <= MAXIMUM_MTU_9K) {
922 Adapter->max_mtu = e1000g_jumbo_mtu;
923 } else {
924 Adapter->max_mtu = MAXIMUM_MTU_9K;
925 }
926 break;
927 /* default limit is 16K */
928 default:
929 Adapter->max_mtu = FRAME_SIZE_UPTO_16K -
930 sizeof (struct ether_vlan_header) - ETHERFCSL;
931 break;
932 }
933 }
934
935 static void
936 e1000g_set_bufsize(struct e1000g *Adapter)
937 {
938 struct e1000_mac_info *mac = &Adapter->shared.mac;
939 uint64_t rx_size;
940 uint64_t tx_size;
941
942 dev_info_t *devinfo = Adapter->dip;
943 #ifdef __sparc
944 ulong_t iommu_pagesize;
945 #endif
946 /* Get the system page size */
947 Adapter->sys_page_sz = ddi_ptob(devinfo, (ulong_t)1);
948
949 #ifdef __sparc
950 iommu_pagesize = dvma_pagesize(devinfo);
951 if (iommu_pagesize != 0) {
952 if (Adapter->sys_page_sz == iommu_pagesize) {
953 if (iommu_pagesize > 0x4000)
954 Adapter->sys_page_sz = 0x4000;
955 } else {
956 if (Adapter->sys_page_sz > iommu_pagesize)
957 Adapter->sys_page_sz = iommu_pagesize;
958 }
959 }
960 if (Adapter->lso_enable) {
961 Adapter->dvma_page_num = E1000_LSO_MAXLEN /
962 Adapter->sys_page_sz + E1000G_DEFAULT_DVMA_PAGE_NUM;
963 } else {
964 Adapter->dvma_page_num = Adapter->max_frame_size /
965 Adapter->sys_page_sz + E1000G_DEFAULT_DVMA_PAGE_NUM;
966 }
967 ASSERT(Adapter->dvma_page_num >= E1000G_DEFAULT_DVMA_PAGE_NUM);
968 #endif
969
970 Adapter->min_frame_size = ETHERMIN + ETHERFCSL;
971
972 if (Adapter->mem_workaround_82546 &&
973 ((mac->type == e1000_82545) ||
974 (mac->type == e1000_82546) ||
975 (mac->type == e1000_82546_rev_3))) {
976 Adapter->rx_buffer_size = E1000_RX_BUFFER_SIZE_2K;
977 } else {
978 rx_size = Adapter->max_frame_size;
979 if ((rx_size > FRAME_SIZE_UPTO_2K) &&
980 (rx_size <= FRAME_SIZE_UPTO_4K))
981 Adapter->rx_buffer_size = E1000_RX_BUFFER_SIZE_4K;
982 else if ((rx_size > FRAME_SIZE_UPTO_4K) &&
983 (rx_size <= FRAME_SIZE_UPTO_8K))
984 Adapter->rx_buffer_size = E1000_RX_BUFFER_SIZE_8K;
985 else if ((rx_size > FRAME_SIZE_UPTO_8K) &&
986 (rx_size <= FRAME_SIZE_UPTO_16K))
987 Adapter->rx_buffer_size = E1000_RX_BUFFER_SIZE_16K;
988 else
989 Adapter->rx_buffer_size = E1000_RX_BUFFER_SIZE_2K;
990 }
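/*
 * Reserve a little extra room so the received data can be offset and the
 * IP header ends up naturally aligned (assumed purpose of E1000G_IPALIGNROOM).
 */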
991 Adapter->rx_buffer_size += E1000G_IPALIGNROOM;
992
993 tx_size = Adapter->max_frame_size;
994 if ((tx_size > FRAME_SIZE_UPTO_2K) && (tx_size <= FRAME_SIZE_UPTO_4K))
995 Adapter->tx_buffer_size = E1000_TX_BUFFER_SIZE_4K;
996 else if ((tx_size > FRAME_SIZE_UPTO_4K) &&
997 (tx_size <= FRAME_SIZE_UPTO_8K))
998 Adapter->tx_buffer_size = E1000_TX_BUFFER_SIZE_8K;
999 else if ((tx_size > FRAME_SIZE_UPTO_8K) &&
1000 (tx_size <= FRAME_SIZE_UPTO_16K))
1001 Adapter->tx_buffer_size = E1000_TX_BUFFER_SIZE_16K;
1002 else
1003 Adapter->tx_buffer_size = E1000_TX_BUFFER_SIZE_2K;
1004
1005 /*
1006 * Wiseman adapters require receive buffers aligned on a 256-byte
1007 * boundary. Livengood does not require this, and forcing the
1008 * alignment on all hardware would have performance implications,
1009 * so it is applied only to Wiseman, where it matters most with
1010 * jumbo frames enabled. An unaligned buffer carries a potential
1011 * risk of data loss, so all Wiseman boards are given 256-byte
1012 * aligned buffers.
1015 */
1016 if (mac->type < e1000_82543)
1017 Adapter->rx_buf_align = RECEIVE_BUFFER_ALIGN_SIZE;
1018 else
1019 Adapter->rx_buf_align = 1;
1020 }
1021
1022 /*
1023 * e1000g_detach - driver detach
1024 *
1025 * The detach() function is the complement of the attach routine.
1026 * If cmd is set to DDI_DETACH, detach() is used to remove the
1027 * state associated with a given instance of a device node
1028 * prior to the removal of that instance from the system.
1029 *
1030 * The detach() function will be called once for each instance
1031 * of the device for which there has been a successful attach()
1032 * once there are no longer any opens on the device.
1033 *
1034 * Interrupt routines are disabled, and all memory allocated by this
1035 * driver is freed.
1036 */
1037 static int
1038 e1000g_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd)
1039 {
1040 struct e1000g *Adapter;
1041 boolean_t rx_drain;
1042
1043 switch (cmd) {
1044 default:
1045 return (DDI_FAILURE);
1046
1047 case DDI_SUSPEND:
1048 return (e1000g_suspend(devinfo));
1049
1050 case DDI_DETACH:
1051 break;
1052 }
1053
1054 Adapter = (struct e1000g *)ddi_get_driver_private(devinfo);
1055 if (Adapter == NULL)
1056 return (DDI_FAILURE);
1057
1058 rx_drain = e1000g_rx_drain(Adapter);
1059 if (!rx_drain && !e1000g_force_detach)
1060 return (DDI_FAILURE);
1061
1062 if (mac_unregister(Adapter->mh) != 0) {
1063 e1000g_log(Adapter, CE_WARN, "Unregister MAC failed");
1064 return (DDI_FAILURE);
1065 }
1066 Adapter->attach_progress &= ~ATTACH_PROGRESS_MAC;
1067
1068 ASSERT(!(Adapter->e1000g_state & E1000G_STARTED));
1069
1070 if (!e1000g_force_detach && !rx_drain)
1071 return (DDI_FAILURE);
1072
1073 e1000g_unattach(devinfo, Adapter);
1074
1075 return (DDI_SUCCESS);
1076 }
1077
1078 /*
1079 * e1000g_free_priv_devi_node - free a priv_dip entry for driver instance
1080 */
1081 void
1082 e1000g_free_priv_devi_node(private_devi_list_t *devi_node)
1083 {
1084 ASSERT(e1000g_private_devi_list != NULL);
1085 ASSERT(devi_node != NULL);
1086
1087 if (devi_node->prev != NULL)
1088 devi_node->prev->next = devi_node->next;
1089 if (devi_node->next != NULL)
1090 devi_node->next->prev = devi_node->prev;
1091 if (devi_node == e1000g_private_devi_list)
1092 e1000g_private_devi_list = devi_node->next;
1093
1094 kmem_free(devi_node->priv_dip,
1095 sizeof (struct dev_info));
1096 kmem_free(devi_node,
1097 sizeof (private_devi_list_t));
1098 }
1099
1100 static void
1101 e1000g_unattach(dev_info_t *devinfo, struct e1000g *Adapter)
1102 {
1103 private_devi_list_t *devi_node;
1104 int result;
1105
1106 if (Adapter->e1000g_blink != NULL) {
1107 ddi_periodic_delete(Adapter->e1000g_blink);
1108 Adapter->e1000g_blink = NULL;
1109 }
1110
1111 if (Adapter->attach_progress & ATTACH_PROGRESS_ENABLE_INTR) {
1112 (void) e1000g_disable_intrs(Adapter);
1113 }
1114
1115 if (Adapter->attach_progress & ATTACH_PROGRESS_MAC) {
1116 (void) mac_unregister(Adapter->mh);
1117 }
1118
1119 if (Adapter->attach_progress & ATTACH_PROGRESS_ADD_INTR) {
1120 (void) e1000g_rem_intrs(Adapter);
1121 }
1122
1123 if (Adapter->attach_progress & ATTACH_PROGRESS_SETUP) {
1124 (void) ddi_prop_remove_all(devinfo);
1125 }
1126
1127 if (Adapter->attach_progress & ATTACH_PROGRESS_KSTATS) {
1128 kstat_delete((kstat_t *)Adapter->e1000g_ksp);
1129 }
1130
1131 if (Adapter->attach_progress & ATTACH_PROGRESS_INIT) {
1132 stop_link_timer(Adapter);
1133
1134 mutex_enter(&e1000g_nvm_lock);
1135 result = e1000_reset_hw(&Adapter->shared);
1136 mutex_exit(&e1000g_nvm_lock);
1137
1138 if (result != E1000_SUCCESS) {
1139 e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_INVAL_STATE);
1140 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_LOST);
1141 }
1142 }
1143
1144 e1000g_release_multicast(Adapter);
1145
1146 if (Adapter->attach_progress & ATTACH_PROGRESS_REGS_MAP) {
1147 if (Adapter->osdep.reg_handle != NULL)
1148 ddi_regs_map_free(&Adapter->osdep.reg_handle);
1149 if (Adapter->osdep.ich_flash_handle != NULL &&
1150 Adapter->shared.mac.type != e1000_pch_spt)
1151 ddi_regs_map_free(&Adapter->osdep.ich_flash_handle);
1152 if (Adapter->osdep.io_reg_handle != NULL)
1153 ddi_regs_map_free(&Adapter->osdep.io_reg_handle);
1154 }
1155
1156 if (Adapter->attach_progress & ATTACH_PROGRESS_PCI_CONFIG) {
1157 if (Adapter->osdep.cfg_handle != NULL)
1158 pci_config_teardown(&Adapter->osdep.cfg_handle);
1159 }
1160
1161 if (Adapter->attach_progress & ATTACH_PROGRESS_LOCKS) {
1162 e1000g_destroy_locks(Adapter);
1163 }
1164
1165 if (Adapter->attach_progress & ATTACH_PROGRESS_FMINIT) {
1166 e1000g_fm_fini(Adapter);
1167 }
1168
1169 mutex_enter(&e1000g_rx_detach_lock);
1170 if (e1000g_force_detach && (Adapter->priv_devi_node != NULL)) {
1171 devi_node = Adapter->priv_devi_node;
1172 devi_node->flag |= E1000G_PRIV_DEVI_DETACH;
1173
1174 if (devi_node->pending_rx_count == 0) {
1175 e1000g_free_priv_devi_node(devi_node);
1176 }
1177 }
1178 mutex_exit(&e1000g_rx_detach_lock);
1179
1180 kmem_free((caddr_t)Adapter, sizeof (struct e1000g));
1181
1182 /*
1183 * Another hotplug spec requirement:
1184 * run ddi_set_driver_private(devinfo, NULL);
1185 */
1186 ddi_set_driver_private(devinfo, NULL);
1187 }
1188
1189 /*
1190 * Get the BAR type and rnumber for a given PCI BAR offset
1191 */
1192 static int
1193 e1000g_get_bar_info(dev_info_t *dip, int bar_offset, bar_info_t *bar_info)
1194 {
1195 pci_regspec_t *regs;
1196 uint_t regs_length;
1197 int type, rnumber, rcount;
1198
1199 ASSERT((bar_offset >= PCI_CONF_BASE0) &&
1200 (bar_offset <= PCI_CONF_BASE5));
1201
1202 /*
1203 * Get the DDI "reg" property
1204 */
1205 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip,
1206 DDI_PROP_DONTPASS, "reg", (int **)&regs,
1207 &regs_length) != DDI_PROP_SUCCESS) {
1208 return (DDI_FAILURE);
1209 }
1210
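/*
 * The "reg" property is an array of pci_regspec_t entries; convert its
 * length (reported in ints) into the number of register sets.
 */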
1211 rcount = regs_length * sizeof (int) / sizeof (pci_regspec_t);
1212 /*
1213 * Check the BAR offset
1214 */
1215 for (rnumber = 0; rnumber < rcount; ++rnumber) {
1216 if (PCI_REG_REG_G(regs[rnumber].pci_phys_hi) == bar_offset) {
1217 type = regs[rnumber].pci_phys_hi & PCI_ADDR_MASK;
1218 break;
1219 }
1220 }
1221
1222 ddi_prop_free(regs);
1223
1224 if (rnumber >= rcount)
1225 return (DDI_FAILURE);
1226
1227 switch (type) {
1228 case PCI_ADDR_CONFIG:
1229 bar_info->type = E1000G_BAR_CONFIG;
1230 break;
1231 case PCI_ADDR_IO:
1232 bar_info->type = E1000G_BAR_IO;
1233 break;
1234 case PCI_ADDR_MEM32:
1235 bar_info->type = E1000G_BAR_MEM32;
1236 break;
1237 case PCI_ADDR_MEM64:
1238 bar_info->type = E1000G_BAR_MEM64;
1239 break;
1240 default:
1241 return (DDI_FAILURE);
1242 }
1243 bar_info->rnumber = rnumber;
1244 return (DDI_SUCCESS);
1245 }
1246
1247 static void
1248 e1000g_init_locks(struct e1000g *Adapter)
1249 {
1250 e1000g_tx_ring_t *tx_ring;
1251 e1000g_rx_ring_t *rx_ring;
1252
1253 rw_init(&Adapter->chip_lock, NULL,
1254 RW_DRIVER, DDI_INTR_PRI(Adapter->intr_pri));
1255 mutex_init(&Adapter->link_lock, NULL,
1256 MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri));
1257 mutex_init(&Adapter->watchdog_lock, NULL,
1258 MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri));
1259
1260 tx_ring = Adapter->tx_ring;
1261
1262 mutex_init(&tx_ring->tx_lock, NULL,
1263 MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri));
1264 mutex_init(&tx_ring->usedlist_lock, NULL,
1265 MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri));
1266 mutex_init(&tx_ring->freelist_lock, NULL,
1267 MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri));
1268
1269 rx_ring = Adapter->rx_ring;
1270
1271 mutex_init(&rx_ring->rx_lock, NULL,
1272 MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri));
1273
1274 mutex_init(&Adapter->e1000g_led_lock, NULL,
1275 MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri));
1276 }
1277
1278 static void
1279 e1000g_destroy_locks(struct e1000g *Adapter)
1280 {
1281 e1000g_tx_ring_t *tx_ring;
1282 e1000g_rx_ring_t *rx_ring;
1283
1284 mutex_destroy(&Adapter->e1000g_led_lock);
1285
1286 tx_ring = Adapter->tx_ring;
1287 mutex_destroy(&tx_ring->tx_lock);
1288 mutex_destroy(&tx_ring->usedlist_lock);
1289 mutex_destroy(&tx_ring->freelist_lock);
1290
1291 rx_ring = Adapter->rx_ring;
1292 mutex_destroy(&rx_ring->rx_lock);
1293
1294 mutex_destroy(&Adapter->link_lock);
1295 mutex_destroy(&Adapter->watchdog_lock);
1296 rw_destroy(&Adapter->chip_lock);
1297
1298 /* destroy mutexes initialized in the shared code */
1299 e1000_destroy_hw_mutex(&Adapter->shared);
1300 }
1301
1302 static int
1303 e1000g_resume(dev_info_t *devinfo)
1304 {
1305 struct e1000g *Adapter;
1306
1307 Adapter = (struct e1000g *)ddi_get_driver_private(devinfo);
1308 if (Adapter == NULL)
1309 e1000g_log(Adapter, CE_PANIC,
1310 "Instance pointer is null\n");
1311
1312 if (Adapter->dip != devinfo)
1313 e1000g_log(Adapter, CE_PANIC,
1314 "Devinfo is not the same as saved devinfo\n");
1315
1316 rw_enter(&Adapter->chip_lock, RW_WRITER);
1317
1318 if (Adapter->e1000g_state & E1000G_STARTED) {
1319 if (e1000g_start(Adapter, B_FALSE) != DDI_SUCCESS) {
1320 rw_exit(&Adapter->chip_lock);
1321 /*
1322 * We note the failure, but return success, as the
1323 * system is still usable without this controller.
1324 */
1325 e1000g_log(Adapter, CE_WARN,
1326 "e1000g_resume: failed to restart controller\n");
1327 return (DDI_SUCCESS);
1328 }
1329 /* Enable and start the watchdog timer */
1330 enable_watchdog_timer(Adapter);
1331 }
1332
1333 Adapter->e1000g_state &= ~E1000G_SUSPENDED;
1334
1335 rw_exit(&Adapter->chip_lock);
1336
1337 return (DDI_SUCCESS);
1338 }
1339
1340 static int
1341 e1000g_suspend(dev_info_t *devinfo)
1342 {
1343 struct e1000g *Adapter;
1344
1345 Adapter = (struct e1000g *)ddi_get_driver_private(devinfo);
1346 if (Adapter == NULL)
1347 return (DDI_FAILURE);
1348
1349 rw_enter(&Adapter->chip_lock, RW_WRITER);
1350
1351 Adapter->e1000g_state |= E1000G_SUSPENDED;
1352
1353 /* if the port isn't plumbed, we can simply return */
1354 if (!(Adapter->e1000g_state & E1000G_STARTED)) {
1355 rw_exit(&Adapter->chip_lock);
1356 return (DDI_SUCCESS);
1357 }
1358
1359 e1000g_stop(Adapter, B_FALSE);
1360
1361 rw_exit(&Adapter->chip_lock);
1362
1363 /* Disable and stop all the timers */
1364 disable_watchdog_timer(Adapter);
1365 stop_link_timer(Adapter);
1366 stop_82547_timer(Adapter->tx_ring);
1367
1368 return (DDI_SUCCESS);
1369 }
1370
1371 static int
1372 e1000g_init(struct e1000g *Adapter)
1373 {
1374 uint32_t pba;
1375 uint32_t high_water;
1376 struct e1000_hw *hw;
1377 clock_t link_timeout;
1378 int result;
1379
1380 hw = &Adapter->shared;
1381
1382 /*
1383 * reset to put the hardware in a known state
1384 * before we try to do anything with the eeprom
1385 */
1386 mutex_enter(&e1000g_nvm_lock);
1387 result = e1000_reset_hw(hw);
1388 mutex_exit(&e1000g_nvm_lock);
1389
1390 if (result != E1000_SUCCESS) {
1391 e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_INVAL_STATE);
1392 goto init_fail;
1393 }
1394
1395 mutex_enter(&e1000g_nvm_lock);
1396 result = e1000_validate_nvm_checksum(hw);
1397 if (result < E1000_SUCCESS) {
1398 /*
1399 * Some PCI-E parts fail the first check due to
1400 * the link being in sleep state. Call it again,
1401 * if it fails a second time its a real issue.
1402 */
1403 result = e1000_validate_nvm_checksum(hw);
1404 }
1405 mutex_exit(&e1000g_nvm_lock);
1406
1407 if (result < E1000_SUCCESS) {
1408 e1000g_log(Adapter, CE_WARN,
1409 "Invalid NVM checksum. Please contact "
1410 "the vendor to update the NVM.");
1411 e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_INVAL_STATE);
1412 goto init_fail;
1413 }
1414
1415 result = 0;
1416 #ifdef __sparc
1417 /*
1418 * First, we try to get the local ethernet address from OBP. If
1419 * that fails, we get it from the NIC's EEPROM.
1420 */
1421 result = e1000g_find_mac_address(Adapter);
1422 #endif
1423 /* Get the local ethernet address. */
1424 if (!result) {
1425 mutex_enter(&e1000g_nvm_lock);
1426 result = e1000_read_mac_addr(hw);
1427 mutex_exit(&e1000g_nvm_lock);
1428 }
1429
1430 if (result < E1000_SUCCESS) {
1431 e1000g_log(Adapter, CE_WARN, "Read mac addr failed");
1432 e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_INVAL_STATE);
1433 goto init_fail;
1434 }
1435
1436 /* check for valid mac address */
1437 if (!is_valid_mac_addr(hw->mac.addr)) {
1438 e1000g_log(Adapter, CE_WARN, "Invalid mac addr");
1439 e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_INVAL_STATE);
1440 goto init_fail;
1441 }
1442
1443 /* Set LAA state for 82571 chipset */
1444 e1000_set_laa_state_82571(hw, B_TRUE);
1445
1446 /* Master Latency Timer implementation */
1447 if (Adapter->master_latency_timer) {
1448 pci_config_put8(Adapter->osdep.cfg_handle,
1449 PCI_CONF_LATENCY_TIMER, Adapter->master_latency_timer);
1450 }
1451
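/*
 * Select the Packet Buffer Allocation (PBA), which splits the on-chip
 * packet FIFO between Rx and Tx.  Jumbo-capable configurations give the
 * Tx side a larger share so a full-sized frame can be staged.
 */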
1452 if (hw->mac.type < e1000_82547) {
1453 /*
1454 * Total FIFO is 64K
1455 */
1456 if (Adapter->max_frame_size > FRAME_SIZE_UPTO_8K)
1457 pba = E1000_PBA_40K; /* 40K for Rx, 24K for Tx */
1458 else
1459 pba = E1000_PBA_48K; /* 48K for Rx, 16K for Tx */
1460 } else if ((hw->mac.type == e1000_82571) ||
1461 (hw->mac.type == e1000_82572) ||
1462 (hw->mac.type == e1000_80003es2lan)) {
1463 /*
1464 * Total FIFO is 48K
1465 */
1466 if (Adapter->max_frame_size > FRAME_SIZE_UPTO_8K)
1467 pba = E1000_PBA_30K; /* 30K for Rx, 18K for Tx */
1468 else
1469 pba = E1000_PBA_38K; /* 38K for Rx, 10K for Tx */
1470 } else if (hw->mac.type == e1000_82573) {
1471 pba = E1000_PBA_20K; /* 20K for Rx, 12K for Tx */
1472 } else if (hw->mac.type == e1000_82574) {
1473 /* Keep adapter default: 20K for Rx, 20K for Tx */
1474 pba = E1000_READ_REG(hw, E1000_PBA);
1475 } else if (hw->mac.type == e1000_ich8lan) {
1476 pba = E1000_PBA_8K; /* 8K for Rx, 12K for Tx */
1477 } else if (hw->mac.type == e1000_ich9lan) {
1478 pba = E1000_PBA_10K;
1479 } else if (hw->mac.type == e1000_ich10lan) {
1480 pba = E1000_PBA_10K;
1481 } else if (hw->mac.type == e1000_pchlan) {
1482 pba = E1000_PBA_26K;
1483 } else if (hw->mac.type == e1000_pch2lan) {
1484 pba = E1000_PBA_26K;
1485 } else if (hw->mac.type == e1000_pch_lpt) {
1486 pba = E1000_PBA_26K;
1487 } else if (hw->mac.type == e1000_pch_spt) {
1488 pba = E1000_PBA_26K;
1489 } else {
1490 /*
1491 * Total FIFO is 40K
1492 */
1493 if (Adapter->max_frame_size > FRAME_SIZE_UPTO_8K)
1494 pba = E1000_PBA_22K; /* 22K for Rx, 18K for Tx */
1495 else
1496 pba = E1000_PBA_30K; /* 30K for Rx, 10K for Tx */
1497 }
1498 E1000_WRITE_REG(hw, E1000_PBA, pba);
1499
1500 /*
1501 * These parameters set thresholds for the adapter's generation (Tx)
1502 * and response (Rx) to Ethernet PAUSE frames. These are just threshold
1503 * settings. Flow control is enabled or disabled in the configuration
1504 * file.
1505 * High-water mark is set down from the top of the rx fifo (not
1506 * sensitive to max_frame_size) and low-water is set just below
1507 * high-water mark.
1508 * The high water mark must be low enough to fit one full frame above
1509 * it in the rx FIFO. Should be the lower of:
1510 * 90% of the Rx FIFO size and the full Rx FIFO size minus the early
1511 * receive size (assuming ERT set to E1000_ERT_2048), or the full
1512 * Rx FIFO size minus one full frame.
1513 */
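/*
 * Worked example (a sketch, assuming pba = 48 for a 48 KB Rx allocation and
 * a 1522-byte max frame): 90% of 49152 is 44236, 49152 - 1522 is 47630, so
 * high_water = 44236 & 0xFFF8 = 44232 and low_water = 44224.
 */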
1514 high_water = min(((pba << 10) * 9 / 10),
1515 ((hw->mac.type == e1000_82573 || hw->mac.type == e1000_82574 ||
1516 hw->mac.type == e1000_ich9lan || hw->mac.type == e1000_ich10lan) ?
1517 ((pba << 10) - (E1000_ERT_2048 << 3)) :
1518 ((pba << 10) - Adapter->max_frame_size)));
1519
1520 hw->fc.high_water = high_water & 0xFFF8;
1521 hw->fc.low_water = hw->fc.high_water - 8;
1522
1523 if (hw->mac.type == e1000_80003es2lan)
1524 hw->fc.pause_time = 0xFFFF;
1525 else
1526 hw->fc.pause_time = E1000_FC_PAUSE_TIME;
1527 hw->fc.send_xon = B_TRUE;
1528
1529 /*
1530 * Reset the adapter hardware the second time.
1531 */
1532 mutex_enter(&e1000g_nvm_lock);
1533 result = e1000_reset_hw(hw);
1534 mutex_exit(&e1000g_nvm_lock);
1535
1536 if (result != E1000_SUCCESS) {
1537 e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_INVAL_STATE);
1538 goto init_fail;
1539 }
1540
1541 /* disable wakeup control by default */
1542 if (hw->mac.type >= e1000_82544)
1543 E1000_WRITE_REG(hw, E1000_WUC, 0);
1544
1545 /*
1546 * MWI should be disabled on 82546.
1547 */
1548 if (hw->mac.type == e1000_82546)
1549 e1000_pci_clear_mwi(hw);
1550 else
1551 e1000_pci_set_mwi(hw);
1552
1553 /*
1554 * Configure/Initialize hardware
1555 */
1556 mutex_enter(&e1000g_nvm_lock);
1557 result = e1000_init_hw(hw);
1558 mutex_exit(&e1000g_nvm_lock);
1559
1560 if (result < E1000_SUCCESS) {
1561 e1000g_log(Adapter, CE_WARN, "Initialize hw failed");
1562 e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_INVAL_STATE);
1563 goto init_fail;
1564 }
1565
1566 /*
1567 * Restore LED settings to the default from EEPROM
1568 * to meet the standard for Sun platforms.
1569 */
1570 (void) e1000_cleanup_led(hw);
1571
1572 /* Disable Smart Power Down */
1573 phy_spd_state(hw, B_FALSE);
1574
1575 /* Make sure driver has control */
1576 e1000g_get_driver_control(hw);
1577
1578 /*
1579 * Initialize unicast addresses.
1580 */
1581 e1000g_init_unicst(Adapter);
1582
1583 /*
1584 * Setup and initialize the mctable structures. After this routine
1585 * completes Multicast table will be set
1586 */
1587 e1000_update_mc_addr_list(hw,
1588 (uint8_t *)Adapter->mcast_table, Adapter->mcast_count);
1589 msec_delay(5);
1590
1591 /*
1592 * Implement Adaptive IFS
1593 */
1594 e1000_reset_adaptive(hw);
1595
1596 /* Setup Interrupt Throttling Register */
1597 if (hw->mac.type >= e1000_82540) {
1598 E1000_WRITE_REG(hw, E1000_ITR, Adapter->intr_throttling_rate);
1599 } else
1600 Adapter->intr_adaptive = B_FALSE;
1601
1602 /* Start the timer for link setup */
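/*
 * The PHY_*_LIMIT constants are treated here as counts of 100 ms intervals;
 * drv_usectohz(100000) converts one interval to clock ticks.
 */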
1603 if (hw->mac.autoneg)
1604 link_timeout = PHY_AUTO_NEG_LIMIT * drv_usectohz(100000);
1605 else
1606 link_timeout = PHY_FORCE_LIMIT * drv_usectohz(100000);
1607
1608 mutex_enter(&Adapter->link_lock);
1609 if (hw->phy.autoneg_wait_to_complete) {
1610 Adapter->link_complete = B_TRUE;
1611 } else {
1612 Adapter->link_complete = B_FALSE;
1613 Adapter->link_tid = timeout(e1000g_link_timer,
1614 (void *)Adapter, link_timeout);
1615 }
1616 mutex_exit(&Adapter->link_lock);
1617
1618 /* Save the state of the phy */
1619 e1000g_get_phy_state(Adapter);
1620
1621 e1000g_param_sync(Adapter);
1622
1623 Adapter->init_count++;
1624
1625 if (e1000g_check_acc_handle(Adapter->osdep.cfg_handle) != DDI_FM_OK) {
1626 goto init_fail;
1627 }
1628 if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) {
1629 goto init_fail;
1630 }
1631
1632 Adapter->poll_mode = e1000g_poll_mode;
1633
1634 return (DDI_SUCCESS);
1635
1636 init_fail:
1637 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_LOST);
1638 return (DDI_FAILURE);
1639 }
1640
1641 static int
1642 e1000g_alloc_rx_data(struct e1000g *Adapter)
1643 {
1644 e1000g_rx_ring_t *rx_ring;
1645 e1000g_rx_data_t *rx_data;
1646
1647 rx_ring = Adapter->rx_ring;
1648
1649 rx_data = kmem_zalloc(sizeof (e1000g_rx_data_t), KM_NOSLEEP);
1650
1651 if (rx_data == NULL)
1652 return (DDI_FAILURE);
1653
1654 rx_data->priv_devi_node = Adapter->priv_devi_node;
1655 rx_data->rx_ring = rx_ring;
1656
1657 mutex_init(&rx_data->freelist_lock, NULL,
1658 MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri));
1659 mutex_init(&rx_data->recycle_lock, NULL,
1660 MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri));
1661
1662 rx_ring->rx_data = rx_data;
1663
1664 return (DDI_SUCCESS);
1665 }
1666
1667 void
1668 e1000g_free_rx_pending_buffers(e1000g_rx_data_t *rx_data)
1669 {
1670 rx_sw_packet_t *packet, *next_packet;
1671
1672 if (rx_data == NULL)
1673 return;
1674
1675 packet = rx_data->packet_area;
1676 while (packet != NULL) {
1677 next_packet = packet->next;
1678 e1000g_free_rx_sw_packet(packet, B_TRUE);
1679 packet = next_packet;
1680 }
1681 rx_data->packet_area = NULL;
1682 }
1683
1684 void
1685 e1000g_free_rx_data(e1000g_rx_data_t *rx_data)
1686 {
1687 if (rx_data == NULL)
1688 return;
1689
1690 mutex_destroy(&rx_data->freelist_lock);
1691 mutex_destroy(&rx_data->recycle_lock);
1692
1693 kmem_free(rx_data, sizeof (e1000g_rx_data_t));
1694 }
1695
1696 /*
1697 * Check if the link is up
1698 */
1699 static boolean_t
1700 e1000g_link_up(struct e1000g *Adapter)
1701 {
1702 struct e1000_hw *hw = &Adapter->shared;
1703 boolean_t link_up = B_FALSE;
1704
1705 /*
1706 * get_link_status is set in the interrupt handler on link-status-change
1707 * or rx sequence error interrupt. get_link_status will stay
1708 * false until e1000_check_for_link establishes link; this applies
1709 * to copper adapters only.
1710 */
1711 switch (hw->phy.media_type) {
1712 case e1000_media_type_copper:
1713 if (hw->mac.get_link_status) {
1714 /*
1715 * SPT devices need a bit of extra time before we ask
1716 * them.
1717 */
1718 if (hw->mac.type == e1000_pch_spt)
1719 msec_delay(50);
1720 (void) e1000_check_for_link(hw);
1721 if ((E1000_READ_REG(hw, E1000_STATUS) &
1722 E1000_STATUS_LU)) {
1723 link_up = B_TRUE;
1724 } else {
1725 link_up = !hw->mac.get_link_status;
1726 }
1727 } else {
1728 link_up = B_TRUE;
1729 }
1730 break;
1731 case e1000_media_type_fiber:
1732 (void) e1000_check_for_link(hw);
1733 link_up = (E1000_READ_REG(hw, E1000_STATUS) &
1734 E1000_STATUS_LU);
1735 break;
1736 case e1000_media_type_internal_serdes:
1737 (void) e1000_check_for_link(hw);
1738 link_up = hw->mac.serdes_has_link;
1739 break;
1740 }
1741
1742 return (link_up);
1743 }
1744
1745 static void
1746 e1000g_m_ioctl(void *arg, queue_t *q, mblk_t *mp)
1747 {
1748 struct iocblk *iocp;
1749 struct e1000g *e1000gp;
1750 enum ioc_reply status;
1751
1752 iocp = (struct iocblk *)(uintptr_t)mp->b_rptr;
1753 iocp->ioc_error = 0;
1754 e1000gp = (struct e1000g *)arg;
1755
1756 ASSERT(e1000gp);
1757 if (e1000gp == NULL) {
1758 miocnak(q, mp, 0, EINVAL);
1759 return;
1760 }
1761
1762 rw_enter(&e1000gp->chip_lock, RW_READER);
1763 if (e1000gp->e1000g_state & E1000G_SUSPENDED) {
1764 rw_exit(&e1000gp->chip_lock);
1765 miocnak(q, mp, 0, EINVAL);
1766 return;
1767 }
1768 rw_exit(&e1000gp->chip_lock);
1769
1770 switch (iocp->ioc_cmd) {
1771
1772 case LB_GET_INFO_SIZE:
1773 case LB_GET_INFO:
1774 case LB_GET_MODE:
1775 case LB_SET_MODE:
1776 status = e1000g_loopback_ioctl(e1000gp, iocp, mp);
1777 break;
1778
1779
1780 #ifdef E1000G_DEBUG
1781 case E1000G_IOC_REG_PEEK:
1782 case E1000G_IOC_REG_POKE:
1783 status = e1000g_pp_ioctl(e1000gp, iocp, mp);
1784 break;
1785 case E1000G_IOC_CHIP_RESET:
1786 e1000gp->reset_count++;
1787 if (e1000g_reset_adapter(e1000gp))
1788 status = IOC_ACK;
1789 else
1790 status = IOC_INVAL;
1791 break;
1792 #endif
1793 default:
1794 status = IOC_INVAL;
1795 break;
1796 }
1797
1798 /*
1799 * Decide how to reply
1800 */
1801 switch (status) {
1802 default:
1803 case IOC_INVAL:
1804 /*
1805 * Error, reply with a NAK and EINVAL or the specified error
1806 */
1807 miocnak(q, mp, 0, iocp->ioc_error == 0 ?
1808 EINVAL : iocp->ioc_error);
1809 break;
1810
1811 case IOC_DONE:
1812 /*
1813 * OK, reply already sent
1814 */
1815 break;
1816
1817 case IOC_ACK:
1818 /*
1819 * OK, reply with an ACK
1820 */
1821 miocack(q, mp, 0, 0);
1822 break;
1823
1824 case IOC_REPLY:
1825 /*
1826 * OK, send prepared reply as ACK or NAK
1827 */
1828 mp->b_datap->db_type = iocp->ioc_error == 0 ?
1829 M_IOCACK : M_IOCNAK;
1830 qreply(q, mp);
1831 break;
1832 }
1833 }
1834
1835 /*
1836 * The default value of e1000g_poll_mode == 0 assumes that the NIC is
1837 * capable of supporting only one interrupt and we shouldn't disable
1838 * the physical interrupt. In this case we let the interrupt come and
1839 * we queue the packets in the rx ring itself in case we are in polling
1840 * mode (better latency but slightly lower performance and a very
1841 * high interrupt count in mpstat, which is harmless).
1842 *
1843 * e1000g_poll_mode == 1 assumes that we have per Rx ring interrupt
1844 * which can be disabled in poll mode. This gives better overall
1845 * throughput (compared to the mode above), shows very low interrupt
1846 * count but has slightly higher latency since we pick the packets when
1847 * the poll thread does polling.
1848 *
1849 * Currently, this flag should be enabled only while doing performance
1850  * measurement or when it can be guaranteed that the entire NIC going
1851  * into poll mode will not harm any traffic such as cluster heartbeats.
1852 */
1853 int e1000g_poll_mode = 0;
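
/*
 * Like other module globals, e1000g_poll_mode can usually be tuned at boot
 * time with the standard /etc/system module:variable syntax, for example:
 *
 *	set e1000g:e1000g_poll_mode = 1
 *
 * or patched on a running system with mdb -kw. This is just an illustration
 * of the usual tunable mechanism; it is not a separately documented
 * interface of this driver.
 */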
1854
1855 /*
1856 * Called from the upper layers when driver is in polling mode to
1857 * pick up any queued packets. Care should be taken to not block
1858 * this thread.
1859 */
1860 static mblk_t *e1000g_poll_ring(void *arg, int bytes_to_pickup)
1861 {
1862 e1000g_rx_ring_t *rx_ring = (e1000g_rx_ring_t *)arg;
1863 mblk_t *mp = NULL;
1864 mblk_t *tail;
1865 struct e1000g *adapter;
1866
1867 adapter = rx_ring->adapter;
1868
1869 rw_enter(&adapter->chip_lock, RW_READER);
1870
1871 if (adapter->e1000g_state & E1000G_SUSPENDED) {
1872 rw_exit(&adapter->chip_lock);
1873 return (NULL);
1874 }
1875
1876 mutex_enter(&rx_ring->rx_lock);
1877 mp = e1000g_receive(rx_ring, &tail, bytes_to_pickup);
1878 mutex_exit(&rx_ring->rx_lock);
1879 rw_exit(&adapter->chip_lock);
1880 return (mp);
1881 }
1882
1883 static int
1884 e1000g_m_start(void *arg)
1885 {
1886 struct e1000g *Adapter = (struct e1000g *)arg;
1887
1888 rw_enter(&Adapter->chip_lock, RW_WRITER);
1889
1890 if (Adapter->e1000g_state & E1000G_SUSPENDED) {
1891 rw_exit(&Adapter->chip_lock);
1892 return (ECANCELED);
1893 }
1894
1895 if (e1000g_start(Adapter, B_TRUE) != DDI_SUCCESS) {
1896 rw_exit(&Adapter->chip_lock);
1897 return (ENOTACTIVE);
1898 }
1899
1900 Adapter->e1000g_state |= E1000G_STARTED;
1901
1902 rw_exit(&Adapter->chip_lock);
1903
1904 /* Enable and start the watchdog timer */
1905 enable_watchdog_timer(Adapter);
1906
1907 return (0);
1908 }
1909
1910 static int
1911 e1000g_start(struct e1000g *Adapter, boolean_t global)
1912 {
1913 e1000g_rx_data_t *rx_data;
1914
1915 if (global) {
1916 if (e1000g_alloc_rx_data(Adapter) != DDI_SUCCESS) {
1917 e1000g_log(Adapter, CE_WARN, "Allocate rx data failed");
1918 goto start_fail;
1919 }
1920
1921 /* Allocate dma resources for descriptors and buffers */
1922 if (e1000g_alloc_dma_resources(Adapter) != DDI_SUCCESS) {
1923 e1000g_log(Adapter, CE_WARN,
1924 "Alloc DMA resources failed");
1925 goto start_fail;
1926 }
1927 Adapter->rx_buffer_setup = B_FALSE;
1928 }
1929
1930 if (!(Adapter->attach_progress & ATTACH_PROGRESS_INIT)) {
1931 if (e1000g_init(Adapter) != DDI_SUCCESS) {
1932 e1000g_log(Adapter, CE_WARN,
1933 "Adapter initialization failed");
1934 goto start_fail;
1935 }
1936 }
1937
1938 /* Setup and initialize the transmit structures */
1939 e1000g_tx_setup(Adapter);
1940 msec_delay(5);
1941
1942 /* Setup and initialize the receive structures */
1943 e1000g_rx_setup(Adapter);
1944 msec_delay(5);
1945
1946 /* Restore the e1000g promiscuous mode */
1947 e1000g_restore_promisc(Adapter);
1948
1949 e1000g_mask_interrupt(Adapter);
1950
1951 Adapter->attach_progress |= ATTACH_PROGRESS_INIT;
1952
1953 if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) {
1954 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_LOST);
1955 goto start_fail;
1956 }
1957
1958 return (DDI_SUCCESS);
1959
1960 start_fail:
1961 rx_data = Adapter->rx_ring->rx_data;
1962
1963 if (global) {
1964 e1000g_release_dma_resources(Adapter);
1965 e1000g_free_rx_pending_buffers(rx_data);
1966 e1000g_free_rx_data(rx_data);
1967 }
1968
1969 mutex_enter(&e1000g_nvm_lock);
1970 (void) e1000_reset_hw(&Adapter->shared);
1971 mutex_exit(&e1000g_nvm_lock);
1972
1973 return (DDI_FAILURE);
1974 }
1975
1976 /*
1977 * The I219 has the curious property that if the descriptor rings are not
1978 * emptied before resetting the hardware or before changing the device state
1979 * based on runtime power management, it'll cause the card to hang. This can
1980  * then only be fixed by a PCI reset. As such, for the I219 alone, we
1981 * have to flush the rings if we're in this state.
1982 */
1983 static void
1984 e1000g_flush_desc_rings(struct e1000g *Adapter)
1985 {
1986 struct e1000_hw *hw = &Adapter->shared;
1987 u16 hang_state;
1988 u32 fext_nvm11, tdlen;
1989
1990 /* First, disable MULR fix in FEXTNVM11 */
1991 fext_nvm11 = E1000_READ_REG(hw, E1000_FEXTNVM11);
1992 fext_nvm11 |= E1000_FEXTNVM11_DISABLE_MULR_FIX;
1993 E1000_WRITE_REG(hw, E1000_FEXTNVM11, fext_nvm11);
1994
1995 	/* do nothing if we're not in a faulty state or if the queue is empty */
1996 tdlen = E1000_READ_REG(hw, E1000_TDLEN(0));
1997 hang_state = pci_config_get16(Adapter->osdep.cfg_handle,
1998 PCICFG_DESC_RING_STATUS);
1999 if (!(hang_state & FLUSH_DESC_REQUIRED) || !tdlen)
2000 return;
2001 e1000g_flush_tx_ring(Adapter);
2002
2003 /* recheck, maybe the fault is caused by the rx ring */
2004 hang_state = pci_config_get16(Adapter->osdep.cfg_handle,
2005 PCICFG_DESC_RING_STATUS);
2006 if (hang_state & FLUSH_DESC_REQUIRED)
2007 e1000g_flush_rx_ring(Adapter);
2008
2009 }
2010
2011 static void
2012 e1000g_m_stop(void *arg)
2013 {
2014 struct e1000g *Adapter = (struct e1000g *)arg;
2015
2016 /* Drain tx sessions */
2017 (void) e1000g_tx_drain(Adapter);
2018
2019 rw_enter(&Adapter->chip_lock, RW_WRITER);
2020
2021 if (Adapter->e1000g_state & E1000G_SUSPENDED) {
2022 rw_exit(&Adapter->chip_lock);
2023 return;
2024 }
2025 Adapter->e1000g_state &= ~E1000G_STARTED;
2026 e1000g_stop(Adapter, B_TRUE);
2027
2028 rw_exit(&Adapter->chip_lock);
2029
2030 /* Disable and stop all the timers */
2031 disable_watchdog_timer(Adapter);
2032 stop_link_timer(Adapter);
2033 stop_82547_timer(Adapter->tx_ring);
2034 }
2035
2036 static void
2037 e1000g_stop(struct e1000g *Adapter, boolean_t global)
2038 {
2039 private_devi_list_t *devi_node;
2040 e1000g_rx_data_t *rx_data;
2041 int result;
2042
2043 Adapter->attach_progress &= ~ATTACH_PROGRESS_INIT;
2044
2045 /* Stop the chip and release pending resources */
2046
2047 /* Tell firmware driver is no longer in control */
2048 e1000g_release_driver_control(&Adapter->shared);
2049
2050 e1000g_clear_all_interrupts(Adapter);
2051
2052 mutex_enter(&e1000g_nvm_lock);
2053 result = e1000_reset_hw(&Adapter->shared);
2054 mutex_exit(&e1000g_nvm_lock);
2055
2056 if (result != E1000_SUCCESS) {
2057 e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_INVAL_STATE);
2058 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_LOST);
2059 }
2060
2061 mutex_enter(&Adapter->link_lock);
2062 Adapter->link_complete = B_FALSE;
2063 mutex_exit(&Adapter->link_lock);
2064
2065 /* Release resources still held by the TX descriptors */
2066 e1000g_tx_clean(Adapter);
2067
2068 if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK)
2069 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_LOST);
2070
2071 /* Clean the pending rx jumbo packet fragment */
2072 e1000g_rx_clean(Adapter);
2073
2074 /*
2075 	 * The I219, e.g. the pch_spt, has bugs such that we must ensure that
2076 * rings are flushed before we do anything else. This must be done
2077 * before we release DMA resources.
2078 */
2079 if (Adapter->shared.mac.type == e1000_pch_spt)
2080 e1000g_flush_desc_rings(Adapter);
2081
2082 if (global) {
2083 e1000g_release_dma_resources(Adapter);
2084
2085 mutex_enter(&e1000g_rx_detach_lock);
2086 rx_data = Adapter->rx_ring->rx_data;
2087 rx_data->flag |= E1000G_RX_STOPPED;
2088
2089 if (rx_data->pending_count == 0) {
2090 e1000g_free_rx_pending_buffers(rx_data);
2091 e1000g_free_rx_data(rx_data);
2092 } else {
2093 devi_node = rx_data->priv_devi_node;
2094 if (devi_node != NULL)
2095 atomic_inc_32(&devi_node->pending_rx_count);
2096 else
2097 atomic_inc_32(&Adapter->pending_rx_count);
2098 }
2099 mutex_exit(&e1000g_rx_detach_lock);
2100 }
2101
2102 if (Adapter->link_state != LINK_STATE_UNKNOWN) {
2103 Adapter->link_state = LINK_STATE_UNKNOWN;
2104 if (!Adapter->reset_flag)
2105 mac_link_update(Adapter->mh, Adapter->link_state);
2106 }
2107 }
2108
2109 static void
2110 e1000g_rx_clean(struct e1000g *Adapter)
2111 {
2112 e1000g_rx_data_t *rx_data = Adapter->rx_ring->rx_data;
2113
2114 if (rx_data == NULL)
2115 return;
2116
2117 if (rx_data->rx_mblk != NULL) {
2118 freemsg(rx_data->rx_mblk);
2119 rx_data->rx_mblk = NULL;
2120 rx_data->rx_mblk_tail = NULL;
2121 rx_data->rx_mblk_len = 0;
2122 }
2123 }
2124
2125 static void
2126 e1000g_tx_clean(struct e1000g *Adapter)
2127 {
2128 e1000g_tx_ring_t *tx_ring;
2129 p_tx_sw_packet_t packet;
2130 mblk_t *mp;
2131 mblk_t *nmp;
2132 uint32_t packet_count;
2133
2134 tx_ring = Adapter->tx_ring;
2135
2136 /*
2137 	 * Here we don't need to protect the lists with
2138 	 * the usedlist_lock and freelist_lock, because they
2139 	 * are already protected by the chip_lock.
2140 */
2141 mp = NULL;
2142 nmp = NULL;
2143 packet_count = 0;
2144 packet = (p_tx_sw_packet_t)QUEUE_GET_HEAD(&tx_ring->used_list);
2145 while (packet != NULL) {
2146 if (packet->mp != NULL) {
2147 /* Assemble the message chain */
2148 if (mp == NULL) {
2149 mp = packet->mp;
2150 nmp = packet->mp;
2151 } else {
2152 nmp->b_next = packet->mp;
2153 nmp = packet->mp;
2154 }
2155 /* Disconnect the message from the sw packet */
2156 packet->mp = NULL;
2157 }
2158
2159 e1000g_free_tx_swpkt(packet);
2160 packet_count++;
2161
2162 packet = (p_tx_sw_packet_t)
2163 QUEUE_GET_NEXT(&tx_ring->used_list, &packet->Link);
2164 }
2165
2166 if (mp != NULL)
2167 freemsgchain(mp);
2168
2169 if (packet_count > 0) {
2170 QUEUE_APPEND(&tx_ring->free_list, &tx_ring->used_list);
2171 QUEUE_INIT_LIST(&tx_ring->used_list);
2172
2173 /* Setup TX descriptor pointers */
2174 tx_ring->tbd_next = tx_ring->tbd_first;
2175 tx_ring->tbd_oldest = tx_ring->tbd_first;
2176
2177 /* Setup our HW Tx Head & Tail descriptor pointers */
2178 E1000_WRITE_REG(&Adapter->shared, E1000_TDH(0), 0);
2179 E1000_WRITE_REG(&Adapter->shared, E1000_TDT(0), 0);
2180 }
2181 }
2182
2183 static boolean_t
2184 e1000g_tx_drain(struct e1000g *Adapter)
2185 {
2186 int i;
2187 boolean_t done;
2188 e1000g_tx_ring_t *tx_ring;
2189
2190 tx_ring = Adapter->tx_ring;
2191
2192 	/* Allow up to TX_DRAIN_TIME ms for pending transmits to complete. */
2193 for (i = 0; i < TX_DRAIN_TIME; i++) {
2194 mutex_enter(&tx_ring->usedlist_lock);
2195 done = IS_QUEUE_EMPTY(&tx_ring->used_list);
2196 mutex_exit(&tx_ring->usedlist_lock);
2197
2198 if (done)
2199 break;
2200
2201 msec_delay(1);
2202 }
2203
2204 return (done);
2205 }
2206
2207 static boolean_t
2208 e1000g_rx_drain(struct e1000g *Adapter)
2209 {
2210 int i;
2211 boolean_t done;
2212
2213 /*
2214 	 * Allow up to RX_DRAIN_TIME ms for pending received packets to complete.
2215 */
2216 for (i = 0; i < RX_DRAIN_TIME; i++) {
2217 done = (Adapter->pending_rx_count == 0);
2218
2219 if (done)
2220 break;
2221
2222 msec_delay(1);
2223 }
2224
2225 return (done);
2226 }
2227
2228 static boolean_t
2229 e1000g_reset_adapter(struct e1000g *Adapter)
2230 {
2231 /* Disable and stop all the timers */
2232 disable_watchdog_timer(Adapter);
2233 stop_link_timer(Adapter);
2234 stop_82547_timer(Adapter->tx_ring);
2235
2236 rw_enter(&Adapter->chip_lock, RW_WRITER);
2237
2238 if (Adapter->stall_flag) {
2239 Adapter->stall_flag = B_FALSE;
2240 Adapter->reset_flag = B_TRUE;
2241 }
2242
2243 if (!(Adapter->e1000g_state & E1000G_STARTED)) {
2244 rw_exit(&Adapter->chip_lock);
2245 return (B_TRUE);
2246 }
2247
2248 e1000g_stop(Adapter, B_FALSE);
2249
2250 if (e1000g_start(Adapter, B_FALSE) != DDI_SUCCESS) {
2251 rw_exit(&Adapter->chip_lock);
2252 e1000g_log(Adapter, CE_WARN, "Reset failed");
2253 return (B_FALSE);
2254 }
2255
2256 rw_exit(&Adapter->chip_lock);
2257
2258 /* Enable and start the watchdog timer */
2259 enable_watchdog_timer(Adapter);
2260
2261 return (B_TRUE);
2262 }
2263
2264 boolean_t
2265 e1000g_global_reset(struct e1000g *Adapter)
2266 {
2267 /* Disable and stop all the timers */
2268 disable_watchdog_timer(Adapter);
2269 stop_link_timer(Adapter);
2270 stop_82547_timer(Adapter->tx_ring);
2271
2272 rw_enter(&Adapter->chip_lock, RW_WRITER);
2273
2274 e1000g_stop(Adapter, B_TRUE);
2275
2276 Adapter->init_count = 0;
2277
2278 if (e1000g_start(Adapter, B_TRUE) != DDI_SUCCESS) {
2279 rw_exit(&Adapter->chip_lock);
2280 e1000g_log(Adapter, CE_WARN, "Reset failed");
2281 return (B_FALSE);
2282 }
2283
2284 rw_exit(&Adapter->chip_lock);
2285
2286 /* Enable and start the watchdog timer */
2287 enable_watchdog_timer(Adapter);
2288
2289 return (B_TRUE);
2290 }
2291
2292 /*
2293 * e1000g_intr_pciexpress - ISR for PCI Express chipsets
2294 *
2295 * This interrupt service routine is for PCI-Express adapters.
2296  * The ICR contents are valid only when the E1000_ICR_INT_ASSERTED
2297 * bit is set.
2298 */
2299 static uint_t
2300 e1000g_intr_pciexpress(caddr_t arg)
2301 {
2302 struct e1000g *Adapter;
2303 uint32_t icr;
2304
2305 Adapter = (struct e1000g *)(uintptr_t)arg;
2306 icr = E1000_READ_REG(&Adapter->shared, E1000_ICR);
2307
2308 if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) {
2309 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED);
2310 return (DDI_INTR_CLAIMED);
2311 }
2312
2313 if (icr & E1000_ICR_INT_ASSERTED) {
2314 /*
2315 * E1000_ICR_INT_ASSERTED bit was set:
2316 * Read(Clear) the ICR, claim this interrupt,
2317 * look for work to do.
2318 */
2319 e1000g_intr_work(Adapter, icr);
2320 return (DDI_INTR_CLAIMED);
2321 } else {
2322 /*
2323 * E1000_ICR_INT_ASSERTED bit was not set:
2324 * Don't claim this interrupt, return immediately.
2325 */
2326 return (DDI_INTR_UNCLAIMED);
2327 }
2328 }
2329
2330 /*
2331 * e1000g_intr - ISR for PCI/PCI-X chipsets
2332 *
2333 * This interrupt service routine is for PCI/PCI-X adapters.
2334  * We check the ICR contents whether or not the E1000_ICR_INT_ASSERTED
2335  * bit is set.
2336 */
2337 static uint_t
2338 e1000g_intr(caddr_t arg)
2339 {
2340 struct e1000g *Adapter;
2341 uint32_t icr;
2342
2343 Adapter = (struct e1000g *)(uintptr_t)arg;
2344 icr = E1000_READ_REG(&Adapter->shared, E1000_ICR);
2345
2346 if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) {
2347 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED);
2348 return (DDI_INTR_CLAIMED);
2349 }
2350
2351 if (icr) {
2352 /*
2353 * Any bit was set in ICR:
2354 * Read(Clear) the ICR, claim this interrupt,
2355 * look for work to do.
2356 */
2357 e1000g_intr_work(Adapter, icr);
2358 return (DDI_INTR_CLAIMED);
2359 } else {
2360 /*
2361 * No bit was set in ICR:
2362 * Don't claim this interrupt, return immediately.
2363 */
2364 return (DDI_INTR_UNCLAIMED);
2365 }
2366 }
2367
2368 /*
2369 * e1000g_intr_work - actual processing of ISR
2370 *
2371 * Read(clear) the ICR contents and call appropriate interrupt
2372 * processing routines.
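 *
 * In the code below the individual ICR bits map onto the following work:
 * E1000_ICR_RXT0 drives receive processing, E1000_ICR_TXDW triggers tx
 * descriptor recycling, and E1000_ICR_RXSEQ, E1000_ICR_LSC and
 * E1000_ICR_GPI_EN1 kick off the link-state check.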
2373 */
2374 static void
2375 e1000g_intr_work(struct e1000g *Adapter, uint32_t icr)
2376 {
2377 struct e1000_hw *hw;
2378 hw = &Adapter->shared;
2379 e1000g_tx_ring_t *tx_ring = Adapter->tx_ring;
2380
2381 Adapter->rx_pkt_cnt = 0;
2382 Adapter->tx_pkt_cnt = 0;
2383
2384 rw_enter(&Adapter->chip_lock, RW_READER);
2385
2386 if (Adapter->e1000g_state & E1000G_SUSPENDED) {
2387 rw_exit(&Adapter->chip_lock);
2388 return;
2389 }
2390 /*
2391 * Here we need to check the "e1000g_state" flag within the chip_lock to
2392 * ensure the receive routine will not execute when the adapter is
2393 * being reset.
2394 */
2395 if (!(Adapter->e1000g_state & E1000G_STARTED)) {
2396 rw_exit(&Adapter->chip_lock);
2397 return;
2398 }
2399
2400 if (icr & E1000_ICR_RXT0) {
2401 mblk_t *mp = NULL;
2402 mblk_t *tail = NULL;
2403 e1000g_rx_ring_t *rx_ring;
2404
2405 rx_ring = Adapter->rx_ring;
2406 mutex_enter(&rx_ring->rx_lock);
2407 /*
2408 		 * Sometimes with legacy interrupts it is possible that
2409 		 * a single interrupt covers both Rx and Tx. In that
2410 		 * case, if the poll flag is set, we shouldn't really
2411 		 * be doing Rx processing here.
2412 */
2413 if (!rx_ring->poll_flag)
2414 mp = e1000g_receive(rx_ring, &tail,
2415 E1000G_CHAIN_NO_LIMIT);
2416 mutex_exit(&rx_ring->rx_lock);
2417 rw_exit(&Adapter->chip_lock);
2418 if (mp != NULL)
2419 mac_rx_ring(Adapter->mh, rx_ring->mrh,
2420 mp, rx_ring->ring_gen_num);
2421 } else
2422 rw_exit(&Adapter->chip_lock);
2423
2424 if (icr & E1000_ICR_TXDW) {
2425 if (!Adapter->tx_intr_enable)
2426 e1000g_clear_tx_interrupt(Adapter);
2427
2428 /* Recycle the tx descriptors */
2429 rw_enter(&Adapter->chip_lock, RW_READER);
2430 (void) e1000g_recycle(tx_ring);
2431 E1000G_DEBUG_STAT(tx_ring->stat_recycle_intr);
2432 rw_exit(&Adapter->chip_lock);
2433
2434 if (tx_ring->resched_needed &&
2435 (tx_ring->tbd_avail > DEFAULT_TX_UPDATE_THRESHOLD)) {
2436 tx_ring->resched_needed = B_FALSE;
2437 mac_tx_update(Adapter->mh);
2438 E1000G_STAT(tx_ring->stat_reschedule);
2439 }
2440 }
2441
2442 /*
2443 	 * The receive sequence error (RXSEQ) and link status change (LSC)
2444 	 * interrupts are checked to detect that the cable has been pulled
2445 	 * out. For Wiseman 2.0 silicon, the receive sequence error
2446 	 * interrupt is an indication that the cable is not connected.
2447 */
2448 if ((icr & E1000_ICR_RXSEQ) ||
2449 (icr & E1000_ICR_LSC) ||
2450 (icr & E1000_ICR_GPI_EN1)) {
2451 boolean_t link_changed;
2452 timeout_id_t tid = 0;
2453
2454 stop_watchdog_timer(Adapter);
2455
2456 rw_enter(&Adapter->chip_lock, RW_WRITER);
2457
2458 /*
2459 * Because we got a link-status-change interrupt, force
2460 * e1000_check_for_link() to look at phy
2461 */
2462 Adapter->shared.mac.get_link_status = B_TRUE;
2463
2464 /* e1000g_link_check takes care of link status change */
2465 link_changed = e1000g_link_check(Adapter);
2466
2467 /* Get new phy state */
2468 e1000g_get_phy_state(Adapter);
2469
2470 /*
2471 * If the link timer has not timed out, we'll not notify
2472 * the upper layer with any link state until the link is up.
2473 */
2474 if (link_changed && !Adapter->link_complete) {
2475 if (Adapter->link_state == LINK_STATE_UP) {
2476 mutex_enter(&Adapter->link_lock);
2477 Adapter->link_complete = B_TRUE;
2478 tid = Adapter->link_tid;
2479 Adapter->link_tid = 0;
2480 mutex_exit(&Adapter->link_lock);
2481 } else {
2482 link_changed = B_FALSE;
2483 }
2484 }
2485 rw_exit(&Adapter->chip_lock);
2486
2487 if (link_changed) {
2488 if (tid != 0)
2489 (void) untimeout(tid);
2490
2491 /*
2492 * Workaround for esb2. Data stuck in fifo on a link
2493 * down event. Stop receiver here and reset in watchdog.
2494 */
2495 if ((Adapter->link_state == LINK_STATE_DOWN) &&
2496 (Adapter->shared.mac.type == e1000_80003es2lan)) {
2497 uint32_t rctl = E1000_READ_REG(hw, E1000_RCTL);
2498 E1000_WRITE_REG(hw, E1000_RCTL,
2499 rctl & ~E1000_RCTL_EN);
2500 e1000g_log(Adapter, CE_WARN,
2501 "ESB2 receiver disabled");
2502 Adapter->esb2_workaround = B_TRUE;
2503 }
2504 if (!Adapter->reset_flag)
2505 mac_link_update(Adapter->mh,
2506 Adapter->link_state);
2507 if (Adapter->link_state == LINK_STATE_UP)
2508 Adapter->reset_flag = B_FALSE;
2509 }
2510
2511 start_watchdog_timer(Adapter);
2512 }
2513 }
2514
2515 static void
2516 e1000g_init_unicst(struct e1000g *Adapter)
2517 {
2518 struct e1000_hw *hw;
2519 int slot;
2520
2521 hw = &Adapter->shared;
2522
2523 if (Adapter->init_count == 0) {
2524 /* Initialize the multiple unicast addresses */
2525 Adapter->unicst_total = min(hw->mac.rar_entry_count,
2526 MAX_NUM_UNICAST_ADDRESSES);
2527
2528 /*
2529 * The common code does not correctly calculate the number of
2530 * rar's that could be reserved by firmware for the pch_lpt and
2531 * pch_spt macs. The interface has one primary rar, and 11
2532 * additional ones. Those 11 additional ones are not always
2533 * available. According to the datasheet, we need to check a
2534 * few of the bits set in the FWSM register. If the value is
2535 * zero, everything is available. If the value is 1, none of the
2536 * additional registers are available. If the value is 2-7, only
2537 		 * that many of the additional registers are available.
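		 * As a worked example, a WLOCK_MAC value of 3 leaves the
		 * primary RAR plus three shared RARs (four in total) usable,
		 * which matches the rar computation below.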
2538 */
2539 if (hw->mac.type == e1000_pch_lpt ||
2540 hw->mac.type == e1000_pch_spt) {
2541 uint32_t locked, rar;
2542
2543 locked = E1000_READ_REG(hw, E1000_FWSM) &
2544 E1000_FWSM_WLOCK_MAC_MASK;
2545 locked >>= E1000_FWSM_WLOCK_MAC_SHIFT;
2546 rar = 1;
2547 if (locked == 0)
2548 rar += 11;
2549 else if (locked == 1)
2550 rar += 0;
2551 else
2552 rar += locked;
2553 Adapter->unicst_total = min(rar,
2554 MAX_NUM_UNICAST_ADDRESSES);
2555 }
2556
2557 		/* Workaround for an erratum of the 82571 chipset */
2558 if ((hw->mac.type == e1000_82571) &&
2559 (e1000_get_laa_state_82571(hw) == B_TRUE))
2560 Adapter->unicst_total--;
2561
2562 /* VMware doesn't support multiple mac addresses properly */
2563 if (hw->subsystem_vendor_id == 0x15ad)
2564 Adapter->unicst_total = 1;
2565
2566 Adapter->unicst_avail = Adapter->unicst_total;
2567
2568 for (slot = 0; slot < Adapter->unicst_total; slot++) {
2569 /* Clear both the flag and MAC address */
2570 Adapter->unicst_addr[slot].reg.high = 0;
2571 Adapter->unicst_addr[slot].reg.low = 0;
2572 }
2573 } else {
2574 		/* Workaround for an erratum of the 82571 chipset */
2575 if ((hw->mac.type == e1000_82571) &&
2576 (e1000_get_laa_state_82571(hw) == B_TRUE))
2577 (void) e1000_rar_set(hw, hw->mac.addr, LAST_RAR_ENTRY);
2578
2579 /* Re-configure the RAR registers */
2580 for (slot = 0; slot < Adapter->unicst_total; slot++)
2581 if (Adapter->unicst_addr[slot].mac.set == 1)
2582 (void) e1000_rar_set(hw,
2583 Adapter->unicst_addr[slot].mac.addr, slot);
2584 }
2585
2586 if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK)
2587 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED);
2588 }
2589
2590 static int
2591 e1000g_unicst_set(struct e1000g *Adapter, const uint8_t *mac_addr,
2592 int slot)
2593 {
2594 struct e1000_hw *hw;
2595
2596 hw = &Adapter->shared;
2597
2598 /*
2599 	 * The first revision of Wiseman silicon (rev 2.0) has an erratum
2600 * that requires the receiver to be in reset when any of the
2601 * receive address registers (RAR regs) are accessed. The first
2602 * rev of Wiseman silicon also requires MWI to be disabled when
2603 * a global reset or a receive reset is issued. So before we
2604 * initialize the RARs, we check the rev of the Wiseman controller
2605 * and work around any necessary HW errata.
2606 */
2607 if ((hw->mac.type == e1000_82542) &&
2608 (hw->revision_id == E1000_REVISION_2)) {
2609 e1000_pci_clear_mwi(hw);
2610 E1000_WRITE_REG(hw, E1000_RCTL, E1000_RCTL_RST);
2611 msec_delay(5);
2612 }
2613 if (mac_addr == NULL) {
2614 E1000_WRITE_REG_ARRAY(hw, E1000_RA, slot << 1, 0);
2615 E1000_WRITE_FLUSH(hw);
2616 E1000_WRITE_REG_ARRAY(hw, E1000_RA, (slot << 1) + 1, 0);
2617 E1000_WRITE_FLUSH(hw);
2618 /* Clear both the flag and MAC address */
2619 Adapter->unicst_addr[slot].reg.high = 0;
2620 Adapter->unicst_addr[slot].reg.low = 0;
2621 } else {
2622 bcopy(mac_addr, Adapter->unicst_addr[slot].mac.addr,
2623 ETHERADDRL);
2624 (void) e1000_rar_set(hw, (uint8_t *)mac_addr, slot);
2625 Adapter->unicst_addr[slot].mac.set = 1;
2626 }
2627
2628 	/* Workaround for an erratum of the 82571 chipset */
2629 if (slot == 0) {
2630 if ((hw->mac.type == e1000_82571) &&
2631 (e1000_get_laa_state_82571(hw) == B_TRUE))
2632 if (mac_addr == NULL) {
2633 E1000_WRITE_REG_ARRAY(hw, E1000_RA,
2634 slot << 1, 0);
2635 E1000_WRITE_FLUSH(hw);
2636 E1000_WRITE_REG_ARRAY(hw, E1000_RA,
2637 (slot << 1) + 1, 0);
2638 E1000_WRITE_FLUSH(hw);
2639 } else {
2640 (void) e1000_rar_set(hw, (uint8_t *)mac_addr,
2641 LAST_RAR_ENTRY);
2642 }
2643 }
2644
2645 /*
2646 * If we are using Wiseman rev 2.0 silicon, we will have previously
2647 	 * put the receiver in reset, and disabled MWI, to work around some
2648 	 * HW errata. Now we should take the receiver out of reset, and
2649 	 * re-enable MWI if it was previously enabled by the PCI BIOS.
2650 */
2651 if ((hw->mac.type == e1000_82542) &&
2652 (hw->revision_id == E1000_REVISION_2)) {
2653 E1000_WRITE_REG(hw, E1000_RCTL, 0);
2654 msec_delay(1);
2655 if (hw->bus.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
2656 e1000_pci_set_mwi(hw);
2657 e1000g_rx_setup(Adapter);
2658 }
2659
2660 if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) {
2661 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED);
2662 return (EIO);
2663 }
2664
2665 return (0);
2666 }
2667
2668 static int
2669 multicst_add(struct e1000g *Adapter, const uint8_t *multiaddr)
2670 {
2671 struct e1000_hw *hw = &Adapter->shared;
2672 struct ether_addr *newtable;
2673 size_t new_len;
2674 size_t old_len;
2675 int res = 0;
2676
2677 if ((multiaddr[0] & 01) == 0) {
2678 res = EINVAL;
2679 e1000g_log(Adapter, CE_WARN, "Illegal multicast address");
2680 goto done;
2681 }
2682
2683 if (Adapter->mcast_count >= Adapter->mcast_max_num) {
2684 res = ENOENT;
2685 e1000g_log(Adapter, CE_WARN,
2686 "Adapter requested more than %d mcast addresses",
2687 Adapter->mcast_max_num);
2688 goto done;
2689 }
2690
2691
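	/*
	 * Grow the multicast table in MCAST_ALLOC_SIZE chunks once it fills
	 * up; the matching shrink path in multicst_remove() frees a chunk
	 * again when more than one chunk of slack accumulates.
	 */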
2692 if (Adapter->mcast_count == Adapter->mcast_alloc_count) {
2693 old_len = Adapter->mcast_alloc_count *
2694 sizeof (struct ether_addr);
2695 new_len = (Adapter->mcast_alloc_count + MCAST_ALLOC_SIZE) *
2696 sizeof (struct ether_addr);
2697
2698 newtable = kmem_alloc(new_len, KM_NOSLEEP);
2699 if (newtable == NULL) {
2700 res = ENOMEM;
2701 e1000g_log(Adapter, CE_WARN,
2702 "Not enough memory to alloc mcast table");
2703 goto done;
2704 }
2705
2706 if (Adapter->mcast_table != NULL) {
2707 bcopy(Adapter->mcast_table, newtable, old_len);
2708 kmem_free(Adapter->mcast_table, old_len);
2709 }
2710 Adapter->mcast_alloc_count += MCAST_ALLOC_SIZE;
2711 Adapter->mcast_table = newtable;
2712 }
2713
2714 bcopy(multiaddr,
2715 &Adapter->mcast_table[Adapter->mcast_count], ETHERADDRL);
2716 Adapter->mcast_count++;
2717
2718 /*
2719 * Update the MC table in the hardware
2720 */
2721 e1000g_clear_interrupt(Adapter);
2722
2723 e1000_update_mc_addr_list(hw,
2724 (uint8_t *)Adapter->mcast_table, Adapter->mcast_count);
2725
2726 e1000g_mask_interrupt(Adapter);
2727
2728 if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) {
2729 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED);
2730 res = EIO;
2731 }
2732
2733 done:
2734 return (res);
2735 }
2736
2737 static int
2738 multicst_remove(struct e1000g *Adapter, const uint8_t *multiaddr)
2739 {
2740 struct e1000_hw *hw = &Adapter->shared;
2741 struct ether_addr *newtable;
2742 size_t new_len;
2743 size_t old_len;
2744 unsigned i;
2745
2746 for (i = 0; i < Adapter->mcast_count; i++) {
2747 if (bcmp(multiaddr, &Adapter->mcast_table[i],
2748 ETHERADDRL) == 0) {
2749 for (i++; i < Adapter->mcast_count; i++) {
2750 Adapter->mcast_table[i - 1] =
2751 Adapter->mcast_table[i];
2752 }
2753 Adapter->mcast_count--;
2754 break;
2755 }
2756 }
2757
2758 if ((Adapter->mcast_alloc_count - Adapter->mcast_count) >
2759 MCAST_ALLOC_SIZE) {
2760 old_len = Adapter->mcast_alloc_count *
2761 sizeof (struct ether_addr);
2762 new_len = (Adapter->mcast_alloc_count - MCAST_ALLOC_SIZE) *
2763 sizeof (struct ether_addr);
2764
2765 newtable = kmem_alloc(new_len, KM_NOSLEEP);
2766 if (newtable != NULL) {
2767 bcopy(Adapter->mcast_table, newtable, new_len);
2768 kmem_free(Adapter->mcast_table, old_len);
2769
2770 Adapter->mcast_alloc_count -= MCAST_ALLOC_SIZE;
2771 Adapter->mcast_table = newtable;
2772 }
2773 }
2774
2775 /*
2776 * Update the MC table in the hardware
2777 */
2778 e1000g_clear_interrupt(Adapter);
2779
2780 e1000_update_mc_addr_list(hw,
2781 (uint8_t *)Adapter->mcast_table, Adapter->mcast_count);
2782
2783 e1000g_mask_interrupt(Adapter);
2784
2785 if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) {
2786 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED);
2787 return (EIO);
2788 }
2789
2790 return (0);
2791 }
2792
2793 static void
2794 e1000g_release_multicast(struct e1000g *Adapter)
2795 {
2796 if (Adapter->mcast_table != NULL) {
2797 kmem_free(Adapter->mcast_table,
2798 Adapter->mcast_alloc_count * sizeof (struct ether_addr));
2799 Adapter->mcast_table = NULL;
2800 }
2801 }
2802
2803 int
2804 e1000g_m_multicst(void *arg, boolean_t add, const uint8_t *addr)
2805 {
2806 struct e1000g *Adapter = (struct e1000g *)arg;
2807 int result;
2808
2809 rw_enter(&Adapter->chip_lock, RW_WRITER);
2810
2811 if (Adapter->e1000g_state & E1000G_SUSPENDED) {
2812 result = ECANCELED;
2813 goto done;
2814 }
2815
2816 result = (add) ? multicst_add(Adapter, addr)
2817 : multicst_remove(Adapter, addr);
2818
2819 done:
2820 rw_exit(&Adapter->chip_lock);
2821 return (result);
2822
2823 }
2824
2825 int
2826 e1000g_m_promisc(void *arg, boolean_t on)
2827 {
2828 struct e1000g *Adapter = (struct e1000g *)arg;
2829 uint32_t rctl;
2830
2831 rw_enter(&Adapter->chip_lock, RW_WRITER);
2832
2833 if (Adapter->e1000g_state & E1000G_SUSPENDED) {
2834 rw_exit(&Adapter->chip_lock);
2835 return (ECANCELED);
2836 }
2837
2838 rctl = E1000_READ_REG(&Adapter->shared, E1000_RCTL);
2839
2840 if (on)
2841 rctl |=
2842 (E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_BAM);
2843 else
2844 rctl &= (~(E1000_RCTL_UPE | E1000_RCTL_MPE));
2845
2846 E1000_WRITE_REG(&Adapter->shared, E1000_RCTL, rctl);
2847
2848 Adapter->e1000g_promisc = on;
2849
2850 rw_exit(&Adapter->chip_lock);
2851
2852 if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) {
2853 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED);
2854 return (EIO);
2855 }
2856
2857 return (0);
2858 }
2859
2860 /*
2861 * Entry points to enable and disable interrupts at the granularity of
2862 * a group.
2863  * These turn poll_mode for the whole adapter on and off, enabling or
2864  * overriding the ring-level polling control over the hardware interrupts.
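 *
 * In practice, when the group interrupt is "disabled" the MAC layer pulls
 * packets itself through e1000g_poll_ring(), while the per-ring callbacks
 * further below mask and unmask the RXT0 interrupt in hardware; this note
 * simply summarizes the callbacks that follow.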
2865 */
2866 static int
2867 e1000g_rx_group_intr_enable(mac_intr_handle_t arg)
2868 {
2869 struct e1000g *adapter = (struct e1000g *)arg;
2870 e1000g_rx_ring_t *rx_ring = adapter->rx_ring;
2871
2872 /*
2873 	 * Later interrupts at the granularity of this ring will
2874 * invoke mac_rx() with NULL, indicating the need for another
2875 * software classification.
2876 * We have a single ring usable per adapter now, so we only need to
2877 * reset the rx handle for that one.
2878 * When more RX rings can be used, we should update each one of them.
2879 */
2880 mutex_enter(&rx_ring->rx_lock);
2881 rx_ring->mrh = NULL;
2882 adapter->poll_mode = B_FALSE;
2883 mutex_exit(&rx_ring->rx_lock);
2884 return (0);
2885 }
2886
2887 static int
2888 e1000g_rx_group_intr_disable(mac_intr_handle_t arg)
2889 {
2890 struct e1000g *adapter = (struct e1000g *)arg;
2891 e1000g_rx_ring_t *rx_ring = adapter->rx_ring;
2892
2893 mutex_enter(&rx_ring->rx_lock);
2894
2895 /*
2896 	 * Later interrupts at the granularity of this ring will
2897 	 * invoke mac_rx() with the handle for this ring.
2898 */
2899 adapter->poll_mode = B_TRUE;
2900 rx_ring->mrh = rx_ring->mrh_init;
2901 mutex_exit(&rx_ring->rx_lock);
2902 return (0);
2903 }
2904
2905 /*
2906 * Entry points to enable and disable interrupts at the granularity of
2907 * a ring.
2908  * The adapter's poll_mode controls whether we actually proceed with
2909  * hardware interrupt toggling.
2910 */
2911 static int
2912 e1000g_rx_ring_intr_enable(mac_intr_handle_t intrh)
2913 {
2914 e1000g_rx_ring_t *rx_ring = (e1000g_rx_ring_t *)intrh;
2915 struct e1000g *adapter = rx_ring->adapter;
2916 struct e1000_hw *hw = &adapter->shared;
2917 uint32_t intr_mask;
2918
2919 rw_enter(&adapter->chip_lock, RW_READER);
2920
2921 if (adapter->e1000g_state & E1000G_SUSPENDED) {
2922 rw_exit(&adapter->chip_lock);
2923 return (0);
2924 }
2925
2926 mutex_enter(&rx_ring->rx_lock);
2927 rx_ring->poll_flag = 0;
2928 mutex_exit(&rx_ring->rx_lock);
2929
2930 /* Rx interrupt enabling for MSI and legacy */
2931 intr_mask = E1000_READ_REG(hw, E1000_IMS);
2932 intr_mask |= E1000_IMS_RXT0;
2933 E1000_WRITE_REG(hw, E1000_IMS, intr_mask);
2934 E1000_WRITE_FLUSH(hw);
2935
2936 /* Trigger a Rx interrupt to check Rx ring */
2937 E1000_WRITE_REG(hw, E1000_ICS, E1000_IMS_RXT0);
2938 E1000_WRITE_FLUSH(hw);
2939
2940 rw_exit(&adapter->chip_lock);
2941 return (0);
2942 }
2943
2944 static int
2945 e1000g_rx_ring_intr_disable(mac_intr_handle_t intrh)
2946 {
2947 e1000g_rx_ring_t *rx_ring = (e1000g_rx_ring_t *)intrh;
2948 struct e1000g *adapter = rx_ring->adapter;
2949 struct e1000_hw *hw = &adapter->shared;
2950
2951 rw_enter(&adapter->chip_lock, RW_READER);
2952
2953 if (adapter->e1000g_state & E1000G_SUSPENDED) {
2954 rw_exit(&adapter->chip_lock);
2955 return (0);
2956 }
2957 mutex_enter(&rx_ring->rx_lock);
2958 rx_ring->poll_flag = 1;
2959 mutex_exit(&rx_ring->rx_lock);
2960
2961 /* Rx interrupt disabling for MSI and legacy */
2962 E1000_WRITE_REG(hw, E1000_IMC, E1000_IMS_RXT0);
2963 E1000_WRITE_FLUSH(hw);
2964
2965 rw_exit(&adapter->chip_lock);
2966 return (0);
2967 }
2968
2969 /*
2970 * e1000g_unicst_find - Find the slot for the specified unicast address
2971 */
2972 static int
2973 e1000g_unicst_find(struct e1000g *Adapter, const uint8_t *mac_addr)
2974 {
2975 int slot;
2976
2977 for (slot = 0; slot < Adapter->unicst_total; slot++) {
2978 if ((Adapter->unicst_addr[slot].mac.set == 1) &&
2979 (bcmp(Adapter->unicst_addr[slot].mac.addr,
2980 mac_addr, ETHERADDRL) == 0))
2981 return (slot);
2982 }
2983
2984 return (-1);
2985 }
2986
2987 /*
2988 * Entry points to add and remove a MAC address to a ring group.
2989  * The caller takes care of adding MAC addresses to and removing them
2990  * from the filter via these two routines.
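 *
 * They are wired up as the mgi_addmac/mgi_remmac callbacks in
 * e1000g_fill_group() below, so the MAC layer invokes them whenever it
 * programs or clears a unicast address, for example when additional MAC
 * clients such as VNICs are configured over this link.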
2991 */
2992
2993 static int
2994 e1000g_addmac(void *arg, const uint8_t *mac_addr)
2995 {
2996 struct e1000g *Adapter = (struct e1000g *)arg;
2997 int slot, err;
2998
2999 rw_enter(&Adapter->chip_lock, RW_WRITER);
3000
3001 if (Adapter->e1000g_state & E1000G_SUSPENDED) {
3002 rw_exit(&Adapter->chip_lock);
3003 return (ECANCELED);
3004 }
3005
3006 if (e1000g_unicst_find(Adapter, mac_addr) != -1) {
3007 /* The same address is already in slot */
3008 rw_exit(&Adapter->chip_lock);
3009 return (0);
3010 }
3011
3012 if (Adapter->unicst_avail == 0) {
3013 /* no slots available */
3014 rw_exit(&Adapter->chip_lock);
3015 return (ENOSPC);
3016 }
3017
3018 /* Search for a free slot */
3019 for (slot = 0; slot < Adapter->unicst_total; slot++) {
3020 if (Adapter->unicst_addr[slot].mac.set == 0)
3021 break;
3022 }
3023 ASSERT(slot < Adapter->unicst_total);
3024
3025 err = e1000g_unicst_set(Adapter, mac_addr, slot);
3026 if (err == 0)
3027 Adapter->unicst_avail--;
3028
3029 rw_exit(&Adapter->chip_lock);
3030
3031 return (err);
3032 }
3033
3034 static int
3035 e1000g_remmac(void *arg, const uint8_t *mac_addr)
3036 {
3037 struct e1000g *Adapter = (struct e1000g *)arg;
3038 int slot, err;
3039
3040 rw_enter(&Adapter->chip_lock, RW_WRITER);
3041
3042 if (Adapter->e1000g_state & E1000G_SUSPENDED) {
3043 rw_exit(&Adapter->chip_lock);
3044 return (ECANCELED);
3045 }
3046
3047 slot = e1000g_unicst_find(Adapter, mac_addr);
3048 if (slot == -1) {
3049 rw_exit(&Adapter->chip_lock);
3050 return (EINVAL);
3051 }
3052
3053 ASSERT(Adapter->unicst_addr[slot].mac.set);
3054
3055 /* Clear this slot */
3056 err = e1000g_unicst_set(Adapter, NULL, slot);
3057 if (err == 0)
3058 Adapter->unicst_avail++;
3059
3060 rw_exit(&Adapter->chip_lock);
3061
3062 return (err);
3063 }
3064
3065 static int
3066 e1000g_ring_start(mac_ring_driver_t rh, uint64_t mr_gen_num)
3067 {
3068 e1000g_rx_ring_t *rx_ring = (e1000g_rx_ring_t *)rh;
3069
3070 mutex_enter(&rx_ring->rx_lock);
3071 rx_ring->ring_gen_num = mr_gen_num;
3072 mutex_exit(&rx_ring->rx_lock);
3073 return (0);
3074 }
3075
3076 /*
3077  * Callback function for the MAC layer to register all rings.
3078 *
3079 * The hardware supports a single group with currently only one ring
3080 * available.
3081 * Though not offering virtualization ability per se, exposing the
3082 * group/ring still enables the polling and interrupt toggling.
3083 */
3084 /* ARGSUSED */
3085 void
3086 e1000g_fill_ring(void *arg, mac_ring_type_t rtype, const int grp_index,
3087 const int ring_index, mac_ring_info_t *infop, mac_ring_handle_t rh)
3088 {
3089 struct e1000g *Adapter = (struct e1000g *)arg;
3090 e1000g_rx_ring_t *rx_ring = Adapter->rx_ring;
3091 mac_intr_t *mintr;
3092
3093 /*
3094 * We advertised only RX group/rings, so the MAC framework shouldn't
3095 	 * ask for anything else.
3096 */
3097 ASSERT(rtype == MAC_RING_TYPE_RX && grp_index == 0 && ring_index == 0);
3098
3099 rx_ring->mrh = rx_ring->mrh_init = rh;
3100 infop->mri_driver = (mac_ring_driver_t)rx_ring;
3101 infop->mri_start = e1000g_ring_start;
3102 infop->mri_stop = NULL;
3103 infop->mri_poll = e1000g_poll_ring;
3104 infop->mri_stat = e1000g_rx_ring_stat;
3105
3106 /* Ring level interrupts */
3107 mintr = &infop->mri_intr;
3108 mintr->mi_handle = (mac_intr_handle_t)rx_ring;
3109 mintr->mi_enable = e1000g_rx_ring_intr_enable;
3110 mintr->mi_disable = e1000g_rx_ring_intr_disable;
3111 if (Adapter->msi_enable)
3112 mintr->mi_ddi_handle = Adapter->htable[0];
3113 }
3114
3115 /* ARGSUSED */
3116 static void
3117 e1000g_fill_group(void *arg, mac_ring_type_t rtype, const int grp_index,
3118 mac_group_info_t *infop, mac_group_handle_t gh)
3119 {
3120 struct e1000g *Adapter = (struct e1000g *)arg;
3121 mac_intr_t *mintr;
3122
3123 /*
3124 * We advertised a single RX ring. Getting a request for anything else
3125 * signifies a bug in the MAC framework.
3126 */
3127 ASSERT(rtype == MAC_RING_TYPE_RX && grp_index == 0);
3128
3129 Adapter->rx_group = gh;
3130
3131 infop->mgi_driver = (mac_group_driver_t)Adapter;
3132 infop->mgi_start = NULL;
3133 infop->mgi_stop = NULL;
3134 infop->mgi_addmac = e1000g_addmac;
3135 infop->mgi_remmac = e1000g_remmac;
3136 infop->mgi_count = 1;
3137
3138 /* Group level interrupts */
3139 mintr = &infop->mgi_intr;
3140 mintr->mi_handle = (mac_intr_handle_t)Adapter;
3141 mintr->mi_enable = e1000g_rx_group_intr_enable;
3142 mintr->mi_disable = e1000g_rx_group_intr_disable;
3143 }
3144
3145 static void
3146 e1000g_led_blink(void *arg)
3147 {
3148 e1000g_t *e1000g = arg;
3149
3150 mutex_enter(&e1000g->e1000g_led_lock);
3151 VERIFY(e1000g->e1000g_emul_blink);
3152 if (e1000g->e1000g_emul_state) {
3153 (void) e1000_led_on(&e1000g->shared);
3154 } else {
3155 (void) e1000_led_off(&e1000g->shared);
3156 }
3157 e1000g->e1000g_emul_state = !e1000g->e1000g_emul_state;
3158 mutex_exit(&e1000g->e1000g_led_lock);
3159 }
3160
3161 static int
3162 e1000g_led_set(void *arg, mac_led_mode_t mode, uint_t flags)
3163 {
3164 e1000g_t *e1000g = arg;
3165
3166 if (flags != 0)
3167 return (EINVAL);
3168
3169 if (mode != MAC_LED_DEFAULT &&
3170 mode != MAC_LED_IDENT &&
3171 mode != MAC_LED_OFF &&
3172 mode != MAC_LED_ON)
3173 return (ENOTSUP);
3174
3175 mutex_enter(&e1000g->e1000g_led_lock);
3176
3177 if ((mode == MAC_LED_IDENT || mode == MAC_LED_OFF ||
3178 mode == MAC_LED_ON) &&
3179 !e1000g->e1000g_led_setup) {
3180 if (e1000_setup_led(&e1000g->shared) != E1000_SUCCESS) {
3181 mutex_exit(&e1000g->e1000g_led_lock);
3182 return (EIO);
3183 }
3184
3185 e1000g->e1000g_led_setup = B_TRUE;
3186 }
3187
3188 if (mode != MAC_LED_IDENT && e1000g->e1000g_blink != NULL) {
3189 ddi_periodic_t id = e1000g->e1000g_blink;
3190 e1000g->e1000g_blink = NULL;
3191 mutex_exit(&e1000g->e1000g_led_lock);
3192 ddi_periodic_delete(id);
3193 mutex_enter(&e1000g->e1000g_led_lock);
3194 }
3195
3196 switch (mode) {
3197 case MAC_LED_DEFAULT:
3198 if (e1000g->e1000g_led_setup) {
3199 if (e1000_cleanup_led(&e1000g->shared) !=
3200 E1000_SUCCESS) {
3201 mutex_exit(&e1000g->e1000g_led_lock);
3202 return (EIO);
3203 }
3204 e1000g->e1000g_led_setup = B_FALSE;
3205 }
3206 break;
3207 case MAC_LED_IDENT:
3208 if (e1000g->e1000g_emul_blink) {
3209 if (e1000g->e1000g_blink != NULL)
3210 break;
3211
3212 /*
3213 			 * Note, we use a 200 ms period here because that is
3214 			 * the rate at which section 10.1.3 of the 8254x Intel
3215 			 * Manual (PCI/PCI-X Family of Gigabit Ethernet
3216 			 * Controllers Software Developer's Manual) indicates
3217 			 * the optional blink hardware operates.
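			 *
			 * The interval passed to ddi_periodic_add() below is
			 * 200 * (NANOSEC / MILLISEC) = 200,000,000 ns, i.e.
			 * the LED is toggled every 200 ms.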
3218 */
3219 e1000g->e1000g_blink =
3220 ddi_periodic_add(e1000g_led_blink, e1000g,
3221 200ULL * (NANOSEC / MILLISEC), DDI_IPL_0);
3222 } else if (e1000_blink_led(&e1000g->shared) != E1000_SUCCESS) {
3223 mutex_exit(&e1000g->e1000g_led_lock);
3224 return (EIO);
3225 }
3226 break;
3227 case MAC_LED_OFF:
3228 if (e1000_led_off(&e1000g->shared) != E1000_SUCCESS) {
3229 mutex_exit(&e1000g->e1000g_led_lock);
3230 return (EIO);
3231 }
3232 break;
3233 case MAC_LED_ON:
3234 if (e1000_led_on(&e1000g->shared) != E1000_SUCCESS) {
3235 mutex_exit(&e1000g->e1000g_led_lock);
3236 return (EIO);
3237 }
3238 break;
3239 default:
3240 mutex_exit(&e1000g->e1000g_led_lock);
3241 return (ENOTSUP);
3242 }
3243
3244 mutex_exit(&e1000g->e1000g_led_lock);
3245 return (0);
3246
3247 }
3248
3249 static boolean_t
3250 e1000g_m_getcapab(void *arg, mac_capab_t cap, void *cap_data)
3251 {
3252 struct e1000g *Adapter = (struct e1000g *)arg;
3253
3254 switch (cap) {
3255 case MAC_CAPAB_HCKSUM: {
3256 uint32_t *txflags = cap_data;
3257
3258 if (Adapter->tx_hcksum_enable)
3259 *txflags = HCKSUM_IPHDRCKSUM |
3260 HCKSUM_INET_PARTIAL;
3261 else
3262 return (B_FALSE);
3263 break;
3264 }
3265
3266 case MAC_CAPAB_LSO: {
3267 mac_capab_lso_t *cap_lso = cap_data;
3268
3269 if (Adapter->lso_enable) {
3270 cap_lso->lso_flags = LSO_TX_BASIC_TCP_IPV4;
3271 cap_lso->lso_basic_tcp_ipv4.lso_max =
3272 E1000_LSO_MAXLEN;
3273 } else
3274 return (B_FALSE);
3275 break;
3276 }
3277 case MAC_CAPAB_RINGS: {
3278 mac_capab_rings_t *cap_rings = cap_data;
3279
3280 /* No TX rings exposed yet */
3281 if (cap_rings->mr_type != MAC_RING_TYPE_RX)
3282 return (B_FALSE);
3283
3284 cap_rings->mr_group_type = MAC_GROUP_TYPE_STATIC;
3285 cap_rings->mr_rnum = 1;
3286 cap_rings->mr_gnum = 1;
3287 cap_rings->mr_rget = e1000g_fill_ring;
3288 cap_rings->mr_gget = e1000g_fill_group;
3289 break;
3290 }
3291 case MAC_CAPAB_LED: {
3292 mac_capab_led_t *cap_led = cap_data;
3293
3294 cap_led->mcl_flags = 0;
3295 cap_led->mcl_modes = MAC_LED_DEFAULT;
3296 if (Adapter->shared.mac.ops.blink_led != NULL &&
3297 Adapter->shared.mac.ops.blink_led !=
3298 e1000_null_ops_generic) {
3299 cap_led->mcl_modes |= MAC_LED_IDENT;
3300 }
3301
3302 if (Adapter->shared.mac.ops.led_off != NULL &&
3303 Adapter->shared.mac.ops.led_off !=
3304 e1000_null_ops_generic) {
3305 cap_led->mcl_modes |= MAC_LED_OFF;
3306 }
3307
3308 if (Adapter->shared.mac.ops.led_on != NULL &&
3309 Adapter->shared.mac.ops.led_on !=
3310 e1000_null_ops_generic) {
3311 cap_led->mcl_modes |= MAC_LED_ON;
3312 }
3313
3314 /*
3315 		 * Some hardware doesn't support blinking natively because it is
3316 		 * missing the optional blink circuit. If the hardware supports
3317 		 * both off and on, we emulate blinking ourselves.
3318 */
3319 if (((cap_led->mcl_modes & MAC_LED_IDENT) == 0) &&
3320 ((cap_led->mcl_modes & MAC_LED_OFF) != 0) &&
3321 ((cap_led->mcl_modes & MAC_LED_ON) != 0)) {
3322 cap_led->mcl_modes |= MAC_LED_IDENT;
3323 Adapter->e1000g_emul_blink = B_TRUE;
3324 }
3325
3326 cap_led->mcl_set = e1000g_led_set;
3327 break;
3328 }
3329 default:
3330 return (B_FALSE);
3331 }
3332 return (B_TRUE);
3333 }
3334
3335 static boolean_t
3336 e1000g_param_locked(mac_prop_id_t pr_num)
3337 {
3338 /*
3339 * All en_* parameters are locked (read-only) while
3340 * the device is in any sort of loopback mode ...
3341 */
3342 switch (pr_num) {
3343 case MAC_PROP_EN_1000FDX_CAP:
3344 case MAC_PROP_EN_1000HDX_CAP:
3345 case MAC_PROP_EN_100FDX_CAP:
3346 case MAC_PROP_EN_100HDX_CAP:
3347 case MAC_PROP_EN_10FDX_CAP:
3348 case MAC_PROP_EN_10HDX_CAP:
3349 case MAC_PROP_AUTONEG:
3350 case MAC_PROP_FLOWCTRL:
3351 return (B_TRUE);
3352 }
3353 return (B_FALSE);
3354 }
3355
3356 /*
3357 * callback function for set/get of properties
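 *
 * These callbacks are reached through the MAC framework on behalf of the
 * usual administration tools; for instance, something along the lines of
 * "dladm set-linkprop -p mtu=9000 e1000g0" would land in the MAC_PROP_MTU
 * case of e1000g_m_setprop() below. The dladm invocation is only an
 * illustration of the typical path, not something defined in this file.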
3358 */
3359 static int
3360 e1000g_m_setprop(void *arg, const char *pr_name, mac_prop_id_t pr_num,
3361 uint_t pr_valsize, const void *pr_val)
3362 {
3363 struct e1000g *Adapter = arg;
3364 struct e1000_hw *hw = &Adapter->shared;
3365 struct e1000_fc_info *fc = &Adapter->shared.fc;
3366 int err = 0;
3367 link_flowctrl_t flowctrl;
3368 uint32_t cur_mtu, new_mtu;
3369
3370 rw_enter(&Adapter->chip_lock, RW_WRITER);
3371
3372 if (Adapter->e1000g_state & E1000G_SUSPENDED) {
3373 rw_exit(&Adapter->chip_lock);
3374 return (ECANCELED);
3375 }
3376
3377 if (Adapter->loopback_mode != E1000G_LB_NONE &&
3378 e1000g_param_locked(pr_num)) {
3379 /*
3380 * All en_* parameters are locked (read-only)
3381 * while the device is in any sort of loopback mode.
3382 */
3383 rw_exit(&Adapter->chip_lock);
3384 return (EBUSY);
3385 }
3386
3387 switch (pr_num) {
3388 case MAC_PROP_EN_1000FDX_CAP:
3389 if (hw->phy.media_type != e1000_media_type_copper) {
3390 err = ENOTSUP;
3391 break;
3392 }
3393 Adapter->param_en_1000fdx = *(uint8_t *)pr_val;
3394 Adapter->param_adv_1000fdx = *(uint8_t *)pr_val;
3395 goto reset;
3396 case MAC_PROP_EN_100FDX_CAP:
3397 if (hw->phy.media_type != e1000_media_type_copper) {
3398 err = ENOTSUP;
3399 break;
3400 }
3401 Adapter->param_en_100fdx = *(uint8_t *)pr_val;
3402 Adapter->param_adv_100fdx = *(uint8_t *)pr_val;
3403 goto reset;
3404 case MAC_PROP_EN_100HDX_CAP:
3405 if (hw->phy.media_type != e1000_media_type_copper) {
3406 err = ENOTSUP;
3407 break;
3408 }
3409 Adapter->param_en_100hdx = *(uint8_t *)pr_val;
3410 Adapter->param_adv_100hdx = *(uint8_t *)pr_val;
3411 goto reset;
3412 case MAC_PROP_EN_10FDX_CAP:
3413 if (hw->phy.media_type != e1000_media_type_copper) {
3414 err = ENOTSUP;
3415 break;
3416 }
3417 Adapter->param_en_10fdx = *(uint8_t *)pr_val;
3418 Adapter->param_adv_10fdx = *(uint8_t *)pr_val;
3419 goto reset;
3420 case MAC_PROP_EN_10HDX_CAP:
3421 if (hw->phy.media_type != e1000_media_type_copper) {
3422 err = ENOTSUP;
3423 break;
3424 }
3425 Adapter->param_en_10hdx = *(uint8_t *)pr_val;
3426 Adapter->param_adv_10hdx = *(uint8_t *)pr_val;
3427 goto reset;
3428 case MAC_PROP_AUTONEG:
3429 if (hw->phy.media_type != e1000_media_type_copper) {
3430 err = ENOTSUP;
3431 break;
3432 }
3433 Adapter->param_adv_autoneg = *(uint8_t *)pr_val;
3434 goto reset;
3435 case MAC_PROP_FLOWCTRL:
3436 fc->send_xon = B_TRUE;
3437 bcopy(pr_val, &flowctrl, sizeof (flowctrl));
3438
3439 switch (flowctrl) {
3440 default:
3441 err = EINVAL;
3442 break;
3443 case LINK_FLOWCTRL_NONE:
3444 fc->requested_mode = e1000_fc_none;
3445 break;
3446 case LINK_FLOWCTRL_RX:
3447 fc->requested_mode = e1000_fc_rx_pause;
3448 break;
3449 case LINK_FLOWCTRL_TX:
3450 fc->requested_mode = e1000_fc_tx_pause;
3451 break;
3452 case LINK_FLOWCTRL_BI:
3453 fc->requested_mode = e1000_fc_full;
3454 break;
3455 }
3456 reset:
3457 if (err == 0) {
3458 /* check PCH limits & reset the link */
3459 e1000g_pch_limits(Adapter);
3460 if (e1000g_reset_link(Adapter) != DDI_SUCCESS)
3461 err = EINVAL;
3462 }
3463 break;
3464 case MAC_PROP_ADV_1000FDX_CAP:
3465 case MAC_PROP_ADV_1000HDX_CAP:
3466 case MAC_PROP_ADV_100FDX_CAP:
3467 case MAC_PROP_ADV_100HDX_CAP:
3468 case MAC_PROP_ADV_10FDX_CAP:
3469 case MAC_PROP_ADV_10HDX_CAP:
3470 case MAC_PROP_EN_1000HDX_CAP:
3471 case MAC_PROP_STATUS:
3472 case MAC_PROP_SPEED:
3473 case MAC_PROP_DUPLEX:
3474 err = ENOTSUP; /* read-only prop. Can't set this. */
3475 break;
3476 case MAC_PROP_MTU:
3477 /* adapter must be stopped for an MTU change */
3478 if (Adapter->e1000g_state & E1000G_STARTED) {
3479 err = EBUSY;
3480 break;
3481 }
3482
3483 cur_mtu = Adapter->default_mtu;
3484
3485 /* get new requested MTU */
3486 bcopy(pr_val, &new_mtu, sizeof (new_mtu));
3487 if (new_mtu == cur_mtu) {
3488 err = 0;
3489 break;
3490 }
3491
3492 if ((new_mtu < DEFAULT_MTU) ||
3493 (new_mtu > Adapter->max_mtu)) {
3494 err = EINVAL;
3495 break;
3496 }
3497
3498 /* inform MAC framework of new MTU */
3499 err = mac_maxsdu_update(Adapter->mh, new_mtu);
3500
3501 if (err == 0) {
3502 Adapter->default_mtu = new_mtu;
3503 Adapter->max_frame_size =
3504 e1000g_mtu2maxframe(new_mtu);
3505
3506 /*
3507 * check PCH limits & set buffer sizes to
3508 * match new MTU
3509 */
3510 e1000g_pch_limits(Adapter);
3511 e1000g_set_bufsize(Adapter);
3512
3513 /*
3514 * decrease the number of descriptors and free
3515 * packets for jumbo frames to reduce tx/rx
3516 * resource consumption
3517 */
3518 if (Adapter->max_frame_size >=
3519 (FRAME_SIZE_UPTO_4K)) {
3520 if (Adapter->tx_desc_num_flag == 0)
3521 Adapter->tx_desc_num =
3522 DEFAULT_JUMBO_NUM_TX_DESC;
3523
3524 if (Adapter->rx_desc_num_flag == 0)
3525 Adapter->rx_desc_num =
3526 DEFAULT_JUMBO_NUM_RX_DESC;
3527
3528 if (Adapter->tx_buf_num_flag == 0)
3529 Adapter->tx_freelist_num =
3530 DEFAULT_JUMBO_NUM_TX_BUF;
3531
3532 if (Adapter->rx_buf_num_flag == 0)
3533 Adapter->rx_freelist_limit =
3534 DEFAULT_JUMBO_NUM_RX_BUF;
3535 } else {
3536 if (Adapter->tx_desc_num_flag == 0)
3537 Adapter->tx_desc_num =
3538 DEFAULT_NUM_TX_DESCRIPTOR;
3539
3540 if (Adapter->rx_desc_num_flag == 0)
3541 Adapter->rx_desc_num =
3542 DEFAULT_NUM_RX_DESCRIPTOR;
3543
3544 if (Adapter->tx_buf_num_flag == 0)
3545 Adapter->tx_freelist_num =
3546 DEFAULT_NUM_TX_FREELIST;
3547
3548 if (Adapter->rx_buf_num_flag == 0)
3549 Adapter->rx_freelist_limit =
3550 DEFAULT_NUM_RX_FREELIST;
3551 }
3552 }
3553 break;
3554 case MAC_PROP_PRIVATE:
3555 err = e1000g_set_priv_prop(Adapter, pr_name,
3556 pr_valsize, pr_val);
3557 break;
3558 default:
3559 err = ENOTSUP;
3560 break;
3561 }
3562 rw_exit(&Adapter->chip_lock);
3563 return (err);
3564 }
3565
3566 static int
3567 e1000g_m_getprop(void *arg, const char *pr_name, mac_prop_id_t pr_num,
3568 uint_t pr_valsize, void *pr_val)
3569 {
3570 struct e1000g *Adapter = arg;
3571 struct e1000_fc_info *fc = &Adapter->shared.fc;
3572 int err = 0;
3573 link_flowctrl_t flowctrl;
3574 uint64_t tmp = 0;
3575
3576 switch (pr_num) {
3577 case MAC_PROP_DUPLEX:
3578 ASSERT(pr_valsize >= sizeof (link_duplex_t));
3579 bcopy(&Adapter->link_duplex, pr_val,
3580 sizeof (link_duplex_t));
3581 break;
3582 case MAC_PROP_SPEED:
3583 ASSERT(pr_valsize >= sizeof (uint64_t));
3584 tmp = Adapter->link_speed * 1000000ull;
3585 bcopy(&tmp, pr_val, sizeof (tmp));
3586 break;
3587 case MAC_PROP_AUTONEG:
3588 *(uint8_t *)pr_val = Adapter->param_adv_autoneg;
3589 break;
3590 case MAC_PROP_FLOWCTRL:
3591 ASSERT(pr_valsize >= sizeof (link_flowctrl_t));
3592 switch (fc->current_mode) {
3593 case e1000_fc_none:
3594 flowctrl = LINK_FLOWCTRL_NONE;
3595 break;
3596 case e1000_fc_rx_pause:
3597 flowctrl = LINK_FLOWCTRL_RX;
3598 break;
3599 case e1000_fc_tx_pause:
3600 flowctrl = LINK_FLOWCTRL_TX;
3601 break;
3602 case e1000_fc_full:
3603 flowctrl = LINK_FLOWCTRL_BI;
3604 break;
3605 }
3606 bcopy(&flowctrl, pr_val, sizeof (flowctrl));
3607 break;
3608 case MAC_PROP_ADV_1000FDX_CAP:
3609 *(uint8_t *)pr_val = Adapter->param_adv_1000fdx;
3610 break;
3611 case MAC_PROP_EN_1000FDX_CAP:
3612 *(uint8_t *)pr_val = Adapter->param_en_1000fdx;
3613 break;
3614 case MAC_PROP_ADV_1000HDX_CAP:
3615 *(uint8_t *)pr_val = Adapter->param_adv_1000hdx;
3616 break;
3617 case MAC_PROP_EN_1000HDX_CAP:
3618 *(uint8_t *)pr_val = Adapter->param_en_1000hdx;
3619 break;
3620 case MAC_PROP_ADV_100FDX_CAP:
3621 *(uint8_t *)pr_val = Adapter->param_adv_100fdx;
3622 break;
3623 case MAC_PROP_EN_100FDX_CAP:
3624 *(uint8_t *)pr_val = Adapter->param_en_100fdx;
3625 break;
3626 case MAC_PROP_ADV_100HDX_CAP:
3627 *(uint8_t *)pr_val = Adapter->param_adv_100hdx;
3628 break;
3629 case MAC_PROP_EN_100HDX_CAP:
3630 *(uint8_t *)pr_val = Adapter->param_en_100hdx;
3631 break;
3632 case MAC_PROP_ADV_10FDX_CAP:
3633 *(uint8_t *)pr_val = Adapter->param_adv_10fdx;
3634 break;
3635 case MAC_PROP_EN_10FDX_CAP:
3636 *(uint8_t *)pr_val = Adapter->param_en_10fdx;
3637 break;
3638 case MAC_PROP_ADV_10HDX_CAP:
3639 *(uint8_t *)pr_val = Adapter->param_adv_10hdx;
3640 break;
3641 case MAC_PROP_EN_10HDX_CAP:
3642 *(uint8_t *)pr_val = Adapter->param_en_10hdx;
3643 break;
3644 case MAC_PROP_ADV_100T4_CAP:
3645 case MAC_PROP_EN_100T4_CAP:
3646 *(uint8_t *)pr_val = Adapter->param_adv_100t4;
3647 break;
3648 case MAC_PROP_PRIVATE:
3649 err = e1000g_get_priv_prop(Adapter, pr_name,
3650 pr_valsize, pr_val);
3651 break;
3652 default:
3653 err = ENOTSUP;
3654 break;
3655 }
3656
3657 return (err);
3658 }
3659
3660 static void
3661 e1000g_m_propinfo(void *arg, const char *pr_name, mac_prop_id_t pr_num,
3662 mac_prop_info_handle_t prh)
3663 {
3664 struct e1000g *Adapter = arg;
3665 struct e1000_hw *hw = &Adapter->shared;
3666
3667 switch (pr_num) {
3668 case MAC_PROP_DUPLEX:
3669 case MAC_PROP_SPEED:
3670 case MAC_PROP_ADV_1000FDX_CAP:
3671 case MAC_PROP_ADV_1000HDX_CAP:
3672 case MAC_PROP_ADV_100FDX_CAP:
3673 case MAC_PROP_ADV_100HDX_CAP:
3674 case MAC_PROP_ADV_10FDX_CAP:
3675 case MAC_PROP_ADV_10HDX_CAP:
3676 case MAC_PROP_ADV_100T4_CAP:
3677 case MAC_PROP_EN_100T4_CAP:
3678 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
3679 break;
3680
3681 case MAC_PROP_EN_1000FDX_CAP:
3682 if (hw->phy.media_type != e1000_media_type_copper) {
3683 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
3684 } else {
3685 mac_prop_info_set_default_uint8(prh,
3686 ((Adapter->phy_ext_status &
3687 IEEE_ESR_1000T_FD_CAPS) ||
3688 (Adapter->phy_ext_status &
3689 IEEE_ESR_1000X_FD_CAPS)) ? 1 : 0);
3690 }
3691 break;
3692
3693 case MAC_PROP_EN_100FDX_CAP:
3694 if (hw->phy.media_type != e1000_media_type_copper) {
3695 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
3696 } else {
3697 mac_prop_info_set_default_uint8(prh,
3698 ((Adapter->phy_status & MII_SR_100X_FD_CAPS) ||
3699 (Adapter->phy_status & MII_SR_100T2_FD_CAPS))
3700 ? 1 : 0);
3701 }
3702 break;
3703
3704 case MAC_PROP_EN_100HDX_CAP:
3705 if (hw->phy.media_type != e1000_media_type_copper) {
3706 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
3707 } else {
3708 mac_prop_info_set_default_uint8(prh,
3709 ((Adapter->phy_status & MII_SR_100X_HD_CAPS) ||
3710 (Adapter->phy_status & MII_SR_100T2_HD_CAPS))
3711 ? 1 : 0);
3712 }
3713 break;
3714
3715 case MAC_PROP_EN_10FDX_CAP:
3716 if (hw->phy.media_type != e1000_media_type_copper) {
3717 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
3718 } else {
3719 mac_prop_info_set_default_uint8(prh,
3720 (Adapter->phy_status & MII_SR_10T_FD_CAPS) ? 1 : 0);
3721 }
3722 break;
3723
3724 case MAC_PROP_EN_10HDX_CAP:
3725 if (hw->phy.media_type != e1000_media_type_copper) {
3726 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
3727 } else {
3728 mac_prop_info_set_default_uint8(prh,
3729 (Adapter->phy_status & MII_SR_10T_HD_CAPS) ? 1 : 0);
3730 }
3731 break;
3732
3733 case MAC_PROP_EN_1000HDX_CAP:
3734 if (hw->phy.media_type != e1000_media_type_copper)
3735 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
3736 break;
3737
3738 case MAC_PROP_AUTONEG:
3739 if (hw->phy.media_type != e1000_media_type_copper) {
3740 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
3741 } else {
3742 mac_prop_info_set_default_uint8(prh,
3743 (Adapter->phy_status & MII_SR_AUTONEG_CAPS)
3744 ? 1 : 0);
3745 }
3746 break;
3747
3748 case MAC_PROP_FLOWCTRL:
3749 mac_prop_info_set_default_link_flowctrl(prh, LINK_FLOWCTRL_BI);
3750 break;
3751
3752 case MAC_PROP_MTU: {
3753 struct e1000_mac_info *mac = &Adapter->shared.mac;
3754 struct e1000_phy_info *phy = &Adapter->shared.phy;
3755 uint32_t max;
3756
3757 /* some MAC types do not support jumbo frames */
3758 if ((mac->type == e1000_ich8lan) ||
3759 ((mac->type == e1000_ich9lan) && (phy->type ==
3760 e1000_phy_ife))) {
3761 max = DEFAULT_MTU;
3762 } else {
3763 max = Adapter->max_mtu;
3764 }
3765
3766 mac_prop_info_set_range_uint32(prh, DEFAULT_MTU, max);
3767 break;
3768 }
3769 case MAC_PROP_PRIVATE: {
3770 char valstr[64];
3771 int value;
3772
3773 if (strcmp(pr_name, "_adv_pause_cap") == 0 ||
3774 strcmp(pr_name, "_adv_asym_pause_cap") == 0) {
3775 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
3776 return;
3777 } else if (strcmp(pr_name, "_tx_bcopy_threshold") == 0) {
3778 value = DEFAULT_TX_BCOPY_THRESHOLD;
3779 } else if (strcmp(pr_name, "_tx_interrupt_enable") == 0) {
3780 value = DEFAULT_TX_INTR_ENABLE;
3781 } else if (strcmp(pr_name, "_tx_intr_delay") == 0) {
3782 value = DEFAULT_TX_INTR_DELAY;
3783 } else if (strcmp(pr_name, "_tx_intr_abs_delay") == 0) {
3784 value = DEFAULT_TX_INTR_ABS_DELAY;
3785 } else if (strcmp(pr_name, "_rx_bcopy_threshold") == 0) {
3786 value = DEFAULT_RX_BCOPY_THRESHOLD;
3787 } else if (strcmp(pr_name, "_max_num_rcv_packets") == 0) {
3788 value = DEFAULT_RX_LIMIT_ON_INTR;
3789 } else if (strcmp(pr_name, "_rx_intr_delay") == 0) {
3790 value = DEFAULT_RX_INTR_DELAY;
3791 } else if (strcmp(pr_name, "_rx_intr_abs_delay") == 0) {
3792 value = DEFAULT_RX_INTR_ABS_DELAY;
3793 } else if (strcmp(pr_name, "_intr_throttling_rate") == 0) {
3794 value = DEFAULT_INTR_THROTTLING;
3795 } else if (strcmp(pr_name, "_intr_adaptive") == 0) {
3796 value = 1;
3797 } else {
3798 return;
3799 }
3800
3801 (void) snprintf(valstr, sizeof (valstr), "%d", value);
3802 mac_prop_info_set_default_str(prh, valstr);
3803 break;
3804 }
3805 }
3806 }
3807
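/*
 * e1000g_set_priv_prop - set a driver-private property
 *
 * A brief descriptive note on the routine below: the value arrives as a
 * string, is parsed with ddi_strtol(9F) and range-checked against the
 * corresponding MIN_ and MAX_ limits; for the interrupt delay and
 * throttling properties the result is also written to the matching device
 * register, with the register access handle checked afterwards via FMA.
 */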
3808 /* ARGSUSED2 */
3809 static int
3810 e1000g_set_priv_prop(struct e1000g *Adapter, const char *pr_name,
3811 uint_t pr_valsize, const void *pr_val)
3812 {
3813 int err = 0;
3814 long result;
3815 struct e1000_hw *hw = &Adapter->shared;
3816
3817 if (strcmp(pr_name, "_tx_bcopy_threshold") == 0) {
3818 if (pr_val == NULL) {
3819 err = EINVAL;
3820 return (err);
3821 }
3822 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
3823 if (result < MIN_TX_BCOPY_THRESHOLD ||
3824 result > MAX_TX_BCOPY_THRESHOLD)
3825 err = EINVAL;
3826 else {
3827 Adapter->tx_bcopy_thresh = (uint32_t)result;
3828 }
3829 return (err);
3830 }
3831 if (strcmp(pr_name, "_tx_interrupt_enable") == 0) {
3832 if (pr_val == NULL) {
3833 err = EINVAL;
3834 return (err);
3835 }
3836 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
3837 if (result < 0 || result > 1)
3838 err = EINVAL;
3839 else {
3840 Adapter->tx_intr_enable = (result == 1) ?
3841 B_TRUE: B_FALSE;
3842 if (Adapter->tx_intr_enable)
3843 e1000g_mask_tx_interrupt(Adapter);
3844 else
3845 e1000g_clear_tx_interrupt(Adapter);
3846 if (e1000g_check_acc_handle(
3847 Adapter->osdep.reg_handle) != DDI_FM_OK) {
3848 ddi_fm_service_impact(Adapter->dip,
3849 DDI_SERVICE_DEGRADED);
3850 err = EIO;
3851 }
3852 }
3853 return (err);
3854 }
3855 if (strcmp(pr_name, "_tx_intr_delay") == 0) {
3856 if (pr_val == NULL) {
3857 err = EINVAL;
3858 return (err);
3859 }
3860 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
3861 if (result < MIN_TX_INTR_DELAY ||
3862 result > MAX_TX_INTR_DELAY)
3863 err = EINVAL;
3864 else {
3865 Adapter->tx_intr_delay = (uint32_t)result;
3866 E1000_WRITE_REG(hw, E1000_TIDV, Adapter->tx_intr_delay);
3867 if (e1000g_check_acc_handle(
3868 Adapter->osdep.reg_handle) != DDI_FM_OK) {
3869 ddi_fm_service_impact(Adapter->dip,
3870 DDI_SERVICE_DEGRADED);
3871 err = EIO;
3872 }
3873 }
3874 return (err);
3875 }
3876 if (strcmp(pr_name, "_tx_intr_abs_delay") == 0) {
3877 if (pr_val == NULL) {
3878 err = EINVAL;
3879 return (err);
3880 }
3881 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
3882 if (result < MIN_TX_INTR_ABS_DELAY ||
3883 result > MAX_TX_INTR_ABS_DELAY)
3884 err = EINVAL;
3885 else {
3886 Adapter->tx_intr_abs_delay = (uint32_t)result;
3887 E1000_WRITE_REG(hw, E1000_TADV,
3888 Adapter->tx_intr_abs_delay);
3889 if (e1000g_check_acc_handle(
3890 Adapter->osdep.reg_handle) != DDI_FM_OK) {
3891 ddi_fm_service_impact(Adapter->dip,
3892 DDI_SERVICE_DEGRADED);
3893 err = EIO;
3894 }
3895 }
3896 return (err);
3897 }
3898 if (strcmp(pr_name, "_rx_bcopy_threshold") == 0) {
3899 if (pr_val == NULL) {
3900 err = EINVAL;
3901 return (err);
3902 }
3903 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
3904 if (result < MIN_RX_BCOPY_THRESHOLD ||
3905 result > MAX_RX_BCOPY_THRESHOLD)
3906 err = EINVAL;
3907 else
3908 Adapter->rx_bcopy_thresh = (uint32_t)result;
3909 return (err);
3910 }
3911 if (strcmp(pr_name, "_max_num_rcv_packets") == 0) {
3912 if (pr_val == NULL) {
3913 err = EINVAL;
3914 return (err);
3915 }
3916 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
3917 if (result < MIN_RX_LIMIT_ON_INTR ||
3918 result > MAX_RX_LIMIT_ON_INTR)
3919 err = EINVAL;
3920 else
3921 Adapter->rx_limit_onintr = (uint32_t)result;
3922 return (err);
3923 }
3924 if (strcmp(pr_name, "_rx_intr_delay") == 0) {
3925 if (pr_val == NULL) {
3926 err = EINVAL;
3927 return (err);
3928 }
3929 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
3930 if (result < MIN_RX_INTR_DELAY ||
3931 result > MAX_RX_INTR_DELAY)
3932 err = EINVAL;
3933 else {
3934 Adapter->rx_intr_delay = (uint32_t)result;
3935 E1000_WRITE_REG(hw, E1000_RDTR, Adapter->rx_intr_delay);
3936 if (e1000g_check_acc_handle(
3937 Adapter->osdep.reg_handle) != DDI_FM_OK) {
3938 ddi_fm_service_impact(Adapter->dip,
3939 DDI_SERVICE_DEGRADED);
3940 err = EIO;
3941 }
3942 }
3943 return (err);
3944 }
3945 if (strcmp(pr_name, "_rx_intr_abs_delay") == 0) {
3946 if (pr_val == NULL) {
3947 err = EINVAL;
3948 return (err);
3949 }
3950 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
3951 if (result < MIN_RX_INTR_ABS_DELAY ||
3952 result > MAX_RX_INTR_ABS_DELAY)
3953 err = EINVAL;
3954 else {
3955 Adapter->rx_intr_abs_delay = (uint32_t)result;
3956 E1000_WRITE_REG(hw, E1000_RADV,
3957 Adapter->rx_intr_abs_delay);
3958 if (e1000g_check_acc_handle(
3959 Adapter->osdep.reg_handle) != DDI_FM_OK) {
3960 ddi_fm_service_impact(Adapter->dip,
3961 DDI_SERVICE_DEGRADED);
3962 err = EIO;
3963 }
3964 }
3965 return (err);
3966 }
3967 if (strcmp(pr_name, "_intr_throttling_rate") == 0) {
3968 if (pr_val == NULL) {
3969 err = EINVAL;
3970 return (err);
3971 }
3972 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
3973 if (result < MIN_INTR_THROTTLING ||
3974 result > MAX_INTR_THROTTLING)
3975 err = EINVAL;
3976 else {
3977 if (hw->mac.type >= e1000_82540) {
3978 Adapter->intr_throttling_rate =
3979 (uint32_t)result;
3980 E1000_WRITE_REG(hw, E1000_ITR,
3981 Adapter->intr_throttling_rate);
3982 if (e1000g_check_acc_handle(
3983 Adapter->osdep.reg_handle) != DDI_FM_OK) {
3984 ddi_fm_service_impact(Adapter->dip,
3985 DDI_SERVICE_DEGRADED);
3986 err = EIO;
3987 }
3988 } else
3989 err = EINVAL;
3990 }
3991 return (err);
3992 }
3993 if (strcmp(pr_name, "_intr_adaptive") == 0) {
3994 if (pr_val == NULL) {
3995 err = EINVAL;
3996 return (err);
3997 }
3998 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
3999 if (result < 0 || result > 1)
4000 err = EINVAL;
4001 else {
4002 if (hw->mac.type >= e1000_82540) {
4003 Adapter->intr_adaptive = (result == 1) ?
4004 B_TRUE : B_FALSE;
4005 } else {
4006 err = EINVAL;
4007 }
4008 }
4009 return (err);
4010 }
4011 return (ENOTSUP);
4012 }
4013
4014 static int
4015 e1000g_get_priv_prop(struct e1000g *Adapter, const char *pr_name,
4016 uint_t pr_valsize, void *pr_val)
4017 {
4018 int err = ENOTSUP;
4019 int value;
4020
4021 if (strcmp(pr_name, "_adv_pause_cap") == 0) {
4022 value = Adapter->param_adv_pause;
4023 err = 0;
4024 goto done;
4025 }
4026 if (strcmp(pr_name, "_adv_asym_pause_cap") == 0) {
4027 value = Adapter->param_adv_asym_pause;
4028 err = 0;
4029 goto done;
4030 }
4031 if (strcmp(pr_name, "_tx_bcopy_threshold") == 0) {
4032 value = Adapter->tx_bcopy_thresh;
4033 err = 0;
4034 goto done;
4035 }
4036 if (strcmp(pr_name, "_tx_interrupt_enable") == 0) {
4037 value = Adapter->tx_intr_enable;
4038 err = 0;
4039 goto done;
4040 }
4041 if (strcmp(pr_name, "_tx_intr_delay") == 0) {
4042 value = Adapter->tx_intr_delay;
4043 err = 0;
4044 goto done;
4045 }
4046 if (strcmp(pr_name, "_tx_intr_abs_delay") == 0) {
4047 value = Adapter->tx_intr_abs_delay;
4048 err = 0;
4049 goto done;
4050 }
4051 if (strcmp(pr_name, "_rx_bcopy_threshold") == 0) {
4052 value = Adapter->rx_bcopy_thresh;
4053 err = 0;
4054 goto done;
4055 }
4056 if (strcmp(pr_name, "_max_num_rcv_packets") == 0) {
4057 value = Adapter->rx_limit_onintr;
4058 err = 0;
4059 goto done;
4060 }
4061 if (strcmp(pr_name, "_rx_intr_delay") == 0) {
4062 value = Adapter->rx_intr_delay;
4063 err = 0;
4064 goto done;
4065 }
4066 if (strcmp(pr_name, "_rx_intr_abs_delay") == 0) {
4067 value = Adapter->rx_intr_abs_delay;
4068 err = 0;
4069 goto done;
4070 }
4071 if (strcmp(pr_name, "_intr_throttling_rate") == 0) {
4072 value = Adapter->intr_throttling_rate;
4073 err = 0;
4074 goto done;
4075 }
4076 if (strcmp(pr_name, "_intr_adaptive") == 0) {
4077 value = Adapter->intr_adaptive;
4078 err = 0;
4079 goto done;
4080 }
4081 done:
4082 if (err == 0) {
4083 (void) snprintf(pr_val, pr_valsize, "%d", value);
4084 }
4085 return (err);
4086 }
4087
4088 /*
4089 * e1000g_get_conf - get configurations set in e1000g.conf
4090 * This routine gets user-configured values out of the configuration
4091 * file e1000g.conf.
4092 *
4093 * For each configurable value, there is a minimum, a maximum, and a
4094 * default.
4095  * If the user does not configure a value, use the default.
4096  * If the user configures below the minimum, use the minimum.
4097  * If the user configures above the maximum, use the maximum.
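 *
 * A hypothetical per-instance entry in e1000g.conf could look like
 * "MaxNumReceivePackets=256,256;", where the n-th value applies to
 * driver instance n (see e1000g_get_prop() below).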
4098 */
4099 static void
4100 e1000g_get_conf(struct e1000g *Adapter)
4101 {
4102 struct e1000_hw *hw = &Adapter->shared;
4103 boolean_t tbi_compatibility = B_FALSE;
4104 boolean_t is_jumbo = B_FALSE;
4105 int propval;
4106 /*
4107 * decrease the number of descriptors and free packets
4108 * for jumbo frames to reduce tx/rx resource consumption
4109 */
4110 if (Adapter->max_frame_size >= FRAME_SIZE_UPTO_4K) {
4111 is_jumbo = B_TRUE;
4112 }
4113
4114 /*
4115 * get each configurable property from e1000g.conf
4116 */
4117
4118 /*
4119 * NumTxDescriptors
4120 */
4121 Adapter->tx_desc_num_flag =
4122 e1000g_get_prop(Adapter, "NumTxDescriptors",
4123 MIN_NUM_TX_DESCRIPTOR, MAX_NUM_TX_DESCRIPTOR,
4124 is_jumbo ? DEFAULT_JUMBO_NUM_TX_DESC
4125 : DEFAULT_NUM_TX_DESCRIPTOR, &propval);
4126 Adapter->tx_desc_num = propval;
4127
4128 /*
4129 * NumRxDescriptors
4130 */
4131 Adapter->rx_desc_num_flag =
4132 e1000g_get_prop(Adapter, "NumRxDescriptors",
4133 MIN_NUM_RX_DESCRIPTOR, MAX_NUM_RX_DESCRIPTOR,
4134 is_jumbo ? DEFAULT_JUMBO_NUM_RX_DESC
4135 : DEFAULT_NUM_RX_DESCRIPTOR, &propval);
4136 Adapter->rx_desc_num = propval;
4137
4138 /*
4139 * NumRxFreeList
4140 */
4141 Adapter->rx_buf_num_flag =
4142 e1000g_get_prop(Adapter, "NumRxFreeList",
4143 MIN_NUM_RX_FREELIST, MAX_NUM_RX_FREELIST,
4144 is_jumbo ? DEFAULT_JUMBO_NUM_RX_BUF
4145 : DEFAULT_NUM_RX_FREELIST, &propval);
4146 Adapter->rx_freelist_limit = propval;
4147
4148 /*
4149 * NumTxPacketList
4150 */
4151 Adapter->tx_buf_num_flag =
4152 e1000g_get_prop(Adapter, "NumTxPacketList",
4153 MIN_NUM_TX_FREELIST, MAX_NUM_TX_FREELIST,
4154 is_jumbo ? DEFAULT_JUMBO_NUM_TX_BUF
4155 : DEFAULT_NUM_TX_FREELIST, &propval);
4156 Adapter->tx_freelist_num = propval;
4157
4158 /*
4159 * FlowControl
4160 */
4161 hw->fc.send_xon = B_TRUE;
4162 (void) e1000g_get_prop(Adapter, "FlowControl",
4163 e1000_fc_none, 4, DEFAULT_FLOW_CONTROL, &propval);
4164 hw->fc.requested_mode = propval;
4165 /* 4 is the setting that says "let the eeprom decide" */
4166 if (hw->fc.requested_mode == 4)
4167 hw->fc.requested_mode = e1000_fc_default;
4168
4169 /*
4170 * Max Num Receive Packets on Interrupt
4171 */
4172 (void) e1000g_get_prop(Adapter, "MaxNumReceivePackets",
4173 MIN_RX_LIMIT_ON_INTR, MAX_RX_LIMIT_ON_INTR,
4174 DEFAULT_RX_LIMIT_ON_INTR, &propval);
4175 Adapter->rx_limit_onintr = propval;
4176
4177 /*
4178 * PHY master slave setting
4179 */
4180 (void) e1000g_get_prop(Adapter, "SetMasterSlave",
4181 e1000_ms_hw_default, e1000_ms_auto,
4182 e1000_ms_hw_default, &propval);
4183 hw->phy.ms_type = propval;
4184
4185 /*
4186 * Parameter which controls TBI mode workaround, which is only
4187 * needed on certain switches such as Cisco 6500/Foundry
4188 */
4189 (void) e1000g_get_prop(Adapter, "TbiCompatibilityEnable",
4190 0, 1, DEFAULT_TBI_COMPAT_ENABLE, &propval);
4191 tbi_compatibility = (propval == 1);
4192 e1000_set_tbi_compatibility_82543(hw, tbi_compatibility);
4193
4194 /*
4195 * MSI Enable
4196 */
4197 (void) e1000g_get_prop(Adapter, "MSIEnable",
4198 0, 1, DEFAULT_MSI_ENABLE, &propval);
4199 Adapter->msi_enable = (propval == 1);
4200
4201 /*
4202 * Interrupt Throttling Rate
4203 */
4204 (void) e1000g_get_prop(Adapter, "intr_throttling_rate",
4205 MIN_INTR_THROTTLING, MAX_INTR_THROTTLING,
4206 DEFAULT_INTR_THROTTLING, &propval);
4207 Adapter->intr_throttling_rate = propval;
4208
4209 /*
4210 * Adaptive Interrupt Blanking Enable/Disable
4211 * It is enabled by default
4212 */
4213 (void) e1000g_get_prop(Adapter, "intr_adaptive", 0, 1, 1,
4214 &propval);
4215 Adapter->intr_adaptive = (propval == 1);
4216
4217 /*
4218 * Hardware checksum enable/disable parameter
4219 */
4220 (void) e1000g_get_prop(Adapter, "tx_hcksum_enable",
4221 0, 1, DEFAULT_TX_HCKSUM_ENABLE, &propval);
4222 Adapter->tx_hcksum_enable = (propval == 1);
4223 /*
4224 * Checksum on/off selection via global parameters.
4225 *
4226 * If the chip is flagged as not capable of (correctly)
4227 * handling checksumming, we don't enable it on either
4228 * Rx or Tx side. Otherwise, we take this chip's settings
4229 * from the patchable global defaults.
4230 *
4231 * We advertise our capabilities only if TX offload is
4232 * enabled. On receive, the stack will accept checksummed
4233 * packets anyway, even if we haven't said we can deliver
4234 * them.
4235 */
4236 switch (hw->mac.type) {
4237 case e1000_82540:
4238 case e1000_82544:
4239 case e1000_82545:
4240 case e1000_82545_rev_3:
4241 case e1000_82546:
4242 case e1000_82546_rev_3:
4243 case e1000_82571:
4244 case e1000_82572:
4245 case e1000_82573:
4246 case e1000_80003es2lan:
4247 break;
4248 /*
4249 * For the following Intel PRO/1000 chipsets, we have not
4250 * tested the hardware checksum offload capability, so we
4251 * disable the capability for them.
4252 * e1000_82542,
4253 * e1000_82543,
4254 * e1000_82541,
4255 * e1000_82541_rev_2,
4256 * e1000_82547,
4257 * e1000_82547_rev_2,
4258 */
4259 default:
4260 Adapter->tx_hcksum_enable = B_FALSE;
4261 }
4262
4263 /*
4264 * Large Send Offloading(LSO) Enable/Disable
4265 * If the tx hardware checksum is not enabled, LSO should be
4266 * disabled.
4267 */
4268 (void) e1000g_get_prop(Adapter, "lso_enable",
4269 0, 1, DEFAULT_LSO_ENABLE, &propval);
4270 Adapter->lso_enable = (propval == 1);
4271
4272 switch (hw->mac.type) {
4273 case e1000_82546:
4274 case e1000_82546_rev_3:
4275 if (Adapter->lso_enable)
4276 Adapter->lso_premature_issue = B_TRUE;
4277 /* FALLTHRU */
4278 case e1000_82571:
4279 case e1000_82572:
4280 case e1000_82573:
4281 case e1000_80003es2lan:
4282 break;
4283 default:
4284 Adapter->lso_enable = B_FALSE;
4285 }
4286
4287 if (!Adapter->tx_hcksum_enable) {
4288 Adapter->lso_premature_issue = B_FALSE;
4289 Adapter->lso_enable = B_FALSE;
4290 }
4291
4292 /*
4293 * If mem_workaround_82546 is enabled, the rx buffer allocated by
4294 * e1000_82545, e1000_82546 and e1000_82546_rev_3
4295 * will not cross 64k boundary.
4296 */
4297 (void) e1000g_get_prop(Adapter, "mem_workaround_82546",
4298 0, 1, DEFAULT_MEM_WORKAROUND_82546, &propval);
4299 Adapter->mem_workaround_82546 = (propval == 1);
4300
4301 /*
4302 * Max number of multicast addresses
4303 */
4304 (void) e1000g_get_prop(Adapter, "mcast_max_num",
4305 MIN_MCAST_NUM, MAX_MCAST_NUM, hw->mac.mta_reg_count * 32,
4306 &propval);
4307 Adapter->mcast_max_num = propval;
4308 }
4309
4310 /*
4311 * e1000g_get_prop - routine to read properties
4312 *
4313  * Get a user-configured property value out of the configuration
4314 * file e1000g.conf.
4315 *
4316 * Caller provides name of the property, a default value, a minimum
4317 * value, a maximum value and a pointer to the returned property
4318 * value.
4319 *
4320 * Return B_TRUE if the configured value of the property is not a default
4321 * value, otherwise return B_FALSE.
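 *
 * For example, a value configured above maxval is silently clamped to
 * maxval (and a debug message is logged), so an out-of-range setting in
 * e1000g.conf can never push the driver outside its supported limits.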
4322 */
4323 static boolean_t
4324 e1000g_get_prop(struct e1000g *Adapter,	/* point to per-adapter structure */
4325 char *propname, /* name of the property */
4326 int minval, /* minimum acceptable value */
4327     int maxval,			/* maximum acceptable value */
4328 int defval, /* default value */
4329     int *propvalue)		/* property value returned to caller */
4330 {
4331 int propval; /* value returned for requested property */
4332 	int *props;		/* pointer to array of properties returned */
4333 	uint_t nprops;		/* number of property values returned */
4334 boolean_t ret = B_TRUE;
4335
4336 /*
4337 * get the array of properties from the config file
4338 */
4339 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, Adapter->dip,
4340 DDI_PROP_DONTPASS, propname, &props, &nprops) == DDI_PROP_SUCCESS) {
4341 /* got some properties, test if we got enough */
4342 if (Adapter->instance < nprops) {
4343 propval = props[Adapter->instance];
4344 } else {
4345 /* not enough properties configured */
4346 propval = defval;
4347 E1000G_DEBUGLOG_2(Adapter, E1000G_INFO_LEVEL,
4348 "Not Enough %s values found in e1000g.conf"
4349 " - set to %d\n",
4350 propname, propval);
4351 ret = B_FALSE;
4352 }
4353
4354 /* free memory allocated for properties */
4355 ddi_prop_free(props);
4356
4357 } else {
4358 propval = defval;
4359 ret = B_FALSE;
4360 }
4361
4362 /*
4363 * enforce limits
4364 */
4365 if (propval > maxval) {
4366 propval = maxval;
4367 E1000G_DEBUGLOG_2(Adapter, E1000G_INFO_LEVEL,
4368 "Too High %s value in e1000g.conf - set to %d\n",
4369 propname, propval);
4370 }
4371
4372 if (propval < minval) {
4373 propval = minval;
4374 E1000G_DEBUGLOG_2(Adapter, E1000G_INFO_LEVEL,
4375 "Too Low %s value in e1000g.conf - set to %d\n",
4376 propname, propval);
4377 }
4378
4379 *propvalue = propval;
4380 return (ret);
4381 }
4382
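/*
 * e1000g_link_check - reconcile the cached link state with the hardware
 *
 * On a down-to-up transition the speed/duplex and tx stall threshold are
 * refreshed and, for 82571/82572, the TARC(0) workaround bit is updated.
 * On an up-to-down transition the IGP PHY's automatic master/slave
 * resolution is disabled (SmartSpeed workaround).  Returns B_TRUE when
 * the link state changed since the last call.
 */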
4383 static boolean_t
4384 e1000g_link_check(struct e1000g *Adapter)
4385 {
4386 uint16_t speed, duplex, phydata;
4387 boolean_t link_changed = B_FALSE;
4388 struct e1000_hw *hw;
4389 uint32_t reg_tarc;
4390
4391 hw = &Adapter->shared;
4392
4393 if (e1000g_link_up(Adapter)) {
4394 /*
4395 		 * The link is up; check whether it was marked as down earlier
4396 */
4397 if (Adapter->link_state != LINK_STATE_UP) {
4398 (void) e1000_get_speed_and_duplex(hw, &speed, &duplex);
4399 Adapter->link_speed = speed;
4400 Adapter->link_duplex = duplex;
4401 Adapter->link_state = LINK_STATE_UP;
4402 link_changed = B_TRUE;
4403
4404 if (Adapter->link_speed == SPEED_1000)
4405 Adapter->stall_threshold = TX_STALL_TIME_2S;
4406 else
4407 Adapter->stall_threshold = TX_STALL_TIME_8S;
4408
4409 Adapter->tx_link_down_timeout = 0;
4410
4411 if ((hw->mac.type == e1000_82571) ||
4412 (hw->mac.type == e1000_82572)) {
4413 reg_tarc = E1000_READ_REG(hw, E1000_TARC(0));
4414 if (speed == SPEED_1000)
4415 reg_tarc |= (1 << 21);
4416 else
4417 reg_tarc &= ~(1 << 21);
4418 E1000_WRITE_REG(hw, E1000_TARC(0), reg_tarc);
4419 }
4420 }
4421 Adapter->smartspeed = 0;
4422 } else {
4423 if (Adapter->link_state != LINK_STATE_DOWN) {
4424 Adapter->link_speed = 0;
4425 Adapter->link_duplex = 0;
4426 Adapter->link_state = LINK_STATE_DOWN;
4427 link_changed = B_TRUE;
4428
4429 /*
4430 			 * SmartSpeed workaround for Tabor/TanaX: when the
4431 			 * driver loses link, disable auto master/slave
4432 			 * resolution.
4433 */
4434 if (hw->phy.type == e1000_phy_igp) {
4435 (void) e1000_read_phy_reg(hw,
4436 PHY_1000T_CTRL, &phydata);
4437 phydata |= CR_1000T_MS_ENABLE;
4438 (void) e1000_write_phy_reg(hw,
4439 PHY_1000T_CTRL, phydata);
4440 }
4441 } else {
4442 e1000g_smartspeed(Adapter);
4443 }
4444
4445 if (Adapter->e1000g_state & E1000G_STARTED) {
4446 if (Adapter->tx_link_down_timeout <
4447 MAX_TX_LINK_DOWN_TIMEOUT) {
4448 Adapter->tx_link_down_timeout++;
4449 } else if (Adapter->tx_link_down_timeout ==
4450 MAX_TX_LINK_DOWN_TIMEOUT) {
4451 e1000g_tx_clean(Adapter);
4452 Adapter->tx_link_down_timeout++;
4453 }
4454 }
4455 }
4456
4457 if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK)
4458 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED);
4459
4460 return (link_changed);
4461 }
4462
4463 /*
4464 * e1000g_reset_link - Using the link properties to setup the link
4465 */
4466 int
4467 e1000g_reset_link(struct e1000g *Adapter)
4468 {
4469 struct e1000_mac_info *mac;
4470 struct e1000_phy_info *phy;
4471 struct e1000_hw *hw;
4472 boolean_t invalid;
4473
4474 mac = &Adapter->shared.mac;
4475 phy = &Adapter->shared.phy;
4476 hw = &Adapter->shared;
4477 invalid = B_FALSE;
4478
4479 if (hw->phy.media_type != e1000_media_type_copper)
4480 goto out;
4481
4482 if (Adapter->param_adv_autoneg == 1) {
4483 mac->autoneg = B_TRUE;
4484 phy->autoneg_advertised = 0;
4485
4486 /*
4487 * 1000hdx is not supported for autonegotiation
4488 */
4489 if (Adapter->param_adv_1000fdx == 1)
4490 phy->autoneg_advertised |= ADVERTISE_1000_FULL;
4491
4492 if (Adapter->param_adv_100fdx == 1)
4493 phy->autoneg_advertised |= ADVERTISE_100_FULL;
4494
4495 if (Adapter->param_adv_100hdx == 1)
4496 phy->autoneg_advertised |= ADVERTISE_100_HALF;
4497
4498 if (Adapter->param_adv_10fdx == 1)
4499 phy->autoneg_advertised |= ADVERTISE_10_FULL;
4500
4501 if (Adapter->param_adv_10hdx == 1)
4502 phy->autoneg_advertised |= ADVERTISE_10_HALF;
4503
4504 if (phy->autoneg_advertised == 0)
4505 invalid = B_TRUE;
4506 } else {
4507 mac->autoneg = B_FALSE;
4508
4509 /*
4510 * For Intel copper cards, 1000fdx and 1000hdx are not
4511 * supported for forced link
4512 */
4513 if (Adapter->param_adv_100fdx == 1)
4514 mac->forced_speed_duplex = ADVERTISE_100_FULL;
4515 else if (Adapter->param_adv_100hdx == 1)
4516 mac->forced_speed_duplex = ADVERTISE_100_HALF;
4517 else if (Adapter->param_adv_10fdx == 1)
4518 mac->forced_speed_duplex = ADVERTISE_10_FULL;
4519 else if (Adapter->param_adv_10hdx == 1)
4520 mac->forced_speed_duplex = ADVERTISE_10_HALF;
4521 else
4522 invalid = B_TRUE;
4523
4524 }
4525
4526 if (invalid) {
4527 e1000g_log(Adapter, CE_WARN,
4528 "Invalid link settings. Setup link to "
4529 "support autonegotiation with all link capabilities.");
4530 mac->autoneg = B_TRUE;
4531 phy->autoneg_advertised = AUTONEG_ADVERTISE_SPEED_DEFAULT;
4532 }
4533
4534 out:
4535 return (e1000_setup_link(&Adapter->shared));
4536 }
4537
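/*
 * e1000g_timer_tx_resched - watchdog-driven tx reschedule
 *
 * If a tx reschedule has been pending for more than a second and enough
 * transmit descriptors have been reclaimed, notify the MAC layer via
 * mac_tx_update() so that blocked transmit rings are restarted.
 */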
4538 static void
4539 e1000g_timer_tx_resched(struct e1000g *Adapter)
4540 {
4541 e1000g_tx_ring_t *tx_ring = Adapter->tx_ring;
4542
4543 rw_enter(&Adapter->chip_lock, RW_READER);
4544
4545 if (tx_ring->resched_needed &&
4546 ((ddi_get_lbolt() - tx_ring->resched_timestamp) >
4547 drv_usectohz(1000000)) &&
4548 (Adapter->e1000g_state & E1000G_STARTED) &&
4549 (tx_ring->tbd_avail >= DEFAULT_TX_NO_RESOURCE)) {
4550 tx_ring->resched_needed = B_FALSE;
4551 mac_tx_update(Adapter->mh);
4552 E1000G_STAT(tx_ring->stat_reschedule);
4553 E1000G_STAT(tx_ring->stat_timer_reschedule);
4554 }
4555
4556 rw_exit(&Adapter->chip_lock);
4557 }
4558
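/*
 * e1000g_local_timer - one-second watchdog routine
 *
 * Handles fatal-error and tx-stall recovery through adapter resets,
 * propagates link state changes to the MAC layer, and applies several
 * per-chipset workarounds (LAA rewrite on 82571, esb2 fifo reset,
 * 82541/82547 long-TTL) before re-arming itself via
 * restart_watchdog_timer().
 */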
4559 static void
4560 e1000g_local_timer(void *ws)
4561 {
4562 struct e1000g *Adapter = (struct e1000g *)ws;
4563 struct e1000_hw *hw;
4564 e1000g_ether_addr_t ether_addr;
4565 boolean_t link_changed;
4566
4567 hw = &Adapter->shared;
4568
4569 if (Adapter->e1000g_state & E1000G_ERROR) {
4570 rw_enter(&Adapter->chip_lock, RW_WRITER);
4571 Adapter->e1000g_state &= ~E1000G_ERROR;
4572 rw_exit(&Adapter->chip_lock);
4573
4574 Adapter->reset_count++;
4575 if (e1000g_global_reset(Adapter)) {
4576 ddi_fm_service_impact(Adapter->dip,
4577 DDI_SERVICE_RESTORED);
4578 e1000g_timer_tx_resched(Adapter);
4579 } else
4580 ddi_fm_service_impact(Adapter->dip,
4581 DDI_SERVICE_LOST);
4582 return;
4583 }
4584
4585 if (e1000g_stall_check(Adapter)) {
4586 E1000G_DEBUGLOG_0(Adapter, E1000G_INFO_LEVEL,
4587 "Tx stall detected. Activate automatic recovery.\n");
4588 e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_STALL);
4589 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_LOST);
4590 Adapter->reset_count++;
4591 if (e1000g_reset_adapter(Adapter)) {
4592 ddi_fm_service_impact(Adapter->dip,
4593 DDI_SERVICE_RESTORED);
4594 e1000g_timer_tx_resched(Adapter);
4595 }
4596 return;
4597 }
4598
4599 link_changed = B_FALSE;
4600 rw_enter(&Adapter->chip_lock, RW_READER);
4601 if (Adapter->link_complete)
4602 link_changed = e1000g_link_check(Adapter);
4603 rw_exit(&Adapter->chip_lock);
4604
4605 if (link_changed) {
4606 if (!Adapter->reset_flag &&
4607 (Adapter->e1000g_state & E1000G_STARTED) &&
4608 !(Adapter->e1000g_state & E1000G_SUSPENDED))
4609 mac_link_update(Adapter->mh, Adapter->link_state);
4610 if (Adapter->link_state == LINK_STATE_UP)
4611 Adapter->reset_flag = B_FALSE;
4612 }
4613 /*
4614 * Workaround for esb2. Data stuck in fifo on a link
4615 * down event. Reset the adapter to recover it.
4616 */
4617 if (Adapter->esb2_workaround) {
4618 Adapter->esb2_workaround = B_FALSE;
4619 (void) e1000g_reset_adapter(Adapter);
4620 return;
4621 }
4622
4623 /*
4624 * With 82571 controllers, any locally administered address will
4625 * be overwritten when there is a reset on the other port.
4626 * Detect this circumstance and correct it.
4627 */
4628 if ((hw->mac.type == e1000_82571) &&
4629 (e1000_get_laa_state_82571(hw) == B_TRUE)) {
4630 ether_addr.reg.low = E1000_READ_REG_ARRAY(hw, E1000_RA, 0);
4631 ether_addr.reg.high = E1000_READ_REG_ARRAY(hw, E1000_RA, 1);
4632
4633 ether_addr.reg.low = ntohl(ether_addr.reg.low);
4634 ether_addr.reg.high = ntohl(ether_addr.reg.high);
4635
4636 if ((ether_addr.mac.addr[5] != hw->mac.addr[0]) ||
4637 (ether_addr.mac.addr[4] != hw->mac.addr[1]) ||
4638 (ether_addr.mac.addr[3] != hw->mac.addr[2]) ||
4639 (ether_addr.mac.addr[2] != hw->mac.addr[3]) ||
4640 (ether_addr.mac.addr[1] != hw->mac.addr[4]) ||
4641 (ether_addr.mac.addr[0] != hw->mac.addr[5])) {
4642 (void) e1000_rar_set(hw, hw->mac.addr, 0);
4643 }
4644 }
4645
4646 /*
4647 * Long TTL workaround for 82541/82547
4648 */
4649 (void) e1000_igp_ttl_workaround_82547(hw);
4650
4651 /*
4652 	 * Check for Adaptive IFS settings. If there are lots of collisions,
4653 	 * change the value in steps...
4654 	 * These properties should only be set for 10/100 Mbps links.
4655 */
4656 if ((hw->phy.media_type == e1000_media_type_copper) &&
4657 ((Adapter->link_speed == SPEED_100) ||
4658 (Adapter->link_speed == SPEED_10))) {
4659 e1000_update_adaptive(hw);
4660 }
4661 /*
4662 * Set Timer Interrupts
4663 */
4664 E1000_WRITE_REG(hw, E1000_ICS, E1000_IMS_RXT0);
4665
4666 if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK)
4667 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED);
4668 else
4669 e1000g_timer_tx_resched(Adapter);
4670
4671 restart_watchdog_timer(Adapter);
4672 }
4673
4674 /*
4675 * The function e1000g_link_timer() is called when the timer for link setup
4676  * expires, which indicates the completion of the link setup. The link
4677  * state will not be updated until the link setup is completed, and the
4678  * link state will not be sent to the upper layer through mac_link_update()
4679  * in this function. It will be updated in the local timer routine or the
4680 * interrupt service routine after the interface is started (plumbed).
4681 */
4682 static void
4683 e1000g_link_timer(void *arg)
4684 {
4685 struct e1000g *Adapter = (struct e1000g *)arg;
4686
4687 mutex_enter(&Adapter->link_lock);
4688 Adapter->link_complete = B_TRUE;
4689 Adapter->link_tid = 0;
4690 mutex_exit(&Adapter->link_lock);
4691 }
4692
4693 /*
4694 * e1000g_force_speed_duplex - read forced speed/duplex out of e1000g.conf
4695 *
4696  * This function reads the forced speed and duplex for 10/100 Mbps speeds
4697 * and also for 1000 Mbps speeds from the e1000g.conf file
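 *
 * Any ForceSpeedDuplex value other than an explicit 10/100 half/full or
 * 1000 full setting falls back to autonegotiation, advertising whatever
 * AutoNegAdvertised selects (or all supported speeds by default).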
4698 */
4699 static void
4700 e1000g_force_speed_duplex(struct e1000g *Adapter)
4701 {
4702 int forced;
4703 int propval;
4704 struct e1000_mac_info *mac = &Adapter->shared.mac;
4705 struct e1000_phy_info *phy = &Adapter->shared.phy;
4706
4707 /*
4708 * get value out of config file
4709 */
4710 (void) e1000g_get_prop(Adapter, "ForceSpeedDuplex",
4711 GDIAG_10_HALF, GDIAG_ANY, GDIAG_ANY, &forced);
4712
4713 switch (forced) {
4714 case GDIAG_10_HALF:
4715 /*
4716 * Disable Auto Negotiation
4717 */
4718 mac->autoneg = B_FALSE;
4719 mac->forced_speed_duplex = ADVERTISE_10_HALF;
4720 break;
4721 case GDIAG_10_FULL:
4722 /*
4723 * Disable Auto Negotiation
4724 */
4725 mac->autoneg = B_FALSE;
4726 mac->forced_speed_duplex = ADVERTISE_10_FULL;
4727 break;
4728 case GDIAG_100_HALF:
4729 /*
4730 * Disable Auto Negotiation
4731 */
4732 mac->autoneg = B_FALSE;
4733 mac->forced_speed_duplex = ADVERTISE_100_HALF;
4734 break;
4735 case GDIAG_100_FULL:
4736 /*
4737 * Disable Auto Negotiation
4738 */
4739 mac->autoneg = B_FALSE;
4740 mac->forced_speed_duplex = ADVERTISE_100_FULL;
4741 break;
4742 case GDIAG_1000_FULL:
4743 /*
4744 * The gigabit spec requires autonegotiation. Therefore,
4745 * when the user wants to force the speed to 1000Mbps, we
4746 		 * enable AutoNeg, but only allow the hardware to advertise
4747 * 1000Mbps. This is different from 10/100 operation, where
4748 * we are allowed to link without any negotiation.
4749 */
4750 mac->autoneg = B_TRUE;
4751 phy->autoneg_advertised = ADVERTISE_1000_FULL;
4752 break;
4753 default: /* obey the setting of AutoNegAdvertised */
4754 mac->autoneg = B_TRUE;
4755 (void) e1000g_get_prop(Adapter, "AutoNegAdvertised",
4756 0, AUTONEG_ADVERTISE_SPEED_DEFAULT,
4757 AUTONEG_ADVERTISE_SPEED_DEFAULT, &propval);
4758 phy->autoneg_advertised = (uint16_t)propval;
4759 break;
4760 } /* switch */
4761 }
4762
4763 /*
4764 * e1000g_get_max_frame_size - get jumbo frame setting from e1000g.conf
4765 *
4766 * This function reads MaxFrameSize from e1000g.conf
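 *
 * MaxFrameSize selects the default MTU as follows:
 *   0 - standard Ethernet MTU (1500 bytes)
 *   1 - jumbo frames up to 4KB
 *   2 - jumbo frames up to 8KB
 *   3 - jumbo frames up to 16KB
 * subject to the device's own maximum MTU.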
4767 */
4768 static void
4769 e1000g_get_max_frame_size(struct e1000g *Adapter)
4770 {
4771 int max_frame;
4772
4773 /*
4774 * get value out of config file
4775 */
4776 (void) e1000g_get_prop(Adapter, "MaxFrameSize", 0, 3, 0,
4777 &max_frame);
4778
4779 switch (max_frame) {
4780 case 0:
4781 Adapter->default_mtu = ETHERMTU;
4782 break;
4783 case 1:
4784 Adapter->default_mtu = FRAME_SIZE_UPTO_4K -
4785 sizeof (struct ether_vlan_header) - ETHERFCSL;
4786 break;
4787 case 2:
4788 Adapter->default_mtu = FRAME_SIZE_UPTO_8K -
4789 sizeof (struct ether_vlan_header) - ETHERFCSL;
4790 break;
4791 case 3:
4792 Adapter->default_mtu = FRAME_SIZE_UPTO_16K -
4793 sizeof (struct ether_vlan_header) - ETHERFCSL;
4794 break;
4795 default:
4796 Adapter->default_mtu = ETHERMTU;
4797 break;
4798 } /* switch */
4799
4800 /*
4801 	 * If the user-configured MTU is larger than the device's maximum MTU,
4802 	 * the MTU is set to the device's maximum value.
4803 */
4804 if (Adapter->default_mtu > Adapter->max_mtu)
4805 Adapter->default_mtu = Adapter->max_mtu;
4806
4807 Adapter->max_frame_size = e1000g_mtu2maxframe(Adapter->default_mtu);
4808 }
4809
4810 /*
4811 * e1000g_pch_limits - Apply limits of the PCH silicon type
4812 *
4813 * At any frame size larger than the ethernet default,
4814 * prevent linking at 10/100 speeds.
4815 */
4816 static void
4817 e1000g_pch_limits(struct e1000g *Adapter)
4818 {
4819 struct e1000_hw *hw = &Adapter->shared;
4820
4821 /* only applies to PCH silicon type */
4822 if (hw->mac.type != e1000_pchlan && hw->mac.type != e1000_pch2lan)
4823 return;
4824
4825 /* only applies to frames larger than ethernet default */
4826 if (Adapter->max_frame_size > DEFAULT_FRAME_SIZE) {
4827 hw->mac.autoneg = B_TRUE;
4828 hw->phy.autoneg_advertised = ADVERTISE_1000_FULL;
4829
4830 Adapter->param_adv_autoneg = 1;
4831 Adapter->param_adv_1000fdx = 1;
4832
4833 Adapter->param_adv_100fdx = 0;
4834 Adapter->param_adv_100hdx = 0;
4835 Adapter->param_adv_10fdx = 0;
4836 Adapter->param_adv_10hdx = 0;
4837
4838 e1000g_param_sync(Adapter);
4839 }
4840 }
4841
4842 /*
4843 * e1000g_mtu2maxframe - convert given MTU to maximum frame size
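 *
 * For example, the standard 1500-byte MTU maps to a maximum frame size of
 * 1500 + 18 (VLAN-tagged Ethernet header) + 4 (FCS) = 1522 bytes.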
4844 */
4845 static uint32_t
4846 e1000g_mtu2maxframe(uint32_t mtu)
4847 {
4848 uint32_t maxframe;
4849
4850 maxframe = mtu + sizeof (struct ether_vlan_header) + ETHERFCSL;
4851
4852 return (maxframe);
4853 }
4854
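/*
 * Watchdog timer helpers
 *
 * The watchdog fires e1000g_local_timer() once per second via timeout(9F).
 * "enabled" tracks whether the watchdog should run at all, while "started"
 * tracks whether a timeout is currently armed; the enable/disable and
 * start/stop/restart routines below manipulate the two flags under
 * watchdog_lock.
 */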
4855 static void
4856 arm_watchdog_timer(struct e1000g *Adapter)
4857 {
4858 Adapter->watchdog_tid =
4859 timeout(e1000g_local_timer,
4860 (void *)Adapter, 1 * drv_usectohz(1000000));
4861 }
4862 #pragma inline(arm_watchdog_timer)
4863
4864 static void
4865 enable_watchdog_timer(struct e1000g *Adapter)
4866 {
4867 mutex_enter(&Adapter->watchdog_lock);
4868
4869 if (!Adapter->watchdog_timer_enabled) {
4870 Adapter->watchdog_timer_enabled = B_TRUE;
4871 Adapter->watchdog_timer_started = B_TRUE;
4872 arm_watchdog_timer(Adapter);
4873 }
4874
4875 mutex_exit(&Adapter->watchdog_lock);
4876 }
4877
4878 static void
4879 disable_watchdog_timer(struct e1000g *Adapter)
4880 {
4881 timeout_id_t tid;
4882
4883 mutex_enter(&Adapter->watchdog_lock);
4884
4885 Adapter->watchdog_timer_enabled = B_FALSE;
4886 Adapter->watchdog_timer_started = B_FALSE;
4887 tid = Adapter->watchdog_tid;
4888 Adapter->watchdog_tid = 0;
4889
4890 mutex_exit(&Adapter->watchdog_lock);
4891
4892 if (tid != 0)
4893 (void) untimeout(tid);
4894 }
4895
4896 static void
4897 start_watchdog_timer(struct e1000g *Adapter)
4898 {
4899 mutex_enter(&Adapter->watchdog_lock);
4900
4901 if (Adapter->watchdog_timer_enabled) {
4902 if (!Adapter->watchdog_timer_started) {
4903 Adapter->watchdog_timer_started = B_TRUE;
4904 arm_watchdog_timer(Adapter);
4905 }
4906 }
4907
4908 mutex_exit(&Adapter->watchdog_lock);
4909 }
4910
4911 static void
4912 restart_watchdog_timer(struct e1000g *Adapter)
4913 {
4914 mutex_enter(&Adapter->watchdog_lock);
4915
4916 if (Adapter->watchdog_timer_started)
4917 arm_watchdog_timer(Adapter);
4918
4919 mutex_exit(&Adapter->watchdog_lock);
4920 }
4921
4922 static void
4923 stop_watchdog_timer(struct e1000g *Adapter)
4924 {
4925 timeout_id_t tid;
4926
4927 mutex_enter(&Adapter->watchdog_lock);
4928
4929 Adapter->watchdog_timer_started = B_FALSE;
4930 tid = Adapter->watchdog_tid;
4931 Adapter->watchdog_tid = 0;
4932
4933 mutex_exit(&Adapter->watchdog_lock);
4934
4935 if (tid != 0)
4936 (void) untimeout(tid);
4937 }
4938
4939 static void
4940 stop_link_timer(struct e1000g *Adapter)
4941 {
4942 timeout_id_t tid;
4943
4944 /* Disable the link timer */
4945 mutex_enter(&Adapter->link_lock);
4946
4947 tid = Adapter->link_tid;
4948 Adapter->link_tid = 0;
4949
4950 mutex_exit(&Adapter->link_lock);
4951
4952 if (tid != 0)
4953 (void) untimeout(tid);
4954 }
4955
4956 static void
4957 stop_82547_timer(e1000g_tx_ring_t *tx_ring)
4958 {
4959 timeout_id_t tid;
4960
4961 /* Disable the tx timer for 82547 chipset */
4962 mutex_enter(&tx_ring->tx_lock);
4963
4964 tx_ring->timer_enable_82547 = B_FALSE;
4965 tid = tx_ring->timer_id_82547;
4966 tx_ring->timer_id_82547 = 0;
4967
4968 mutex_exit(&tx_ring->tx_lock);
4969
4970 if (tid != 0)
4971 (void) untimeout(tid);
4972 }
4973
4974 void
4975 e1000g_clear_interrupt(struct e1000g *Adapter)
4976 {
4977 E1000_WRITE_REG(&Adapter->shared, E1000_IMC,
4978 0xffffffff & ~E1000_IMS_RXSEQ);
4979 }
4980
4981 void
4982 e1000g_mask_interrupt(struct e1000g *Adapter)
4983 {
4984 E1000_WRITE_REG(&Adapter->shared, E1000_IMS,
4985 IMS_ENABLE_MASK & ~E1000_IMS_TXDW);
4986
4987 if (Adapter->tx_intr_enable)
4988 e1000g_mask_tx_interrupt(Adapter);
4989 }
4990
4991 /*
4992 * This routine is called by e1000g_quiesce(), therefore must not block.
4993 */
4994 void
4995 e1000g_clear_all_interrupts(struct e1000g *Adapter)
4996 {
4997 E1000_WRITE_REG(&Adapter->shared, E1000_IMC, 0xffffffff);
4998 }
4999
5000 void
5001 e1000g_mask_tx_interrupt(struct e1000g *Adapter)
5002 {
5003 E1000_WRITE_REG(&Adapter->shared, E1000_IMS, E1000_IMS_TXDW);
5004 }
5005
5006 void
5007 e1000g_clear_tx_interrupt(struct e1000g *Adapter)
5008 {
5009 E1000_WRITE_REG(&Adapter->shared, E1000_IMC, E1000_IMS_TXDW);
5010 }
5011
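/*
 * e1000g_smartspeed - SmartSpeed workaround for IGP copper PHYs
 *
 * Invoked from the link-down path.  If the PHY keeps reporting 1000BASE-T
 * master/slave configuration faults, manual master/slave configuration is
 * disabled and autonegotiation restarted; after a further downshift
 * interval without link it is re-enabled in case a 2/3-pair cable is in
 * use.  The counter wraps after E1000_SMARTSPEED_MAX ticks.
 */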
5012 static void
5013 e1000g_smartspeed(struct e1000g *Adapter)
5014 {
5015 struct e1000_hw *hw = &Adapter->shared;
5016 uint16_t phy_status;
5017 uint16_t phy_ctrl;
5018
5019 /*
5020 * If we're not T-or-T, or we're not autoneg'ing, or we're not
5021 * advertising 1000Full, we don't even use the workaround
5022 */
5023 if ((hw->phy.type != e1000_phy_igp) ||
5024 !hw->mac.autoneg ||
5025 !(hw->phy.autoneg_advertised & ADVERTISE_1000_FULL))
5026 return;
5027
5028 /*
5029 * True if this is the first call of this function or after every
5030 * 30 seconds of not having link
5031 */
5032 if (Adapter->smartspeed == 0) {
5033 /*
5034 * If Master/Slave config fault is asserted twice, we
5035 * assume back-to-back
5036 */
5037 (void) e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
5038 if (!(phy_status & SR_1000T_MS_CONFIG_FAULT))
5039 return;
5040
5041 (void) e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
5042 if (!(phy_status & SR_1000T_MS_CONFIG_FAULT))
5043 return;
5044 /*
5045 		 * We're assuming back-to-back because our status register
5046 		 * insists there's a fault in the master/slave
5047 * relationship that was "negotiated"
5048 */
5049 (void) e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl);
5050 /*
5051 * Is the phy configured for manual configuration of
5052 * master/slave?
5053 */
5054 if (phy_ctrl & CR_1000T_MS_ENABLE) {
5055 /*
5056 * Yes. Then disable manual configuration (enable
5057 * auto configuration) of master/slave
5058 */
5059 phy_ctrl &= ~CR_1000T_MS_ENABLE;
5060 (void) e1000_write_phy_reg(hw,
5061 PHY_1000T_CTRL, phy_ctrl);
5062 /*
5063 * Effectively starting the clock
5064 */
5065 Adapter->smartspeed++;
5066 /*
5067 * Restart autonegotiation
5068 */
5069 if (!e1000_phy_setup_autoneg(hw) &&
5070 !e1000_read_phy_reg(hw, PHY_CONTROL, &phy_ctrl)) {
5071 phy_ctrl |= (MII_CR_AUTO_NEG_EN |
5072 MII_CR_RESTART_AUTO_NEG);
5073 (void) e1000_write_phy_reg(hw,
5074 PHY_CONTROL, phy_ctrl);
5075 }
5076 }
5077 return;
5078 /*
5079 * Has 6 seconds transpired still without link? Remember,
5080 * you should reset the smartspeed counter once you obtain
5081 * link
5082 */
5083 } else if (Adapter->smartspeed == E1000_SMARTSPEED_DOWNSHIFT) {
5084 /*
5085 * Yes. Remember, we did at the start determine that
5086 * there's a master/slave configuration fault, so we're
5087 * still assuming there's someone on the other end, but we
5088 * just haven't yet been able to talk to it. We then
5089 * re-enable auto configuration of master/slave to see if
5090 * we're running 2/3 pair cables.
5091 */
5092 /*
5093 * If still no link, perhaps using 2/3 pair cable
5094 */
5095 (void) e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl);
5096 phy_ctrl |= CR_1000T_MS_ENABLE;
5097 (void) e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_ctrl);
5098 /*
5099 * Restart autoneg with phy enabled for manual
5100 * configuration of master/slave
5101 */
5102 if (!e1000_phy_setup_autoneg(hw) &&
5103 !e1000_read_phy_reg(hw, PHY_CONTROL, &phy_ctrl)) {
5104 phy_ctrl |=
5105 (MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG);
5106 (void) e1000_write_phy_reg(hw, PHY_CONTROL, phy_ctrl);
5107 }
5108 /*
5109 * Hopefully, there are no more faults and we've obtained
5110 * link as a result.
5111 */
5112 }
5113 /*
5114 * Restart process after E1000_SMARTSPEED_MAX iterations (30
5115 * seconds)
5116 */
5117 if (Adapter->smartspeed++ == E1000_SMARTSPEED_MAX)
5118 Adapter->smartspeed = 0;
5119 }
5120
5121 static boolean_t
5122 is_valid_mac_addr(uint8_t *mac_addr)
5123 {
5124 const uint8_t addr_test1[6] = { 0, 0, 0, 0, 0, 0 };
5125 const uint8_t addr_test2[6] =
5126 { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
5127
5128 if (!(bcmp(addr_test1, mac_addr, ETHERADDRL)) ||
5129 !(bcmp(addr_test2, mac_addr, ETHERADDRL)))
5130 return (B_FALSE);
5131
5132 return (B_TRUE);
5133 }
5134
5135 /*
5136 * e1000g_stall_check - check for tx stall
5137 *
5138 * This function checks if the adapter is stalled (in transmit).
5139 *
5140 * It is called each time the watchdog timeout is invoked.
5141 * If the transmit descriptor reclaim continuously fails,
5142 * the watchdog value will increment by 1. If the watchdog
5143 * value exceeds the threshold, the adapter is assumed to
5144  * have stalled and needs to be reset.
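 *
 * In this implementation the decision is carried by Adapter->stall_flag,
 * which is presumably set by the descriptor-reclaim path once the stall
 * threshold established in e1000g_link_check() has been exceeded.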
5145 */
5146 static boolean_t
5147 e1000g_stall_check(struct e1000g *Adapter)
5148 {
5149 e1000g_tx_ring_t *tx_ring;
5150
5151 tx_ring = Adapter->tx_ring;
5152
5153 if (Adapter->link_state != LINK_STATE_UP)
5154 return (B_FALSE);
5155
5156 (void) e1000g_recycle(tx_ring);
5157
5158 if (Adapter->stall_flag)
5159 return (B_TRUE);
5160
5161 return (B_FALSE);
5162 }
5163
5164 #ifdef E1000G_DEBUG
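/*
 * e1000g_pp_ioctl - debug-only register/memory peek and poke
 *
 * Validates the e1000g_peekpoke_t request (size, alignment and offset
 * against the selected access space) and dispatches to the reg or mem
 * accessors below.  Compiled in only when E1000G_DEBUG is defined.
 */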
5165 static enum ioc_reply
5166 e1000g_pp_ioctl(struct e1000g *e1000gp, struct iocblk *iocp, mblk_t *mp)
5167 {
5168 void (*ppfn)(struct e1000g *e1000gp, e1000g_peekpoke_t *ppd);
5169 e1000g_peekpoke_t *ppd;
5170 uint64_t mem_va;
5171 uint64_t maxoff;
5172 boolean_t peek;
5173
5174 switch (iocp->ioc_cmd) {
5175
5176 case E1000G_IOC_REG_PEEK:
5177 peek = B_TRUE;
5178 break;
5179
5180 case E1000G_IOC_REG_POKE:
5181 peek = B_FALSE;
5182 break;
5183
5184 	default:
5185 E1000G_DEBUGLOG_1(e1000gp, E1000G_INFO_LEVEL,
5186 "e1000g_diag_ioctl: invalid ioctl command 0x%X\n",
5187 iocp->ioc_cmd);
5188 return (IOC_INVAL);
5189 }
5190
5191 /*
5192 * Validate format of ioctl
5193 */
5194 if (iocp->ioc_count != sizeof (e1000g_peekpoke_t))
5195 return (IOC_INVAL);
5196 if (mp->b_cont == NULL)
5197 return (IOC_INVAL);
5198
5199 ppd = (e1000g_peekpoke_t *)(uintptr_t)mp->b_cont->b_rptr;
5200
5201 /*
5202 * Validate request parameters
5203 */
5204 switch (ppd->pp_acc_space) {
5205
5206 default:
5207 E1000G_DEBUGLOG_1(e1000gp, E1000G_INFO_LEVEL,
5208 "e1000g_diag_ioctl: invalid access space 0x%X\n",
5209 ppd->pp_acc_space);
5210 return (IOC_INVAL);
5211
5212 case E1000G_PP_SPACE_REG:
5213 /*
5214 * Memory-mapped I/O space
5215 */
5216 ASSERT(ppd->pp_acc_size == 4);
5217 if (ppd->pp_acc_size != 4)
5218 return (IOC_INVAL);
5219
5220 if ((ppd->pp_acc_offset % ppd->pp_acc_size) != 0)
5221 return (IOC_INVAL);
5222
5223 mem_va = 0;
5224 maxoff = 0x10000;
5225 ppfn = peek ? e1000g_ioc_peek_reg : e1000g_ioc_poke_reg;
5226 break;
5227
5228 case E1000G_PP_SPACE_E1000G:
5229 /*
5230 * E1000g data structure!
5231 */
5232 mem_va = (uintptr_t)e1000gp;
5233 maxoff = sizeof (struct e1000g);
5234 ppfn = peek ? e1000g_ioc_peek_mem : e1000g_ioc_poke_mem;
5235 break;
5236
5237 }
5238
5239 if (ppd->pp_acc_offset >= maxoff)
5240 return (IOC_INVAL);
5241
5242 if (ppd->pp_acc_offset + ppd->pp_acc_size > maxoff)
5243 return (IOC_INVAL);
5244
5245 /*
5246 * All OK - go!
5247 */
5248 ppd->pp_acc_offset += mem_va;
5249 (*ppfn)(e1000gp, ppd);
5250 return (peek ? IOC_REPLY : IOC_ACK);
5251 }
5252
5253 static void
5254 e1000g_ioc_peek_reg(struct e1000g *e1000gp, e1000g_peekpoke_t *ppd)
5255 {
5256 ddi_acc_handle_t handle;
5257 uint32_t *regaddr;
5258
5259 handle = e1000gp->osdep.reg_handle;
5260 regaddr = (uint32_t *)((uintptr_t)e1000gp->shared.hw_addr +
5261 (uintptr_t)ppd->pp_acc_offset);
5262
5263 ppd->pp_acc_data = ddi_get32(handle, regaddr);
5264 }
5265
5266 static void
5267 e1000g_ioc_poke_reg(struct e1000g *e1000gp, e1000g_peekpoke_t *ppd)
5268 {
5269 ddi_acc_handle_t handle;
5270 uint32_t *regaddr;
5271 uint32_t value;
5272
5273 handle = e1000gp->osdep.reg_handle;
5274 regaddr = (uint32_t *)((uintptr_t)e1000gp->shared.hw_addr +
5275 (uintptr_t)ppd->pp_acc_offset);
5276 value = (uint32_t)ppd->pp_acc_data;
5277
5278 ddi_put32(handle, regaddr, value);
5279 }
5280
5281 static void
5282 e1000g_ioc_peek_mem(struct e1000g *e1000gp, e1000g_peekpoke_t *ppd)
5283 {
5284 uint64_t value;
5285 void *vaddr;
5286
5287 vaddr = (void *)(uintptr_t)ppd->pp_acc_offset;
5288
5289 switch (ppd->pp_acc_size) {
5290 case 1:
5291 value = *(uint8_t *)vaddr;
5292 break;
5293
5294 case 2:
5295 value = *(uint16_t *)vaddr;
5296 break;
5297
5298 case 4:
5299 value = *(uint32_t *)vaddr;
5300 break;
5301
5302 case 8:
5303 value = *(uint64_t *)vaddr;
5304 break;
5305 }
5306
5307 E1000G_DEBUGLOG_4(e1000gp, E1000G_INFO_LEVEL,
5308 "e1000g_ioc_peek_mem($%p, $%p) peeked 0x%llx from $%p\n",
5309 (void *)e1000gp, (void *)ppd, value, vaddr);
5310
5311 ppd->pp_acc_data = value;
5312 }
5313
5314 static void
5315 e1000g_ioc_poke_mem(struct e1000g *e1000gp, e1000g_peekpoke_t *ppd)
5316 {
5317 uint64_t value;
5318 void *vaddr;
5319
5320 vaddr = (void *)(uintptr_t)ppd->pp_acc_offset;
5321 value = ppd->pp_acc_data;
5322
5323 E1000G_DEBUGLOG_4(e1000gp, E1000G_INFO_LEVEL,
5324 "e1000g_ioc_poke_mem($%p, $%p) poking 0x%llx at $%p\n",
5325 (void *)e1000gp, (void *)ppd, value, vaddr);
5326
5327 switch (ppd->pp_acc_size) {
5328 case 1:
5329 *(uint8_t *)vaddr = (uint8_t)value;
5330 break;
5331
5332 case 2:
5333 *(uint16_t *)vaddr = (uint16_t)value;
5334 break;
5335
5336 case 4:
5337 *(uint32_t *)vaddr = (uint32_t)value;
5338 break;
5339
5340 case 8:
5341 *(uint64_t *)vaddr = (uint64_t)value;
5342 break;
5343 }
5344 }
5345 #endif
5346
5347 /*
5348 * Loopback Support
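 *
 * e1000g_loopback_ioctl() below services the LB_GET_INFO_SIZE,
 * LB_GET_INFO, LB_GET_MODE and LB_SET_MODE requests, advertising only the
 * loopback modes the current PHY and media type can support.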
5349 */
5350 static lb_property_t lb_normal =
5351 { normal, "normal", E1000G_LB_NONE };
5352 static lb_property_t lb_external1000 =
5353 { external, "1000Mbps", E1000G_LB_EXTERNAL_1000 };
5354 static lb_property_t lb_external100 =
5355 { external, "100Mbps", E1000G_LB_EXTERNAL_100 };
5356 static lb_property_t lb_external10 =
5357 { external, "10Mbps", E1000G_LB_EXTERNAL_10 };
5358 static lb_property_t lb_phy =
5359 { internal, "PHY", E1000G_LB_INTERNAL_PHY };
5360
5361 static enum ioc_reply
5362 e1000g_loopback_ioctl(struct e1000g *Adapter, struct iocblk *iocp, mblk_t *mp)
5363 {
5364 lb_info_sz_t *lbsp;
5365 lb_property_t *lbpp;
5366 struct e1000_hw *hw;
5367 uint32_t *lbmp;
5368 uint32_t size;
5369 uint32_t value;
5370
5371 hw = &Adapter->shared;
5372
5373 if (mp->b_cont == NULL)
5374 return (IOC_INVAL);
5375
5376 if (!e1000g_check_loopback_support(hw)) {
5377 e1000g_log(NULL, CE_WARN,
5378 "Loopback is not supported on e1000g%d", Adapter->instance);
5379 return (IOC_INVAL);
5380 }
5381
5382 switch (iocp->ioc_cmd) {
5383 default:
5384 return (IOC_INVAL);
5385
5386 case LB_GET_INFO_SIZE:
5387 size = sizeof (lb_info_sz_t);
5388 if (iocp->ioc_count != size)
5389 return (IOC_INVAL);
5390
5391 rw_enter(&Adapter->chip_lock, RW_WRITER);
5392 e1000g_get_phy_state(Adapter);
5393
5394 /*
5395 * Workaround for hardware faults. In order to get a stable
5396 * state of phy, we will wait for a specific interval and
5397 		 * try again. The time delay is an empirical value based
5398 * on our testing.
5399 */
5400 msec_delay(100);
5401 e1000g_get_phy_state(Adapter);
5402 rw_exit(&Adapter->chip_lock);
5403
5404 value = sizeof (lb_normal);
5405 if ((Adapter->phy_ext_status & IEEE_ESR_1000T_FD_CAPS) ||
5406 (Adapter->phy_ext_status & IEEE_ESR_1000X_FD_CAPS) ||
5407 (hw->phy.media_type == e1000_media_type_fiber) ||
5408 (hw->phy.media_type == e1000_media_type_internal_serdes)) {
5409 value += sizeof (lb_phy);
5410 switch (hw->mac.type) {
5411 case e1000_82571:
5412 case e1000_82572:
5413 case e1000_80003es2lan:
5414 value += sizeof (lb_external1000);
5415 break;
5416 }
5417 }
5418 if ((Adapter->phy_status & MII_SR_100X_FD_CAPS) ||
5419 (Adapter->phy_status & MII_SR_100T2_FD_CAPS))
5420 value += sizeof (lb_external100);
5421 if (Adapter->phy_status & MII_SR_10T_FD_CAPS)
5422 value += sizeof (lb_external10);
5423
5424 lbsp = (lb_info_sz_t *)(uintptr_t)mp->b_cont->b_rptr;
5425 *lbsp = value;
5426 break;
5427
5428 case LB_GET_INFO:
5429 value = sizeof (lb_normal);
5430 if ((Adapter->phy_ext_status & IEEE_ESR_1000T_FD_CAPS) ||
5431 (Adapter->phy_ext_status & IEEE_ESR_1000X_FD_CAPS) ||
5432 (hw->phy.media_type == e1000_media_type_fiber) ||
5433 (hw->phy.media_type == e1000_media_type_internal_serdes)) {
5434 value += sizeof (lb_phy);
5435 switch (hw->mac.type) {
5436 case e1000_82571:
5437 case e1000_82572:
5438 case e1000_80003es2lan:
5439 value += sizeof (lb_external1000);
5440 break;
5441 }
5442 }
5443 if ((Adapter->phy_status & MII_SR_100X_FD_CAPS) ||
5444 (Adapter->phy_status & MII_SR_100T2_FD_CAPS))
5445 value += sizeof (lb_external100);
5446 if (Adapter->phy_status & MII_SR_10T_FD_CAPS)
5447 value += sizeof (lb_external10);
5448
5449 size = value;
5450 if (iocp->ioc_count != size)
5451 return (IOC_INVAL);
5452
5453 value = 0;
5454 lbpp = (lb_property_t *)(uintptr_t)mp->b_cont->b_rptr;
5455 lbpp[value++] = lb_normal;
5456 if ((Adapter->phy_ext_status & IEEE_ESR_1000T_FD_CAPS) ||
5457 (Adapter->phy_ext_status & IEEE_ESR_1000X_FD_CAPS) ||
5458 (hw->phy.media_type == e1000_media_type_fiber) ||
5459 (hw->phy.media_type == e1000_media_type_internal_serdes)) {
5460 lbpp[value++] = lb_phy;
5461 switch (hw->mac.type) {
5462 case e1000_82571:
5463 case e1000_82572:
5464 case e1000_80003es2lan:
5465 lbpp[value++] = lb_external1000;
5466 break;
5467 }
5468 }
5469 if ((Adapter->phy_status & MII_SR_100X_FD_CAPS) ||
5470 (Adapter->phy_status & MII_SR_100T2_FD_CAPS))
5471 lbpp[value++] = lb_external100;
5472 if (Adapter->phy_status & MII_SR_10T_FD_CAPS)
5473 lbpp[value++] = lb_external10;
5474 break;
5475
5476 case LB_GET_MODE:
5477 size = sizeof (uint32_t);
5478 if (iocp->ioc_count != size)
5479 return (IOC_INVAL);
5480
5481 lbmp = (uint32_t *)(uintptr_t)mp->b_cont->b_rptr;
5482 *lbmp = Adapter->loopback_mode;
5483 break;
5484
5485 case LB_SET_MODE:
5486 size = 0;
5487 if (iocp->ioc_count != sizeof (uint32_t))
5488 return (IOC_INVAL);
5489
5490 lbmp = (uint32_t *)(uintptr_t)mp->b_cont->b_rptr;
5491 if (!e1000g_set_loopback_mode(Adapter, *lbmp))
5492 return (IOC_INVAL);
5493 break;
5494 }
5495
5496 iocp->ioc_count = size;
5497 iocp->ioc_error = 0;
5498
5499 if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) {
5500 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED);
5501 return (IOC_INVAL);
5502 }
5503
5504 return (IOC_REPLY);
5505 }
5506
5507 static boolean_t
5508 e1000g_check_loopback_support(struct e1000_hw *hw)
5509 {
5510 switch (hw->mac.type) {
5511 case e1000_82540:
5512 case e1000_82545:
5513 case e1000_82545_rev_3:
5514 case e1000_82546:
5515 case e1000_82546_rev_3:
5516 case e1000_82541:
5517 case e1000_82541_rev_2:
5518 case e1000_82547:
5519 case e1000_82547_rev_2:
5520 case e1000_82571:
5521 case e1000_82572:
5522 case e1000_82573:
5523 case e1000_82574:
5524 case e1000_80003es2lan:
5525 case e1000_ich9lan:
5526 case e1000_ich10lan:
5527 return (B_TRUE);
5528 }
5529 return (B_FALSE);
5530 }
5531
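/*
 * e1000g_set_loopback_mode - switch the device into the requested
 * loopback mode
 *
 * After programming the PHY/MAC for the new mode the routine waits for
 * link, retrying once after a full adapter reset; if the link still does
 * not come up, the driver falls back to E1000G_LB_NONE and resets the
 * chip.
 */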
5532 static boolean_t
5533 e1000g_set_loopback_mode(struct e1000g *Adapter, uint32_t mode)
5534 {
5535 struct e1000_hw *hw;
5536 int i, times;
5537 boolean_t link_up;
5538
5539 if (mode == Adapter->loopback_mode)
5540 return (B_TRUE);
5541
5542 hw = &Adapter->shared;
5543 times = 0;
5544
5545 Adapter->loopback_mode = mode;
5546
5547 if (mode == E1000G_LB_NONE) {
5548 /* Reset the chip */
5549 hw->phy.autoneg_wait_to_complete = B_TRUE;
5550 (void) e1000g_reset_adapter(Adapter);
5551 hw->phy.autoneg_wait_to_complete = B_FALSE;
5552 return (B_TRUE);
5553 }
5554
5555 again:
5556
5557 rw_enter(&Adapter->chip_lock, RW_WRITER);
5558
5559 switch (mode) {
5560 default:
5561 rw_exit(&Adapter->chip_lock);
5562 return (B_FALSE);
5563
5564 case E1000G_LB_EXTERNAL_1000:
5565 e1000g_set_external_loopback_1000(Adapter);
5566 break;
5567
5568 case E1000G_LB_EXTERNAL_100:
5569 e1000g_set_external_loopback_100(Adapter);
5570 break;
5571
5572 case E1000G_LB_EXTERNAL_10:
5573 e1000g_set_external_loopback_10(Adapter);
5574 break;
5575
5576 case E1000G_LB_INTERNAL_PHY:
5577 e1000g_set_internal_loopback(Adapter);
5578 break;
5579 }
5580
5581 times++;
5582
5583 rw_exit(&Adapter->chip_lock);
5584
5585 /* Wait for link up */
5586 for (i = (PHY_FORCE_LIMIT * 2); i > 0; i--)
5587 msec_delay(100);
5588
5589 rw_enter(&Adapter->chip_lock, RW_WRITER);
5590
5591 link_up = e1000g_link_up(Adapter);
5592
5593 rw_exit(&Adapter->chip_lock);
5594
5595 if (!link_up) {
5596 E1000G_DEBUGLOG_0(Adapter, E1000G_INFO_LEVEL,
5597 "Failed to get the link up");
5598 if (times < 2) {
5599 /* Reset the link */
5600 E1000G_DEBUGLOG_0(Adapter, E1000G_INFO_LEVEL,
5601 "Reset the link ...");
5602 (void) e1000g_reset_adapter(Adapter);
5603 goto again;
5604 }
5605
5606 /*
5607 		 * Reset the driver to loopback none when setting the
5608 		 * loopback mode has failed for the second time.
5609 */
5610 Adapter->loopback_mode = E1000G_LB_NONE;
5611
5612 /* Reset the chip */
5613 hw->phy.autoneg_wait_to_complete = B_TRUE;
5614 (void) e1000g_reset_adapter(Adapter);
5615 hw->phy.autoneg_wait_to_complete = B_FALSE;
5616
5617 E1000G_DEBUGLOG_0(Adapter, E1000G_INFO_LEVEL,
5618 "Set loopback mode failed, reset to loopback none");
5619
5620 return (B_FALSE);
5621 }
5622
5623 return (B_TRUE);
5624 }
5625
5626 /*
5627 * The following loopback settings are from Intel's technical
5628 * document - "How To Loopback". All the register settings and
5629 * time delay values are directly inherited from the document
5630 * without more explanations available.
5631 */
5632 static void
5633 e1000g_set_internal_loopback(struct e1000g *Adapter)
5634 {
5635 struct e1000_hw *hw;
5636 uint32_t ctrl;
5637 uint32_t status;
5638 uint16_t phy_ctrl;
5639 uint16_t phy_reg;
5640 uint32_t txcw;
5641
5642 hw = &Adapter->shared;
5643
5644 /* Disable Smart Power Down */
5645 phy_spd_state(hw, B_FALSE);
5646
5647 (void) e1000_read_phy_reg(hw, PHY_CONTROL, &phy_ctrl);
5648 phy_ctrl &= ~(MII_CR_AUTO_NEG_EN | MII_CR_SPEED_100 | MII_CR_SPEED_10);
5649 phy_ctrl |= MII_CR_FULL_DUPLEX | MII_CR_SPEED_1000;
5650
5651 switch (hw->mac.type) {
5652 case e1000_82540:
5653 case e1000_82545:
5654 case e1000_82545_rev_3:
5655 case e1000_82546:
5656 case e1000_82546_rev_3:
5657 case e1000_82573:
5658 /* Auto-MDI/MDIX off */
5659 (void) e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, 0x0808);
5660 /* Reset PHY to update Auto-MDI/MDIX */
5661 (void) e1000_write_phy_reg(hw, PHY_CONTROL,
5662 phy_ctrl | MII_CR_RESET | MII_CR_AUTO_NEG_EN);
5663 /* Reset PHY to auto-neg off and force 1000 */
5664 (void) e1000_write_phy_reg(hw, PHY_CONTROL,
5665 phy_ctrl | MII_CR_RESET);
5666 /*
5667 * Disable PHY receiver for 82540/545/546 and 82573 Family.
5668 * See comments above e1000g_set_internal_loopback() for the
5669 * background.
5670 */
5671 (void) e1000_write_phy_reg(hw, 29, 0x001F);
5672 (void) e1000_write_phy_reg(hw, 30, 0x8FFC);
5673 (void) e1000_write_phy_reg(hw, 29, 0x001A);
5674 (void) e1000_write_phy_reg(hw, 30, 0x8FF0);
5675 break;
5676 case e1000_80003es2lan:
5677 /* Force Link Up */
5678 (void) e1000_write_phy_reg(hw, GG82563_PHY_KMRN_MODE_CTRL,
5679 0x1CC);
5680 /* Sets PCS loopback at 1Gbs */
5681 (void) e1000_write_phy_reg(hw, GG82563_PHY_MAC_SPEC_CTRL,
5682 0x1046);
5683 break;
5684 }
5685
5686 	/*
5687 	 * The following registers must be set for the e1000_phy_bm PHY type,
5688 	 * which is used by e1000_82574, e1000_ich10lan and some e1000_ich9lan
5689 	 * devices. Other PHY types do not need these registers set.
5690 	 */
5691 if (hw->phy.type == e1000_phy_bm) {
5692 		/* Set default MAC interface speed to 1 Gb/s */
5693 (void) e1000_read_phy_reg(hw, PHY_REG(2, 21), &phy_reg);
5694 phy_reg &= ~0x0007;
5695 phy_reg |= 0x006;
5696 (void) e1000_write_phy_reg(hw, PHY_REG(2, 21), phy_reg);
5697 /* Assert SW reset for above settings to take effect */
5698 (void) e1000_phy_commit(hw);
5699 msec_delay(1);
5700 /* Force Full Duplex */
5701 (void) e1000_read_phy_reg(hw, PHY_REG(769, 16), &phy_reg);
5702 (void) e1000_write_phy_reg(hw, PHY_REG(769, 16),
5703 phy_reg | 0x000C);
5704 /* Set Link Up (in force link) */
5705 (void) e1000_read_phy_reg(hw, PHY_REG(776, 16), &phy_reg);
5706 (void) e1000_write_phy_reg(hw, PHY_REG(776, 16),
5707 phy_reg | 0x0040);
5708 /* Force Link */
5709 (void) e1000_read_phy_reg(hw, PHY_REG(769, 16), &phy_reg);
5710 (void) e1000_write_phy_reg(hw, PHY_REG(769, 16),
5711 phy_reg | 0x0040);
5712 /* Set Early Link Enable */
5713 (void) e1000_read_phy_reg(hw, PHY_REG(769, 20), &phy_reg);
5714 (void) e1000_write_phy_reg(hw, PHY_REG(769, 20),
5715 phy_reg | 0x0400);
5716 }
5717
5718 /* Set loopback */
5719 (void) e1000_write_phy_reg(hw, PHY_CONTROL, phy_ctrl | MII_CR_LOOPBACK);
5720
5721 msec_delay(250);
5722
5723 /* Now set up the MAC to the same speed/duplex as the PHY. */
5724 ctrl = E1000_READ_REG(hw, E1000_CTRL);
5725 ctrl &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */
5726 ctrl |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */
5727 E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */
5728 E1000_CTRL_SPD_1000 | /* Force Speed to 1000 */
5729 E1000_CTRL_FD); /* Force Duplex to FULL */
5730
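	/*
	 * The switch below applies per-MAC adjustments. Fiber/SerDes parts
	 * additionally need ILOS (invert loss-of-signal) and/or SLU (set
	 * link up) so that the forced link reports as up while in loopback.
	 */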
5731 switch (hw->mac.type) {
5732 case e1000_82540:
5733 case e1000_82545:
5734 case e1000_82545_rev_3:
5735 case e1000_82546:
5736 case e1000_82546_rev_3:
5737 		/*
5738 		 * For some SerDes devices we need to commit the writes now
5739 		 * so that the link status is updated.
5740 		 */
5741 if (hw->phy.media_type == e1000_media_type_internal_serdes) {
5742 E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
5743 msec_delay(100);
5744 ctrl = E1000_READ_REG(hw, E1000_CTRL);
5745 }
5746
5747 if (hw->phy.media_type == e1000_media_type_copper) {
5748 /* Invert Loss of Signal */
5749 ctrl |= E1000_CTRL_ILOS;
5750 } else {
5751 /* Set ILOS on fiber nic if half duplex is detected */
5752 status = E1000_READ_REG(hw, E1000_STATUS);
5753 if ((status & E1000_STATUS_FD) == 0)
5754 ctrl |= E1000_CTRL_ILOS | E1000_CTRL_SLU;
5755 }
5756 break;
5757
5758 case e1000_82571:
5759 case e1000_82572:
5760 /*
5761 * The fiber/SerDes versions of this adapter do not contain an
5762 * accessible PHY. Therefore, loopback beyond MAC must be done
5763 * using SerDes analog loopback.
5764 */
5765 if (hw->phy.media_type != e1000_media_type_copper) {
5766 /* Disable autoneg by setting bit 31 of TXCW to zero */
5767 txcw = E1000_READ_REG(hw, E1000_TXCW);
5768 txcw &= ~((uint32_t)1 << 31);
5769 E1000_WRITE_REG(hw, E1000_TXCW, txcw);
5770
5771 /*
5772 * Write 0x410 to Serdes Control register
5773 * to enable Serdes analog loopback
5774 */
5775 E1000_WRITE_REG(hw, E1000_SCTL, 0x0410);
5776 msec_delay(10);
5777 }
5778
5779 status = E1000_READ_REG(hw, E1000_STATUS);
5780 /* Set ILOS on fiber nic if half duplex is detected */
5781 if ((hw->phy.media_type == e1000_media_type_fiber) &&
5782 ((status & E1000_STATUS_FD) == 0 ||
5783 (status & E1000_STATUS_LU) == 0))
5784 ctrl |= E1000_CTRL_ILOS | E1000_CTRL_SLU;
5785 else if (hw->phy.media_type == e1000_media_type_internal_serdes)
5786 ctrl |= E1000_CTRL_SLU;
5787 break;
5788
5789 case e1000_82573:
5790 ctrl |= E1000_CTRL_ILOS;
5791 break;
5792 case e1000_ich9lan:
5793 case e1000_ich10lan:
5794 ctrl |= E1000_CTRL_SLU;
5795 break;
5796 }
5797 if (hw->phy.type == e1000_phy_bm)
5798 ctrl |= E1000_CTRL_SLU | E1000_CTRL_ILOS;
5799
5800 E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
5801 }
5802
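/*
 * External loopback at 1000 Mb/s. Unlike the internal PHY loopback above,
 * this mode assumes the traffic is looped back outside the device
 * (typically with an external loopback plug); the code below forces the
 * link up and configures the PHY/MAC for 1000 Mb/s as appropriate for the
 * media type.
 */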
5803 static void
5804 e1000g_set_external_loopback_1000(struct e1000g *Adapter)
5805 {
5806 struct e1000_hw *hw;
5807 uint32_t rctl;
5808 uint32_t ctrl_ext;
5809 uint32_t ctrl;
5810 uint32_t status;
5811 uint32_t txcw;
5812 uint16_t phydata;
5813
5814 hw = &Adapter->shared;
5815
5816 /* Disable Smart Power Down */
5817 phy_spd_state(hw, B_FALSE);
5818
5819 switch (hw->mac.type) {
5820 case e1000_82571:
5821 case e1000_82572:
5822 switch (hw->phy.media_type) {
5823 case e1000_media_type_copper:
5824 /* Force link up (Must be done before the PHY writes) */
5825 ctrl = E1000_READ_REG(hw, E1000_CTRL);
5826 ctrl |= E1000_CTRL_SLU; /* Force Link Up */
5827 E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
5828
5829 rctl = E1000_READ_REG(hw, E1000_RCTL);
5830 rctl |= (E1000_RCTL_EN |
5831 E1000_RCTL_SBP |
5832 E1000_RCTL_UPE |
5833 E1000_RCTL_MPE |
5834 E1000_RCTL_LPE |
5835 E1000_RCTL_BAM); /* 0x803E */
5836 E1000_WRITE_REG(hw, E1000_RCTL, rctl);
5837
5838 ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
5839 ctrl_ext |= (E1000_CTRL_EXT_SDP4_DATA |
5840 E1000_CTRL_EXT_SDP6_DATA |
5841 E1000_CTRL_EXT_SDP3_DATA |
5842 E1000_CTRL_EXT_SDP4_DIR |
5843 E1000_CTRL_EXT_SDP6_DIR |
5844 E1000_CTRL_EXT_SDP3_DIR); /* 0x0DD0 */
5845 E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
5846
5847 			/*
5848 			 * This sequence tunes the PHY's SDP and contains no
5849 			 * customer-settable values. For background, see the
5850 			 * comments above e1000g_set_internal_loopback().
5851 			 */
5852 (void) e1000_write_phy_reg(hw, 0x0, 0x140);
5853 msec_delay(10);
5854 (void) e1000_write_phy_reg(hw, 0x9, 0x1A00);
5855 (void) e1000_write_phy_reg(hw, 0x12, 0xC10);
5856 (void) e1000_write_phy_reg(hw, 0x12, 0x1C10);
5857 (void) e1000_write_phy_reg(hw, 0x1F37, 0x76);
5858 (void) e1000_write_phy_reg(hw, 0x1F33, 0x1);
5859 (void) e1000_write_phy_reg(hw, 0x1F33, 0x0);
5860
5861 (void) e1000_write_phy_reg(hw, 0x1F35, 0x65);
5862 (void) e1000_write_phy_reg(hw, 0x1837, 0x3F7C);
5863 (void) e1000_write_phy_reg(hw, 0x1437, 0x3FDC);
5864 (void) e1000_write_phy_reg(hw, 0x1237, 0x3F7C);
5865 (void) e1000_write_phy_reg(hw, 0x1137, 0x3FDC);
5866
5867 msec_delay(50);
5868 break;
5869 case e1000_media_type_fiber:
5870 case e1000_media_type_internal_serdes:
5871 status = E1000_READ_REG(hw, E1000_STATUS);
5872 if (((status & E1000_STATUS_LU) == 0) ||
5873 (hw->phy.media_type ==
5874 e1000_media_type_internal_serdes)) {
5875 ctrl = E1000_READ_REG(hw, E1000_CTRL);
5876 ctrl |= E1000_CTRL_ILOS | E1000_CTRL_SLU;
5877 E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
5878 }
5879
5880 /* Disable autoneg by setting bit 31 of TXCW to zero */
5881 txcw = E1000_READ_REG(hw, E1000_TXCW);
5882 txcw &= ~((uint32_t)1 << 31);
5883 E1000_WRITE_REG(hw, E1000_TXCW, txcw);
5884
5885 /*
5886 * Write 0x410 to Serdes Control register
5887 * to enable Serdes analog loopback
5888 */
5889 E1000_WRITE_REG(hw, E1000_SCTL, 0x0410);
5890 msec_delay(10);
5891 break;
5892 default:
5893 break;
5894 }
5895 break;
5896 case e1000_82574:
5897 case e1000_80003es2lan:
5898 case e1000_ich9lan:
5899 case e1000_ich10lan:
5900 (void) e1000_read_phy_reg(hw, GG82563_REG(6, 16), &phydata);
5901 (void) e1000_write_phy_reg(hw, GG82563_REG(6, 16),
5902 phydata | (1 << 5));
5903 Adapter->param_adv_autoneg = 1;
5904 Adapter->param_adv_1000fdx = 1;
5905 (void) e1000g_reset_link(Adapter);
5906 break;
5907 }
5908 }
5909
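/*
 * External loopback at 100 Mb/s: force the PHY to 100 Mb/s full duplex
 * (resetting it first) and then force the MAC to the matching speed and
 * duplex.
 */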
5910 static void
5911 e1000g_set_external_loopback_100(struct e1000g *Adapter)
5912 {
5913 struct e1000_hw *hw;
5914 uint32_t ctrl;
5915 uint16_t phy_ctrl;
5916
5917 hw = &Adapter->shared;
5918
5919 /* Disable Smart Power Down */
5920 phy_spd_state(hw, B_FALSE);
5921
5922 phy_ctrl = (MII_CR_FULL_DUPLEX |
5923 MII_CR_SPEED_100);
5924
5925 /* Force 100/FD, reset PHY */
5926 (void) e1000_write_phy_reg(hw, PHY_CONTROL,
5927 phy_ctrl | MII_CR_RESET); /* 0xA100 */
5928 msec_delay(10);
5929
5930 /* Force 100/FD */
5931 (void) e1000_write_phy_reg(hw, PHY_CONTROL,
5932 phy_ctrl); /* 0x2100 */
5933 msec_delay(10);
5934
5935 	/* Now set up the MAC to the same speed/duplex as the PHY. */
5936 ctrl = E1000_READ_REG(hw, E1000_CTRL);
5937 ctrl &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */
5938 ctrl |= (E1000_CTRL_SLU | /* Force Link Up */
5939 E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */
5940 E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */
5941 E1000_CTRL_SPD_100 | /* Force Speed to 100 */
5942 E1000_CTRL_FD); /* Force Duplex to FULL */
5943
5944 E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
5945 }
5946
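/*
 * External loopback at 10 Mb/s: the same sequence as the 100 Mb/s case
 * above, but forcing 10 Mb/s full duplex.
 */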
5947 static void
5948 e1000g_set_external_loopback_10(struct e1000g *Adapter)
5949 {
5950 struct e1000_hw *hw;
5951 uint32_t ctrl;
5952 uint16_t phy_ctrl;
5953
5954 hw = &Adapter->shared;
5955
5956 /* Disable Smart Power Down */
5957 phy_spd_state(hw, B_FALSE);
5958
5959 phy_ctrl = (MII_CR_FULL_DUPLEX |
5960 MII_CR_SPEED_10);
5961
5962 /* Force 10/FD, reset PHY */
5963 (void) e1000_write_phy_reg(hw, PHY_CONTROL,
5964 phy_ctrl | MII_CR_RESET); /* 0x8100 */
5965 msec_delay(10);
5966
5967 /* Force 10/FD */
5968 (void) e1000_write_phy_reg(hw, PHY_CONTROL,
5969 phy_ctrl); /* 0x0100 */
5970 msec_delay(10);
5971
5972 	/* Now set up the MAC to the same speed/duplex as the PHY. */
5973 ctrl = E1000_READ_REG(hw, E1000_CTRL);
5974 ctrl &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */
5975 ctrl |= (E1000_CTRL_SLU | /* Force Link Up */
5976 E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */
5977 E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */
5978 E1000_CTRL_SPD_10 | /* Force Speed to 10 */
5979 E1000_CTRL_FD); /* Force Duplex to FULL */
5980
5981 E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
5982 }
5983
5984 #ifdef __sparc
5985 static boolean_t
5986 e1000g_find_mac_address(struct e1000g *Adapter)
5987 {
5988 struct e1000_hw *hw = &Adapter->shared;
5989 uchar_t *bytes;
5990 struct ether_addr sysaddr;
5991 uint_t nelts;
5992 int err;
5993 boolean_t found = B_FALSE;
5994
5995 /*
5996 * The "vendor's factory-set address" may already have
5997 * been extracted from the chip, but if the property
5998 * "local-mac-address" is set we use that instead.
5999 *
6000 * We check whether it looks like an array of 6
6001 * bytes (which it should, if OBP set it). If we can't
6002 * make sense of it this way, we'll ignore it.
6003 */
6004 err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, Adapter->dip,
6005 DDI_PROP_DONTPASS, "local-mac-address", &bytes, &nelts);
6006 if (err == DDI_PROP_SUCCESS) {
6007 if (nelts == ETHERADDRL) {
6008 while (nelts--)
6009 hw->mac.addr[nelts] = bytes[nelts];
6010 found = B_TRUE;
6011 }
6012 ddi_prop_free(bytes);
6013 }
6014
6015 /*
6016 * Look up the OBP property "local-mac-address?". If the user has set
6017 * 'local-mac-address? = false', use "the system address" instead.
6018 */
6019 if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, Adapter->dip, 0,
6020 "local-mac-address?", &bytes, &nelts) == DDI_PROP_SUCCESS) {
6021 if (strncmp("false", (caddr_t)bytes, (size_t)nelts) == 0) {
6022 if (localetheraddr(NULL, &sysaddr) != 0) {
6023 bcopy(&sysaddr, hw->mac.addr, ETHERADDRL);
6024 found = B_TRUE;
6025 }
6026 }
6027 ddi_prop_free(bytes);
6028 }
6029
6030 /*
6031 * Finally(!), if there's a valid "mac-address" property (created
6032 * if we netbooted from this interface), we must use this instead
6033 * of any of the above to ensure that the NFS/install server doesn't
6034 * get confused by the address changing as Solaris takes over!
6035 */
6036 err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, Adapter->dip,
6037 DDI_PROP_DONTPASS, "mac-address", &bytes, &nelts);
6038 if (err == DDI_PROP_SUCCESS) {
6039 if (nelts == ETHERADDRL) {
6040 while (nelts--)
6041 hw->mac.addr[nelts] = bytes[nelts];
6042 found = B_TRUE;
6043 }
6044 ddi_prop_free(bytes);
6045 }
6046
6047 if (found) {
6048 bcopy(hw->mac.addr, hw->mac.perm_addr,
6049 ETHERADDRL);
6050 }
6051
6052 return (found);
6053 }
6054 #endif
6055
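/*
 * e1000g_add_intrs - determine which interrupt types the device and the
 * DDI framework support and register either MSI (only attempted for
 * PCI-E class adapters, i.e. mac.type >= e1000_82571) or legacy FIXED
 * interrupts.
 */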
6056 static int
6057 e1000g_add_intrs(struct e1000g *Adapter)
6058 {
6059 dev_info_t *devinfo;
6060 int intr_types;
6061 int rc;
6062
6063 devinfo = Adapter->dip;
6064
6065 /* Get supported interrupt types */
6066 rc = ddi_intr_get_supported_types(devinfo, &intr_types);
6067
6068 if (rc != DDI_SUCCESS) {
6069 E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
6070 "Get supported interrupt types failed: %d\n", rc);
6071 return (DDI_FAILURE);
6072 }
6073
6074 	/*
6075 	 * Per Intel Technical Advisory TA-160, some older Intel PCI-X NICs
6076 	 * may "advertise" to the OS that they support MSI when in fact MSI
6077 	 * on those parts has problems.
6078 	 * Therefore we only enable MSI for PCI-E NICs and disable MSI for
6079 	 * the older PCI/PCI-X NICs.
6080 	 */
6081 if (Adapter->shared.mac.type < e1000_82571)
6082 Adapter->msi_enable = B_FALSE;
6083
6084 if ((intr_types & DDI_INTR_TYPE_MSI) && Adapter->msi_enable) {
6085 rc = e1000g_intr_add(Adapter, DDI_INTR_TYPE_MSI);
6086
6087 if (rc != DDI_SUCCESS) {
6088 /* EMPTY */
6089 E1000G_DEBUGLOG_0(Adapter, E1000G_WARN_LEVEL,
6090 "Add MSI failed, trying Legacy interrupts\n");
6091 } else {
6092 Adapter->intr_type = DDI_INTR_TYPE_MSI;
6093 }
6094 }
6095
6096 if ((Adapter->intr_type == 0) &&
6097 (intr_types & DDI_INTR_TYPE_FIXED)) {
6098 rc = e1000g_intr_add(Adapter, DDI_INTR_TYPE_FIXED);
6099
6100 if (rc != DDI_SUCCESS) {
6101 E1000G_DEBUGLOG_0(Adapter, E1000G_WARN_LEVEL,
6102 "Add Legacy interrupts failed\n");
6103 return (DDI_FAILURE);
6104 }
6105
6106 Adapter->intr_type = DDI_INTR_TYPE_FIXED;
6107 }
6108
6109 if (Adapter->intr_type == 0) {
6110 E1000G_DEBUGLOG_0(Adapter, E1000G_WARN_LEVEL,
6111 "No interrupts registered\n");
6112 return (DDI_FAILURE);
6113 }
6114
6115 return (DDI_SUCCESS);
6116 }
6117
6118 /*
6119  * e1000g_intr_add() allocates and registers MSI or legacy (FIXED) interrupts
6120  */
6121 static int
6122 e1000g_intr_add(struct e1000g *Adapter, int intr_type)
6123 {
6124 dev_info_t *devinfo;
6125 int count, avail, actual;
6126 int x, y, rc, inum = 0;
6127 int flag;
6128 ddi_intr_handler_t *intr_handler;
6129
6130 devinfo = Adapter->dip;
6131
6132 /* get number of interrupts */
6133 rc = ddi_intr_get_nintrs(devinfo, intr_type, &count);
6134 if ((rc != DDI_SUCCESS) || (count == 0)) {
6135 E1000G_DEBUGLOG_2(Adapter, E1000G_WARN_LEVEL,
6136 "Get interrupt number failed. Return: %d, count: %d\n",
6137 rc, count);
6138 return (DDI_FAILURE);
6139 }
6140
6141 /* get number of available interrupts */
6142 rc = ddi_intr_get_navail(devinfo, intr_type, &avail);
6143 if ((rc != DDI_SUCCESS) || (avail == 0)) {
6144 E1000G_DEBUGLOG_2(Adapter, E1000G_WARN_LEVEL,
6145 "Get interrupt available number failed. "
6146 "Return: %d, available: %d\n", rc, avail);
6147 return (DDI_FAILURE);
6148 }
6149
6150 if (avail < count) {
6151 /* EMPTY */
6152 E1000G_DEBUGLOG_2(Adapter, E1000G_WARN_LEVEL,
6153 "Interrupts count: %d, available: %d\n",
6154 count, avail);
6155 }
6156
6157 /* Allocate an array of interrupt handles */
6158 Adapter->intr_size = count * sizeof (ddi_intr_handle_t);
6159 Adapter->htable = kmem_alloc(Adapter->intr_size, KM_SLEEP);
6160
6161 /* Set NORMAL behavior for both MSI and FIXED interrupt */
6162 flag = DDI_INTR_ALLOC_NORMAL;
6163
6164 /* call ddi_intr_alloc() */
6165 rc = ddi_intr_alloc(devinfo, Adapter->htable, intr_type, inum,
6166 count, &actual, flag);
6167
6168 if ((rc != DDI_SUCCESS) || (actual == 0)) {
6169 E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
6170 "Allocate interrupts failed: %d\n", rc);
6171
6172 kmem_free(Adapter->htable, Adapter->intr_size);
6173 return (DDI_FAILURE);
6174 }
6175
6176 if (actual < count) {
6177 /* EMPTY */
6178 E1000G_DEBUGLOG_2(Adapter, E1000G_WARN_LEVEL,
6179 "Interrupts requested: %d, received: %d\n",
6180 count, actual);
6181 }
6182
6183 Adapter->intr_cnt = actual;
6184
6185 	/* Get the priority of the first vector; assume the rest are the same */
6186 rc = ddi_intr_get_pri(Adapter->htable[0], &Adapter->intr_pri);
6187
6188 if (rc != DDI_SUCCESS) {
6189 E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
6190 "Get interrupt priority failed: %d\n", rc);
6191
6192 /* Free already allocated intr */
6193 for (y = 0; y < actual; y++)
6194 (void) ddi_intr_free(Adapter->htable[y]);
6195
6196 kmem_free(Adapter->htable, Adapter->intr_size);
6197 return (DDI_FAILURE);
6198 }
6199
6200 	/*
6201 	 * In legacy interrupt mode, PCI-Express adapters should use the
6202 	 * interrupt service routine e1000g_intr_pciexpress() to avoid
6203 	 * interrupt stealing when sharing an interrupt line with other
6204 	 * devices.
6205 	 */
6206 if (Adapter->shared.mac.type < e1000_82571)
6207 intr_handler = (ddi_intr_handler_t *)e1000g_intr;
6208 else
6209 intr_handler = (ddi_intr_handler_t *)e1000g_intr_pciexpress;
6210
6211 /* Call ddi_intr_add_handler() */
6212 for (x = 0; x < actual; x++) {
6213 rc = ddi_intr_add_handler(Adapter->htable[x],
6214 intr_handler, (caddr_t)Adapter, NULL);
6215
6216 if (rc != DDI_SUCCESS) {
6217 E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
6218 "Add interrupt handler failed: %d\n", rc);
6219
6220 /* Remove already added handler */
6221 for (y = 0; y < x; y++)
6222 (void) ddi_intr_remove_handler(
6223 Adapter->htable[y]);
6224
6225 /* Free already allocated intr */
6226 for (y = 0; y < actual; y++)
6227 (void) ddi_intr_free(Adapter->htable[y]);
6228
6229 kmem_free(Adapter->htable, Adapter->intr_size);
6230 return (DDI_FAILURE);
6231 }
6232 }
6233
6234 rc = ddi_intr_get_cap(Adapter->htable[0], &Adapter->intr_cap);
6235
6236 if (rc != DDI_SUCCESS) {
6237 E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
6238 "Get interrupt cap failed: %d\n", rc);
6239
6240 /* Free already allocated intr */
6241 for (y = 0; y < actual; y++) {
6242 (void) ddi_intr_remove_handler(Adapter->htable[y]);
6243 (void) ddi_intr_free(Adapter->htable[y]);
6244 }
6245
6246 kmem_free(Adapter->htable, Adapter->intr_size);
6247 return (DDI_FAILURE);
6248 }
6249
6250 return (DDI_SUCCESS);
6251 }
6252
6253 static int
6254 e1000g_rem_intrs(struct e1000g *Adapter)
6255 {
6256 int x;
6257 int rc;
6258
6259 for (x = 0; x < Adapter->intr_cnt; x++) {
6260 rc = ddi_intr_remove_handler(Adapter->htable[x]);
6261 if (rc != DDI_SUCCESS) {
6262 E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
6263 "Remove intr handler failed: %d\n", rc);
6264 return (DDI_FAILURE);
6265 }
6266
6267 rc = ddi_intr_free(Adapter->htable[x]);
6268 if (rc != DDI_SUCCESS) {
6269 E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
6270 "Free intr failed: %d\n", rc);
6271 return (DDI_FAILURE);
6272 }
6273 }
6274
6275 kmem_free(Adapter->htable, Adapter->intr_size);
6276
6277 return (DDI_SUCCESS);
6278 }
6279
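/*
 * e1000g_enable_intrs - enable the allocated interrupt vectors, using a
 * block enable when the DDI_INTR_FLAG_BLOCK capability is present and
 * per-vector ddi_intr_enable() calls otherwise.
 */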
6280 static int
6281 e1000g_enable_intrs(struct e1000g *Adapter)
6282 {
6283 int x;
6284 int rc;
6285
6286 /* Enable interrupts */
6287 if (Adapter->intr_cap & DDI_INTR_FLAG_BLOCK) {
6288 /* Call ddi_intr_block_enable() for MSI */
6289 rc = ddi_intr_block_enable(Adapter->htable,
6290 Adapter->intr_cnt);
6291 if (rc != DDI_SUCCESS) {
6292 E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
6293 "Enable block intr failed: %d\n", rc);
6294 return (DDI_FAILURE);
6295 }
6296 } else {
6297 /* Call ddi_intr_enable() for Legacy/MSI non block enable */
6298 for (x = 0; x < Adapter->intr_cnt; x++) {
6299 rc = ddi_intr_enable(Adapter->htable[x]);
6300 if (rc != DDI_SUCCESS) {
6301 E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
6302 "Enable intr failed: %d\n", rc);
6303 return (DDI_FAILURE);
6304 }
6305 }
6306 }
6307
6308 return (DDI_SUCCESS);
6309 }
6310
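/*
 * e1000g_disable_intrs - the mirror of e1000g_enable_intrs(): block
 * disable when DDI_INTR_FLAG_BLOCK is set, otherwise disable each vector
 * in turn.
 */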
6311 static int
6312 e1000g_disable_intrs(struct e1000g *Adapter)
6313 {
6314 int x;
6315 int rc;
6316
6317 /* Disable all interrupts */
6318 if (Adapter->intr_cap & DDI_INTR_FLAG_BLOCK) {
6319 rc = ddi_intr_block_disable(Adapter->htable,
6320 Adapter->intr_cnt);
6321 if (rc != DDI_SUCCESS) {
6322 E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
6323 "Disable block intr failed: %d\n", rc);
6324 return (DDI_FAILURE);
6325 }
6326 } else {
6327 for (x = 0; x < Adapter->intr_cnt; x++) {
6328 rc = ddi_intr_disable(Adapter->htable[x]);
6329 if (rc != DDI_SUCCESS) {
6330 E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
6331 "Disable intr failed: %d\n", rc);
6332 return (DDI_FAILURE);
6333 }
6334 }
6335 }
6336
6337 return (DDI_SUCCESS);
6338 }
6339
6340 /*
6341  * e1000g_get_phy_state - read the PHY registers, save the state in the adapter
6342 */
6343 static void
6344 e1000g_get_phy_state(struct e1000g *Adapter)
6345 {
6346 struct e1000_hw *hw = &Adapter->shared;
6347
6348 if (hw->phy.media_type == e1000_media_type_copper) {
6349 (void) e1000_read_phy_reg(hw, PHY_CONTROL, &Adapter->phy_ctrl);
6350 (void) e1000_read_phy_reg(hw, PHY_STATUS, &Adapter->phy_status);
6351 (void) e1000_read_phy_reg(hw, PHY_AUTONEG_ADV,
6352 &Adapter->phy_an_adv);
6353 (void) e1000_read_phy_reg(hw, PHY_AUTONEG_EXP,
6354 &Adapter->phy_an_exp);
6355 (void) e1000_read_phy_reg(hw, PHY_EXT_STATUS,
6356 &Adapter->phy_ext_status);
6357 (void) e1000_read_phy_reg(hw, PHY_1000T_CTRL,
6358 &Adapter->phy_1000t_ctrl);
6359 (void) e1000_read_phy_reg(hw, PHY_1000T_STATUS,
6360 &Adapter->phy_1000t_status);
6361 (void) e1000_read_phy_reg(hw, PHY_LP_ABILITY,
6362 &Adapter->phy_lp_able);
6363
6364 Adapter->param_autoneg_cap =
6365 (Adapter->phy_status & MII_SR_AUTONEG_CAPS) ? 1 : 0;
6366 Adapter->param_pause_cap =
6367 (Adapter->phy_an_adv & NWAY_AR_PAUSE) ? 1 : 0;
6368 Adapter->param_asym_pause_cap =
6369 (Adapter->phy_an_adv & NWAY_AR_ASM_DIR) ? 1 : 0;
6370 Adapter->param_1000fdx_cap =
6371 ((Adapter->phy_ext_status & IEEE_ESR_1000T_FD_CAPS) ||
6372 (Adapter->phy_ext_status & IEEE_ESR_1000X_FD_CAPS)) ? 1 : 0;
6373 Adapter->param_1000hdx_cap =
6374 ((Adapter->phy_ext_status & IEEE_ESR_1000T_HD_CAPS) ||
6375 (Adapter->phy_ext_status & IEEE_ESR_1000X_HD_CAPS)) ? 1 : 0;
6376 Adapter->param_100t4_cap =
6377 (Adapter->phy_status & MII_SR_100T4_CAPS) ? 1 : 0;
6378 Adapter->param_100fdx_cap =
6379 ((Adapter->phy_status & MII_SR_100X_FD_CAPS) ||
6380 (Adapter->phy_status & MII_SR_100T2_FD_CAPS)) ? 1 : 0;
6381 Adapter->param_100hdx_cap =
6382 ((Adapter->phy_status & MII_SR_100X_HD_CAPS) ||
6383 (Adapter->phy_status & MII_SR_100T2_HD_CAPS)) ? 1 : 0;
6384 Adapter->param_10fdx_cap =
6385 (Adapter->phy_status & MII_SR_10T_FD_CAPS) ? 1 : 0;
6386 Adapter->param_10hdx_cap =
6387 (Adapter->phy_status & MII_SR_10T_HD_CAPS) ? 1 : 0;
6388
6389 Adapter->param_adv_autoneg = hw->mac.autoneg;
6390 Adapter->param_adv_pause =
6391 (Adapter->phy_an_adv & NWAY_AR_PAUSE) ? 1 : 0;
6392 Adapter->param_adv_asym_pause =
6393 (Adapter->phy_an_adv & NWAY_AR_ASM_DIR) ? 1 : 0;
6394 Adapter->param_adv_1000hdx =
6395 (Adapter->phy_1000t_ctrl & CR_1000T_HD_CAPS) ? 1 : 0;
6396 Adapter->param_adv_100t4 =
6397 (Adapter->phy_an_adv & NWAY_AR_100T4_CAPS) ? 1 : 0;
6398 if (Adapter->param_adv_autoneg == 1) {
6399 Adapter->param_adv_1000fdx =
6400 (Adapter->phy_1000t_ctrl & CR_1000T_FD_CAPS)
6401 ? 1 : 0;
6402 Adapter->param_adv_100fdx =
6403 (Adapter->phy_an_adv & NWAY_AR_100TX_FD_CAPS)
6404 ? 1 : 0;
6405 Adapter->param_adv_100hdx =
6406 (Adapter->phy_an_adv & NWAY_AR_100TX_HD_CAPS)
6407 ? 1 : 0;
6408 Adapter->param_adv_10fdx =
6409 (Adapter->phy_an_adv & NWAY_AR_10T_FD_CAPS) ? 1 : 0;
6410 Adapter->param_adv_10hdx =
6411 (Adapter->phy_an_adv & NWAY_AR_10T_HD_CAPS) ? 1 : 0;
6412 }
6413
6414 Adapter->param_lp_autoneg =
6415 (Adapter->phy_an_exp & NWAY_ER_LP_NWAY_CAPS) ? 1 : 0;
6416 Adapter->param_lp_pause =
6417 (Adapter->phy_lp_able & NWAY_LPAR_PAUSE) ? 1 : 0;
6418 Adapter->param_lp_asym_pause =
6419 (Adapter->phy_lp_able & NWAY_LPAR_ASM_DIR) ? 1 : 0;
6420 Adapter->param_lp_1000fdx =
6421 (Adapter->phy_1000t_status & SR_1000T_LP_FD_CAPS) ? 1 : 0;
6422 Adapter->param_lp_1000hdx =
6423 (Adapter->phy_1000t_status & SR_1000T_LP_HD_CAPS) ? 1 : 0;
6424 Adapter->param_lp_100t4 =
6425 (Adapter->phy_lp_able & NWAY_LPAR_100T4_CAPS) ? 1 : 0;
6426 Adapter->param_lp_100fdx =
6427 (Adapter->phy_lp_able & NWAY_LPAR_100TX_FD_CAPS) ? 1 : 0;
6428 Adapter->param_lp_100hdx =
6429 (Adapter->phy_lp_able & NWAY_LPAR_100TX_HD_CAPS) ? 1 : 0;
6430 Adapter->param_lp_10fdx =
6431 (Adapter->phy_lp_able & NWAY_LPAR_10T_FD_CAPS) ? 1 : 0;
6432 Adapter->param_lp_10hdx =
6433 (Adapter->phy_lp_able & NWAY_LPAR_10T_HD_CAPS) ? 1 : 0;
6434 } else {
6435 		/*
6436 		 * A 1 Gig fiber adapter only offers 1 Gig full duplex, which
6437 		 * means it can only work with a 1 Gig full-duplex link partner.
6438 		 */
6439 Adapter->param_autoneg_cap = 0;
6440 Adapter->param_pause_cap = 1;
6441 Adapter->param_asym_pause_cap = 1;
6442 Adapter->param_1000fdx_cap = 1;
6443 Adapter->param_1000hdx_cap = 0;
6444 Adapter->param_100t4_cap = 0;
6445 Adapter->param_100fdx_cap = 0;
6446 Adapter->param_100hdx_cap = 0;
6447 Adapter->param_10fdx_cap = 0;
6448 Adapter->param_10hdx_cap = 0;
6449
6450 Adapter->param_adv_autoneg = 0;
6451 Adapter->param_adv_pause = 1;
6452 Adapter->param_adv_asym_pause = 1;
6453 Adapter->param_adv_1000fdx = 1;
6454 Adapter->param_adv_1000hdx = 0;
6455 Adapter->param_adv_100t4 = 0;
6456 Adapter->param_adv_100fdx = 0;
6457 Adapter->param_adv_100hdx = 0;
6458 Adapter->param_adv_10fdx = 0;
6459 Adapter->param_adv_10hdx = 0;
6460
6461 Adapter->param_lp_autoneg = 0;
6462 Adapter->param_lp_pause = 0;
6463 Adapter->param_lp_asym_pause = 0;
6464 Adapter->param_lp_1000fdx = 0;
6465 Adapter->param_lp_1000hdx = 0;
6466 Adapter->param_lp_100t4 = 0;
6467 Adapter->param_lp_100fdx = 0;
6468 Adapter->param_lp_100hdx = 0;
6469 Adapter->param_lp_10fdx = 0;
6470 Adapter->param_lp_10hdx = 0;
6471 }
6472 }
6473
6474 /*
6475 * FMA support
6476 */
6477
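/*
 * e1000g_check_acc_handle() and e1000g_check_dma_handle() retrieve the
 * fault management error status of a register access or DMA handle so
 * callers can decide whether an I/O operation can be trusted. Note that
 * only the access-handle variant clears the error state after reading it.
 */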
6478 int
6479 e1000g_check_acc_handle(ddi_acc_handle_t handle)
6480 {
6481 ddi_fm_error_t de;
6482
6483 ddi_fm_acc_err_get(handle, &de, DDI_FME_VERSION);
6484 ddi_fm_acc_err_clear(handle, DDI_FME_VERSION);
6485 return (de.fme_status);
6486 }
6487
6488 int
6489 e1000g_check_dma_handle(ddi_dma_handle_t handle)
6490 {
6491 ddi_fm_error_t de;
6492
6493 ddi_fm_dma_err_get(handle, &de, DDI_FME_VERSION);
6494 return (de.fme_status);
6495 }
6496
6497 /*
6498 * The IO fault service error handling callback function
6499 */
6500 /* ARGSUSED2 */
6501 static int
6502 e1000g_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data)
6503 {
6504 	/*
6505 	 * As the driver can always deal with an error in any DMA or
6506 	 * access handle, we can just return the fme_status value.
6507 	 */
6508 pci_ereport_post(dip, err, NULL);
6509 return (err->fme_status);
6510 }
6511
6512 static void
6513 e1000g_fm_init(struct e1000g *Adapter)
6514 {
6515 ddi_iblock_cookie_t iblk;
6516 int fma_dma_flag;
6517
6518 /* Only register with IO Fault Services if we have some capability */
6519 if (Adapter->fm_capabilities & DDI_FM_ACCCHK_CAPABLE) {
6520 e1000g_regs_acc_attr.devacc_attr_access = DDI_FLAGERR_ACC;
6521 } else {
6522 e1000g_regs_acc_attr.devacc_attr_access = DDI_DEFAULT_ACC;
6523 }
6524
6525 if (Adapter->fm_capabilities & DDI_FM_DMACHK_CAPABLE) {
6526 fma_dma_flag = 1;
6527 } else {
6528 fma_dma_flag = 0;
6529 }
6530
6531 (void) e1000g_set_fma_flags(fma_dma_flag);
6532
6533 if (Adapter->fm_capabilities) {
6534
6535 /* Register capabilities with IO Fault Services */
6536 ddi_fm_init(Adapter->dip, &Adapter->fm_capabilities, &iblk);
6537
6538 /*
6539 * Initialize pci ereport capabilities if ereport capable
6540 */
6541 if (DDI_FM_EREPORT_CAP(Adapter->fm_capabilities) ||
6542 DDI_FM_ERRCB_CAP(Adapter->fm_capabilities))
6543 pci_ereport_setup(Adapter->dip);
6544
6545 /*
6546 * Register error callback if error callback capable
6547 */
6548 if (DDI_FM_ERRCB_CAP(Adapter->fm_capabilities))
6549 ddi_fm_handler_register(Adapter->dip,
6550 e1000g_fm_error_cb, (void*) Adapter);
6551 }
6552 }
6553
6554 static void
6555 e1000g_fm_fini(struct e1000g *Adapter)
6556 {
6557 /* Only unregister FMA capabilities if we registered some */
6558 if (Adapter->fm_capabilities) {
6559
6560 /*
6561 * Release any resources allocated by pci_ereport_setup()
6562 */
6563 if (DDI_FM_EREPORT_CAP(Adapter->fm_capabilities) ||
6564 DDI_FM_ERRCB_CAP(Adapter->fm_capabilities))
6565 pci_ereport_teardown(Adapter->dip);
6566
6567 /*
6568 * Un-register error callback if error callback capable
6569 */
6570 if (DDI_FM_ERRCB_CAP(Adapter->fm_capabilities))
6571 ddi_fm_handler_unregister(Adapter->dip);
6572
6573 /* Unregister from IO Fault Services */
6574 mutex_enter(&e1000g_rx_detach_lock);
6575 ddi_fm_fini(Adapter->dip);
6576 if (Adapter->priv_dip != NULL) {
6577 DEVI(Adapter->priv_dip)->devi_fmhdl = NULL;
6578 }
6579 mutex_exit(&e1000g_rx_detach_lock);
6580 }
6581 }
6582
6583 void
6584 e1000g_fm_ereport(struct e1000g *Adapter, char *detail)
6585 {
6586 uint64_t ena;
6587 char buf[FM_MAX_CLASS];
6588
6589 (void) snprintf(buf, FM_MAX_CLASS, "%s.%s", DDI_FM_DEVICE, detail);
6590 ena = fm_ena_generate(0, FM_ENA_FMT1);
6591 if (DDI_FM_EREPORT_CAP(Adapter->fm_capabilities)) {
6592 ddi_fm_ereport_post(Adapter->dip, buf, ena, DDI_NOSLEEP,
6593 FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0, NULL);
6594 }
6595 }
6596
6597 /*
6598 * quiesce(9E) entry point.
6599 *
6600  * This function is called when the system is single-threaded at high
6601  * PIL with preemption disabled. Therefore, this function must not
6602  * block.
6603 *
6604 * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
6605 * DDI_FAILURE indicates an error condition and should almost never happen.
6606 */
6607 static int
6608 e1000g_quiesce(dev_info_t *devinfo)
6609 {
6610 struct e1000g *Adapter;
6611
6612 Adapter = (struct e1000g *)ddi_get_driver_private(devinfo);
6613
6614 if (Adapter == NULL)
6615 return (DDI_FAILURE);
6616
6617 e1000g_clear_all_interrupts(Adapter);
6618
6619 (void) e1000_reset_hw(&Adapter->shared);
6620
6621 /* Setup our HW Tx Head & Tail descriptor pointers */
6622 E1000_WRITE_REG(&Adapter->shared, E1000_TDH(0), 0);
6623 E1000_WRITE_REG(&Adapter->shared, E1000_TDT(0), 0);
6624
6625 /* Setup our HW Rx Head & Tail descriptor pointers */
6626 E1000_WRITE_REG(&Adapter->shared, E1000_RDH(0), 0);
6627 E1000_WRITE_REG(&Adapter->shared, E1000_RDT(0), 0);
6628
6629 return (DDI_SUCCESS);
6630 }
6631
6632 /*
6633  * Synchronize the adv* and en* parameters.
6634  *
6635  * See the comments in <sys/dld.h> for details of the *_en_*
6636  * parameters. Using ndd to set the adv parameters synchronizes
6637  * all the en parameters with the e1000g parameters, implicitly
6638  * disabling any settings made via dladm.
6639  */
6640 static void
6641 e1000g_param_sync(struct e1000g *Adapter)
6642 {
6643 Adapter->param_en_1000fdx = Adapter->param_adv_1000fdx;
6644 Adapter->param_en_1000hdx = Adapter->param_adv_1000hdx;
6645 Adapter->param_en_100fdx = Adapter->param_adv_100fdx;
6646 Adapter->param_en_100hdx = Adapter->param_adv_100hdx;
6647 Adapter->param_en_10fdx = Adapter->param_adv_10fdx;
6648 Adapter->param_en_10hdx = Adapter->param_adv_10hdx;
6649 }
6650
6651 /*
6652 * e1000g_get_driver_control - tell manageability firmware that the driver
6653 * has control.
6654 */
6655 static void
6656 e1000g_get_driver_control(struct e1000_hw *hw)
6657 {
6658 uint32_t ctrl_ext;
6659 uint32_t swsm;
6660
6661 /* tell manageability firmware the driver has taken over */
6662 switch (hw->mac.type) {
6663 case e1000_82573:
6664 swsm = E1000_READ_REG(hw, E1000_SWSM);
6665 E1000_WRITE_REG(hw, E1000_SWSM, swsm | E1000_SWSM_DRV_LOAD);
6666 break;
6667 case e1000_82571:
6668 case e1000_82572:
6669 case e1000_82574:
6670 case e1000_80003es2lan:
6671 case e1000_ich8lan:
6672 case e1000_ich9lan:
6673 case e1000_ich10lan:
6674 case e1000_pchlan:
6675 case e1000_pch2lan:
6676 ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
6677 E1000_WRITE_REG(hw, E1000_CTRL_EXT,
6678 ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
6679 break;
6680 default:
6681 /* no manageability firmware: do nothing */
6682 break;
6683 }
6684 }
6685
6686 /*
6687 * e1000g_release_driver_control - tell manageability firmware that the driver
6688 * has released control.
6689 */
6690 static void
6691 e1000g_release_driver_control(struct e1000_hw *hw)
6692 {
6693 uint32_t ctrl_ext;
6694 uint32_t swsm;
6695
6696 /* tell manageability firmware the driver has released control */
6697 switch (hw->mac.type) {
6698 case e1000_82573:
6699 swsm = E1000_READ_REG(hw, E1000_SWSM);
6700 E1000_WRITE_REG(hw, E1000_SWSM, swsm & ~E1000_SWSM_DRV_LOAD);
6701 break;
6702 case e1000_82571:
6703 case e1000_82572:
6704 case e1000_82574:
6705 case e1000_80003es2lan:
6706 case e1000_ich8lan:
6707 case e1000_ich9lan:
6708 case e1000_ich10lan:
6709 case e1000_pchlan:
6710 case e1000_pch2lan:
6711 ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
6712 E1000_WRITE_REG(hw, E1000_CTRL_EXT,
6713 ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
6714 break;
6715 default:
6716 /* no manageability firmware: do nothing */
6717 break;
6718 }
6719 }
6720
6721 /*
6722 * Restore e1000g promiscuous mode.
6723 */
6724 static void
6725 e1000g_restore_promisc(struct e1000g *Adapter)
6726 {
6727 if (Adapter->e1000g_promisc) {
6728 uint32_t rctl;
6729
6730 rctl = E1000_READ_REG(&Adapter->shared, E1000_RCTL);
6731 rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_BAM);
6732 E1000_WRITE_REG(&Adapter->shared, E1000_RCTL, rctl);
6733 }
6734 }
6735