1 /*
2 * This file is provided under a CDDLv1 license. When using or
3 * redistributing this file, you may do so under this license.
4 * In redistributing this file this license must be included
5 * and no other modification of this header file is permitted.
6 *
7 * CDDL LICENSE SUMMARY
8 *
9 * Copyright(c) 1999 - 2009 Intel Corporation. All rights reserved.
10 *
11 * The contents of this file are subject to the terms of Version
12 * 1.0 of the Common Development and Distribution License (the "License").
13 *
14 * You should have received a copy of the License with this software.
15 * You can obtain a copy of the License at
16 * http://www.opensolaris.org/os/licensing.
17 * See the License for the specific language governing permissions
18 * and limitations under the License.
19 */
20
21 /*
22 * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
23 */
24
25 /*
26 * Copyright 2012 DEY Storage Systems, Inc. All rights reserved.
27 * Copyright 2013 Nexenta Systems, Inc. All rights reserved.
28 * Copyright (c) 2015, Joyent, Inc.
29 */
30
31 /*
32 * **********************************************************************
33 * *
34 * Module Name: *
35 * e1000g_main.c *
36 * *
37 * Abstract: *
38 * This file contains the interface routines for the Solaris OS. *
39 * It has all DDI entry point routines and GLD entry point routines. *
40 * *
41 * This file also contains routines that take care of initialization, *
42 * uninitialization and interrupt handling. *
43 * *
44 * **********************************************************************
45 */
46
47 #include <sys/dlpi.h>
48 #include <sys/mac.h>
49 #include "e1000g_sw.h"
50 #include "e1000g_debug.h"
51
52 static char ident[] = "Intel PRO/1000 Ethernet";
53 /* LINTED E_STATIC_UNUSED */
54 static char e1000g_version[] = "Driver Ver. 5.3.24";
55
56 /*
57 * Prototypes for DDI entry points
58 */
59 static int e1000g_attach(dev_info_t *, ddi_attach_cmd_t);
60 static int e1000g_detach(dev_info_t *, ddi_detach_cmd_t);
61 static int e1000g_quiesce(dev_info_t *);
62
63 /*
64 * Prototypes for init and interrupt routines
65 */
66 static int e1000g_resume(dev_info_t *);
67 static int e1000g_suspend(dev_info_t *);
68 static uint_t e1000g_intr_pciexpress(caddr_t);
69 static uint_t e1000g_intr(caddr_t);
70 static void e1000g_intr_work(struct e1000g *, uint32_t);
71 #pragma inline(e1000g_intr_work)
72 static int e1000g_init(struct e1000g *);
73 static int e1000g_start(struct e1000g *, boolean_t);
74 static void e1000g_stop(struct e1000g *, boolean_t);
75 static int e1000g_m_start(void *);
76 static void e1000g_m_stop(void *);
77 static int e1000g_m_promisc(void *, boolean_t);
78 static boolean_t e1000g_m_getcapab(void *, mac_capab_t, void *);
79 static int e1000g_m_multicst(void *, boolean_t, const uint8_t *);
80 static void e1000g_m_ioctl(void *, queue_t *, mblk_t *);
81 static int e1000g_m_setprop(void *, const char *, mac_prop_id_t,
82 uint_t, const void *);
83 static int e1000g_m_getprop(void *, const char *, mac_prop_id_t,
84 uint_t, void *);
85 static void e1000g_m_propinfo(void *, const char *, mac_prop_id_t,
86 mac_prop_info_handle_t);
87 static int e1000g_set_priv_prop(struct e1000g *, const char *, uint_t,
88 const void *);
89 static int e1000g_get_priv_prop(struct e1000g *, const char *, uint_t, void *);
90 static void e1000g_init_locks(struct e1000g *);
91 static void e1000g_destroy_locks(struct e1000g *);
92 static int e1000g_identify_hardware(struct e1000g *);
93 static int e1000g_regs_map(struct e1000g *);
94 static int e1000g_set_driver_params(struct e1000g *);
95 static void e1000g_set_bufsize(struct e1000g *);
96 static int e1000g_register_mac(struct e1000g *);
97 static boolean_t e1000g_rx_drain(struct e1000g *);
98 static boolean_t e1000g_tx_drain(struct e1000g *);
99 static void e1000g_init_unicst(struct e1000g *);
100 static int e1000g_unicst_set(struct e1000g *, const uint8_t *, int);
101 static int e1000g_alloc_rx_data(struct e1000g *);
102 static void e1000g_release_multicast(struct e1000g *);
103 static void e1000g_pch_limits(struct e1000g *);
104 static uint32_t e1000g_mtu2maxframe(uint32_t);
105
106 /*
107 * Local routines
108 */
109 static boolean_t e1000g_reset_adapter(struct e1000g *);
110 static void e1000g_tx_clean(struct e1000g *);
111 static void e1000g_rx_clean(struct e1000g *);
112 static void e1000g_link_timer(void *);
113 static void e1000g_local_timer(void *);
114 static boolean_t e1000g_link_check(struct e1000g *);
115 static boolean_t e1000g_stall_check(struct e1000g *);
116 static void e1000g_smartspeed(struct e1000g *);
117 static void e1000g_get_conf(struct e1000g *);
118 static boolean_t e1000g_get_prop(struct e1000g *, char *, int, int, int,
119 int *);
120 static void enable_watchdog_timer(struct e1000g *);
121 static void disable_watchdog_timer(struct e1000g *);
122 static void start_watchdog_timer(struct e1000g *);
123 static void restart_watchdog_timer(struct e1000g *);
124 static void stop_watchdog_timer(struct e1000g *);
125 static void stop_link_timer(struct e1000g *);
126 static void stop_82547_timer(e1000g_tx_ring_t *);
127 static void e1000g_force_speed_duplex(struct e1000g *);
128 static void e1000g_setup_max_mtu(struct e1000g *);
129 static void e1000g_get_max_frame_size(struct e1000g *);
130 static boolean_t is_valid_mac_addr(uint8_t *);
131 static void e1000g_unattach(dev_info_t *, struct e1000g *);
132 static int e1000g_get_bar_info(dev_info_t *, int, bar_info_t *);
133 #ifdef E1000G_DEBUG
134 static void e1000g_ioc_peek_reg(struct e1000g *, e1000g_peekpoke_t *);
135 static void e1000g_ioc_poke_reg(struct e1000g *, e1000g_peekpoke_t *);
136 static void e1000g_ioc_peek_mem(struct e1000g *, e1000g_peekpoke_t *);
137 static void e1000g_ioc_poke_mem(struct e1000g *, e1000g_peekpoke_t *);
138 static enum ioc_reply e1000g_pp_ioctl(struct e1000g *,
139 struct iocblk *, mblk_t *);
140 #endif
141 static enum ioc_reply e1000g_loopback_ioctl(struct e1000g *,
142 struct iocblk *, mblk_t *);
143 static boolean_t e1000g_check_loopback_support(struct e1000_hw *);
144 static boolean_t e1000g_set_loopback_mode(struct e1000g *, uint32_t);
145 static void e1000g_set_internal_loopback(struct e1000g *);
146 static void e1000g_set_external_loopback_1000(struct e1000g *);
147 static void e1000g_set_external_loopback_100(struct e1000g *);
148 static void e1000g_set_external_loopback_10(struct e1000g *);
149 static int e1000g_add_intrs(struct e1000g *);
150 static int e1000g_intr_add(struct e1000g *, int);
151 static int e1000g_rem_intrs(struct e1000g *);
152 static int e1000g_enable_intrs(struct e1000g *);
153 static int e1000g_disable_intrs(struct e1000g *);
154 static boolean_t e1000g_link_up(struct e1000g *);
155 #ifdef __sparc
156 static boolean_t e1000g_find_mac_address(struct e1000g *);
157 #endif
158 static void e1000g_get_phy_state(struct e1000g *);
159 static int e1000g_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err,
160 const void *impl_data);
161 static void e1000g_fm_init(struct e1000g *Adapter);
162 static void e1000g_fm_fini(struct e1000g *Adapter);
163 static void e1000g_param_sync(struct e1000g *);
164 static void e1000g_get_driver_control(struct e1000_hw *);
165 static void e1000g_release_driver_control(struct e1000_hw *);
166 static void e1000g_restore_promisc(struct e1000g *Adapter);
167
168 char *e1000g_priv_props[] = {
169 "_tx_bcopy_threshold",
170 "_tx_interrupt_enable",
171 "_tx_intr_delay",
172 "_tx_intr_abs_delay",
173 "_rx_bcopy_threshold",
174 "_max_num_rcv_packets",
175 "_rx_intr_delay",
176 "_rx_intr_abs_delay",
177 "_intr_throttling_rate",
178 "_intr_adaptive",
179 "_adv_pause_cap",
180 "_adv_asym_pause_cap",
181 NULL
182 };
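/*
 * Illustrative note (not part of the original source): these private
 * properties are exported through the MAC framework with a leading
 * underscore, so on a live system they can typically be inspected and
 * tuned with dladm(1M), e.g.
 *
 *	# dladm show-linkprop -p _intr_throttling_rate e1000g0
 *	# dladm set-linkprop -p _intr_throttling_rate=200 e1000g0
 *
 * The link name "e1000g0" and the value 200 are examples only.
 */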
183
184 static struct cb_ops cb_ws_ops = {
185 nulldev, /* cb_open */
186 nulldev, /* cb_close */
187 nodev, /* cb_strategy */
188 nodev, /* cb_print */
189 nodev, /* cb_dump */
190 nodev, /* cb_read */
191 nodev, /* cb_write */
192 nodev, /* cb_ioctl */
193 nodev, /* cb_devmap */
194 nodev, /* cb_mmap */
195 nodev, /* cb_segmap */
196 nochpoll, /* cb_chpoll */
197 ddi_prop_op, /* cb_prop_op */
198 NULL, /* cb_stream */
199 D_MP | D_HOTPLUG, /* cb_flag */
200 CB_REV, /* cb_rev */
201 nodev, /* cb_aread */
202 nodev /* cb_awrite */
203 };
204
205 static struct dev_ops ws_ops = {
206 DEVO_REV, /* devo_rev */
207 0, /* devo_refcnt */
208 NULL, /* devo_getinfo */
209 nulldev, /* devo_identify */
210 nulldev, /* devo_probe */
211 e1000g_attach, /* devo_attach */
212 e1000g_detach, /* devo_detach */
213 nodev, /* devo_reset */
214 &cb_ws_ops, /* devo_cb_ops */
215 NULL, /* devo_bus_ops */
216 ddi_power, /* devo_power */
217 e1000g_quiesce /* devo_quiesce */
218 };
219
220 static struct modldrv modldrv = {
221 &mod_driverops, /* Type of module. This one is a driver */
222 ident, /* Description string */
223 &ws_ops, /* driver ops */
224 };
225
226 static struct modlinkage modlinkage = {
227 MODREV_1, &modldrv, NULL
228 };
229
230 /* Access attributes for register mapping */
231 static ddi_device_acc_attr_t e1000g_regs_acc_attr = {
232 DDI_DEVICE_ATTR_V1,
233 DDI_STRUCTURE_LE_ACC,
234 DDI_STRICTORDER_ACC,
235 DDI_FLAGERR_ACC
236 };
237
238 #define E1000G_M_CALLBACK_FLAGS \
239 (MC_IOCTL | MC_GETCAPAB | MC_SETPROP | MC_GETPROP | MC_PROPINFO)
240
241 static mac_callbacks_t e1000g_m_callbacks = {
242 E1000G_M_CALLBACK_FLAGS,
243 e1000g_m_stat,
244 e1000g_m_start,
245 e1000g_m_stop,
246 e1000g_m_promisc,
247 e1000g_m_multicst,
248 NULL,
249 e1000g_m_tx,
250 NULL,
251 e1000g_m_ioctl,
252 e1000g_m_getcapab,
253 NULL,
254 NULL,
255 e1000g_m_setprop,
256 e1000g_m_getprop,
257 e1000g_m_propinfo
258 };
259
260 /*
261 * Global variables
262 */
263 uint32_t e1000g_jumbo_mtu = MAXIMUM_MTU_9K;
264 uint32_t e1000g_mblks_pending = 0;
265 /*
266 * Workaround for Dynamic Reconfiguration support, for x86 platform only.
267 * Here we maintain a private dev_info list if e1000g_force_detach is
268 * enabled. If we force the driver to detach while there are still some
269 * rx buffers retained in the upper layer, we have to keep a copy of the
270 * dev_info. In some cases (Dynamic Reconfiguration), the dev_info data
271 * structure will be freed after the driver is detached. However, when we
272 * finally free those rx buffers released by the upper layer, we need to
273 * refer to the dev_info to free the dma buffers. So we save a copy of
274 * the dev_info for this purpose. On x86 platform, we assume this copy
275 * of dev_info is always valid, but on SPARC platform, it could be invalid
276 * after the system board level DR operation. For this reason, the global
277 * variable e1000g_force_detach must be B_FALSE on SPARC platform.
278 */
279 #ifdef __sparc
280 boolean_t e1000g_force_detach = B_FALSE;
281 #else
282 boolean_t e1000g_force_detach = B_TRUE;
283 #endif
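/*
 * Illustrative sketch (assumption, not from the original source): as a
 * global driver variable, e1000g_force_detach can typically be overridden
 * at boot time from /etc/system on x86, e.g.
 *
 *	set e1000g:e1000g_force_detach = 0
 *
 * to disable the forced-detach workaround if desired.
 */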
284 private_devi_list_t *e1000g_private_devi_list = NULL;
285
286 /*
287 * The mutex e1000g_rx_detach_lock is defined to protect the processing of
288 * the private dev_info list, and to serialize the processing of rx buffer
289 * freeing and rx buffer recycling.
290 */
291 kmutex_t e1000g_rx_detach_lock;
292 /*
293 * The rwlock e1000g_dma_type_lock is defined to protect the global flag
294 * e1000g_dma_type. For SPARC, the initial value of the flag is "USE_DVMA".
295 * If there are many e1000g instances, the system may run out of DVMA
296 * resources during the initialization of the instances, then the flag will
297 * be changed to "USE_DMA". Because different e1000g instances are initialized
298 * in parallel, we need to use this lock to protect the flag.
299 */
300 krwlock_t e1000g_dma_type_lock;
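/*
 * Minimal usage sketch (illustrative only): readers of the flag take the
 * lock as RW_READER, while the code path that falls back from DVMA to DMA
 * takes it as RW_WRITER before updating the flag, e.g.
 *
 *	rw_enter(&e1000g_dma_type_lock, RW_READER);
 *	dma_type = e1000g_dma_type;
 *	rw_exit(&e1000g_dma_type_lock);
 *
 * The local variable name "dma_type" is hypothetical.
 */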
301
302 /*
303 * The 82546 chipset is a dual-port device and both ports share one EEPROM.
304 * Based on the information from Intel, the 82546 chipset has a hardware
305 * problem: when one port is being reset and the other port is trying to
306 * access the EEPROM, it can cause a system hang or panic. To work around this
307 * hardware problem, we use a global mutex to prevent such operations from
308 * happening simultaneously on different instances. This workaround is applied
309 * to all the devices supported by this driver.
310 */
311 kmutex_t e1000g_nvm_lock;
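/*
 * Usage sketch: the same pattern used throughout this file (see
 * e1000g_init() and e1000g_unattach() below) wraps every NVM/EEPROM
 * related shared-code call with this mutex, e.g.
 *
 *	mutex_enter(&e1000g_nvm_lock);
 *	result = e1000_reset_hw(hw);
 *	mutex_exit(&e1000g_nvm_lock);
 */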
312
313 /*
314 * Loadable module configuration entry points for the driver
315 */
316
317 /*
318 * _init - module initialization
319 */
320 int
321 _init(void)
322 {
323 int status;
324
325 mac_init_ops(&ws_ops, WSNAME);
326 status = mod_install(&modlinkage);
327 if (status != DDI_SUCCESS)
328 mac_fini_ops(&ws_ops);
329 else {
330 mutex_init(&e1000g_rx_detach_lock, NULL, MUTEX_DRIVER, NULL);
331 rw_init(&e1000g_dma_type_lock, NULL, RW_DRIVER, NULL);
332 mutex_init(&e1000g_nvm_lock, NULL, MUTEX_DRIVER, NULL);
333 }
334
335 return (status);
336 }
337
338 /*
339 * _fini - module finalization
340 */
341 int
342 _fini(void)
343 {
344 int status;
345
346 if (e1000g_mblks_pending != 0)
347 return (EBUSY);
348
349 status = mod_remove(&modlinkage);
350 if (status == DDI_SUCCESS) {
351 mac_fini_ops(&ws_ops);
352
353 if (e1000g_force_detach) {
354 private_devi_list_t *devi_node;
355
356 mutex_enter(&e1000g_rx_detach_lock);
357 while (e1000g_private_devi_list != NULL) {
358 devi_node = e1000g_private_devi_list;
359 e1000g_private_devi_list =
360 e1000g_private_devi_list->next;
361
362 kmem_free(devi_node->priv_dip,
363 sizeof (struct dev_info));
364 kmem_free(devi_node,
365 sizeof (private_devi_list_t));
366 }
367 mutex_exit(&e1000g_rx_detach_lock);
368 }
369
370 mutex_destroy(&e1000g_rx_detach_lock);
371 rw_destroy(&e1000g_dma_type_lock);
372 mutex_destroy(&e1000g_nvm_lock);
373 }
374
375 return (status);
376 }
377
378 /*
379 * _info - module information
380 */
381 int
382 _info(struct modinfo *modinfop)
383 {
384 return (mod_info(&modlinkage, modinfop));
385 }
386
387 /*
388 * e1000g_attach - driver attach
389 *
390 * This function is the device-specific initialization entry
391 * point. This entry point is required and must be written.
392 * The DDI_ATTACH command must be provided in the attach entry
393 * point. When attach() is called with cmd set to DDI_ATTACH,
394 * all normal kernel services (such as kmem_alloc(9F)) are
395 * available for use by the driver.
396 *
397 * The attach() function will be called once for each instance
398 * of the device on the system with cmd set to DDI_ATTACH.
399 * Until attach() succeeds, the only driver entry points which
400 * may be called are open(9E) and getinfo(9E).
401 */
402 static int
403 e1000g_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd)
404 {
405 struct e1000g *Adapter;
406 struct e1000_hw *hw;
407 struct e1000g_osdep *osdep;
408 int instance;
409
410 switch (cmd) {
411 default:
412 e1000g_log(NULL, CE_WARN,
413 "Unsupported command send to e1000g_attach... ");
414 return (DDI_FAILURE);
415
416 case DDI_RESUME:
417 return (e1000g_resume(devinfo));
418
419 case DDI_ATTACH:
420 break;
421 }
422
423 /*
424 * get device instance number
425 */
426 instance = ddi_get_instance(devinfo);
427
428 /*
429 * Allocate soft data structure
430 */
431 Adapter =
432 (struct e1000g *)kmem_zalloc(sizeof (*Adapter), KM_SLEEP);
433
434 Adapter->dip = devinfo;
435 Adapter->instance = instance;
436 Adapter->tx_ring->adapter = Adapter;
437 Adapter->rx_ring->adapter = Adapter;
438
439 hw = &Adapter->shared;
440 osdep = &Adapter->osdep;
441 hw->back = osdep;
442 osdep->adapter = Adapter;
443
444 ddi_set_driver_private(devinfo, (caddr_t)Adapter);
445
446 /*
447 * Initialize for fma support
448 */
449 (void) e1000g_get_prop(Adapter, "fm-capable",
450 0, 0x0f,
451 DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE |
452 DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE,
453 &Adapter->fm_capabilities);
454 e1000g_fm_init(Adapter);
455 Adapter->attach_progress |= ATTACH_PROGRESS_FMINIT;
456
457 /*
458 * PCI Configure
459 */
460 if (pci_config_setup(devinfo, &osdep->cfg_handle) != DDI_SUCCESS) {
461 e1000g_log(Adapter, CE_WARN, "PCI configuration failed");
462 goto attach_fail;
463 }
464 Adapter->attach_progress |= ATTACH_PROGRESS_PCI_CONFIG;
465
466 /*
467 * Setup hardware
468 */
469 if (e1000g_identify_hardware(Adapter) != DDI_SUCCESS) {
470 e1000g_log(Adapter, CE_WARN, "Identify hardware failed");
471 goto attach_fail;
472 }
473
474 /*
475 * Map in the device registers.
476 */
477 if (e1000g_regs_map(Adapter) != DDI_SUCCESS) {
478 e1000g_log(Adapter, CE_WARN, "Mapping registers failed");
479 goto attach_fail;
480 }
481 Adapter->attach_progress |= ATTACH_PROGRESS_REGS_MAP;
482
483 /*
484 * Initialize driver parameters
485 */
486 if (e1000g_set_driver_params(Adapter) != DDI_SUCCESS) {
487 goto attach_fail;
488 }
489 Adapter->attach_progress |= ATTACH_PROGRESS_SETUP;
490
491 if (e1000g_check_acc_handle(Adapter->osdep.cfg_handle) != DDI_FM_OK) {
492 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_LOST);
493 goto attach_fail;
494 }
495
496 /*
497 * Disable ULP support
498 */
499 (void) e1000_disable_ulp_lpt_lp(hw, TRUE);
500
501 /*
502 * Initialize interrupts
503 */
504 if (e1000g_add_intrs(Adapter) != DDI_SUCCESS) {
505 e1000g_log(Adapter, CE_WARN, "Add interrupts failed");
506 goto attach_fail;
507 }
508 Adapter->attach_progress |= ATTACH_PROGRESS_ADD_INTR;
509
510 /*
511 * Initialize mutex's for this device.
512 * Do this before enabling the interrupt handler and
513 * registering the softint to avoid the condition where the
514 * interrupt handler can try using an uninitialized mutex.
515 */
516 e1000g_init_locks(Adapter);
517 Adapter->attach_progress |= ATTACH_PROGRESS_LOCKS;
518
519 /*
520 * Initialize Driver Counters
521 */
522 if (e1000g_init_stats(Adapter) != DDI_SUCCESS) {
523 e1000g_log(Adapter, CE_WARN, "Init stats failed");
524 goto attach_fail;
525 }
526 Adapter->attach_progress |= ATTACH_PROGRESS_KSTATS;
527
528 /*
529 * Initialize chip hardware and software structures
530 */
531 rw_enter(&Adapter->chip_lock, RW_WRITER);
532 if (e1000g_init(Adapter) != DDI_SUCCESS) {
533 rw_exit(&Adapter->chip_lock);
534 e1000g_log(Adapter, CE_WARN, "Adapter initialization failed");
535 goto attach_fail;
536 }
537 rw_exit(&Adapter->chip_lock);
538 Adapter->attach_progress |= ATTACH_PROGRESS_INIT;
539
540 /*
541 * Register the driver to the MAC
542 */
543 if (e1000g_register_mac(Adapter) != DDI_SUCCESS) {
544 e1000g_log(Adapter, CE_WARN, "Register MAC failed");
545 goto attach_fail;
546 }
547 Adapter->attach_progress |= ATTACH_PROGRESS_MAC;
548
549 /*
550 * Now that mutex locks are initialized, and the chip is also
551 * initialized, enable interrupts.
552 */
553 if (e1000g_enable_intrs(Adapter) != DDI_SUCCESS) {
554 e1000g_log(Adapter, CE_WARN, "Enable DDI interrupts failed");
555 goto attach_fail;
556 }
557 Adapter->attach_progress |= ATTACH_PROGRESS_ENABLE_INTR;
558
559 /*
560 * If e1000g_force_detach is enabled, we create a new entry in the
561 * global private dip list, which maintains the priv_dip for DR
562 * support after the driver is detached.
563 */
564 if (e1000g_force_detach) {
565 private_devi_list_t *devi_node;
566
567 Adapter->priv_dip =
568 kmem_zalloc(sizeof (struct dev_info), KM_SLEEP);
569 bcopy(DEVI(devinfo), DEVI(Adapter->priv_dip),
570 sizeof (struct dev_info));
571
572 devi_node =
573 kmem_zalloc(sizeof (private_devi_list_t), KM_SLEEP);
574
575 mutex_enter(&e1000g_rx_detach_lock);
576 devi_node->priv_dip = Adapter->priv_dip;
577 devi_node->flag = E1000G_PRIV_DEVI_ATTACH;
578 devi_node->pending_rx_count = 0;
579
580 Adapter->priv_devi_node = devi_node;
581
582 if (e1000g_private_devi_list == NULL) {
583 devi_node->prev = NULL;
584 devi_node->next = NULL;
585 e1000g_private_devi_list = devi_node;
586 } else {
587 devi_node->prev = NULL;
588 devi_node->next = e1000g_private_devi_list;
589 e1000g_private_devi_list->prev = devi_node;
590 e1000g_private_devi_list = devi_node;
591 }
592 mutex_exit(&e1000g_rx_detach_lock);
593 }
594
595 Adapter->e1000g_state = E1000G_INITIALIZED;
596 return (DDI_SUCCESS);
597
598 attach_fail:
599 e1000g_unattach(devinfo, Adapter);
600 return (DDI_FAILURE);
601 }
602
603 static int
604 e1000g_register_mac(struct e1000g *Adapter)
605 {
606 struct e1000_hw *hw = &Adapter->shared;
607 mac_register_t *mac;
608 int err;
609
610 if ((mac = mac_alloc(MAC_VERSION)) == NULL)
611 return (DDI_FAILURE);
612
613 mac->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
614 mac->m_driver = Adapter;
615 mac->m_dip = Adapter->dip;
616 mac->m_src_addr = hw->mac.addr;
617 mac->m_callbacks = &e1000g_m_callbacks;
618 mac->m_min_sdu = 0;
619 mac->m_max_sdu = Adapter->default_mtu;
620 mac->m_margin = VLAN_TAGSZ;
621 mac->m_priv_props = e1000g_priv_props;
622 mac->m_v12n = MAC_VIRT_LEVEL1;
623
624 err = mac_register(mac, &Adapter->mh);
625 mac_free(mac);
626
627 return (err == 0 ? DDI_SUCCESS : DDI_FAILURE);
628 }
629
630 static int
631 e1000g_identify_hardware(struct e1000g *Adapter)
632 {
633 struct e1000_hw *hw = &Adapter->shared;
634 struct e1000g_osdep *osdep = &Adapter->osdep;
635
636 /* Get the device id */
637 hw->vendor_id =
638 pci_config_get16(osdep->cfg_handle, PCI_CONF_VENID);
639 hw->device_id =
640 pci_config_get16(osdep->cfg_handle, PCI_CONF_DEVID);
641 hw->revision_id =
642 pci_config_get8(osdep->cfg_handle, PCI_CONF_REVID);
643 hw->subsystem_device_id =
644 pci_config_get16(osdep->cfg_handle, PCI_CONF_SUBSYSID);
645 hw->subsystem_vendor_id =
646 pci_config_get16(osdep->cfg_handle, PCI_CONF_SUBVENID);
647
648 if (e1000_set_mac_type(hw) != E1000_SUCCESS) {
649 E1000G_DEBUGLOG_0(Adapter, E1000G_INFO_LEVEL,
650 "MAC type could not be set properly.");
651 return (DDI_FAILURE);
652 }
653
654 return (DDI_SUCCESS);
655 }
656
657 static int
658 e1000g_regs_map(struct e1000g *Adapter)
659 {
660 dev_info_t *devinfo = Adapter->dip;
661 struct e1000_hw *hw = &Adapter->shared;
662 struct e1000g_osdep *osdep = &Adapter->osdep;
663 off_t mem_size;
664 bar_info_t bar_info;
665 int offset, rnumber;
666
667 rnumber = ADAPTER_REG_SET;
668 /* Get size of adapter register memory */
669 if (ddi_dev_regsize(devinfo, rnumber, &mem_size) !=
670 DDI_SUCCESS) {
671 E1000G_DEBUGLOG_0(Adapter, CE_WARN,
672 "ddi_dev_regsize for registers failed");
673 return (DDI_FAILURE);
674 }
675
676 /* Map adapter register memory */
677 if ((ddi_regs_map_setup(devinfo, rnumber,
678 (caddr_t *)&hw->hw_addr, 0, mem_size, &e1000g_regs_acc_attr,
679 &osdep->reg_handle)) != DDI_SUCCESS) {
680 E1000G_DEBUGLOG_0(Adapter, CE_WARN,
681 "ddi_regs_map_setup for registers failed");
682 goto regs_map_fail;
683 }
684
685 /* ICH needs to map flash memory */
686 switch (hw->mac.type) {
687 case e1000_ich8lan:
688 case e1000_ich9lan:
689 case e1000_ich10lan:
690 case e1000_pchlan:
691 case e1000_pch2lan:
692 case e1000_pch_lpt:
693 rnumber = ICH_FLASH_REG_SET;
694
695 /* get flash size */
696 if (ddi_dev_regsize(devinfo, rnumber,
697 &mem_size) != DDI_SUCCESS) {
698 E1000G_DEBUGLOG_0(Adapter, CE_WARN,
699 "ddi_dev_regsize for ICH flash failed");
700 goto regs_map_fail;
701 }
702
703 /* map flash in */
704 if (ddi_regs_map_setup(devinfo, rnumber,
705 (caddr_t *)&hw->flash_address, 0,
706 mem_size, &e1000g_regs_acc_attr,
707 &osdep->ich_flash_handle) != DDI_SUCCESS) {
708 E1000G_DEBUGLOG_0(Adapter, CE_WARN,
709 "ddi_regs_map_setup for ICH flash failed");
710 goto regs_map_fail;
711 }
712 break;
713 default:
714 break;
715 }
716
717 /* map io space */
718 switch (hw->mac.type) {
719 case e1000_82544:
720 case e1000_82540:
721 case e1000_82545:
722 case e1000_82546:
723 case e1000_82541:
724 case e1000_82541_rev_2:
725 /* find the IO bar */
726 rnumber = -1;
727 for (offset = PCI_CONF_BASE1;
728 offset <= PCI_CONF_BASE5; offset += 4) {
729 if (e1000g_get_bar_info(devinfo, offset, &bar_info)
730 != DDI_SUCCESS)
731 continue;
732 if (bar_info.type == E1000G_BAR_IO) {
733 rnumber = bar_info.rnumber;
734 break;
735 }
736 }
737
738 if (rnumber < 0) {
739 E1000G_DEBUGLOG_0(Adapter, CE_WARN,
740 "No io space is found");
741 goto regs_map_fail;
742 }
743
744 /* get io space size */
745 if (ddi_dev_regsize(devinfo, rnumber,
746 &mem_size) != DDI_SUCCESS) {
747 E1000G_DEBUGLOG_0(Adapter, CE_WARN,
748 "ddi_dev_regsize for io space failed");
749 goto regs_map_fail;
750 }
751
752 /* map io space */
753 if ((ddi_regs_map_setup(devinfo, rnumber,
754 (caddr_t *)&hw->io_base, 0, mem_size,
755 &e1000g_regs_acc_attr,
756 &osdep->io_reg_handle)) != DDI_SUCCESS) {
757 E1000G_DEBUGLOG_0(Adapter, CE_WARN,
758 "ddi_regs_map_setup for io space failed");
759 goto regs_map_fail;
760 }
761 break;
762 default:
763 hw->io_base = 0;
764 break;
765 }
766
767 return (DDI_SUCCESS);
768
769 regs_map_fail:
770 if (osdep->reg_handle != NULL)
771 ddi_regs_map_free(&osdep->reg_handle);
772 if (osdep->ich_flash_handle != NULL)
773 ddi_regs_map_free(&osdep->ich_flash_handle);
774 return (DDI_FAILURE);
775 }
776
777 static int
778 e1000g_set_driver_params(struct e1000g *Adapter)
779 {
780 struct e1000_hw *hw;
781
782 hw = &Adapter->shared;
783
784 /* Set MAC type and initialize hardware functions */
785 if (e1000_setup_init_funcs(hw, B_TRUE) != E1000_SUCCESS) {
786 E1000G_DEBUGLOG_0(Adapter, CE_WARN,
787 "Could not setup hardware functions");
788 return (DDI_FAILURE);
789 }
790
791 /* Get bus information */
792 if (e1000_get_bus_info(hw) != E1000_SUCCESS) {
793 E1000G_DEBUGLOG_0(Adapter, CE_WARN,
794 "Could not get bus information");
795 return (DDI_FAILURE);
796 }
797
798 e1000_read_pci_cfg(hw, PCI_COMMAND_REGISTER, &hw->bus.pci_cmd_word);
799
800 hw->mac.autoneg_failed = B_TRUE;
801
802 /* Set the autoneg_wait_to_complete flag to B_FALSE */
803 hw->phy.autoneg_wait_to_complete = B_FALSE;
804
805 /* Adaptive IFS related changes */
806 hw->mac.adaptive_ifs = B_TRUE;
807
808 /* Enable phy init script for IGP phy of 82541/82547 */
809 if ((hw->mac.type == e1000_82547) ||
810 (hw->mac.type == e1000_82541) ||
811 (hw->mac.type == e1000_82547_rev_2) ||
812 (hw->mac.type == e1000_82541_rev_2))
813 e1000_init_script_state_82541(hw, B_TRUE);
814
815 /* Enable the TTL workaround for 82541/82547 */
816 e1000_set_ttl_workaround_state_82541(hw, B_TRUE);
817
818 #ifdef __sparc
819 Adapter->strip_crc = B_TRUE;
820 #else
821 Adapter->strip_crc = B_FALSE;
822 #endif
823
824 /* setup the maximum MTU size of the chip */
825 e1000g_setup_max_mtu(Adapter);
826
827 /* Get speed/duplex settings in conf file */
828 hw->mac.forced_speed_duplex = ADVERTISE_100_FULL;
829 hw->phy.autoneg_advertised = AUTONEG_ADVERTISE_SPEED_DEFAULT;
830 e1000g_force_speed_duplex(Adapter);
831
832 /* Get Jumbo Frames settings in conf file */
833 e1000g_get_max_frame_size(Adapter);
834
835 /* Get conf file properties */
836 e1000g_get_conf(Adapter);
837
838 /* enforce PCH limits */
839 e1000g_pch_limits(Adapter);
840
841 /* Set Rx/Tx buffer size */
842 e1000g_set_bufsize(Adapter);
843
844 /* Master Latency Timer */
845 Adapter->master_latency_timer = DEFAULT_MASTER_LATENCY_TIMER;
846
847 /* copper options */
848 if (hw->phy.media_type == e1000_media_type_copper) {
849 hw->phy.mdix = 0; /* AUTO_ALL_MODES */
850 hw->phy.disable_polarity_correction = B_FALSE;
851 hw->phy.ms_type = e1000_ms_hw_default; /* E1000_MASTER_SLAVE */
852 }
853
854 /* The initial link state should be "unknown" */
855 Adapter->link_state = LINK_STATE_UNKNOWN;
856
857 /* Initialize rx parameters */
858 Adapter->rx_intr_delay = DEFAULT_RX_INTR_DELAY;
859 Adapter->rx_intr_abs_delay = DEFAULT_RX_INTR_ABS_DELAY;
860
861 /* Initialize tx parameters */
862 Adapter->tx_intr_enable = DEFAULT_TX_INTR_ENABLE;
863 Adapter->tx_bcopy_thresh = DEFAULT_TX_BCOPY_THRESHOLD;
864 Adapter->tx_intr_delay = DEFAULT_TX_INTR_DELAY;
865 Adapter->tx_intr_abs_delay = DEFAULT_TX_INTR_ABS_DELAY;
866
867 /* Initialize rx parameters */
868 Adapter->rx_bcopy_thresh = DEFAULT_RX_BCOPY_THRESHOLD;
869
870 return (DDI_SUCCESS);
871 }
872
873 static void
874 e1000g_setup_max_mtu(struct e1000g *Adapter)
875 {
876 struct e1000_mac_info *mac = &Adapter->shared.mac;
877 struct e1000_phy_info *phy = &Adapter->shared.phy;
878
879 switch (mac->type) {
880 /* types that do not support jumbo frames */
881 case e1000_ich8lan:
882 case e1000_82573:
883 case e1000_82583:
884 Adapter->max_mtu = ETHERMTU;
885 break;
886 /* ich9 supports jumbo frames except on one phy type */
887 case e1000_ich9lan:
888 if (phy->type == e1000_phy_ife)
889 Adapter->max_mtu = ETHERMTU;
890 else
891 Adapter->max_mtu = MAXIMUM_MTU_9K;
892 break;
893 /* pch can do jumbo frames up to 4K */
894 case e1000_pchlan:
895 Adapter->max_mtu = MAXIMUM_MTU_4K;
896 break;
897 /* pch2 can do jumbo frames up to 9K */
898 case e1000_pch2lan:
899 case e1000_pch_lpt:
900 Adapter->max_mtu = MAXIMUM_MTU_9K;
901 break;
902 /* types with a special limit */
903 case e1000_82571:
904 case e1000_82572:
905 case e1000_82574:
906 case e1000_80003es2lan:
907 case e1000_ich10lan:
908 if (e1000g_jumbo_mtu >= ETHERMTU &&
909 e1000g_jumbo_mtu <= MAXIMUM_MTU_9K) {
910 Adapter->max_mtu = e1000g_jumbo_mtu;
911 } else {
912 Adapter->max_mtu = MAXIMUM_MTU_9K;
913 }
914 break;
915 /* default limit is 16K */
916 default:
917 Adapter->max_mtu = FRAME_SIZE_UPTO_16K -
918 sizeof (struct ether_vlan_header) - ETHERFCSL;
919 break;
920 }
921 }
922
923 static void
924 e1000g_set_bufsize(struct e1000g *Adapter)
925 {
926 struct e1000_mac_info *mac = &Adapter->shared.mac;
927 uint64_t rx_size;
928 uint64_t tx_size;
929
930 dev_info_t *devinfo = Adapter->dip;
931 #ifdef __sparc
932 ulong_t iommu_pagesize;
933 #endif
934 /* Get the system page size */
935 Adapter->sys_page_sz = ddi_ptob(devinfo, (ulong_t)1);
936
937 #ifdef __sparc
938 iommu_pagesize = dvma_pagesize(devinfo);
939 if (iommu_pagesize != 0) {
940 if (Adapter->sys_page_sz == iommu_pagesize) {
941 if (iommu_pagesize > 0x4000)
942 Adapter->sys_page_sz = 0x4000;
943 } else {
944 if (Adapter->sys_page_sz > iommu_pagesize)
945 Adapter->sys_page_sz = iommu_pagesize;
946 }
947 }
948 if (Adapter->lso_enable) {
949 Adapter->dvma_page_num = E1000_LSO_MAXLEN /
950 Adapter->sys_page_sz + E1000G_DEFAULT_DVMA_PAGE_NUM;
951 } else {
952 Adapter->dvma_page_num = Adapter->max_frame_size /
953 Adapter->sys_page_sz + E1000G_DEFAULT_DVMA_PAGE_NUM;
954 }
955 ASSERT(Adapter->dvma_page_num >= E1000G_DEFAULT_DVMA_PAGE_NUM);
956 #endif
957
958 Adapter->min_frame_size = ETHERMIN + ETHERFCSL;
959
960 if (Adapter->mem_workaround_82546 &&
961 ((mac->type == e1000_82545) ||
962 (mac->type == e1000_82546) ||
963 (mac->type == e1000_82546_rev_3))) {
964 Adapter->rx_buffer_size = E1000_RX_BUFFER_SIZE_2K;
965 } else {
966 rx_size = Adapter->max_frame_size;
967 if ((rx_size > FRAME_SIZE_UPTO_2K) &&
968 (rx_size <= FRAME_SIZE_UPTO_4K))
969 Adapter->rx_buffer_size = E1000_RX_BUFFER_SIZE_4K;
970 else if ((rx_size > FRAME_SIZE_UPTO_4K) &&
971 (rx_size <= FRAME_SIZE_UPTO_8K))
972 Adapter->rx_buffer_size = E1000_RX_BUFFER_SIZE_8K;
973 else if ((rx_size > FRAME_SIZE_UPTO_8K) &&
974 (rx_size <= FRAME_SIZE_UPTO_16K))
975 Adapter->rx_buffer_size = E1000_RX_BUFFER_SIZE_16K;
976 else
977 Adapter->rx_buffer_size = E1000_RX_BUFFER_SIZE_2K;
978 }
979 Adapter->rx_buffer_size += E1000G_IPALIGNROOM;
980
981 tx_size = Adapter->max_frame_size;
982 if ((tx_size > FRAME_SIZE_UPTO_2K) && (tx_size <= FRAME_SIZE_UPTO_4K))
983 Adapter->tx_buffer_size = E1000_TX_BUFFER_SIZE_4K;
984 else if ((tx_size > FRAME_SIZE_UPTO_4K) &&
985 (tx_size <= FRAME_SIZE_UPTO_8K))
986 Adapter->tx_buffer_size = E1000_TX_BUFFER_SIZE_8K;
987 else if ((tx_size > FRAME_SIZE_UPTO_8K) &&
988 (tx_size <= FRAME_SIZE_UPTO_16K))
989 Adapter->tx_buffer_size = E1000_TX_BUFFER_SIZE_16K;
990 else
991 Adapter->tx_buffer_size = E1000_TX_BUFFER_SIZE_2K;
992
993 /*
994 * For Wiseman adapters we have a requirement of having receive
995 * buffers aligned on a 256 byte boundary. Since Livengood does not
996 * require this and forcing it for all hardware would have
997 * performance implications, it is made applicable only for
998 * Wiseman and for Jumbo-frames-enabled mode; the rest of the time
999 * it is okay to have normal frames... but it does involve a
1000 * potential risk where we may lose data if the buffer is not
1001 * aligned... so all Wiseman boards have 256 byte aligned
1002 * buffers.
1003 */
1004 if (mac->type < e1000_82543)
1005 Adapter->rx_buf_align = RECEIVE_BUFFER_ALIGN_SIZE;
1006 else
1007 Adapter->rx_buf_align = 1;
1008 }
1009
1010 /*
1011 * e1000g_detach - driver detach
1012 *
1013 * The detach() function is the complement of the attach routine.
1014 * If cmd is set to DDI_DETACH, detach() is used to remove the
1015 * state associated with a given instance of a device node
1016 * prior to the removal of that instance from the system.
1017 *
1018 * The detach() function will be called once for each instance
1019 * of the device for which there has been a successful attach()
1020 * once there are no longer any opens on the device.
1021 *
1022 * Interrupt routines are disabled, and all memory allocated by this
1023 * driver is freed.
1024 */
1025 static int
1026 e1000g_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd)
1027 {
1028 struct e1000g *Adapter;
1029 boolean_t rx_drain;
1030
1031 switch (cmd) {
1032 default:
1033 return (DDI_FAILURE);
1034
1035 case DDI_SUSPEND:
1036 return (e1000g_suspend(devinfo));
1037
1038 case DDI_DETACH:
1039 break;
1040 }
1041
1042 Adapter = (struct e1000g *)ddi_get_driver_private(devinfo);
1043 if (Adapter == NULL)
1044 return (DDI_FAILURE);
1045
1046 rx_drain = e1000g_rx_drain(Adapter);
1047 if (!rx_drain && !e1000g_force_detach)
1048 return (DDI_FAILURE);
1049
1050 if (mac_unregister(Adapter->mh) != 0) {
1051 e1000g_log(Adapter, CE_WARN, "Unregister MAC failed");
1052 return (DDI_FAILURE);
1053 }
1054 Adapter->attach_progress &= ~ATTACH_PROGRESS_MAC;
1055
1056 ASSERT(!(Adapter->e1000g_state & E1000G_STARTED));
1057
1058 if (!e1000g_force_detach && !rx_drain)
1059 return (DDI_FAILURE);
1060
1061 e1000g_unattach(devinfo, Adapter);
1062
1063 return (DDI_SUCCESS);
1064 }
1065
1066 /*
1067 * e1000g_free_priv_devi_node - free a priv_dip entry for driver instance
1068 */
1069 void
1070 e1000g_free_priv_devi_node(private_devi_list_t *devi_node)
1071 {
1072 ASSERT(e1000g_private_devi_list != NULL);
1073 ASSERT(devi_node != NULL);
1074
1075 if (devi_node->prev != NULL)
1076 devi_node->prev->next = devi_node->next;
1077 if (devi_node->next != NULL)
1078 devi_node->next->prev = devi_node->prev;
1079 if (devi_node == e1000g_private_devi_list)
1080 e1000g_private_devi_list = devi_node->next;
1081
1082 kmem_free(devi_node->priv_dip,
1083 sizeof (struct dev_info));
1084 kmem_free(devi_node,
1085 sizeof (private_devi_list_t));
1086 }
1087
1088 static void
1089 e1000g_unattach(dev_info_t *devinfo, struct e1000g *Adapter)
1090 {
1091 private_devi_list_t *devi_node;
1092 int result;
1093
1094 if (Adapter->attach_progress & ATTACH_PROGRESS_ENABLE_INTR) {
1095 (void) e1000g_disable_intrs(Adapter);
1096 }
1097
1098 if (Adapter->attach_progress & ATTACH_PROGRESS_MAC) {
1099 (void) mac_unregister(Adapter->mh);
1100 }
1101
1102 if (Adapter->attach_progress & ATTACH_PROGRESS_ADD_INTR) {
1103 (void) e1000g_rem_intrs(Adapter);
1104 }
1105
1106 if (Adapter->attach_progress & ATTACH_PROGRESS_SETUP) {
1107 (void) ddi_prop_remove_all(devinfo);
1108 }
1109
1110 if (Adapter->attach_progress & ATTACH_PROGRESS_KSTATS) {
1111 kstat_delete((kstat_t *)Adapter->e1000g_ksp);
1112 }
1113
1114 if (Adapter->attach_progress & ATTACH_PROGRESS_INIT) {
1115 stop_link_timer(Adapter);
1116
1117 mutex_enter(&e1000g_nvm_lock);
1118 result = e1000_reset_hw(&Adapter->shared);
1119 mutex_exit(&e1000g_nvm_lock);
1120
1121 if (result != E1000_SUCCESS) {
1122 e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_INVAL_STATE);
1123 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_LOST);
1124 }
1125 }
1126
1127 e1000g_release_multicast(Adapter);
1128
1129 if (Adapter->attach_progress & ATTACH_PROGRESS_REGS_MAP) {
1130 if (Adapter->osdep.reg_handle != NULL)
1131 ddi_regs_map_free(&Adapter->osdep.reg_handle);
1132 if (Adapter->osdep.ich_flash_handle != NULL)
1133 ddi_regs_map_free(&Adapter->osdep.ich_flash_handle);
1134 if (Adapter->osdep.io_reg_handle != NULL)
1135 ddi_regs_map_free(&Adapter->osdep.io_reg_handle);
1136 }
1137
1138 if (Adapter->attach_progress & ATTACH_PROGRESS_PCI_CONFIG) {
1139 if (Adapter->osdep.cfg_handle != NULL)
1140 pci_config_teardown(&Adapter->osdep.cfg_handle);
1141 }
1142
1143 if (Adapter->attach_progress & ATTACH_PROGRESS_LOCKS) {
1144 e1000g_destroy_locks(Adapter);
1145 }
1146
1147 if (Adapter->attach_progress & ATTACH_PROGRESS_FMINIT) {
1148 e1000g_fm_fini(Adapter);
1149 }
1150
1151 mutex_enter(&e1000g_rx_detach_lock);
1152 if (e1000g_force_detach && (Adapter->priv_devi_node != NULL)) {
1153 devi_node = Adapter->priv_devi_node;
1154 devi_node->flag |= E1000G_PRIV_DEVI_DETACH;
1155
1156 if (devi_node->pending_rx_count == 0) {
1157 e1000g_free_priv_devi_node(devi_node);
1158 }
1159 }
1160 mutex_exit(&e1000g_rx_detach_lock);
1161
1162 kmem_free((caddr_t)Adapter, sizeof (struct e1000g));
1163
1164 /*
1165 * Another hotplug spec requirement:
1166 * run ddi_set_driver_private(devinfo, NULL);
1167 */
1168 ddi_set_driver_private(devinfo, NULL);
1169 }
1170
1171 /*
1172 * Get the BAR type and rnumber for a given PCI BAR offset
1173 */
1174 static int
1175 e1000g_get_bar_info(dev_info_t *dip, int bar_offset, bar_info_t *bar_info)
1176 {
1177 pci_regspec_t *regs;
1178 uint_t regs_length;
1179 int type, rnumber, rcount;
1180
1181 ASSERT((bar_offset >= PCI_CONF_BASE0) &&
1182 (bar_offset <= PCI_CONF_BASE5));
1183
1184 /*
1185 * Get the DDI "reg" property
1186 */
1187 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip,
1188 DDI_PROP_DONTPASS, "reg", (int **)&regs,
1189 &regs_length) != DDI_PROP_SUCCESS) {
1190 return (DDI_FAILURE);
1191 }
1192
1193 rcount = regs_length * sizeof (int) / sizeof (pci_regspec_t);
1194 /*
1195 * Check the BAR offset
1196 */
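/*
 * Descriptive note: each pci_regspec_t in the "reg" property describes one
 * register set; pci_phys_hi encodes the configuration-space offset of the
 * BAR (extracted with PCI_REG_REG_G) and the address-space type in the
 * bits covered by PCI_ADDR_MASK (config, I/O, 32-bit or 64-bit memory).
 */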
1197 for (rnumber = 0; rnumber < rcount; ++rnumber) {
1198 if (PCI_REG_REG_G(regs[rnumber].pci_phys_hi) == bar_offset) {
1199 type = regs[rnumber].pci_phys_hi & PCI_ADDR_MASK;
1200 break;
1201 }
1202 }
1203
1204 ddi_prop_free(regs);
1205
1206 if (rnumber >= rcount)
1207 return (DDI_FAILURE);
1208
1209 switch (type) {
1210 case PCI_ADDR_CONFIG:
1211 bar_info->type = E1000G_BAR_CONFIG;
1212 break;
1213 case PCI_ADDR_IO:
1214 bar_info->type = E1000G_BAR_IO;
1215 break;
1216 case PCI_ADDR_MEM32:
1217 bar_info->type = E1000G_BAR_MEM32;
1218 break;
1219 case PCI_ADDR_MEM64:
1220 bar_info->type = E1000G_BAR_MEM64;
1221 break;
1222 default:
1223 return (DDI_FAILURE);
1224 }
1225 bar_info->rnumber = rnumber;
1226 return (DDI_SUCCESS);
1227 }
1228
1229 static void
1230 e1000g_init_locks(struct e1000g *Adapter)
1231 {
1232 e1000g_tx_ring_t *tx_ring;
1233 e1000g_rx_ring_t *rx_ring;
1234
1235 rw_init(&Adapter->chip_lock, NULL,
1236 RW_DRIVER, DDI_INTR_PRI(Adapter->intr_pri));
1237 mutex_init(&Adapter->link_lock, NULL,
1238 MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri));
1239 mutex_init(&Adapter->watchdog_lock, NULL,
1240 MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri));
1241
1242 tx_ring = Adapter->tx_ring;
1243
1244 mutex_init(&tx_ring->tx_lock, NULL,
1245 MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri));
1246 mutex_init(&tx_ring->usedlist_lock, NULL,
1247 MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri));
1248 mutex_init(&tx_ring->freelist_lock, NULL,
1249 MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri));
1250
1251 rx_ring = Adapter->rx_ring;
1252
1253 mutex_init(&rx_ring->rx_lock, NULL,
1254 MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri));
1255 }
1256
1257 static void
1258 e1000g_destroy_locks(struct e1000g *Adapter)
1259 {
1260 e1000g_tx_ring_t *tx_ring;
1261 e1000g_rx_ring_t *rx_ring;
1262
1263 tx_ring = Adapter->tx_ring;
1264 mutex_destroy(&tx_ring->tx_lock);
1265 mutex_destroy(&tx_ring->usedlist_lock);
1266 mutex_destroy(&tx_ring->freelist_lock);
1267
1268 rx_ring = Adapter->rx_ring;
1269 mutex_destroy(&rx_ring->rx_lock);
1270
1271 mutex_destroy(&Adapter->link_lock);
1272 mutex_destroy(&Adapter->watchdog_lock);
1273 rw_destroy(&Adapter->chip_lock);
1274
1275 /* destroy mutexes initialized in the shared code */
1276 e1000_destroy_hw_mutex(&Adapter->shared);
1277 }
1278
1279 static int
1280 e1000g_resume(dev_info_t *devinfo)
1281 {
1282 struct e1000g *Adapter;
1283
1284 Adapter = (struct e1000g *)ddi_get_driver_private(devinfo);
1285 if (Adapter == NULL)
1286 e1000g_log(Adapter, CE_PANIC,
1287 "Instance pointer is null\n");
1288
1289 if (Adapter->dip != devinfo)
1290 e1000g_log(Adapter, CE_PANIC,
1291 "Devinfo is not the same as saved devinfo\n");
1292
1293 rw_enter(&Adapter->chip_lock, RW_WRITER);
1294
1295 if (Adapter->e1000g_state & E1000G_STARTED) {
1296 if (e1000g_start(Adapter, B_FALSE) != DDI_SUCCESS) {
1297 rw_exit(&Adapter->chip_lock);
1298 /*
1299 * We note the failure, but return success, as the
1300 * system is still usable without this controller.
1301 */
1302 e1000g_log(Adapter, CE_WARN,
1303 "e1000g_resume: failed to restart controller\n");
1304 return (DDI_SUCCESS);
1305 }
1306 /* Enable and start the watchdog timer */
1307 enable_watchdog_timer(Adapter);
1308 }
1309
1310 Adapter->e1000g_state &= ~E1000G_SUSPENDED;
1311
1312 rw_exit(&Adapter->chip_lock);
1313
1314 return (DDI_SUCCESS);
1315 }
1316
1317 static int
1318 e1000g_suspend(dev_info_t *devinfo)
1319 {
1320 struct e1000g *Adapter;
1321
1322 Adapter = (struct e1000g *)ddi_get_driver_private(devinfo);
1323 if (Adapter == NULL)
1324 return (DDI_FAILURE);
1325
1326 rw_enter(&Adapter->chip_lock, RW_WRITER);
1327
1328 Adapter->e1000g_state |= E1000G_SUSPENDED;
1329
1330 /* if the port isn't plumbed, we can simply return */
1331 if (!(Adapter->e1000g_state & E1000G_STARTED)) {
1332 rw_exit(&Adapter->chip_lock);
1333 return (DDI_SUCCESS);
1334 }
1335
1336 e1000g_stop(Adapter, B_FALSE);
1337
1338 rw_exit(&Adapter->chip_lock);
1339
1340 /* Disable and stop all the timers */
1341 disable_watchdog_timer(Adapter);
1342 stop_link_timer(Adapter);
1343 stop_82547_timer(Adapter->tx_ring);
1344
1345 return (DDI_SUCCESS);
1346 }
1347
1348 static int
1349 e1000g_init(struct e1000g *Adapter)
1350 {
1351 uint32_t pba;
1352 uint32_t high_water;
1353 struct e1000_hw *hw;
1354 clock_t link_timeout;
1355 int result;
1356
1357 hw = &Adapter->shared;
1358
1359 /*
1360 * reset to put the hardware in a known state
1361 * before we try to do anything with the eeprom
1362 */
1363 mutex_enter(&e1000g_nvm_lock);
1364 result = e1000_reset_hw(hw);
1365 mutex_exit(&e1000g_nvm_lock);
1366
1367 if (result != E1000_SUCCESS) {
1368 e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_INVAL_STATE);
1369 goto init_fail;
1370 }
1371
1372 mutex_enter(&e1000g_nvm_lock);
1373 result = e1000_validate_nvm_checksum(hw);
1374 if (result < E1000_SUCCESS) {
1375 /*
1376 * Some PCI-E parts fail the first check due to
1377 * the link being in sleep state. Call it again;
1378 * if it fails a second time, it's a real issue.
1379 */
1380 result = e1000_validate_nvm_checksum(hw);
1381 }
1382 mutex_exit(&e1000g_nvm_lock);
1383
1384 if (result < E1000_SUCCESS) {
1385 e1000g_log(Adapter, CE_WARN,
1386 "Invalid NVM checksum. Please contact "
1387 "the vendor to update the NVM.");
1388 e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_INVAL_STATE);
1389 goto init_fail;
1390 }
1391
1392 result = 0;
1393 #ifdef __sparc
1394 /*
1395 * First, we try to get the local Ethernet address from OBP. If
1396 * that fails, we get it from the EEPROM of the NIC card.
1397 */
1398 result = e1000g_find_mac_address(Adapter);
1399 #endif
1400 /* Get the local ethernet address. */
1401 if (!result) {
1402 mutex_enter(&e1000g_nvm_lock);
1403 result = e1000_read_mac_addr(hw);
1404 mutex_exit(&e1000g_nvm_lock);
1405 }
1406
1407 if (result < E1000_SUCCESS) {
1408 e1000g_log(Adapter, CE_WARN, "Read mac addr failed");
1409 e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_INVAL_STATE);
1410 goto init_fail;
1411 }
1412
1413 /* check for valid mac address */
1414 if (!is_valid_mac_addr(hw->mac.addr)) {
1415 e1000g_log(Adapter, CE_WARN, "Invalid mac addr");
1416 e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_INVAL_STATE);
1417 goto init_fail;
1418 }
1419
1420 /* Set LAA state for 82571 chipset */
1421 e1000_set_laa_state_82571(hw, B_TRUE);
1422
1423 /* Master Latency Timer implementation */
1424 if (Adapter->master_latency_timer) {
1425 pci_config_put8(Adapter->osdep.cfg_handle,
1426 PCI_CONF_LATENCY_TIMER, Adapter->master_latency_timer);
1427 }
1428
1429 if (hw->mac.type < e1000_82547) {
1430 /*
1431 * Total FIFO is 64K
1432 */
1433 if (Adapter->max_frame_size > FRAME_SIZE_UPTO_8K)
1434 pba = E1000_PBA_40K; /* 40K for Rx, 24K for Tx */
1435 else
1436 pba = E1000_PBA_48K; /* 48K for Rx, 16K for Tx */
1437 } else if ((hw->mac.type == e1000_82571) ||
1438 (hw->mac.type == e1000_82572) ||
1439 (hw->mac.type == e1000_80003es2lan)) {
1440 /*
1441 * Total FIFO is 48K
1442 */
1443 if (Adapter->max_frame_size > FRAME_SIZE_UPTO_8K)
1444 pba = E1000_PBA_30K; /* 30K for Rx, 18K for Tx */
1445 else
1446 pba = E1000_PBA_38K; /* 38K for Rx, 10K for Tx */
1447 } else if (hw->mac.type == e1000_82573) {
1448 pba = E1000_PBA_20K; /* 20K for Rx, 12K for Tx */
1449 } else if (hw->mac.type == e1000_82574) {
1450 /* Keep adapter default: 20K for Rx, 20K for Tx */
1451 pba = E1000_READ_REG(hw, E1000_PBA);
1452 } else if (hw->mac.type == e1000_ich8lan) {
1453 pba = E1000_PBA_8K; /* 8K for Rx, 12K for Tx */
1454 } else if (hw->mac.type == e1000_ich9lan) {
1455 pba = E1000_PBA_10K;
1456 } else if (hw->mac.type == e1000_ich10lan) {
1457 pba = E1000_PBA_10K;
1458 } else if (hw->mac.type == e1000_pchlan) {
1459 pba = E1000_PBA_26K;
1460 } else if (hw->mac.type == e1000_pch2lan) {
1461 pba = E1000_PBA_26K;
1462 } else if (hw->mac.type == e1000_pch_lpt) {
1463 pba = E1000_PBA_26K;
1464 } else {
1465 /*
1466 * Total FIFO is 40K
1467 */
1468 if (Adapter->max_frame_size > FRAME_SIZE_UPTO_8K)
1469 pba = E1000_PBA_22K; /* 22K for Rx, 18K for Tx */
1470 else
1471 pba = E1000_PBA_30K; /* 30K for Rx, 10K for Tx */
1472 }
1473 E1000_WRITE_REG(hw, E1000_PBA, pba);
1474
1475 /*
1476 * These parameters set thresholds for the adapter's generation (Tx)
1477 * and response (Rx) to Ethernet PAUSE frames. These are just threshold
1478 * settings. Flow control is enabled or disabled in the configuration
1479 * file.
1480 * High-water mark is set down from the top of the rx fifo (not
1481 * sensitive to max_frame_size) and low-water is set just below
1482 * high-water mark.
1483 * The high water mark must be low enough to fit one full frame above
1484 * it in the rx FIFO. Should be the lower of:
1485 * 90% of the Rx FIFO size and the full Rx FIFO size minus the early
1486 * receive size (assuming ERT set to E1000_ERT_2048), or the full
1487 * Rx FIFO size minus one full frame.
1488 */
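/*
 * Worked example (illustrative numbers only): with pba = 48 (E1000_PBA_48K)
 * and a 1522-byte max frame, the Rx FIFO is pba << 10 = 49152 bytes, so
 * high_water = min(49152 * 9 / 10, 49152 - 1522) = min(44236, 47630) =
 * 44236; rounded down to a multiple of 8 this gives fc.high_water = 44232
 * and fc.low_water = 44224.
 */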
1489 high_water = min(((pba << 10) * 9 / 10),
1490 ((hw->mac.type == e1000_82573 || hw->mac.type == e1000_82574 ||
1491 hw->mac.type == e1000_ich9lan || hw->mac.type == e1000_ich10lan) ?
1492 ((pba << 10) - (E1000_ERT_2048 << 3)) :
1493 ((pba << 10) - Adapter->max_frame_size)));
1494
1495 hw->fc.high_water = high_water & 0xFFF8;
1496 hw->fc.low_water = hw->fc.high_water - 8;
1497
1498 if (hw->mac.type == e1000_80003es2lan)
1499 hw->fc.pause_time = 0xFFFF;
1500 else
1501 hw->fc.pause_time = E1000_FC_PAUSE_TIME;
1502 hw->fc.send_xon = B_TRUE;
1503
1504 /*
1505 * Reset the adapter hardware the second time.
1506 */
1507 mutex_enter(&e1000g_nvm_lock);
1508 result = e1000_reset_hw(hw);
1509 mutex_exit(&e1000g_nvm_lock);
1510
1511 if (result != E1000_SUCCESS) {
1512 e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_INVAL_STATE);
1513 goto init_fail;
1514 }
1515
1516 /* disable wakeup control by default */
1517 if (hw->mac.type >= e1000_82544)
1518 E1000_WRITE_REG(hw, E1000_WUC, 0);
1519
1520 /*
1521 * MWI should be disabled on 82546.
1522 */
1523 if (hw->mac.type == e1000_82546)
1524 e1000_pci_clear_mwi(hw);
1525 else
1526 e1000_pci_set_mwi(hw);
1527
1528 /*
1529 * Configure/Initialize hardware
1530 */
1531 mutex_enter(&e1000g_nvm_lock);
1532 result = e1000_init_hw(hw);
1533 mutex_exit(&e1000g_nvm_lock);
1534
1535 if (result < E1000_SUCCESS) {
1536 e1000g_log(Adapter, CE_WARN, "Initialize hw failed");
1537 e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_INVAL_STATE);
1538 goto init_fail;
1539 }
1540
1541 /*
1542 * Restore LED settings to the default from EEPROM
1543 * to meet the standard for Sun platforms.
1544 */
1545 (void) e1000_cleanup_led(hw);
1546
1547 /* Disable Smart Power Down */
1548 phy_spd_state(hw, B_FALSE);
1549
1550 /* Make sure driver has control */
1551 e1000g_get_driver_control(hw);
1552
1553 /*
1554 * Initialize unicast addresses.
1555 */
1556 e1000g_init_unicst(Adapter);
1557
1558 /*
1559 * Setup and initialize the mctable structures. After this routine
1560 * completes Multicast table will be set
1561 */
1562 e1000_update_mc_addr_list(hw,
1563 (uint8_t *)Adapter->mcast_table, Adapter->mcast_count);
1564 msec_delay(5);
1565
1566 /*
1567 * Implement Adaptive IFS
1568 */
1569 e1000_reset_adaptive(hw);
1570
1571 /* Setup Interrupt Throttling Register */
1572 if (hw->mac.type >= e1000_82540) {
1573 E1000_WRITE_REG(hw, E1000_ITR, Adapter->intr_throttling_rate);
1574 } else
1575 Adapter->intr_adaptive = B_FALSE;
1576
1577 /* Start the timer for link setup */
1578 if (hw->mac.autoneg)
1579 link_timeout = PHY_AUTO_NEG_LIMIT * drv_usectohz(100000);
1580 else
1581 link_timeout = PHY_FORCE_LIMIT * drv_usectohz(100000);
1582
1583 mutex_enter(&Adapter->link_lock);
1584 if (hw->phy.autoneg_wait_to_complete) {
1585 Adapter->link_complete = B_TRUE;
1586 } else {
1587 Adapter->link_complete = B_FALSE;
1588 Adapter->link_tid = timeout(e1000g_link_timer,
1589 (void *)Adapter, link_timeout);
1590 }
1591 mutex_exit(&Adapter->link_lock);
1592
1593 /* Save the state of the phy */
1594 e1000g_get_phy_state(Adapter);
1595
1596 e1000g_param_sync(Adapter);
1597
1598 Adapter->init_count++;
1599
1600 if (e1000g_check_acc_handle(Adapter->osdep.cfg_handle) != DDI_FM_OK) {
1601 goto init_fail;
1602 }
1603 if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) {
1604 goto init_fail;
1605 }
1606
1607 Adapter->poll_mode = e1000g_poll_mode;
1608
1609 return (DDI_SUCCESS);
1610
1611 init_fail:
1612 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_LOST);
1613 return (DDI_FAILURE);
1614 }
1615
1616 static int
1617 e1000g_alloc_rx_data(struct e1000g *Adapter)
1618 {
1619 e1000g_rx_ring_t *rx_ring;
1620 e1000g_rx_data_t *rx_data;
1621
1622 rx_ring = Adapter->rx_ring;
1623
1624 rx_data = kmem_zalloc(sizeof (e1000g_rx_data_t), KM_NOSLEEP);
1625
1626 if (rx_data == NULL)
1627 return (DDI_FAILURE);
1628
1629 rx_data->priv_devi_node = Adapter->priv_devi_node;
1630 rx_data->rx_ring = rx_ring;
1631
1632 mutex_init(&rx_data->freelist_lock, NULL,
1633 MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri));
1634 mutex_init(&rx_data->recycle_lock, NULL,
1635 MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri));
1636
1637 rx_ring->rx_data = rx_data;
1638
1639 return (DDI_SUCCESS);
1640 }
1641
1642 void
1643 e1000g_free_rx_pending_buffers(e1000g_rx_data_t *rx_data)
1644 {
1645 rx_sw_packet_t *packet, *next_packet;
1646
1647 if (rx_data == NULL)
1648 return;
1649
1650 packet = rx_data->packet_area;
1651 while (packet != NULL) {
1652 next_packet = packet->next;
1653 e1000g_free_rx_sw_packet(packet, B_TRUE);
1654 packet = next_packet;
1655 }
1656 rx_data->packet_area = NULL;
1657 }
1658
1659 void
1660 e1000g_free_rx_data(e1000g_rx_data_t *rx_data)
1661 {
1662 if (rx_data == NULL)
1663 return;
1664
1665 mutex_destroy(&rx_data->freelist_lock);
1666 mutex_destroy(&rx_data->recycle_lock);
1667
1668 kmem_free(rx_data, sizeof (e1000g_rx_data_t));
1669 }
1670
1671 /*
1672 * Check if the link is up
1673 */
1674 static boolean_t
1675 e1000g_link_up(struct e1000g *Adapter)
1676 {
1677 struct e1000_hw *hw = &Adapter->shared;
1678 boolean_t link_up = B_FALSE;
1679
1680 /*
1681 * get_link_status is set in the interrupt handler on link-status-change
1682 * or rx sequence error interrupt. get_link_status will stay
1683 * false until e1000_check_for_link() establishes link, for
1684 * copper adapters only.
1685 */
1686 switch (hw->phy.media_type) {
1687 case e1000_media_type_copper:
1688 if (hw->mac.get_link_status) {
1689 (void) e1000_check_for_link(hw);
1690 if ((E1000_READ_REG(hw, E1000_STATUS) &
1691 E1000_STATUS_LU)) {
1692 link_up = B_TRUE;
1693 } else {
1694 link_up = !hw->mac.get_link_status;
1695 }
1696 } else {
1697 link_up = B_TRUE;
1698 }
1699 break;
1700 case e1000_media_type_fiber:
1701 (void) e1000_check_for_link(hw);
1702 link_up = (E1000_READ_REG(hw, E1000_STATUS) &
1703 E1000_STATUS_LU);
1704 break;
1705 case e1000_media_type_internal_serdes:
1706 (void) e1000_check_for_link(hw);
1707 link_up = hw->mac.serdes_has_link;
1708 break;
1709 }
1710
1711 return (link_up);
1712 }
1713
1714 static void
1715 e1000g_m_ioctl(void *arg, queue_t *q, mblk_t *mp)
1716 {
1717 struct iocblk *iocp;
1718 struct e1000g *e1000gp;
1719 enum ioc_reply status;
1720
1721 iocp = (struct iocblk *)(uintptr_t)mp->b_rptr;
1722 iocp->ioc_error = 0;
1723 e1000gp = (struct e1000g *)arg;
1724
1725 ASSERT(e1000gp);
1726 if (e1000gp == NULL) {
1727 miocnak(q, mp, 0, EINVAL);
1728 return;
1729 }
1730
1731 rw_enter(&e1000gp->chip_lock, RW_READER);
1732 if (e1000gp->e1000g_state & E1000G_SUSPENDED) {
1733 rw_exit(&e1000gp->chip_lock);
1734 miocnak(q, mp, 0, EINVAL);
1735 return;
1736 }
1737 rw_exit(&e1000gp->chip_lock);
1738
1739 switch (iocp->ioc_cmd) {
1740
1741 case LB_GET_INFO_SIZE:
1742 case LB_GET_INFO:
1743 case LB_GET_MODE:
1744 case LB_SET_MODE:
1745 status = e1000g_loopback_ioctl(e1000gp, iocp, mp);
1746 break;
1747
1748
1749 #ifdef E1000G_DEBUG
1750 case E1000G_IOC_REG_PEEK:
1751 case E1000G_IOC_REG_POKE:
1752 status = e1000g_pp_ioctl(e1000gp, iocp, mp);
1753 break;
1754 case E1000G_IOC_CHIP_RESET:
1755 e1000gp->reset_count++;
1756 if (e1000g_reset_adapter(e1000gp))
1757 status = IOC_ACK;
1758 else
1759 status = IOC_INVAL;
1760 break;
1761 #endif
1762 default:
1763 status = IOC_INVAL;
1764 break;
1765 }
1766
1767 /*
1768 * Decide how to reply
1769 */
1770 switch (status) {
1771 default:
1772 case IOC_INVAL:
1773 /*
1774 * Error, reply with a NAK and EINVAL or the specified error
1775 */
1776 miocnak(q, mp, 0, iocp->ioc_error == 0 ?
1777 EINVAL : iocp->ioc_error);
1778 break;
1779
1780 case IOC_DONE:
1781 /*
1782 * OK, reply already sent
1783 */
1784 break;
1785
1786 case IOC_ACK:
1787 /*
1788 * OK, reply with an ACK
1789 */
1790 miocack(q, mp, 0, 0);
1791 break;
1792
1793 case IOC_REPLY:
1794 /*
1795 * OK, send prepared reply as ACK or NAK
1796 */
1797 mp->b_datap->db_type = iocp->ioc_error == 0 ?
1798 M_IOCACK : M_IOCNAK;
1799 qreply(q, mp);
1800 break;
1801 }
1802 }
1803
1804 /*
1805 * The default value of e1000g_poll_mode == 0 assumes that the NIC is
1806 * capable of supporting only one interrupt and we shouldn't disable
1807 * the physical interrupt. In this case we let the interrupt come and
1808 * we queue the packets in the rx ring itself in case we are in polling
1809 * mode (better latency but slightly lower performance and a very
1810 * high interrupt count in mpstat, which is harmless).
1811 *
1812 * e1000g_poll_mode == 1 assumes that we have per Rx ring interrupt
1813 * which can be disabled in poll mode. This gives better overall
1814 * throughput (compared to the mode above), shows very low interrupt
1815 * count but has slightly higher latency since we pick the packets when
1816 * the poll thread does polling.
1817 *
1818 * Currently, this flag should be enabled only while doing performance
1819 * measurement or when it can be guaranteed that the entire NIC going
1820 * into poll mode will not harm any traffic like cluster heartbeats.
1821 */
1822 int e1000g_poll_mode = 0;
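/*
 * Illustrative only: as a global driver variable, e1000g_poll_mode can
 * typically be enabled at boot time from /etc/system, e.g.
 *
 *	set e1000g:e1000g_poll_mode = 1
 *
 * subject to the caveats described in the comment above.
 */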
1823
1824 /*
1825 * Called from the upper layers when the driver is in polling mode to
1826 * pick up any queued packets. Care should be taken to not block
1827 * this thread.
1828 */
1829 static mblk_t *e1000g_poll_ring(void *arg, int bytes_to_pickup)
1830 {
1831 e1000g_rx_ring_t *rx_ring = (e1000g_rx_ring_t *)arg;
1832 mblk_t *mp = NULL;
1833 mblk_t *tail;
1834 struct e1000g *adapter;
1835
1836 adapter = rx_ring->adapter;
1837
1838 rw_enter(&adapter->chip_lock, RW_READER);
1839
1840 if (adapter->e1000g_state & E1000G_SUSPENDED) {
1841 rw_exit(&adapter->chip_lock);
1842 return (NULL);
1843 }
1844
1845 mutex_enter(&rx_ring->rx_lock);
1846 mp = e1000g_receive(rx_ring, &tail, bytes_to_pickup);
1847 mutex_exit(&rx_ring->rx_lock);
1848 rw_exit(&adapter->chip_lock);
1849 return (mp);
1850 }
1851
1852 static int
1853 e1000g_m_start(void *arg)
1854 {
1855 struct e1000g *Adapter = (struct e1000g *)arg;
1856
1857 rw_enter(&Adapter->chip_lock, RW_WRITER);
1858
1859 if (Adapter->e1000g_state & E1000G_SUSPENDED) {
1860 rw_exit(&Adapter->chip_lock);
1861 return (ECANCELED);
1862 }
1863
1864 if (e1000g_start(Adapter, B_TRUE) != DDI_SUCCESS) {
1865 rw_exit(&Adapter->chip_lock);
1866 return (ENOTACTIVE);
1867 }
1868
1869 Adapter->e1000g_state |= E1000G_STARTED;
1870
1871 rw_exit(&Adapter->chip_lock);
1872
1873 /* Enable and start the watchdog timer */
1874 enable_watchdog_timer(Adapter);
1875
1876 return (0);
1877 }
1878
1879 static int
1880 e1000g_start(struct e1000g *Adapter, boolean_t global)
1881 {
1882 e1000g_rx_data_t *rx_data;
1883
1884 if (global) {
1885 if (e1000g_alloc_rx_data(Adapter) != DDI_SUCCESS) {
1886 e1000g_log(Adapter, CE_WARN, "Allocate rx data failed");
1887 goto start_fail;
1888 }
1889
1890 /* Allocate dma resources for descriptors and buffers */
1891 if (e1000g_alloc_dma_resources(Adapter) != DDI_SUCCESS) {
1892 e1000g_log(Adapter, CE_WARN,
1893 "Alloc DMA resources failed");
1894 goto start_fail;
1895 }
1896 Adapter->rx_buffer_setup = B_FALSE;
1897 }
1898
1899 if (!(Adapter->attach_progress & ATTACH_PROGRESS_INIT)) {
1900 if (e1000g_init(Adapter) != DDI_SUCCESS) {
1901 e1000g_log(Adapter, CE_WARN,
1902 "Adapter initialization failed");
1903 goto start_fail;
1904 }
1905 }
1906
1907 /* Setup and initialize the transmit structures */
1908 e1000g_tx_setup(Adapter);
1909 msec_delay(5);
1910
1911 /* Setup and initialize the receive structures */
1912 e1000g_rx_setup(Adapter);
1913 msec_delay(5);
1914
1915 /* Restore the e1000g promiscuous mode */
1916 e1000g_restore_promisc(Adapter);
1917
1918 e1000g_mask_interrupt(Adapter);
1919
1920 Adapter->attach_progress |= ATTACH_PROGRESS_INIT;
1921
1922 if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) {
1923 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_LOST);
1924 goto start_fail;
1925 }
1926
1927 return (DDI_SUCCESS);
1928
1929 start_fail:
1930 rx_data = Adapter->rx_ring->rx_data;
1931
1932 if (global) {
1933 e1000g_release_dma_resources(Adapter);
1934 e1000g_free_rx_pending_buffers(rx_data);
1935 e1000g_free_rx_data(rx_data);
1936 }
1937
1938 mutex_enter(&e1000g_nvm_lock);
1939 (void) e1000_reset_hw(&Adapter->shared);
1940 mutex_exit(&e1000g_nvm_lock);
1941
1942 return (DDI_FAILURE);
1943 }
1944
1945 static void
1946 e1000g_m_stop(void *arg)
1947 {
1948 struct e1000g *Adapter = (struct e1000g *)arg;
1949
1950 /* Drain tx sessions */
1951 (void) e1000g_tx_drain(Adapter);
1952
1953 rw_enter(&Adapter->chip_lock, RW_WRITER);
1954
1955 if (Adapter->e1000g_state & E1000G_SUSPENDED) {
1956 rw_exit(&Adapter->chip_lock);
1957 return;
1958 }
1959 Adapter->e1000g_state &= ~E1000G_STARTED;
1960 e1000g_stop(Adapter, B_TRUE);
1961
1962 rw_exit(&Adapter->chip_lock);
1963
1964 /* Disable and stop all the timers */
1965 disable_watchdog_timer(Adapter);
1966 stop_link_timer(Adapter);
1967 stop_82547_timer(Adapter->tx_ring);
1968 }
1969
1970 static void
1971 e1000g_stop(struct e1000g *Adapter, boolean_t global)
1972 {
1973 private_devi_list_t *devi_node;
1974 e1000g_rx_data_t *rx_data;
1975 int result;
1976
1977 Adapter->attach_progress &= ~ATTACH_PROGRESS_INIT;
1978
1979 /* Stop the chip and release pending resources */
1980
1981 /* Tell firmware driver is no longer in control */
1982 e1000g_release_driver_control(&Adapter->shared);
1983
1984 e1000g_clear_all_interrupts(Adapter);
1985
1986 mutex_enter(&e1000g_nvm_lock);
1987 result = e1000_reset_hw(&Adapter->shared);
1988 mutex_exit(&e1000g_nvm_lock);
1989
1990 if (result != E1000_SUCCESS) {
1991 e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_INVAL_STATE);
1992 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_LOST);
1993 }
1994
1995 mutex_enter(&Adapter->link_lock);
1996 Adapter->link_complete = B_FALSE;
1997 mutex_exit(&Adapter->link_lock);
1998
1999 /* Release resources still held by the TX descriptors */
2000 e1000g_tx_clean(Adapter);
2001
2002 if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK)
2003 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_LOST);
2004
2005 /* Clean the pending rx jumbo packet fragment */
2006 e1000g_rx_clean(Adapter);
2007
2008 if (global) {
2009 e1000g_release_dma_resources(Adapter);
2010
2011 mutex_enter(&e1000g_rx_detach_lock);
2012 rx_data = Adapter->rx_ring->rx_data;
2013 rx_data->flag |= E1000G_RX_STOPPED;
2014
2015 if (rx_data->pending_count == 0) {
2016 e1000g_free_rx_pending_buffers(rx_data);
2017 e1000g_free_rx_data(rx_data);
2018 } else {
2019 devi_node = rx_data->priv_devi_node;
2020 if (devi_node != NULL)
2021 atomic_inc_32(&devi_node->pending_rx_count);
2022 else
2023 atomic_inc_32(&Adapter->pending_rx_count);
2024 }
2025 mutex_exit(&e1000g_rx_detach_lock);
2026 }
2027
2028 if (Adapter->link_state != LINK_STATE_UNKNOWN) {
2029 Adapter->link_state = LINK_STATE_UNKNOWN;
2030 if (!Adapter->reset_flag)
2031 mac_link_update(Adapter->mh, Adapter->link_state);
2032 }
2033 }
2034
2035 static void
2036 e1000g_rx_clean(struct e1000g *Adapter)
2037 {
2038 e1000g_rx_data_t *rx_data = Adapter->rx_ring->rx_data;
2039
2040 if (rx_data == NULL)
2041 return;
2042
2043 if (rx_data->rx_mblk != NULL) {
2044 freemsg(rx_data->rx_mblk);
2045 rx_data->rx_mblk = NULL;
2046 rx_data->rx_mblk_tail = NULL;
2047 rx_data->rx_mblk_len = 0;
2048 }
2049 }
2050
2051 static void
2052 e1000g_tx_clean(struct e1000g *Adapter)
2053 {
2054 e1000g_tx_ring_t *tx_ring;
2055 p_tx_sw_packet_t packet;
2056 mblk_t *mp;
2057 mblk_t *nmp;
2058 uint32_t packet_count;
2059
2060 tx_ring = Adapter->tx_ring;
2061
2062 /*
2063 * Here we don't need to protect the lists using
2064 * the usedlist_lock and freelist_lock, for they
2065 * have been protected by the chip_lock.
2066 */
2067 mp = NULL;
2068 nmp = NULL;
2069 packet_count = 0;
2070 packet = (p_tx_sw_packet_t)QUEUE_GET_HEAD(&tx_ring->used_list);
2071 while (packet != NULL) {
2072 if (packet->mp != NULL) {
2073 /* Assemble the message chain */
2074 if (mp == NULL) {
2075 mp = packet->mp;
2076 nmp = packet->mp;
2077 } else {
2078 nmp->b_next = packet->mp;
2079 nmp = packet->mp;
2080 }
2081 /* Disconnect the message from the sw packet */
2082 packet->mp = NULL;
2083 }
2084
2085 e1000g_free_tx_swpkt(packet);
2086 packet_count++;
2087
2088 packet = (p_tx_sw_packet_t)
2089 QUEUE_GET_NEXT(&tx_ring->used_list, &packet->Link);
2090 }
2091
2092 if (mp != NULL)
2093 freemsgchain(mp);
2094
2095 if (packet_count > 0) {
2096 QUEUE_APPEND(&tx_ring->free_list, &tx_ring->used_list);
2097 QUEUE_INIT_LIST(&tx_ring->used_list);
2098
2099 /* Setup TX descriptor pointers */
2100 tx_ring->tbd_next = tx_ring->tbd_first;
2101 tx_ring->tbd_oldest = tx_ring->tbd_first;
2102
2103 /* Setup our HW Tx Head & Tail descriptor pointers */
2104 E1000_WRITE_REG(&Adapter->shared, E1000_TDH(0), 0);
2105 E1000_WRITE_REG(&Adapter->shared, E1000_TDT(0), 0);
2106 }
2107 }
2108
2109 static boolean_t
2110 e1000g_tx_drain(struct e1000g *Adapter)
2111 {
2112 int i;
2113 boolean_t done;
2114 e1000g_tx_ring_t *tx_ring;
2115
2116 tx_ring = Adapter->tx_ring;
2117
2118 /* Allow up to TX_DRAIN_TIME for pending transmits to complete. */
2119 for (i = 0; i < TX_DRAIN_TIME; i++) {
2120 mutex_enter(&tx_ring->usedlist_lock);
2121 done = IS_QUEUE_EMPTY(&tx_ring->used_list);
2122 mutex_exit(&tx_ring->usedlist_lock);
2123
2124 if (done)
2125 break;
2126
2127 msec_delay(1);
2128 }
2129
2130 return (done);
2131 }
2132
2133 static boolean_t
2134 e1000g_rx_drain(struct e1000g *Adapter)
2135 {
2136 int i;
2137 boolean_t done;
2138
2139 /*
2140 * Allow up to RX_DRAIN_TIME for pending received packets to complete.
2141 */
2142 for (i = 0; i < RX_DRAIN_TIME; i++) {
2143 done = (Adapter->pending_rx_count == 0);
2144
2145 if (done)
2146 break;
2147
2148 msec_delay(1);
2149 }
2150
2151 return (done);
2152 }
2153
2154 static boolean_t
2155 e1000g_reset_adapter(struct e1000g *Adapter)
2156 {
2157 /* Disable and stop all the timers */
2158 disable_watchdog_timer(Adapter);
2159 stop_link_timer(Adapter);
2160 stop_82547_timer(Adapter->tx_ring);
2161
2162 rw_enter(&Adapter->chip_lock, RW_WRITER);
2163
2164 if (Adapter->stall_flag) {
2165 Adapter->stall_flag = B_FALSE;
2166 Adapter->reset_flag = B_TRUE;
2167 }
2168
2169 if (!(Adapter->e1000g_state & E1000G_STARTED)) {
2170 rw_exit(&Adapter->chip_lock);
2171 return (B_TRUE);
2172 }
2173
2174 e1000g_stop(Adapter, B_FALSE);
2175
2176 if (e1000g_start(Adapter, B_FALSE) != DDI_SUCCESS) {
2177 rw_exit(&Adapter->chip_lock);
2178 e1000g_log(Adapter, CE_WARN, "Reset failed");
2179 return (B_FALSE);
2180 }
2181
2182 rw_exit(&Adapter->chip_lock);
2183
2184 /* Enable and start the watchdog timer */
2185 enable_watchdog_timer(Adapter);
2186
2187 return (B_TRUE);
2188 }
2189
2190 boolean_t
2191 e1000g_global_reset(struct e1000g *Adapter)
2192 {
2193 /* Disable and stop all the timers */
2194 disable_watchdog_timer(Adapter);
2195 stop_link_timer(Adapter);
2196 stop_82547_timer(Adapter->tx_ring);
2197
2198 rw_enter(&Adapter->chip_lock, RW_WRITER);
2199
2200 e1000g_stop(Adapter, B_TRUE);
2201
2202 Adapter->init_count = 0;
2203
2204 if (e1000g_start(Adapter, B_TRUE) != DDI_SUCCESS) {
2205 rw_exit(&Adapter->chip_lock);
2206 e1000g_log(Adapter, CE_WARN, "Reset failed");
2207 return (B_FALSE);
2208 }
2209
2210 rw_exit(&Adapter->chip_lock);
2211
2212 /* Enable and start the watchdog timer */
2213 enable_watchdog_timer(Adapter);
2214
2215 return (B_TRUE);
2216 }
2217
2218 /*
2219 * e1000g_intr_pciexpress - ISR for PCI Express chipsets
2220 *
2221 * This interrupt service routine is for PCI-Express adapters.
2222 * The ICR contents are valid only when the E1000_ICR_INT_ASSERTED
2223 * bit is set.
2224 */
2225 static uint_t
2226 e1000g_intr_pciexpress(caddr_t arg)
2227 {
2228 struct e1000g *Adapter;
2229 uint32_t icr;
2230
2231 Adapter = (struct e1000g *)(uintptr_t)arg;
2232 icr = E1000_READ_REG(&Adapter->shared, E1000_ICR);
2233
2234 if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) {
2235 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED);
2236 return (DDI_INTR_CLAIMED);
2237 }
2238
2239 if (icr & E1000_ICR_INT_ASSERTED) {
2240 /*
2241 * E1000_ICR_INT_ASSERTED bit was set:
2242 * Read(Clear) the ICR, claim this interrupt,
2243 * look for work to do.
2244 */
2245 e1000g_intr_work(Adapter, icr);
2246 return (DDI_INTR_CLAIMED);
2247 } else {
2248 /*
2249 * E1000_ICR_INT_ASSERTED bit was not set:
2250 * Don't claim this interrupt, return immediately.
2251 */
2252 return (DDI_INTR_UNCLAIMED);
2253 }
2254 }
2255
2256 /*
2257 * e1000g_intr - ISR for PCI/PCI-X chipsets
2258 *
2259 * This interrupt service routine is for PCI/PCI-X adapters.
2260 * We check the ICR contents whether or not the E1000_ICR_INT_ASSERTED
2261 * bit is set.
2262 */
2263 static uint_t
2264 e1000g_intr(caddr_t arg)
2265 {
2266 struct e1000g *Adapter;
2267 uint32_t icr;
2268
2269 Adapter = (struct e1000g *)(uintptr_t)arg;
2270 icr = E1000_READ_REG(&Adapter->shared, E1000_ICR);
2271
2272 if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) {
2273 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED);
2274 return (DDI_INTR_CLAIMED);
2275 }
2276
2277 if (icr) {
2278 /*
2279 * Any bit was set in ICR:
2280 * Read(Clear) the ICR, claim this interrupt,
2281 * look for work to do.
2282 */
2283 e1000g_intr_work(Adapter, icr);
2284 return (DDI_INTR_CLAIMED);
2285 } else {
2286 /*
2287 * No bit was set in ICR:
2288 * Don't claim this interrupt, return immediately.
2289 */
2290 return (DDI_INTR_UNCLAIMED);
2291 }
2292 }
2293
2294 /*
2295 * e1000g_intr_work - actual processing of ISR
2296 *
2297 * Read(clear) the ICR contents and call appropriate interrupt
2298 * processing routines.
2299 */
2300 static void
2301 e1000g_intr_work(struct e1000g *Adapter, uint32_t icr)
2302 {
2303 struct e1000_hw *hw;
2304 hw = &Adapter->shared;
2305 e1000g_tx_ring_t *tx_ring = Adapter->tx_ring;
2306
2307 Adapter->rx_pkt_cnt = 0;
2308 Adapter->tx_pkt_cnt = 0;
2309
2310 rw_enter(&Adapter->chip_lock, RW_READER);
2311
2312 if (Adapter->e1000g_state & E1000G_SUSPENDED) {
2313 rw_exit(&Adapter->chip_lock);
2314 return;
2315 }
2316 /*
2317 * Here we need to check the "e1000g_state" flag within the chip_lock to
2318 * ensure the receive routine will not execute when the adapter is
2319 * being reset.
2320 */
2321 if (!(Adapter->e1000g_state & E1000G_STARTED)) {
2322 rw_exit(&Adapter->chip_lock);
2323 return;
2324 }
2325
2326 if (icr & E1000_ICR_RXT0) {
2327 mblk_t *mp = NULL;
2328 mblk_t *tail = NULL;
2329 e1000g_rx_ring_t *rx_ring;
2330
2331 rx_ring = Adapter->rx_ring;
2332 mutex_enter(&rx_ring->rx_lock);
2333 /*
2334 * Sometimes with legacy interrupts, it is possible that
2335 * a single interrupt covers both Rx and Tx. In that
2336 * case, if the poll flag is set, we should not be
2337 * doing Rx processing here.
2338 */
2339 if (!rx_ring->poll_flag)
2340 mp = e1000g_receive(rx_ring, &tail,
2341 E1000G_CHAIN_NO_LIMIT);
2342 mutex_exit(&rx_ring->rx_lock);
2343 rw_exit(&Adapter->chip_lock);
2344 if (mp != NULL)
2345 mac_rx_ring(Adapter->mh, rx_ring->mrh,
2346 mp, rx_ring->ring_gen_num);
2347 } else
2348 rw_exit(&Adapter->chip_lock);
2349
2350 if (icr & E1000_ICR_TXDW) {
2351 if (!Adapter->tx_intr_enable)
2352 e1000g_clear_tx_interrupt(Adapter);
2353
2354 /* Recycle the tx descriptors */
2355 rw_enter(&Adapter->chip_lock, RW_READER);
2356 (void) e1000g_recycle(tx_ring);
2357 E1000G_DEBUG_STAT(tx_ring->stat_recycle_intr);
2358 rw_exit(&Adapter->chip_lock);
2359
2360 if (tx_ring->resched_needed &&
2361 (tx_ring->tbd_avail > DEFAULT_TX_UPDATE_THRESHOLD)) {
2362 tx_ring->resched_needed = B_FALSE;
2363 mac_tx_update(Adapter->mh);
2364 E1000G_STAT(tx_ring->stat_reschedule);
2365 }
2366 }
2367
2368 /*
2369 * The Receive Sequence errors RXSEQ and the link status change LSC
2370 * are checked to detect that the cable has been pulled out. For
2371 * the Wiseman 2.0 silicon, the receive sequence error interrupt
2372 * is an indication that the cable is not connected.
2373 */
2374 if ((icr & E1000_ICR_RXSEQ) ||
2375 (icr & E1000_ICR_LSC) ||
2376 (icr & E1000_ICR_GPI_EN1)) {
2377 boolean_t link_changed;
2378 timeout_id_t tid = 0;
2379
2380 stop_watchdog_timer(Adapter);
2381
2382 rw_enter(&Adapter->chip_lock, RW_WRITER);
2383
2384 /*
2385 * Because we got a link-status-change interrupt, force
2386 * e1000_check_for_link() to look at phy
2387 */
2388 Adapter->shared.mac.get_link_status = B_TRUE;
2389
2390 /* e1000g_link_check takes care of link status change */
2391 link_changed = e1000g_link_check(Adapter);
2392
2393 /* Get new phy state */
2394 e1000g_get_phy_state(Adapter);
2395
2396 /*
2397 * If the link timer has not timed out, we'll not notify
2398 * the upper layer with any link state until the link is up.
2399 */
2400 if (link_changed && !Adapter->link_complete) {
2401 if (Adapter->link_state == LINK_STATE_UP) {
2402 mutex_enter(&Adapter->link_lock);
2403 Adapter->link_complete = B_TRUE;
2404 tid = Adapter->link_tid;
2405 Adapter->link_tid = 0;
2406 mutex_exit(&Adapter->link_lock);
2407 } else {
2408 link_changed = B_FALSE;
2409 }
2410 }
2411 rw_exit(&Adapter->chip_lock);
2412
2413 if (link_changed) {
2414 if (tid != 0)
2415 (void) untimeout(tid);
2416
2417 /*
2418 * Workaround for esb2. Data stuck in fifo on a link
2419 * down event. Stop receiver here and reset in watchdog.
2420 */
2421 if ((Adapter->link_state == LINK_STATE_DOWN) &&
2422 (Adapter->shared.mac.type == e1000_80003es2lan)) {
2423 uint32_t rctl = E1000_READ_REG(hw, E1000_RCTL);
2424 E1000_WRITE_REG(hw, E1000_RCTL,
2425 rctl & ~E1000_RCTL_EN);
2426 e1000g_log(Adapter, CE_WARN,
2427 "ESB2 receiver disabled");
2428 Adapter->esb2_workaround = B_TRUE;
2429 }
2430 if (!Adapter->reset_flag)
2431 mac_link_update(Adapter->mh,
2432 Adapter->link_state);
2433 if (Adapter->link_state == LINK_STATE_UP)
2434 Adapter->reset_flag = B_FALSE;
2435 }
2436
2437 start_watchdog_timer(Adapter);
2438 }
2439 }
2440
2441 static void
2442 e1000g_init_unicst(struct e1000g *Adapter)
2443 {
2444 struct e1000_hw *hw;
2445 int slot;
2446
2447 hw = &Adapter->shared;
2448
2449 if (Adapter->init_count == 0) {
2450 /* Initialize the multiple unicast addresses */
2451 Adapter->unicst_total = min(hw->mac.rar_entry_count,
2452 MAX_NUM_UNICAST_ADDRESSES);
2453
2454 /*
2455 * The common code does not correctly calculate the number of
2456 * RARs that could be reserved by firmware for the pch_lpt
2457 * MACs. The interface has one primary RAR, and 11 additional
2458 * ones. Those 11 additional ones are not always available.
2459 * According to the datasheet, we need to check a few of the
2460 * bits set in the FWSM register. If the value is zero,
2461 * everything is available. If the value is 1, none of the
2462 * additional registers are available. If the value is 2-7, only
2463 * that many are available.
2464 */
2465 if (hw->mac.type == e1000_pch_lpt) {
2466 uint32_t locked, rar;
2467
2468 locked = E1000_READ_REG(hw, E1000_FWSM) &
2469 E1000_FWSM_WLOCK_MAC_MASK;
2470 locked >>= E1000_FWSM_WLOCK_MAC_SHIFT;
2471 rar = 1;
2472 if (locked == 0)
2473 rar += 11;
2474 else if (locked == 1)
2475 rar += 0;
2476 else
2477 rar += locked;
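/*
 * Worked example of the logic above: a WLOCK_MAC field of 0 yields
 * 1 + 11 = 12 usable RARs, a value of 1 leaves only the primary RAR,
 * and a value of 3 yields 1 + 3 = 4 RARs in total.
 */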
2478 Adapter->unicst_total = min(rar,
2479 MAX_NUM_UNICAST_ADDRESSES);
2480 }
2481
2482 /* Workaround for an erratum of the 82571 chipset */
2483 if ((hw->mac.type == e1000_82571) &&
2484 (e1000_get_laa_state_82571(hw) == B_TRUE))
2485 Adapter->unicst_total--;
2486
2487 /* VMware doesn't support multiple mac addresses properly */
2488 if (hw->subsystem_vendor_id == 0x15ad)
2489 Adapter->unicst_total = 1;
2490
2491 Adapter->unicst_avail = Adapter->unicst_total;
2492
2493 for (slot = 0; slot < Adapter->unicst_total; slot++) {
2494 /* Clear both the flag and MAC address */
2495 Adapter->unicst_addr[slot].reg.high = 0;
2496 Adapter->unicst_addr[slot].reg.low = 0;
2497 }
2498 } else {
2499 /* Workaround for an erratum of the 82571 chipset */
2500 if ((hw->mac.type == e1000_82571) &&
2501 (e1000_get_laa_state_82571(hw) == B_TRUE))
2502 (void) e1000_rar_set(hw, hw->mac.addr, LAST_RAR_ENTRY);
2503
2504 /* Re-configure the RAR registers */
2505 for (slot = 0; slot < Adapter->unicst_total; slot++)
2506 if (Adapter->unicst_addr[slot].mac.set == 1)
2507 (void) e1000_rar_set(hw,
2508 Adapter->unicst_addr[slot].mac.addr, slot);
2509 }
2510
2511 if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK)
2512 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED);
2513 }
2514
2515 static int
2516 e1000g_unicst_set(struct e1000g *Adapter, const uint8_t *mac_addr,
2517 int slot)
2518 {
2519 struct e1000_hw *hw;
2520
2521 hw = &Adapter->shared;
2522
2523 /*
2524 * The first revision of Wiseman silicon (rev 2.0) has an errata
2525 * that requires the receiver to be in reset when any of the
2526 * receive address registers (RAR regs) are accessed. The first
2527 * rev of Wiseman silicon also requires MWI to be disabled when
2528 * a global reset or a receive reset is issued. So before we
2529 * initialize the RARs, we check the rev of the Wiseman controller
2530 * and work around any necessary HW errata.
2531 */
2532 if ((hw->mac.type == e1000_82542) &&
2533 (hw->revision_id == E1000_REVISION_2)) {
2534 e1000_pci_clear_mwi(hw);
2535 E1000_WRITE_REG(hw, E1000_RCTL, E1000_RCTL_RST);
2536 msec_delay(5);
2537 }
2538 if (mac_addr == NULL) {
2539 E1000_WRITE_REG_ARRAY(hw, E1000_RA, slot << 1, 0);
2540 E1000_WRITE_FLUSH(hw);
2541 E1000_WRITE_REG_ARRAY(hw, E1000_RA, (slot << 1) + 1, 0);
2542 E1000_WRITE_FLUSH(hw);
2543 /* Clear both the flag and MAC address */
2544 Adapter->unicst_addr[slot].reg.high = 0;
2545 Adapter->unicst_addr[slot].reg.low = 0;
2546 } else {
2547 bcopy(mac_addr, Adapter->unicst_addr[slot].mac.addr,
2548 ETHERADDRL);
2549 (void) e1000_rar_set(hw, (uint8_t *)mac_addr, slot);
2550 Adapter->unicst_addr[slot].mac.set = 1;
2551 }
2552
2553 /* Workaround for an erratum of the 82571 chipset */
2554 if (slot == 0) {
2555 if ((hw->mac.type == e1000_82571) &&
2556 (e1000_get_laa_state_82571(hw) == B_TRUE))
2557 if (mac_addr == NULL) {
2558 E1000_WRITE_REG_ARRAY(hw, E1000_RA,
2559 slot << 1, 0);
2560 E1000_WRITE_FLUSH(hw);
2561 E1000_WRITE_REG_ARRAY(hw, E1000_RA,
2562 (slot << 1) + 1, 0);
2563 E1000_WRITE_FLUSH(hw);
2564 } else {
2565 (void) e1000_rar_set(hw, (uint8_t *)mac_addr,
2566 LAST_RAR_ENTRY);
2567 }
2568 }
2569
2570 /*
2571 * If we are using Wiseman rev 2.0 silicon, we will have previously
2572 * put the receiver in reset, and disabled MWI, to work around some
2573 * HW errata. Now we should take the receiver out of reset, and
2574 * re-enable MWI if it was previously enabled by the PCI BIOS.
2575 */
2576 if ((hw->mac.type == e1000_82542) &&
2577 (hw->revision_id == E1000_REVISION_2)) {
2578 E1000_WRITE_REG(hw, E1000_RCTL, 0);
2579 msec_delay(1);
2580 if (hw->bus.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
2581 e1000_pci_set_mwi(hw);
2582 e1000g_rx_setup(Adapter);
2583 }
2584
2585 if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) {
2586 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED);
2587 return (EIO);
2588 }
2589
2590 return (0);
2591 }
2592
2593 static int
2594 multicst_add(struct e1000g *Adapter, const uint8_t *multiaddr)
2595 {
2596 struct e1000_hw *hw = &Adapter->shared;
2597 struct ether_addr *newtable;
2598 size_t new_len;
2599 size_t old_len;
2600 int res = 0;
2601
2602 if ((multiaddr[0] & 01) == 0) {
2603 res = EINVAL;
2604 e1000g_log(Adapter, CE_WARN, "Illegal multicast address");
2605 goto done;
2606 }
2607
2608 if (Adapter->mcast_count >= Adapter->mcast_max_num) {
2609 res = ENOENT;
2610 e1000g_log(Adapter, CE_WARN,
2611 "Adapter requested more than %d mcast addresses",
2612 Adapter->mcast_max_num);
2613 goto done;
2614 }
2615
2616
2617 if (Adapter->mcast_count == Adapter->mcast_alloc_count) {
2618 old_len = Adapter->mcast_alloc_count *
2619 sizeof (struct ether_addr);
2620 new_len = (Adapter->mcast_alloc_count + MCAST_ALLOC_SIZE) *
2621 sizeof (struct ether_addr);
2622
2623 newtable = kmem_alloc(new_len, KM_NOSLEEP);
2624 if (newtable == NULL) {
2625 res = ENOMEM;
2626 e1000g_log(Adapter, CE_WARN,
2627 "Not enough memory to alloc mcast table");
2628 goto done;
2629 }
2630
2631 if (Adapter->mcast_table != NULL) {
2632 bcopy(Adapter->mcast_table, newtable, old_len);
2633 kmem_free(Adapter->mcast_table, old_len);
2634 }
2635 Adapter->mcast_alloc_count += MCAST_ALLOC_SIZE;
2636 Adapter->mcast_table = newtable;
2637 }
2638
2639 bcopy(multiaddr,
2640 &Adapter->mcast_table[Adapter->mcast_count], ETHERADDRL);
2641 Adapter->mcast_count++;
2642
2643 /*
2644 * Update the MC table in the hardware
2645 */
2646 e1000g_clear_interrupt(Adapter);
2647
2648 e1000_update_mc_addr_list(hw,
2649 (uint8_t *)Adapter->mcast_table, Adapter->mcast_count);
2650
2651 e1000g_mask_interrupt(Adapter);
2652
2653 if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) {
2654 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED);
2655 res = EIO;
2656 }
2657
2658 done:
2659 return (res);
2660 }
2661
2662 static int
2663 multicst_remove(struct e1000g *Adapter, const uint8_t *multiaddr)
2664 {
2665 struct e1000_hw *hw = &Adapter->shared;
2666 struct ether_addr *newtable;
2667 size_t new_len;
2668 size_t old_len;
2669 unsigned i;
2670
2671 for (i = 0; i < Adapter->mcast_count; i++) {
2672 if (bcmp(multiaddr, &Adapter->mcast_table[i],
2673 ETHERADDRL) == 0) {
2674 for (i++; i < Adapter->mcast_count; i++) {
2675 Adapter->mcast_table[i - 1] =
2676 Adapter->mcast_table[i];
2677 }
2678 Adapter->mcast_count--;
2679 break;
2680 }
2681 }
2682
2683 if ((Adapter->mcast_alloc_count - Adapter->mcast_count) >
2684 MCAST_ALLOC_SIZE) {
2685 old_len = Adapter->mcast_alloc_count *
2686 sizeof (struct ether_addr);
2687 new_len = (Adapter->mcast_alloc_count - MCAST_ALLOC_SIZE) *
2688 sizeof (struct ether_addr);
2689
2690 newtable = kmem_alloc(new_len, KM_NOSLEEP);
2691 if (newtable != NULL) {
2692 bcopy(Adapter->mcast_table, newtable, new_len);
2693 kmem_free(Adapter->mcast_table, old_len);
2694
2695 Adapter->mcast_alloc_count -= MCAST_ALLOC_SIZE;
2696 Adapter->mcast_table = newtable;
2697 }
2698 }
2699
2700 /*
2701 * Update the MC table in the hardware
2702 */
2703 e1000g_clear_interrupt(Adapter);
2704
2705 e1000_update_mc_addr_list(hw,
2706 (uint8_t *)Adapter->mcast_table, Adapter->mcast_count);
2707
2708 e1000g_mask_interrupt(Adapter);
2709
2710 if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) {
2711 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED);
2712 return (EIO);
2713 }
2714
2715 return (0);
2716 }
2717
2718 static void
2719 e1000g_release_multicast(struct e1000g *Adapter)
2720 {
2721 if (Adapter->mcast_table != NULL) {
2722 kmem_free(Adapter->mcast_table,
2723 Adapter->mcast_alloc_count * sizeof (struct ether_addr));
2724 Adapter->mcast_table = NULL;
2725 }
2726 }
2727
2728 int
2729 e1000g_m_multicst(void *arg, boolean_t add, const uint8_t *addr)
2730 {
2731 struct e1000g *Adapter = (struct e1000g *)arg;
2732 int result;
2733
2734 rw_enter(&Adapter->chip_lock, RW_WRITER);
2735
2736 if (Adapter->e1000g_state & E1000G_SUSPENDED) {
2737 result = ECANCELED;
2738 goto done;
2739 }
2740
2741 result = (add) ? multicst_add(Adapter, addr)
2742 : multicst_remove(Adapter, addr);
2743
2744 done:
2745 rw_exit(&Adapter->chip_lock);
2746 return (result);
2747
2748 }
2749
2750 int
2751 e1000g_m_promisc(void *arg, boolean_t on)
2752 {
2753 struct e1000g *Adapter = (struct e1000g *)arg;
2754 uint32_t rctl;
2755
2756 rw_enter(&Adapter->chip_lock, RW_WRITER);
2757
2758 if (Adapter->e1000g_state & E1000G_SUSPENDED) {
2759 rw_exit(&Adapter->chip_lock);
2760 return (ECANCELED);
2761 }
2762
2763 rctl = E1000_READ_REG(&Adapter->shared, E1000_RCTL);
2764
2765 if (on)
2766 rctl |=
2767 (E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_BAM);
2768 else
2769 rctl &= (~(E1000_RCTL_UPE | E1000_RCTL_MPE));
2770
2771 E1000_WRITE_REG(&Adapter->shared, E1000_RCTL, rctl);
2772
2773 Adapter->e1000g_promisc = on;
2774
2775 rw_exit(&Adapter->chip_lock);
2776
2777 if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) {
2778 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED);
2779 return (EIO);
2780 }
2781
2782 return (0);
2783 }
2784
2785 /*
2786 * Entry points to enable and disable interrupts at the granularity of
2787 * a group.
2788 * Turns the poll_mode for the whole adapter on and off to enable or
2789 * override the ring level polling control over the hardware interrupts.
2790 */
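/*
 * These callbacks are handed to the MAC layer through the mgi_intr and
 * mri_intr fields filled in by e1000g_fill_group()/e1000g_fill_ring()
 * below; the framework is expected to invoke them when it switches the
 * ring between interrupt-driven and polled operation.
 */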
2791 static int
2792 e1000g_rx_group_intr_enable(mac_intr_handle_t arg)
2793 {
2794 struct e1000g *adapter = (struct e1000g *)arg;
2795 e1000g_rx_ring_t *rx_ring = adapter->rx_ring;
2796
2797 /*
2798 * Later interrupts at the granularity of this ring will
2799 * invoke mac_rx() with NULL, indicating the need for another
2800 * software classification.
2801 * We have a single ring usable per adapter now, so we only need to
2802 * reset the rx handle for that one.
2803 * When more RX rings can be used, we should update each one of them.
2804 */
2805 mutex_enter(&rx_ring->rx_lock);
2806 rx_ring->mrh = NULL;
2807 adapter->poll_mode = B_FALSE;
2808 mutex_exit(&rx_ring->rx_lock);
2809 return (0);
2810 }
2811
2812 static int
2813 e1000g_rx_group_intr_disable(mac_intr_handle_t arg)
2814 {
2815 struct e1000g *adapter = (struct e1000g *)arg;
2816 e1000g_rx_ring_t *rx_ring = adapter->rx_ring;
2817
2818 mutex_enter(&rx_ring->rx_lock);
2819
2820 /*
2821 * Later interrupts at the granularity of this ring will
2822 * invoke mac_rx() with the handle for this ring;
2823 */
2824 adapter->poll_mode = B_TRUE;
2825 rx_ring->mrh = rx_ring->mrh_init;
2826 mutex_exit(&rx_ring->rx_lock);
2827 return (0);
2828 }
2829
2830 /*
2831 * Entry points to enable and disable interrupts at the granularity of
2832 * a ring.
2833 * adapter poll_mode controls whether we actually proceed with hardware
2834 * interrupt toggling.
2835 */
2836 static int
2837 e1000g_rx_ring_intr_enable(mac_intr_handle_t intrh)
2838 {
2839 e1000g_rx_ring_t *rx_ring = (e1000g_rx_ring_t *)intrh;
2840 struct e1000g *adapter = rx_ring->adapter;
2841 struct e1000_hw *hw = &adapter->shared;
2842 uint32_t intr_mask;
2843
2844 rw_enter(&adapter->chip_lock, RW_READER);
2845
2846 if (adapter->e1000g_state & E1000G_SUSPENDED) {
2847 rw_exit(&adapter->chip_lock);
2848 return (0);
2849 }
2850
2851 mutex_enter(&rx_ring->rx_lock);
2852 rx_ring->poll_flag = 0;
2853 mutex_exit(&rx_ring->rx_lock);
2854
2855 /* Rx interrupt enabling for MSI and legacy */
2856 intr_mask = E1000_READ_REG(hw, E1000_IMS);
2857 intr_mask |= E1000_IMS_RXT0;
2858 E1000_WRITE_REG(hw, E1000_IMS, intr_mask);
2859 E1000_WRITE_FLUSH(hw);
2860
2861 /* Trigger a Rx interrupt to check Rx ring */
2862 E1000_WRITE_REG(hw, E1000_ICS, E1000_IMS_RXT0);
2863 E1000_WRITE_FLUSH(hw);
2864
2865 rw_exit(&adapter->chip_lock);
2866 return (0);
2867 }
2868
2869 static int
2870 e1000g_rx_ring_intr_disable(mac_intr_handle_t intrh)
2871 {
2872 e1000g_rx_ring_t *rx_ring = (e1000g_rx_ring_t *)intrh;
2873 struct e1000g *adapter = rx_ring->adapter;
2874 struct e1000_hw *hw = &adapter->shared;
2875
2876 rw_enter(&adapter->chip_lock, RW_READER);
2877
2878 if (adapter->e1000g_state & E1000G_SUSPENDED) {
2879 rw_exit(&adapter->chip_lock);
2880 return (0);
2881 }
2882 mutex_enter(&rx_ring->rx_lock);
2883 rx_ring->poll_flag = 1;
2884 mutex_exit(&rx_ring->rx_lock);
2885
2886 /* Rx interrupt disabling for MSI and legacy */
2887 E1000_WRITE_REG(hw, E1000_IMC, E1000_IMS_RXT0);
2888 E1000_WRITE_FLUSH(hw);
2889
2890 rw_exit(&adapter->chip_lock);
2891 return (0);
2892 }
2893
2894 /*
2895 * e1000g_unicst_find - Find the slot for the specified unicast address
2896 */
2897 static int
2898 e1000g_unicst_find(struct e1000g *Adapter, const uint8_t *mac_addr)
2899 {
2900 int slot;
2901
2902 for (slot = 0; slot < Adapter->unicst_total; slot++) {
2903 if ((Adapter->unicst_addr[slot].mac.set == 1) &&
2904 (bcmp(Adapter->unicst_addr[slot].mac.addr,
2905 mac_addr, ETHERADDRL) == 0))
2906 return (slot);
2907 }
2908
2909 return (-1);
2910 }
2911
2912 /*
2913 * Entry points to add and remove a MAC address to a ring group.
2914 * The caller takes care of adding and removing the MAC addresses
2915 * to the filter via these two routines.
2916 */
2917
2918 static int
2919 e1000g_addmac(void *arg, const uint8_t *mac_addr)
2920 {
2921 struct e1000g *Adapter = (struct e1000g *)arg;
2922 int slot, err;
2923
2924 rw_enter(&Adapter->chip_lock, RW_WRITER);
2925
2926 if (Adapter->e1000g_state & E1000G_SUSPENDED) {
2927 rw_exit(&Adapter->chip_lock);
2928 return (ECANCELED);
2929 }
2930
2931 if (e1000g_unicst_find(Adapter, mac_addr) != -1) {
2932 /* The same address is already in slot */
2933 rw_exit(&Adapter->chip_lock);
2934 return (0);
2935 }
2936
2937 if (Adapter->unicst_avail == 0) {
2938 /* no slots available */
2939 rw_exit(&Adapter->chip_lock);
2940 return (ENOSPC);
2941 }
2942
2943 /* Search for a free slot */
2944 for (slot = 0; slot < Adapter->unicst_total; slot++) {
2945 if (Adapter->unicst_addr[slot].mac.set == 0)
2946 break;
2947 }
2948 ASSERT(slot < Adapter->unicst_total);
2949
2950 err = e1000g_unicst_set(Adapter, mac_addr, slot);
2951 if (err == 0)
2952 Adapter->unicst_avail--;
2953
2954 rw_exit(&Adapter->chip_lock);
2955
2956 return (err);
2957 }
2958
2959 static int
2960 e1000g_remmac(void *arg, const uint8_t *mac_addr)
2961 {
2962 struct e1000g *Adapter = (struct e1000g *)arg;
2963 int slot, err;
2964
2965 rw_enter(&Adapter->chip_lock, RW_WRITER);
2966
2967 if (Adapter->e1000g_state & E1000G_SUSPENDED) {
2968 rw_exit(&Adapter->chip_lock);
2969 return (ECANCELED);
2970 }
2971
2972 slot = e1000g_unicst_find(Adapter, mac_addr);
2973 if (slot == -1) {
2974 rw_exit(&Adapter->chip_lock);
2975 return (EINVAL);
2976 }
2977
2978 ASSERT(Adapter->unicst_addr[slot].mac.set);
2979
2980 /* Clear this slot */
2981 err = e1000g_unicst_set(Adapter, NULL, slot);
2982 if (err == 0)
2983 Adapter->unicst_avail++;
2984
2985 rw_exit(&Adapter->chip_lock);
2986
2987 return (err);
2988 }
2989
2990 static int
2991 e1000g_ring_start(mac_ring_driver_t rh, uint64_t mr_gen_num)
2992 {
2993 e1000g_rx_ring_t *rx_ring = (e1000g_rx_ring_t *)rh;
2994
2995 mutex_enter(&rx_ring->rx_lock);
2996 rx_ring->ring_gen_num = mr_gen_num;
2997 mutex_exit(&rx_ring->rx_lock);
2998 return (0);
2999 }
3000
3001 /*
3002 * Callback function for the MAC layer to register all rings.
3003 *
3004 * The hardware supports a single group with currently only one ring
3005 * available.
3006 * Though not offering virtualization ability per se, exposing the
3007 * group/ring still enables the polling and interrupt toggling.
3008 */
3009 /* ARGSUSED */
3010 void
3011 e1000g_fill_ring(void *arg, mac_ring_type_t rtype, const int grp_index,
3012 const int ring_index, mac_ring_info_t *infop, mac_ring_handle_t rh)
3013 {
3014 struct e1000g *Adapter = (struct e1000g *)arg;
3015 e1000g_rx_ring_t *rx_ring = Adapter->rx_ring;
3016 mac_intr_t *mintr;
3017
3018 /*
3019 * We advertised only RX group/rings, so the MAC framework shouldn't
3020 * ask for anything else.
3021 */
3022 ASSERT(rtype == MAC_RING_TYPE_RX && grp_index == 0 && ring_index == 0);
3023
3024 rx_ring->mrh = rx_ring->mrh_init = rh;
3025 infop->mri_driver = (mac_ring_driver_t)rx_ring;
3026 infop->mri_start = e1000g_ring_start;
3027 infop->mri_stop = NULL;
3028 infop->mri_poll = e1000g_poll_ring;
3029 infop->mri_stat = e1000g_rx_ring_stat;
3030
3031 /* Ring level interrupts */
3032 mintr = &infop->mri_intr;
3033 mintr->mi_handle = (mac_intr_handle_t)rx_ring;
3034 mintr->mi_enable = e1000g_rx_ring_intr_enable;
3035 mintr->mi_disable = e1000g_rx_ring_intr_disable;
3036 if (Adapter->msi_enable)
3037 mintr->mi_ddi_handle = Adapter->htable[0];
3038 }
3039
3040 /* ARGSUSED */
3041 static void
3042 e1000g_fill_group(void *arg, mac_ring_type_t rtype, const int grp_index,
3043 mac_group_info_t *infop, mac_group_handle_t gh)
3044 {
3045 struct e1000g *Adapter = (struct e1000g *)arg;
3046 mac_intr_t *mintr;
3047
3048 /*
3049 * We advertised a single RX ring. Getting a request for anything else
3050 * signifies a bug in the MAC framework.
3051 */
3052 ASSERT(rtype == MAC_RING_TYPE_RX && grp_index == 0);
3053
3054 Adapter->rx_group = gh;
3055
3056 infop->mgi_driver = (mac_group_driver_t)Adapter;
3057 infop->mgi_start = NULL;
3058 infop->mgi_stop = NULL;
3059 infop->mgi_addmac = e1000g_addmac;
3060 infop->mgi_remmac = e1000g_remmac;
3061 infop->mgi_count = 1;
3062
3063 /* Group level interrupts */
3064 mintr = &infop->mgi_intr;
3065 mintr->mi_handle = (mac_intr_handle_t)Adapter;
3066 mintr->mi_enable = e1000g_rx_group_intr_enable;
3067 mintr->mi_disable = e1000g_rx_group_intr_disable;
3068 }
3069
3070 static boolean_t
3071 e1000g_m_getcapab(void *arg, mac_capab_t cap, void *cap_data)
3072 {
3073 struct e1000g *Adapter = (struct e1000g *)arg;
3074
3075 switch (cap) {
3076 case MAC_CAPAB_HCKSUM: {
3077 uint32_t *txflags = cap_data;
3078
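/*
 * HCKSUM_IPHDRCKSUM advertises IPv4 header checksum offload and
 * HCKSUM_INET_PARTIAL advertises partial (start/stuff offset based)
 * TCP/UDP checksum offload to the MAC layer.
 */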
3079 if (Adapter->tx_hcksum_enable)
3080 *txflags = HCKSUM_IPHDRCKSUM |
3081 HCKSUM_INET_PARTIAL;
3082 else
3083 return (B_FALSE);
3084 break;
3085 }
3086
3087 case MAC_CAPAB_LSO: {
3088 mac_capab_lso_t *cap_lso = cap_data;
3089
3090 if (Adapter->lso_enable) {
3091 cap_lso->lso_flags = LSO_TX_BASIC_TCP_IPV4;
3092 cap_lso->lso_basic_tcp_ipv4.lso_max =
3093 E1000_LSO_MAXLEN;
3094 } else
3095 return (B_FALSE);
3096 break;
3097 }
3098 case MAC_CAPAB_RINGS: {
3099 mac_capab_rings_t *cap_rings = cap_data;
3100
3101 /* No TX rings exposed yet */
3102 if (cap_rings->mr_type != MAC_RING_TYPE_RX)
3103 return (B_FALSE);
3104
3105 cap_rings->mr_group_type = MAC_GROUP_TYPE_STATIC;
3106 cap_rings->mr_rnum = 1;
3107 cap_rings->mr_gnum = 1;
3108 cap_rings->mr_rget = e1000g_fill_ring;
3109 cap_rings->mr_gget = e1000g_fill_group;
3110 break;
3111 }
3112 default:
3113 return (B_FALSE);
3114 }
3115 return (B_TRUE);
3116 }
3117
3118 static boolean_t
3119 e1000g_param_locked(mac_prop_id_t pr_num)
3120 {
3121 /*
3122 * All en_* parameters are locked (read-only) while
3123 * the device is in any sort of loopback mode ...
3124 */
3125 switch (pr_num) {
3126 case MAC_PROP_EN_1000FDX_CAP:
3127 case MAC_PROP_EN_1000HDX_CAP:
3128 case MAC_PROP_EN_100FDX_CAP:
3129 case MAC_PROP_EN_100HDX_CAP:
3130 case MAC_PROP_EN_10FDX_CAP:
3131 case MAC_PROP_EN_10HDX_CAP:
3132 case MAC_PROP_AUTONEG:
3133 case MAC_PROP_FLOWCTRL:
3134 return (B_TRUE);
3135 }
3136 return (B_FALSE);
3137 }
3138
3139 /*
3140 * callback function for set/get of properties
3141 */
3142 static int
3143 e1000g_m_setprop(void *arg, const char *pr_name, mac_prop_id_t pr_num,
3144 uint_t pr_valsize, const void *pr_val)
3145 {
3146 struct e1000g *Adapter = arg;
3147 struct e1000_hw *hw = &Adapter->shared;
3148 struct e1000_fc_info *fc = &Adapter->shared.fc;
3149 int err = 0;
3150 link_flowctrl_t flowctrl;
3151 uint32_t cur_mtu, new_mtu;
3152
3153 rw_enter(&Adapter->chip_lock, RW_WRITER);
3154
3155 if (Adapter->e1000g_state & E1000G_SUSPENDED) {
3156 rw_exit(&Adapter->chip_lock);
3157 return (ECANCELED);
3158 }
3159
3160 if (Adapter->loopback_mode != E1000G_LB_NONE &&
3161 e1000g_param_locked(pr_num)) {
3162 /*
3163 * All en_* parameters are locked (read-only)
3164 * while the device is in any sort of loopback mode.
3165 */
3166 rw_exit(&Adapter->chip_lock);
3167 return (EBUSY);
3168 }
3169
3170 switch (pr_num) {
3171 case MAC_PROP_EN_1000FDX_CAP:
3172 if (hw->phy.media_type != e1000_media_type_copper) {
3173 err = ENOTSUP;
3174 break;
3175 }
3176 Adapter->param_en_1000fdx = *(uint8_t *)pr_val;
3177 Adapter->param_adv_1000fdx = *(uint8_t *)pr_val;
3178 goto reset;
3179 case MAC_PROP_EN_100FDX_CAP:
3180 if (hw->phy.media_type != e1000_media_type_copper) {
3181 err = ENOTSUP;
3182 break;
3183 }
3184 Adapter->param_en_100fdx = *(uint8_t *)pr_val;
3185 Adapter->param_adv_100fdx = *(uint8_t *)pr_val;
3186 goto reset;
3187 case MAC_PROP_EN_100HDX_CAP:
3188 if (hw->phy.media_type != e1000_media_type_copper) {
3189 err = ENOTSUP;
3190 break;
3191 }
3192 Adapter->param_en_100hdx = *(uint8_t *)pr_val;
3193 Adapter->param_adv_100hdx = *(uint8_t *)pr_val;
3194 goto reset;
3195 case MAC_PROP_EN_10FDX_CAP:
3196 if (hw->phy.media_type != e1000_media_type_copper) {
3197 err = ENOTSUP;
3198 break;
3199 }
3200 Adapter->param_en_10fdx = *(uint8_t *)pr_val;
3201 Adapter->param_adv_10fdx = *(uint8_t *)pr_val;
3202 goto reset;
3203 case MAC_PROP_EN_10HDX_CAP:
3204 if (hw->phy.media_type != e1000_media_type_copper) {
3205 err = ENOTSUP;
3206 break;
3207 }
3208 Adapter->param_en_10hdx = *(uint8_t *)pr_val;
3209 Adapter->param_adv_10hdx = *(uint8_t *)pr_val;
3210 goto reset;
3211 case MAC_PROP_AUTONEG:
3212 if (hw->phy.media_type != e1000_media_type_copper) {
3213 err = ENOTSUP;
3214 break;
3215 }
3216 Adapter->param_adv_autoneg = *(uint8_t *)pr_val;
3217 goto reset;
3218 case MAC_PROP_FLOWCTRL:
3219 fc->send_xon = B_TRUE;
3220 bcopy(pr_val, &flowctrl, sizeof (flowctrl));
3221
3222 switch (flowctrl) {
3223 default:
3224 err = EINVAL;
3225 break;
3226 case LINK_FLOWCTRL_NONE:
3227 fc->requested_mode = e1000_fc_none;
3228 break;
3229 case LINK_FLOWCTRL_RX:
3230 fc->requested_mode = e1000_fc_rx_pause;
3231 break;
3232 case LINK_FLOWCTRL_TX:
3233 fc->requested_mode = e1000_fc_tx_pause;
3234 break;
3235 case LINK_FLOWCTRL_BI:
3236 fc->requested_mode = e1000_fc_full;
3237 break;
3238 }
3239 reset:
3240 if (err == 0) {
3241 /* check PCH limits & reset the link */
3242 e1000g_pch_limits(Adapter);
3243 if (e1000g_reset_link(Adapter) != DDI_SUCCESS)
3244 err = EINVAL;
3245 }
3246 break;
3247 case MAC_PROP_ADV_1000FDX_CAP:
3248 case MAC_PROP_ADV_1000HDX_CAP:
3249 case MAC_PROP_ADV_100FDX_CAP:
3250 case MAC_PROP_ADV_100HDX_CAP:
3251 case MAC_PROP_ADV_10FDX_CAP:
3252 case MAC_PROP_ADV_10HDX_CAP:
3253 case MAC_PROP_EN_1000HDX_CAP:
3254 case MAC_PROP_STATUS:
3255 case MAC_PROP_SPEED:
3256 case MAC_PROP_DUPLEX:
3257 err = ENOTSUP; /* read-only prop. Can't set this. */
3258 break;
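/*
 * A hypothetical administrative example (not from this file): the MTU is
 * normally changed with "dladm set-linkprop -p mtu=<n> <link>", which
 * only succeeds in the case below while the adapter is not started.
 */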
3259 case MAC_PROP_MTU:
3260 /* adapter must be stopped for an MTU change */
3261 if (Adapter->e1000g_state & E1000G_STARTED) {
3262 err = EBUSY;
3263 break;
3264 }
3265
3266 cur_mtu = Adapter->default_mtu;
3267
3268 /* get new requested MTU */
3269 bcopy(pr_val, &new_mtu, sizeof (new_mtu));
3270 if (new_mtu == cur_mtu) {
3271 err = 0;
3272 break;
3273 }
3274
3275 if ((new_mtu < DEFAULT_MTU) ||
3276 (new_mtu > Adapter->max_mtu)) {
3277 err = EINVAL;
3278 break;
3279 }
3280
3281 /* inform MAC framework of new MTU */
3282 err = mac_maxsdu_update(Adapter->mh, new_mtu);
3283
3284 if (err == 0) {
3285 Adapter->default_mtu = new_mtu;
3286 Adapter->max_frame_size =
3287 e1000g_mtu2maxframe(new_mtu);
3288
3289 /*
3290 * check PCH limits & set buffer sizes to
3291 * match new MTU
3292 */
3293 e1000g_pch_limits(Adapter);
3294 e1000g_set_bufsize(Adapter);
3295
3296 /*
3297 * decrease the number of descriptors and free
3298 * packets for jumbo frames to reduce tx/rx
3299 * resource consumption
3300 */
3301 if (Adapter->max_frame_size >=
3302 (FRAME_SIZE_UPTO_4K)) {
3303 if (Adapter->tx_desc_num_flag == 0)
3304 Adapter->tx_desc_num =
3305 DEFAULT_JUMBO_NUM_TX_DESC;
3306
3307 if (Adapter->rx_desc_num_flag == 0)
3308 Adapter->rx_desc_num =
3309 DEFAULT_JUMBO_NUM_RX_DESC;
3310
3311 if (Adapter->tx_buf_num_flag == 0)
3312 Adapter->tx_freelist_num =
3313 DEFAULT_JUMBO_NUM_TX_BUF;
3314
3315 if (Adapter->rx_buf_num_flag == 0)
3316 Adapter->rx_freelist_limit =
3317 DEFAULT_JUMBO_NUM_RX_BUF;
3318 } else {
3319 if (Adapter->tx_desc_num_flag == 0)
3320 Adapter->tx_desc_num =
3321 DEFAULT_NUM_TX_DESCRIPTOR;
3322
3323 if (Adapter->rx_desc_num_flag == 0)
3324 Adapter->rx_desc_num =
3325 DEFAULT_NUM_RX_DESCRIPTOR;
3326
3327 if (Adapter->tx_buf_num_flag == 0)
3328 Adapter->tx_freelist_num =
3329 DEFAULT_NUM_TX_FREELIST;
3330
3331 if (Adapter->rx_buf_num_flag == 0)
3332 Adapter->rx_freelist_limit =
3333 DEFAULT_NUM_RX_FREELIST;
3334 }
3335 }
3336 break;
3337 case MAC_PROP_PRIVATE:
3338 err = e1000g_set_priv_prop(Adapter, pr_name,
3339 pr_valsize, pr_val);
3340 break;
3341 default:
3342 err = ENOTSUP;
3343 break;
3344 }
3345 rw_exit(&Adapter->chip_lock);
3346 return (err);
3347 }
3348
3349 static int
3350 e1000g_m_getprop(void *arg, const char *pr_name, mac_prop_id_t pr_num,
3351 uint_t pr_valsize, void *pr_val)
3352 {
3353 struct e1000g *Adapter = arg;
3354 struct e1000_fc_info *fc = &Adapter->shared.fc;
3355 int err = 0;
3356 link_flowctrl_t flowctrl;
3357 uint64_t tmp = 0;
3358
3359 switch (pr_num) {
3360 case MAC_PROP_DUPLEX:
3361 ASSERT(pr_valsize >= sizeof (link_duplex_t));
3362 bcopy(&Adapter->link_duplex, pr_val,
3363 sizeof (link_duplex_t));
3364 break;
3365 case MAC_PROP_SPEED:
3366 ASSERT(pr_valsize >= sizeof (uint64_t));
3367 tmp = Adapter->link_speed * 1000000ull;
3368 bcopy(&tmp, pr_val, sizeof (tmp));
3369 break;
3370 case MAC_PROP_AUTONEG:
3371 *(uint8_t *)pr_val = Adapter->param_adv_autoneg;
3372 break;
3373 case MAC_PROP_FLOWCTRL:
3374 ASSERT(pr_valsize >= sizeof (link_flowctrl_t));
3375 switch (fc->current_mode) {
3376 case e1000_fc_none:
3377 flowctrl = LINK_FLOWCTRL_NONE;
3378 break;
3379 case e1000_fc_rx_pause:
3380 flowctrl = LINK_FLOWCTRL_RX;
3381 break;
3382 case e1000_fc_tx_pause:
3383 flowctrl = LINK_FLOWCTRL_TX;
3384 break;
3385 case e1000_fc_full:
3386 flowctrl = LINK_FLOWCTRL_BI;
3387 break;
3388 }
3389 bcopy(&flowctrl, pr_val, sizeof (flowctrl));
3390 break;
3391 case MAC_PROP_ADV_1000FDX_CAP:
3392 *(uint8_t *)pr_val = Adapter->param_adv_1000fdx;
3393 break;
3394 case MAC_PROP_EN_1000FDX_CAP:
3395 *(uint8_t *)pr_val = Adapter->param_en_1000fdx;
3396 break;
3397 case MAC_PROP_ADV_1000HDX_CAP:
3398 *(uint8_t *)pr_val = Adapter->param_adv_1000hdx;
3399 break;
3400 case MAC_PROP_EN_1000HDX_CAP:
3401 *(uint8_t *)pr_val = Adapter->param_en_1000hdx;
3402 break;
3403 case MAC_PROP_ADV_100FDX_CAP:
3404 *(uint8_t *)pr_val = Adapter->param_adv_100fdx;
3405 break;
3406 case MAC_PROP_EN_100FDX_CAP:
3407 *(uint8_t *)pr_val = Adapter->param_en_100fdx;
3408 break;
3409 case MAC_PROP_ADV_100HDX_CAP:
3410 *(uint8_t *)pr_val = Adapter->param_adv_100hdx;
3411 break;
3412 case MAC_PROP_EN_100HDX_CAP:
3413 *(uint8_t *)pr_val = Adapter->param_en_100hdx;
3414 break;
3415 case MAC_PROP_ADV_10FDX_CAP:
3416 *(uint8_t *)pr_val = Adapter->param_adv_10fdx;
3417 break;
3418 case MAC_PROP_EN_10FDX_CAP:
3419 *(uint8_t *)pr_val = Adapter->param_en_10fdx;
3420 break;
3421 case MAC_PROP_ADV_10HDX_CAP:
3422 *(uint8_t *)pr_val = Adapter->param_adv_10hdx;
3423 break;
3424 case MAC_PROP_EN_10HDX_CAP:
3425 *(uint8_t *)pr_val = Adapter->param_en_10hdx;
3426 break;
3427 case MAC_PROP_ADV_100T4_CAP:
3428 case MAC_PROP_EN_100T4_CAP:
3429 *(uint8_t *)pr_val = Adapter->param_adv_100t4;
3430 break;
3431 case MAC_PROP_PRIVATE:
3432 err = e1000g_get_priv_prop(Adapter, pr_name,
3433 pr_valsize, pr_val);
3434 break;
3435 default:
3436 err = ENOTSUP;
3437 break;
3438 }
3439
3440 return (err);
3441 }
3442
3443 static void
3444 e1000g_m_propinfo(void *arg, const char *pr_name, mac_prop_id_t pr_num,
3445 mac_prop_info_handle_t prh)
3446 {
3447 struct e1000g *Adapter = arg;
3448 struct e1000_hw *hw = &Adapter->shared;
3449
3450 switch (pr_num) {
3451 case MAC_PROP_DUPLEX:
3452 case MAC_PROP_SPEED:
3453 case MAC_PROP_ADV_1000FDX_CAP:
3454 case MAC_PROP_ADV_1000HDX_CAP:
3455 case MAC_PROP_ADV_100FDX_CAP:
3456 case MAC_PROP_ADV_100HDX_CAP:
3457 case MAC_PROP_ADV_10FDX_CAP:
3458 case MAC_PROP_ADV_10HDX_CAP:
3459 case MAC_PROP_ADV_100T4_CAP:
3460 case MAC_PROP_EN_100T4_CAP:
3461 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
3462 break;
3463
3464 case MAC_PROP_EN_1000FDX_CAP:
3465 if (hw->phy.media_type != e1000_media_type_copper) {
3466 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
3467 } else {
3468 mac_prop_info_set_default_uint8(prh,
3469 ((Adapter->phy_ext_status &
3470 IEEE_ESR_1000T_FD_CAPS) ||
3471 (Adapter->phy_ext_status &
3472 IEEE_ESR_1000X_FD_CAPS)) ? 1 : 0);
3473 }
3474 break;
3475
3476 case MAC_PROP_EN_100FDX_CAP:
3477 if (hw->phy.media_type != e1000_media_type_copper) {
3478 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
3479 } else {
3480 mac_prop_info_set_default_uint8(prh,
3481 ((Adapter->phy_status & MII_SR_100X_FD_CAPS) ||
3482 (Adapter->phy_status & MII_SR_100T2_FD_CAPS))
3483 ? 1 : 0);
3484 }
3485 break;
3486
3487 case MAC_PROP_EN_100HDX_CAP:
3488 if (hw->phy.media_type != e1000_media_type_copper) {
3489 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
3490 } else {
3491 mac_prop_info_set_default_uint8(prh,
3492 ((Adapter->phy_status & MII_SR_100X_HD_CAPS) ||
3493 (Adapter->phy_status & MII_SR_100T2_HD_CAPS))
3494 ? 1 : 0);
3495 }
3496 break;
3497
3498 case MAC_PROP_EN_10FDX_CAP:
3499 if (hw->phy.media_type != e1000_media_type_copper) {
3500 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
3501 } else {
3502 mac_prop_info_set_default_uint8(prh,
3503 (Adapter->phy_status & MII_SR_10T_FD_CAPS) ? 1 : 0);
3504 }
3505 break;
3506
3507 case MAC_PROP_EN_10HDX_CAP:
3508 if (hw->phy.media_type != e1000_media_type_copper) {
3509 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
3510 } else {
3511 mac_prop_info_set_default_uint8(prh,
3512 (Adapter->phy_status & MII_SR_10T_HD_CAPS) ? 1 : 0);
3513 }
3514 break;
3515
3516 case MAC_PROP_EN_1000HDX_CAP:
3517 if (hw->phy.media_type != e1000_media_type_copper)
3518 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
3519 break;
3520
3521 case MAC_PROP_AUTONEG:
3522 if (hw->phy.media_type != e1000_media_type_copper) {
3523 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
3524 } else {
3525 mac_prop_info_set_default_uint8(prh,
3526 (Adapter->phy_status & MII_SR_AUTONEG_CAPS)
3527 ? 1 : 0);
3528 }
3529 break;
3530
3531 case MAC_PROP_FLOWCTRL:
3532 mac_prop_info_set_default_link_flowctrl(prh, LINK_FLOWCTRL_BI);
3533 break;
3534
3535 case MAC_PROP_MTU: {
3536 struct e1000_mac_info *mac = &Adapter->shared.mac;
3537 struct e1000_phy_info *phy = &Adapter->shared.phy;
3538 uint32_t max;
3539
3540 /* some MAC types do not support jumbo frames */
3541 if ((mac->type == e1000_ich8lan) ||
3542 ((mac->type == e1000_ich9lan) && (phy->type ==
3543 e1000_phy_ife))) {
3544 max = DEFAULT_MTU;
3545 } else {
3546 max = Adapter->max_mtu;
3547 }
3548
3549 mac_prop_info_set_range_uint32(prh, DEFAULT_MTU, max);
3550 break;
3551 }
3552 case MAC_PROP_PRIVATE: {
3553 char valstr[64];
3554 int value;
3555
3556 if (strcmp(pr_name, "_adv_pause_cap") == 0 ||
3557 strcmp(pr_name, "_adv_asym_pause_cap") == 0) {
3558 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
3559 return;
3560 } else if (strcmp(pr_name, "_tx_bcopy_threshold") == 0) {
3561 value = DEFAULT_TX_BCOPY_THRESHOLD;
3562 } else if (strcmp(pr_name, "_tx_interrupt_enable") == 0) {
3563 value = DEFAULT_TX_INTR_ENABLE;
3564 } else if (strcmp(pr_name, "_tx_intr_delay") == 0) {
3565 value = DEFAULT_TX_INTR_DELAY;
3566 } else if (strcmp(pr_name, "_tx_intr_abs_delay") == 0) {
3567 value = DEFAULT_TX_INTR_ABS_DELAY;
3568 } else if (strcmp(pr_name, "_rx_bcopy_threshold") == 0) {
3569 value = DEFAULT_RX_BCOPY_THRESHOLD;
3570 } else if (strcmp(pr_name, "_max_num_rcv_packets") == 0) {
3571 value = DEFAULT_RX_LIMIT_ON_INTR;
3572 } else if (strcmp(pr_name, "_rx_intr_delay") == 0) {
3573 value = DEFAULT_RX_INTR_DELAY;
3574 } else if (strcmp(pr_name, "_rx_intr_abs_delay") == 0) {
3575 value = DEFAULT_RX_INTR_ABS_DELAY;
3576 } else if (strcmp(pr_name, "_intr_throttling_rate") == 0) {
3577 value = DEFAULT_INTR_THROTTLING;
3578 } else if (strcmp(pr_name, "_intr_adaptive") == 0) {
3579 value = 1;
3580 } else {
3581 return;
3582 }
3583
3584 (void) snprintf(valstr, sizeof (valstr), "%d", value);
3585 mac_prop_info_set_default_str(prh, valstr);
3586 break;
3587 }
3588 }
3589 }
3590
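/*
 * Private ("_"-prefixed) properties are driver-specific tunables. As an
 * assumed usage example, "dladm set-linkprop -p _tx_intr_delay=64 e1000g0"
 * would be handled by e1000g_set_priv_prop() below.
 */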
3591 /* ARGSUSED2 */
3592 static int
3593 e1000g_set_priv_prop(struct e1000g *Adapter, const char *pr_name,
3594 uint_t pr_valsize, const void *pr_val)
3595 {
3596 int err = 0;
3597 long result;
3598 struct e1000_hw *hw = &Adapter->shared;
3599
3600 if (strcmp(pr_name, "_tx_bcopy_threshold") == 0) {
3601 if (pr_val == NULL) {
3602 err = EINVAL;
3603 return (err);
3604 }
3605 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
3606 if (result < MIN_TX_BCOPY_THRESHOLD ||
3607 result > MAX_TX_BCOPY_THRESHOLD)
3608 err = EINVAL;
3609 else {
3610 Adapter->tx_bcopy_thresh = (uint32_t)result;
3611 }
3612 return (err);
3613 }
3614 if (strcmp(pr_name, "_tx_interrupt_enable") == 0) {
3615 if (pr_val == NULL) {
3616 err = EINVAL;
3617 return (err);
3618 }
3619 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
3620 if (result < 0 || result > 1)
3621 err = EINVAL;
3622 else {
3623 Adapter->tx_intr_enable = (result == 1) ?
3624 B_TRUE: B_FALSE;
3625 if (Adapter->tx_intr_enable)
3626 e1000g_mask_tx_interrupt(Adapter);
3627 else
3628 e1000g_clear_tx_interrupt(Adapter);
3629 if (e1000g_check_acc_handle(
3630 Adapter->osdep.reg_handle) != DDI_FM_OK) {
3631 ddi_fm_service_impact(Adapter->dip,
3632 DDI_SERVICE_DEGRADED);
3633 err = EIO;
3634 }
3635 }
3636 return (err);
3637 }
3638 if (strcmp(pr_name, "_tx_intr_delay") == 0) {
3639 if (pr_val == NULL) {
3640 err = EINVAL;
3641 return (err);
3642 }
3643 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
3644 if (result < MIN_TX_INTR_DELAY ||
3645 result > MAX_TX_INTR_DELAY)
3646 err = EINVAL;
3647 else {
3648 Adapter->tx_intr_delay = (uint32_t)result;
3649 E1000_WRITE_REG(hw, E1000_TIDV, Adapter->tx_intr_delay);
3650 if (e1000g_check_acc_handle(
3651 Adapter->osdep.reg_handle) != DDI_FM_OK) {
3652 ddi_fm_service_impact(Adapter->dip,
3653 DDI_SERVICE_DEGRADED);
3654 err = EIO;
3655 }
3656 }
3657 return (err);
3658 }
3659 if (strcmp(pr_name, "_tx_intr_abs_delay") == 0) {
3660 if (pr_val == NULL) {
3661 err = EINVAL;
3662 return (err);
3663 }
3664 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
3665 if (result < MIN_TX_INTR_ABS_DELAY ||
3666 result > MAX_TX_INTR_ABS_DELAY)
3667 err = EINVAL;
3668 else {
3669 Adapter->tx_intr_abs_delay = (uint32_t)result;
3670 E1000_WRITE_REG(hw, E1000_TADV,
3671 Adapter->tx_intr_abs_delay);
3672 if (e1000g_check_acc_handle(
3673 Adapter->osdep.reg_handle) != DDI_FM_OK) {
3674 ddi_fm_service_impact(Adapter->dip,
3675 DDI_SERVICE_DEGRADED);
3676 err = EIO;
3677 }
3678 }
3679 return (err);
3680 }
3681 if (strcmp(pr_name, "_rx_bcopy_threshold") == 0) {
3682 if (pr_val == NULL) {
3683 err = EINVAL;
3684 return (err);
3685 }
3686 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
3687 if (result < MIN_RX_BCOPY_THRESHOLD ||
3688 result > MAX_RX_BCOPY_THRESHOLD)
3689 err = EINVAL;
3690 else
3691 Adapter->rx_bcopy_thresh = (uint32_t)result;
3692 return (err);
3693 }
3694 if (strcmp(pr_name, "_max_num_rcv_packets") == 0) {
3695 if (pr_val == NULL) {
3696 err = EINVAL;
3697 return (err);
3698 }
3699 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
3700 if (result < MIN_RX_LIMIT_ON_INTR ||
3701 result > MAX_RX_LIMIT_ON_INTR)
3702 err = EINVAL;
3703 else
3704 Adapter->rx_limit_onintr = (uint32_t)result;
3705 return (err);
3706 }
3707 if (strcmp(pr_name, "_rx_intr_delay") == 0) {
3708 if (pr_val == NULL) {
3709 err = EINVAL;
3710 return (err);
3711 }
3712 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
3713 if (result < MIN_RX_INTR_DELAY ||
3714 result > MAX_RX_INTR_DELAY)
3715 err = EINVAL;
3716 else {
3717 Adapter->rx_intr_delay = (uint32_t)result;
3718 E1000_WRITE_REG(hw, E1000_RDTR, Adapter->rx_intr_delay);
3719 if (e1000g_check_acc_handle(
3720 Adapter->osdep.reg_handle) != DDI_FM_OK) {
3721 ddi_fm_service_impact(Adapter->dip,
3722 DDI_SERVICE_DEGRADED);
3723 err = EIO;
3724 }
3725 }
3726 return (err);
3727 }
3728 if (strcmp(pr_name, "_rx_intr_abs_delay") == 0) {
3729 if (pr_val == NULL) {
3730 err = EINVAL;
3731 return (err);
3732 }
3733 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
3734 if (result < MIN_RX_INTR_ABS_DELAY ||
3735 result > MAX_RX_INTR_ABS_DELAY)
3736 err = EINVAL;
3737 else {
3738 Adapter->rx_intr_abs_delay = (uint32_t)result;
3739 E1000_WRITE_REG(hw, E1000_RADV,
3740 Adapter->rx_intr_abs_delay);
3741 if (e1000g_check_acc_handle(
3742 Adapter->osdep.reg_handle) != DDI_FM_OK) {
3743 ddi_fm_service_impact(Adapter->dip,
3744 DDI_SERVICE_DEGRADED);
3745 err = EIO;
3746 }
3747 }
3748 return (err);
3749 }
3750 if (strcmp(pr_name, "_intr_throttling_rate") == 0) {
3751 if (pr_val == NULL) {
3752 err = EINVAL;
3753 return (err);
3754 }
3755 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
3756 if (result < MIN_INTR_THROTTLING ||
3757 result > MAX_INTR_THROTTLING)
3758 err = EINVAL;
3759 else {
3760 if (hw->mac.type >= e1000_82540) {
3761 Adapter->intr_throttling_rate =
3762 (uint32_t)result;
3763 E1000_WRITE_REG(hw, E1000_ITR,
3764 Adapter->intr_throttling_rate);
3765 if (e1000g_check_acc_handle(
3766 Adapter->osdep.reg_handle) != DDI_FM_OK) {
3767 ddi_fm_service_impact(Adapter->dip,
3768 DDI_SERVICE_DEGRADED);
3769 err = EIO;
3770 }
3771 } else
3772 err = EINVAL;
3773 }
3774 return (err);
3775 }
3776 if (strcmp(pr_name, "_intr_adaptive") == 0) {
3777 if (pr_val == NULL) {
3778 err = EINVAL;
3779 return (err);
3780 }
3781 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
3782 if (result < 0 || result > 1)
3783 err = EINVAL;
3784 else {
3785 if (hw->mac.type >= e1000_82540) {
3786 Adapter->intr_adaptive = (result == 1) ?
3787 B_TRUE : B_FALSE;
3788 } else {
3789 err = EINVAL;
3790 }
3791 }
3792 return (err);
3793 }
3794 return (ENOTSUP);
3795 }
3796
3797 static int
3798 e1000g_get_priv_prop(struct e1000g *Adapter, const char *pr_name,
3799 uint_t pr_valsize, void *pr_val)
3800 {
3801 int err = ENOTSUP;
3802 int value;
3803
3804 if (strcmp(pr_name, "_adv_pause_cap") == 0) {
3805 value = Adapter->param_adv_pause;
3806 err = 0;
3807 goto done;
3808 }
3809 if (strcmp(pr_name, "_adv_asym_pause_cap") == 0) {
3810 value = Adapter->param_adv_asym_pause;
3811 err = 0;
3812 goto done;
3813 }
3814 if (strcmp(pr_name, "_tx_bcopy_threshold") == 0) {
3815 value = Adapter->tx_bcopy_thresh;
3816 err = 0;
3817 goto done;
3818 }
3819 if (strcmp(pr_name, "_tx_interrupt_enable") == 0) {
3820 value = Adapter->tx_intr_enable;
3821 err = 0;
3822 goto done;
3823 }
3824 if (strcmp(pr_name, "_tx_intr_delay") == 0) {
3825 value = Adapter->tx_intr_delay;
3826 err = 0;
3827 goto done;
3828 }
3829 if (strcmp(pr_name, "_tx_intr_abs_delay") == 0) {
3830 value = Adapter->tx_intr_abs_delay;
3831 err = 0;
3832 goto done;
3833 }
3834 if (strcmp(pr_name, "_rx_bcopy_threshold") == 0) {
3835 value = Adapter->rx_bcopy_thresh;
3836 err = 0;
3837 goto done;
3838 }
3839 if (strcmp(pr_name, "_max_num_rcv_packets") == 0) {
3840 value = Adapter->rx_limit_onintr;
3841 err = 0;
3842 goto done;
3843 }
3844 if (strcmp(pr_name, "_rx_intr_delay") == 0) {
3845 value = Adapter->rx_intr_delay;
3846 err = 0;
3847 goto done;
3848 }
3849 if (strcmp(pr_name, "_rx_intr_abs_delay") == 0) {
3850 value = Adapter->rx_intr_abs_delay;
3851 err = 0;
3852 goto done;
3853 }
3854 if (strcmp(pr_name, "_intr_throttling_rate") == 0) {
3855 value = Adapter->intr_throttling_rate;
3856 err = 0;
3857 goto done;
3858 }
3859 if (strcmp(pr_name, "_intr_adaptive") == 0) {
3860 value = Adapter->intr_adaptive;
3861 err = 0;
3862 goto done;
3863 }
3864 done:
3865 if (err == 0) {
3866 (void) snprintf(pr_val, pr_valsize, "%d", value);
3867 }
3868 return (err);
3869 }
3870
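/*
 * Illustrative note (hypothetical values): the driver-private
 * properties handled by e1000g_set_priv_prop()/e1000g_get_priv_prop()
 * above (names beginning with an underscore) are normally tuned from
 * userland with dladm(1M), for example:
 *
 *	# dladm set-linkprop -p _tx_intr_delay=300 e1000g0
 *	# dladm show-linkprop -p _intr_throttling_rate e1000g0
 *
 * The accepted ranges are those enforced above; the numeric value is
 * only an example.
 */
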
3871 /*
3872 * e1000g_get_conf - get configurations set in e1000g.conf
3873 * This routine gets user-configured values out of the configuration
3874 * file e1000g.conf.
3875 *
3876 * For each configurable value, there is a minimum, a maximum, and a
3877 * default.
3878 * If user does not configure a value, use the default.
3879  * If user configures below the minimum, use the minimum.
3880  * If user configures above the maximum, use the maximum.
3881 */
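
/*
 * Example only (hypothetical values): e1000g.conf entries are
 * integer-array properties indexed by driver instance, e.g.
 *
 *	NumTxDescriptors=2048,2048;
 *	NumRxDescriptors=2048,2048;
 *	MaxFrameSize=0,3;
 *	FlowControl=3;
 *
 * Instance 0 uses the first value of each array and instance 1 the
 * second; see e1000g_get_prop() below for how missing or out-of-range
 * values fall back to the defaults and limits described above.
 */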
3882 static void
3883 e1000g_get_conf(struct e1000g *Adapter)
3884 {
3885 struct e1000_hw *hw = &Adapter->shared;
3886 boolean_t tbi_compatibility = B_FALSE;
3887 boolean_t is_jumbo = B_FALSE;
3888 int propval;
3889 /*
3890 * decrease the number of descriptors and free packets
3891 * for jumbo frames to reduce tx/rx resource consumption
3892 */
3893 if (Adapter->max_frame_size >= FRAME_SIZE_UPTO_4K) {
3894 is_jumbo = B_TRUE;
3895 }
3896
3897 /*
3898 * get each configurable property from e1000g.conf
3899 */
3900
3901 /*
3902 * NumTxDescriptors
3903 */
3904 Adapter->tx_desc_num_flag =
3905 e1000g_get_prop(Adapter, "NumTxDescriptors",
3906 MIN_NUM_TX_DESCRIPTOR, MAX_NUM_TX_DESCRIPTOR,
3907 is_jumbo ? DEFAULT_JUMBO_NUM_TX_DESC
3908 : DEFAULT_NUM_TX_DESCRIPTOR, &propval);
3909 Adapter->tx_desc_num = propval;
3910
3911 /*
3912 * NumRxDescriptors
3913 */
3914 Adapter->rx_desc_num_flag =
3915 e1000g_get_prop(Adapter, "NumRxDescriptors",
3916 MIN_NUM_RX_DESCRIPTOR, MAX_NUM_RX_DESCRIPTOR,
3917 is_jumbo ? DEFAULT_JUMBO_NUM_RX_DESC
3918 : DEFAULT_NUM_RX_DESCRIPTOR, &propval);
3919 Adapter->rx_desc_num = propval;
3920
3921 /*
3922 * NumRxFreeList
3923 */
3924 Adapter->rx_buf_num_flag =
3925 e1000g_get_prop(Adapter, "NumRxFreeList",
3926 MIN_NUM_RX_FREELIST, MAX_NUM_RX_FREELIST,
3927 is_jumbo ? DEFAULT_JUMBO_NUM_RX_BUF
3928 : DEFAULT_NUM_RX_FREELIST, &propval);
3929 Adapter->rx_freelist_limit = propval;
3930
3931 /*
3932 * NumTxPacketList
3933 */
3934 Adapter->tx_buf_num_flag =
3935 e1000g_get_prop(Adapter, "NumTxPacketList",
3936 MIN_NUM_TX_FREELIST, MAX_NUM_TX_FREELIST,
3937 is_jumbo ? DEFAULT_JUMBO_NUM_TX_BUF
3938 : DEFAULT_NUM_TX_FREELIST, &propval);
3939 Adapter->tx_freelist_num = propval;
3940
3941 /*
3942 * FlowControl
3943 */
3944 hw->fc.send_xon = B_TRUE;
3945 (void) e1000g_get_prop(Adapter, "FlowControl",
3946 e1000_fc_none, 4, DEFAULT_FLOW_CONTROL, &propval);
3947 hw->fc.requested_mode = propval;
3948 /* 4 is the setting that says "let the eeprom decide" */
3949 if (hw->fc.requested_mode == 4)
3950 hw->fc.requested_mode = e1000_fc_default;
3951
3952 /*
3953 * Max Num Receive Packets on Interrupt
3954 */
3955 (void) e1000g_get_prop(Adapter, "MaxNumReceivePackets",
3956 MIN_RX_LIMIT_ON_INTR, MAX_RX_LIMIT_ON_INTR,
3957 DEFAULT_RX_LIMIT_ON_INTR, &propval);
3958 Adapter->rx_limit_onintr = propval;
3959
3960 /*
3961 * PHY master slave setting
3962 */
3963 (void) e1000g_get_prop(Adapter, "SetMasterSlave",
3964 e1000_ms_hw_default, e1000_ms_auto,
3965 e1000_ms_hw_default, &propval);
3966 hw->phy.ms_type = propval;
3967
3968 /*
3969 * Parameter which controls TBI mode workaround, which is only
3970 * needed on certain switches such as Cisco 6500/Foundry
3971 */
3972 (void) e1000g_get_prop(Adapter, "TbiCompatibilityEnable",
3973 0, 1, DEFAULT_TBI_COMPAT_ENABLE, &propval);
3974 tbi_compatibility = (propval == 1);
3975 e1000_set_tbi_compatibility_82543(hw, tbi_compatibility);
3976
3977 /*
3978 * MSI Enable
3979 */
3980 (void) e1000g_get_prop(Adapter, "MSIEnable",
3981 0, 1, DEFAULT_MSI_ENABLE, &propval);
3982 Adapter->msi_enable = (propval == 1);
3983
3984 /*
3985 * Interrupt Throttling Rate
3986 */
3987 (void) e1000g_get_prop(Adapter, "intr_throttling_rate",
3988 MIN_INTR_THROTTLING, MAX_INTR_THROTTLING,
3989 DEFAULT_INTR_THROTTLING, &propval);
3990 Adapter->intr_throttling_rate = propval;
3991
3992 /*
3993 * Adaptive Interrupt Blanking Enable/Disable
3994 * It is enabled by default
3995 */
3996 (void) e1000g_get_prop(Adapter, "intr_adaptive", 0, 1, 1,
3997 &propval);
3998 Adapter->intr_adaptive = (propval == 1);
3999
4000 /*
4001 * Hardware checksum enable/disable parameter
4002 */
4003 (void) e1000g_get_prop(Adapter, "tx_hcksum_enable",
4004 0, 1, DEFAULT_TX_HCKSUM_ENABLE, &propval);
4005 Adapter->tx_hcksum_enable = (propval == 1);
4006 /*
4007 * Checksum on/off selection via global parameters.
4008 *
4009 * If the chip is flagged as not capable of (correctly)
4010 * handling checksumming, we don't enable it on either
4011 * Rx or Tx side. Otherwise, we take this chip's settings
4012 * from the patchable global defaults.
4013 *
4014 * We advertise our capabilities only if TX offload is
4015 * enabled. On receive, the stack will accept checksummed
4016 * packets anyway, even if we haven't said we can deliver
4017 * them.
4018 */
4019 switch (hw->mac.type) {
4020 case e1000_82540:
4021 case e1000_82544:
4022 case e1000_82545:
4023 case e1000_82545_rev_3:
4024 case e1000_82546:
4025 case e1000_82546_rev_3:
4026 case e1000_82571:
4027 case e1000_82572:
4028 case e1000_82573:
4029 case e1000_80003es2lan:
4030 break;
4031 /*
4032 * For the following Intel PRO/1000 chipsets, we have not
4033 * tested the hardware checksum offload capability, so we
4034 * disable the capability for them.
4035 * e1000_82542,
4036 * e1000_82543,
4037 * e1000_82541,
4038 * e1000_82541_rev_2,
4039 * e1000_82547,
4040 * e1000_82547_rev_2,
4041 */
4042 default:
4043 Adapter->tx_hcksum_enable = B_FALSE;
4044 }
4045
4046 /*
4047 * Large Send Offloading(LSO) Enable/Disable
4048 * If the tx hardware checksum is not enabled, LSO should be
4049 * disabled.
4050 */
4051 (void) e1000g_get_prop(Adapter, "lso_enable",
4052 0, 1, DEFAULT_LSO_ENABLE, &propval);
4053 Adapter->lso_enable = (propval == 1);
4054
4055 switch (hw->mac.type) {
4056 case e1000_82546:
4057 case e1000_82546_rev_3:
4058 if (Adapter->lso_enable)
4059 Adapter->lso_premature_issue = B_TRUE;
4060 /* FALLTHRU */
4061 case e1000_82571:
4062 case e1000_82572:
4063 case e1000_82573:
4064 case e1000_80003es2lan:
4065 break;
4066 default:
4067 Adapter->lso_enable = B_FALSE;
4068 }
4069
4070 if (!Adapter->tx_hcksum_enable) {
4071 Adapter->lso_premature_issue = B_FALSE;
4072 Adapter->lso_enable = B_FALSE;
4073 }
4074
4075 /*
4076 	 * If mem_workaround_82546 is enabled, the rx buffers allocated for
4077 	 * e1000_82545, e1000_82546 and e1000_82546_rev_3
4078 	 * will not cross a 64k boundary.
4079 */
4080 (void) e1000g_get_prop(Adapter, "mem_workaround_82546",
4081 0, 1, DEFAULT_MEM_WORKAROUND_82546, &propval);
4082 Adapter->mem_workaround_82546 = (propval == 1);
4083
4084 /*
4085 * Max number of multicast addresses
4086 */
4087 (void) e1000g_get_prop(Adapter, "mcast_max_num",
4088 MIN_MCAST_NUM, MAX_MCAST_NUM, hw->mac.mta_reg_count * 32,
4089 &propval);
4090 Adapter->mcast_max_num = propval;
4091 }
4092
4093 /*
4094 * e1000g_get_prop - routine to read properties
4095 *
4096  * Get a user-configured property value out of the configuration
4097 * file e1000g.conf.
4098 *
4099 * Caller provides name of the property, a default value, a minimum
4100 * value, a maximum value and a pointer to the returned property
4101 * value.
4102 *
4103 * Return B_TRUE if the configured value of the property is not a default
4104 * value, otherwise return B_FALSE.
4105 */
4106 static boolean_t
4107 e1000g_get_prop(struct e1000g *Adapter,	/* pointer to per-adapter structure */
4108 char *propname, /* name of the property */
4109 int minval, /* minimum acceptable value */
4110     int maxval,			/* maximum acceptable value */
4111 int defval, /* default value */
4112 int *propvalue) /* property value return to caller */
4113 {
4114 int propval; /* value returned for requested property */
4115 	int *props;		/* pointer to array of properties returned */
4116 	uint_t nprops;		/* number of property values returned */
4117 boolean_t ret = B_TRUE;
4118
4119 /*
4120 * get the array of properties from the config file
4121 */
4122 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, Adapter->dip,
4123 DDI_PROP_DONTPASS, propname, &props, &nprops) == DDI_PROP_SUCCESS) {
4124 /* got some properties, test if we got enough */
4125 if (Adapter->instance < nprops) {
4126 propval = props[Adapter->instance];
4127 } else {
4128 /* not enough properties configured */
4129 propval = defval;
4130 E1000G_DEBUGLOG_2(Adapter, E1000G_INFO_LEVEL,
4131 "Not Enough %s values found in e1000g.conf"
4132 " - set to %d\n",
4133 propname, propval);
4134 ret = B_FALSE;
4135 }
4136
4137 /* free memory allocated for properties */
4138 ddi_prop_free(props);
4139
4140 } else {
4141 propval = defval;
4142 ret = B_FALSE;
4143 }
4144
4145 /*
4146 * enforce limits
4147 */
4148 if (propval > maxval) {
4149 propval = maxval;
4150 E1000G_DEBUGLOG_2(Adapter, E1000G_INFO_LEVEL,
4151 "Too High %s value in e1000g.conf - set to %d\n",
4152 propname, propval);
4153 }
4154
4155 if (propval < minval) {
4156 propval = minval;
4157 E1000G_DEBUGLOG_2(Adapter, E1000G_INFO_LEVEL,
4158 "Too Low %s value in e1000g.conf - set to %d\n",
4159 propname, propval);
4160 }
4161
4162 *propvalue = propval;
4163 return (ret);
4164 }
4165
4166 static boolean_t
4167 e1000g_link_check(struct e1000g *Adapter)
4168 {
4169 uint16_t speed, duplex, phydata;
4170 boolean_t link_changed = B_FALSE;
4171 struct e1000_hw *hw;
4172 uint32_t reg_tarc;
4173
4174 hw = &Adapter->shared;
4175
4176 if (e1000g_link_up(Adapter)) {
4177 /*
4178 * The Link is up, check whether it was marked as down earlier
4179 */
4180 if (Adapter->link_state != LINK_STATE_UP) {
4181 (void) e1000_get_speed_and_duplex(hw, &speed, &duplex);
4182 Adapter->link_speed = speed;
4183 Adapter->link_duplex = duplex;
4184 Adapter->link_state = LINK_STATE_UP;
4185 link_changed = B_TRUE;
4186
4187 if (Adapter->link_speed == SPEED_1000)
4188 Adapter->stall_threshold = TX_STALL_TIME_2S;
4189 else
4190 Adapter->stall_threshold = TX_STALL_TIME_8S;
4191
4192 Adapter->tx_link_down_timeout = 0;
4193
4194 if ((hw->mac.type == e1000_82571) ||
4195 (hw->mac.type == e1000_82572)) {
4196 reg_tarc = E1000_READ_REG(hw, E1000_TARC(0));
4197 if (speed == SPEED_1000)
4198 reg_tarc |= (1 << 21);
4199 else
4200 reg_tarc &= ~(1 << 21);
4201 E1000_WRITE_REG(hw, E1000_TARC(0), reg_tarc);
4202 }
4203 }
4204 Adapter->smartspeed = 0;
4205 } else {
4206 if (Adapter->link_state != LINK_STATE_DOWN) {
4207 Adapter->link_speed = 0;
4208 Adapter->link_duplex = 0;
4209 Adapter->link_state = LINK_STATE_DOWN;
4210 link_changed = B_TRUE;
4211
4212 /*
4213 			 * SmartSpeed workaround for Tabor/TanaX. When the
4214 			 * driver loses link, disable auto master/slave
4215 * resolution.
4216 */
4217 if (hw->phy.type == e1000_phy_igp) {
4218 (void) e1000_read_phy_reg(hw,
4219 PHY_1000T_CTRL, &phydata);
4220 phydata |= CR_1000T_MS_ENABLE;
4221 (void) e1000_write_phy_reg(hw,
4222 PHY_1000T_CTRL, phydata);
4223 }
4224 } else {
4225 e1000g_smartspeed(Adapter);
4226 }
4227
4228 if (Adapter->e1000g_state & E1000G_STARTED) {
4229 if (Adapter->tx_link_down_timeout <
4230 MAX_TX_LINK_DOWN_TIMEOUT) {
4231 Adapter->tx_link_down_timeout++;
4232 } else if (Adapter->tx_link_down_timeout ==
4233 MAX_TX_LINK_DOWN_TIMEOUT) {
4234 e1000g_tx_clean(Adapter);
4235 Adapter->tx_link_down_timeout++;
4236 }
4237 }
4238 }
4239
4240 if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK)
4241 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED);
4242
4243 return (link_changed);
4244 }
4245
4246 /*
4247 * e1000g_reset_link - Using the link properties to setup the link
4248 */
4249 int
4250 e1000g_reset_link(struct e1000g *Adapter)
4251 {
4252 struct e1000_mac_info *mac;
4253 struct e1000_phy_info *phy;
4254 struct e1000_hw *hw;
4255 boolean_t invalid;
4256
4257 mac = &Adapter->shared.mac;
4258 phy = &Adapter->shared.phy;
4259 hw = &Adapter->shared;
4260 invalid = B_FALSE;
4261
4262 if (hw->phy.media_type != e1000_media_type_copper)
4263 goto out;
4264
4265 if (Adapter->param_adv_autoneg == 1) {
4266 mac->autoneg = B_TRUE;
4267 phy->autoneg_advertised = 0;
4268
4269 /*
4270 * 1000hdx is not supported for autonegotiation
4271 */
4272 if (Adapter->param_adv_1000fdx == 1)
4273 phy->autoneg_advertised |= ADVERTISE_1000_FULL;
4274
4275 if (Adapter->param_adv_100fdx == 1)
4276 phy->autoneg_advertised |= ADVERTISE_100_FULL;
4277
4278 if (Adapter->param_adv_100hdx == 1)
4279 phy->autoneg_advertised |= ADVERTISE_100_HALF;
4280
4281 if (Adapter->param_adv_10fdx == 1)
4282 phy->autoneg_advertised |= ADVERTISE_10_FULL;
4283
4284 if (Adapter->param_adv_10hdx == 1)
4285 phy->autoneg_advertised |= ADVERTISE_10_HALF;
4286
4287 if (phy->autoneg_advertised == 0)
4288 invalid = B_TRUE;
4289 } else {
4290 mac->autoneg = B_FALSE;
4291
4292 /*
4293 * For Intel copper cards, 1000fdx and 1000hdx are not
4294 * supported for forced link
4295 */
4296 if (Adapter->param_adv_100fdx == 1)
4297 mac->forced_speed_duplex = ADVERTISE_100_FULL;
4298 else if (Adapter->param_adv_100hdx == 1)
4299 mac->forced_speed_duplex = ADVERTISE_100_HALF;
4300 else if (Adapter->param_adv_10fdx == 1)
4301 mac->forced_speed_duplex = ADVERTISE_10_FULL;
4302 else if (Adapter->param_adv_10hdx == 1)
4303 mac->forced_speed_duplex = ADVERTISE_10_HALF;
4304 else
4305 invalid = B_TRUE;
4306
4307 }
4308
4309 if (invalid) {
4310 e1000g_log(Adapter, CE_WARN,
4311 "Invalid link settings. Setup link to "
4312 "support autonegotiation with all link capabilities.");
4313 mac->autoneg = B_TRUE;
4314 phy->autoneg_advertised = AUTONEG_ADVERTISE_SPEED_DEFAULT;
4315 }
4316
4317 out:
4318 return (e1000_setup_link(&Adapter->shared));
4319 }
4320
4321 static void
4322 e1000g_timer_tx_resched(struct e1000g *Adapter)
4323 {
4324 e1000g_tx_ring_t *tx_ring = Adapter->tx_ring;
4325
4326 rw_enter(&Adapter->chip_lock, RW_READER);
4327
4328 if (tx_ring->resched_needed &&
4329 ((ddi_get_lbolt() - tx_ring->resched_timestamp) >
4330 drv_usectohz(1000000)) &&
4331 (Adapter->e1000g_state & E1000G_STARTED) &&
4332 (tx_ring->tbd_avail >= DEFAULT_TX_NO_RESOURCE)) {
4333 tx_ring->resched_needed = B_FALSE;
4334 mac_tx_update(Adapter->mh);
4335 E1000G_STAT(tx_ring->stat_reschedule);
4336 E1000G_STAT(tx_ring->stat_timer_reschedule);
4337 }
4338
4339 rw_exit(&Adapter->chip_lock);
4340 }
4341
4342 static void
4343 e1000g_local_timer(void *ws)
4344 {
4345 struct e1000g *Adapter = (struct e1000g *)ws;
4346 struct e1000_hw *hw;
4347 e1000g_ether_addr_t ether_addr;
4348 boolean_t link_changed;
4349
4350 hw = &Adapter->shared;
4351
4352 if (Adapter->e1000g_state & E1000G_ERROR) {
4353 rw_enter(&Adapter->chip_lock, RW_WRITER);
4354 Adapter->e1000g_state &= ~E1000G_ERROR;
4355 rw_exit(&Adapter->chip_lock);
4356
4357 Adapter->reset_count++;
4358 if (e1000g_global_reset(Adapter)) {
4359 ddi_fm_service_impact(Adapter->dip,
4360 DDI_SERVICE_RESTORED);
4361 e1000g_timer_tx_resched(Adapter);
4362 } else
4363 ddi_fm_service_impact(Adapter->dip,
4364 DDI_SERVICE_LOST);
4365 return;
4366 }
4367
4368 if (e1000g_stall_check(Adapter)) {
4369 E1000G_DEBUGLOG_0(Adapter, E1000G_INFO_LEVEL,
4370 "Tx stall detected. Activate automatic recovery.\n");
4371 e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_STALL);
4372 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_LOST);
4373 Adapter->reset_count++;
4374 if (e1000g_reset_adapter(Adapter)) {
4375 ddi_fm_service_impact(Adapter->dip,
4376 DDI_SERVICE_RESTORED);
4377 e1000g_timer_tx_resched(Adapter);
4378 }
4379 return;
4380 }
4381
4382 link_changed = B_FALSE;
4383 rw_enter(&Adapter->chip_lock, RW_READER);
4384 if (Adapter->link_complete)
4385 link_changed = e1000g_link_check(Adapter);
4386 rw_exit(&Adapter->chip_lock);
4387
4388 if (link_changed) {
4389 if (!Adapter->reset_flag &&
4390 (Adapter->e1000g_state & E1000G_STARTED) &&
4391 !(Adapter->e1000g_state & E1000G_SUSPENDED))
4392 mac_link_update(Adapter->mh, Adapter->link_state);
4393 if (Adapter->link_state == LINK_STATE_UP)
4394 Adapter->reset_flag = B_FALSE;
4395 }
4396 /*
4397 * Workaround for esb2. Data stuck in fifo on a link
4398 * down event. Reset the adapter to recover it.
4399 */
4400 if (Adapter->esb2_workaround) {
4401 Adapter->esb2_workaround = B_FALSE;
4402 (void) e1000g_reset_adapter(Adapter);
4403 return;
4404 }
4405
4406 /*
4407 * With 82571 controllers, any locally administered address will
4408 * be overwritten when there is a reset on the other port.
4409 * Detect this circumstance and correct it.
4410 */
4411 if ((hw->mac.type == e1000_82571) &&
4412 (e1000_get_laa_state_82571(hw) == B_TRUE)) {
4413 ether_addr.reg.low = E1000_READ_REG_ARRAY(hw, E1000_RA, 0);
4414 ether_addr.reg.high = E1000_READ_REG_ARRAY(hw, E1000_RA, 1);
4415
4416 ether_addr.reg.low = ntohl(ether_addr.reg.low);
4417 ether_addr.reg.high = ntohl(ether_addr.reg.high);
4418
4419 if ((ether_addr.mac.addr[5] != hw->mac.addr[0]) ||
4420 (ether_addr.mac.addr[4] != hw->mac.addr[1]) ||
4421 (ether_addr.mac.addr[3] != hw->mac.addr[2]) ||
4422 (ether_addr.mac.addr[2] != hw->mac.addr[3]) ||
4423 (ether_addr.mac.addr[1] != hw->mac.addr[4]) ||
4424 (ether_addr.mac.addr[0] != hw->mac.addr[5])) {
4425 (void) e1000_rar_set(hw, hw->mac.addr, 0);
4426 }
4427 }
4428
4429 /*
4430 * Long TTL workaround for 82541/82547
4431 */
4432 (void) e1000_igp_ttl_workaround_82547(hw);
4433
4434 /*
4435 	 * Check for Adaptive IFS settings. If there are lots of collisions,
4436 	 * change the value in steps...
4437 * These properties should only be set for 10/100
4438 */
4439 if ((hw->phy.media_type == e1000_media_type_copper) &&
4440 ((Adapter->link_speed == SPEED_100) ||
4441 (Adapter->link_speed == SPEED_10))) {
4442 e1000_update_adaptive(hw);
4443 }
4444 /*
4445 * Set Timer Interrupts
4446 */
4447 E1000_WRITE_REG(hw, E1000_ICS, E1000_IMS_RXT0);
4448
4449 if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK)
4450 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED);
4451 else
4452 e1000g_timer_tx_resched(Adapter);
4453
4454 restart_watchdog_timer(Adapter);
4455 }
4456
4457 /*
4458 * The function e1000g_link_timer() is called when the timer for link setup
4459  * has expired, which indicates the completion of the link setup. The link
4460  * state will not be updated until the link setup is completed, and the
4461 * link state will not be sent to the upper layer through mac_link_update()
4462 * in this function. It will be updated in the local timer routine or the
4463 * interrupt service routine after the interface is started (plumbed).
4464 */
4465 static void
4466 e1000g_link_timer(void *arg)
4467 {
4468 struct e1000g *Adapter = (struct e1000g *)arg;
4469
4470 mutex_enter(&Adapter->link_lock);
4471 Adapter->link_complete = B_TRUE;
4472 Adapter->link_tid = 0;
4473 mutex_exit(&Adapter->link_lock);
4474 }
4475
4476 /*
4477 * e1000g_force_speed_duplex - read forced speed/duplex out of e1000g.conf
4478 *
4479  * This function reads the forced speed and duplex for 10/100 Mbps speeds
4480 * and also for 1000 Mbps speeds from the e1000g.conf file
4481 */
4482 static void
4483 e1000g_force_speed_duplex(struct e1000g *Adapter)
4484 {
4485 int forced;
4486 int propval;
4487 struct e1000_mac_info *mac = &Adapter->shared.mac;
4488 struct e1000_phy_info *phy = &Adapter->shared.phy;
4489
4490 /*
4491 * get value out of config file
4492 */
4493 (void) e1000g_get_prop(Adapter, "ForceSpeedDuplex",
4494 GDIAG_10_HALF, GDIAG_ANY, GDIAG_ANY, &forced);
4495
4496 switch (forced) {
4497 case GDIAG_10_HALF:
4498 /*
4499 * Disable Auto Negotiation
4500 */
4501 mac->autoneg = B_FALSE;
4502 mac->forced_speed_duplex = ADVERTISE_10_HALF;
4503 break;
4504 case GDIAG_10_FULL:
4505 /*
4506 * Disable Auto Negotiation
4507 */
4508 mac->autoneg = B_FALSE;
4509 mac->forced_speed_duplex = ADVERTISE_10_FULL;
4510 break;
4511 case GDIAG_100_HALF:
4512 /*
4513 * Disable Auto Negotiation
4514 */
4515 mac->autoneg = B_FALSE;
4516 mac->forced_speed_duplex = ADVERTISE_100_HALF;
4517 break;
4518 case GDIAG_100_FULL:
4519 /*
4520 * Disable Auto Negotiation
4521 */
4522 mac->autoneg = B_FALSE;
4523 mac->forced_speed_duplex = ADVERTISE_100_FULL;
4524 break;
4525 case GDIAG_1000_FULL:
4526 /*
4527 * The gigabit spec requires autonegotiation. Therefore,
4528 * when the user wants to force the speed to 1000Mbps, we
4529 		 * enable AutoNeg, but only allow the hardware to advertise
4530 * 1000Mbps. This is different from 10/100 operation, where
4531 * we are allowed to link without any negotiation.
4532 */
4533 mac->autoneg = B_TRUE;
4534 phy->autoneg_advertised = ADVERTISE_1000_FULL;
4535 break;
4536 default: /* obey the setting of AutoNegAdvertised */
4537 mac->autoneg = B_TRUE;
4538 (void) e1000g_get_prop(Adapter, "AutoNegAdvertised",
4539 0, AUTONEG_ADVERTISE_SPEED_DEFAULT,
4540 AUTONEG_ADVERTISE_SPEED_DEFAULT, &propval);
4541 phy->autoneg_advertised = (uint16_t)propval;
4542 break;
4543 } /* switch */
4544 }
4545
4546 /*
4547 * e1000g_get_max_frame_size - get jumbo frame setting from e1000g.conf
4548 *
4549 * This function reads MaxFrameSize from e1000g.conf
4550 */
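
/*
 * Summary of the mapping implemented below (the values are the
 * e1000g.conf MaxFrameSize settings): 0 selects the standard
 * 1500-byte MTU, while 1, 2 and 3 select MTUs derived from frame
 * sizes of up to roughly 4K, 8K and 16K bytes respectively, each
 * reduced by the VLAN header and FCS lengths.
 */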
4551 static void
4552 e1000g_get_max_frame_size(struct e1000g *Adapter)
4553 {
4554 int max_frame;
4555
4556 /*
4557 * get value out of config file
4558 */
4559 (void) e1000g_get_prop(Adapter, "MaxFrameSize", 0, 3, 0,
4560 &max_frame);
4561
4562 switch (max_frame) {
4563 case 0:
4564 Adapter->default_mtu = ETHERMTU;
4565 break;
4566 case 1:
4567 Adapter->default_mtu = FRAME_SIZE_UPTO_4K -
4568 sizeof (struct ether_vlan_header) - ETHERFCSL;
4569 break;
4570 case 2:
4571 Adapter->default_mtu = FRAME_SIZE_UPTO_8K -
4572 sizeof (struct ether_vlan_header) - ETHERFCSL;
4573 break;
4574 case 3:
4575 Adapter->default_mtu = FRAME_SIZE_UPTO_16K -
4576 sizeof (struct ether_vlan_header) - ETHERFCSL;
4577 break;
4578 default:
4579 Adapter->default_mtu = ETHERMTU;
4580 break;
4581 } /* switch */
4582
4583 /*
4584 	 * If the user-configured MTU is larger than the device's maximum MTU,
4585 	 * the MTU is set to the device's maximum value.
4586 */
4587 if (Adapter->default_mtu > Adapter->max_mtu)
4588 Adapter->default_mtu = Adapter->max_mtu;
4589
4590 Adapter->max_frame_size = e1000g_mtu2maxframe(Adapter->default_mtu);
4591 }
4592
4593 /*
4594 * e1000g_pch_limits - Apply limits of the PCH silicon type
4595 *
4596 * At any frame size larger than the ethernet default,
4597 * prevent linking at 10/100 speeds.
4598 */
4599 static void
4600 e1000g_pch_limits(struct e1000g *Adapter)
4601 {
4602 struct e1000_hw *hw = &Adapter->shared;
4603
4604 /* only applies to PCH silicon type */
4605 if (hw->mac.type != e1000_pchlan && hw->mac.type != e1000_pch2lan)
4606 return;
4607
4608 /* only applies to frames larger than ethernet default */
4609 if (Adapter->max_frame_size > DEFAULT_FRAME_SIZE) {
4610 hw->mac.autoneg = B_TRUE;
4611 hw->phy.autoneg_advertised = ADVERTISE_1000_FULL;
4612
4613 Adapter->param_adv_autoneg = 1;
4614 Adapter->param_adv_1000fdx = 1;
4615
4616 Adapter->param_adv_100fdx = 0;
4617 Adapter->param_adv_100hdx = 0;
4618 Adapter->param_adv_10fdx = 0;
4619 Adapter->param_adv_10hdx = 0;
4620
4621 e1000g_param_sync(Adapter);
4622 }
4623 }
4624
4625 /*
4626 * e1000g_mtu2maxframe - convert given MTU to maximum frame size
4627 */
4628 static uint32_t
4629 e1000g_mtu2maxframe(uint32_t mtu)
4630 {
4631 uint32_t maxframe;
4632
4633 maxframe = mtu + sizeof (struct ether_vlan_header) + ETHERFCSL;
4634
4635 return (maxframe);
4636 }
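
/*
 * Worked example (assuming the usual layouts, where
 * sizeof (struct ether_vlan_header) is 18 bytes and ETHERFCSL is 4):
 * the default MTU of 1500 (ETHERMTU) maps to a maximum frame size of
 * 1500 + 18 + 4 = 1522 bytes.
 */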
4637
4638 static void
4639 arm_watchdog_timer(struct e1000g *Adapter)
4640 {
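	/*
	 * Schedule a single run of e1000g_local_timer() roughly one
	 * second from now; drv_usectohz(1000000) converts one second
	 * (in microseconds) to clock ticks for timeout(9F).
	 */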
4641 Adapter->watchdog_tid =
4642 timeout(e1000g_local_timer,
4643 (void *)Adapter, 1 * drv_usectohz(1000000));
4644 }
4645 #pragma inline(arm_watchdog_timer)
4646
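/*
 * A sketch of the watchdog state handled below: watchdog_timer_enabled
 * records whether the watchdog is allowed to run at all, while
 * watchdog_timer_started records whether a timeout is currently
 * pending; both are protected by watchdog_lock. The enable/disable
 * routines toggle the former, the start/stop/restart routines the
 * latter.
 */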
4647 static void
4648 enable_watchdog_timer(struct e1000g *Adapter)
4649 {
4650 mutex_enter(&Adapter->watchdog_lock);
4651
4652 if (!Adapter->watchdog_timer_enabled) {
4653 Adapter->watchdog_timer_enabled = B_TRUE;
4654 Adapter->watchdog_timer_started = B_TRUE;
4655 arm_watchdog_timer(Adapter);
4656 }
4657
4658 mutex_exit(&Adapter->watchdog_lock);
4659 }
4660
4661 static void
4662 disable_watchdog_timer(struct e1000g *Adapter)
4663 {
4664 timeout_id_t tid;
4665
4666 mutex_enter(&Adapter->watchdog_lock);
4667
4668 Adapter->watchdog_timer_enabled = B_FALSE;
4669 Adapter->watchdog_timer_started = B_FALSE;
4670 tid = Adapter->watchdog_tid;
4671 Adapter->watchdog_tid = 0;
4672
4673 mutex_exit(&Adapter->watchdog_lock);
4674
4675 if (tid != 0)
4676 (void) untimeout(tid);
4677 }
4678
4679 static void
4680 start_watchdog_timer(struct e1000g *Adapter)
4681 {
4682 mutex_enter(&Adapter->watchdog_lock);
4683
4684 if (Adapter->watchdog_timer_enabled) {
4685 if (!Adapter->watchdog_timer_started) {
4686 Adapter->watchdog_timer_started = B_TRUE;
4687 arm_watchdog_timer(Adapter);
4688 }
4689 }
4690
4691 mutex_exit(&Adapter->watchdog_lock);
4692 }
4693
4694 static void
4695 restart_watchdog_timer(struct e1000g *Adapter)
4696 {
4697 mutex_enter(&Adapter->watchdog_lock);
4698
4699 if (Adapter->watchdog_timer_started)
4700 arm_watchdog_timer(Adapter);
4701
4702 mutex_exit(&Adapter->watchdog_lock);
4703 }
4704
4705 static void
4706 stop_watchdog_timer(struct e1000g *Adapter)
4707 {
4708 timeout_id_t tid;
4709
4710 mutex_enter(&Adapter->watchdog_lock);
4711
4712 Adapter->watchdog_timer_started = B_FALSE;
4713 tid = Adapter->watchdog_tid;
4714 Adapter->watchdog_tid = 0;
4715
4716 mutex_exit(&Adapter->watchdog_lock);
4717
4718 if (tid != 0)
4719 (void) untimeout(tid);
4720 }
4721
4722 static void
4723 stop_link_timer(struct e1000g *Adapter)
4724 {
4725 timeout_id_t tid;
4726
4727 /* Disable the link timer */
4728 mutex_enter(&Adapter->link_lock);
4729
4730 tid = Adapter->link_tid;
4731 Adapter->link_tid = 0;
4732
4733 mutex_exit(&Adapter->link_lock);
4734
4735 if (tid != 0)
4736 (void) untimeout(tid);
4737 }
4738
4739 static void
4740 stop_82547_timer(e1000g_tx_ring_t *tx_ring)
4741 {
4742 timeout_id_t tid;
4743
4744 /* Disable the tx timer for 82547 chipset */
4745 mutex_enter(&tx_ring->tx_lock);
4746
4747 tx_ring->timer_enable_82547 = B_FALSE;
4748 tid = tx_ring->timer_id_82547;
4749 tx_ring->timer_id_82547 = 0;
4750
4751 mutex_exit(&tx_ring->tx_lock);
4752
4753 if (tid != 0)
4754 (void) untimeout(tid);
4755 }
4756
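/*
 * Interrupt mask helpers. On this hardware a write to IMC (Interrupt
 * Mask Clear) disables the interrupt causes whose bits are set, while
 * a write to IMS (Interrupt Mask Set) enables them; so
 * e1000g_clear_interrupt() below disables every cause except RXSEQ,
 * and e1000g_mask_interrupt() re-enables the normal working set.
 */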
4757 void
4758 e1000g_clear_interrupt(struct e1000g *Adapter)
4759 {
4760 E1000_WRITE_REG(&Adapter->shared, E1000_IMC,
4761 0xffffffff & ~E1000_IMS_RXSEQ);
4762 }
4763
4764 void
4765 e1000g_mask_interrupt(struct e1000g *Adapter)
4766 {
4767 E1000_WRITE_REG(&Adapter->shared, E1000_IMS,
4768 IMS_ENABLE_MASK & ~E1000_IMS_TXDW);
4769
4770 if (Adapter->tx_intr_enable)
4771 e1000g_mask_tx_interrupt(Adapter);
4772 }
4773
4774 /*
4775 * This routine is called by e1000g_quiesce(), therefore must not block.
4776 */
4777 void
4778 e1000g_clear_all_interrupts(struct e1000g *Adapter)
4779 {
4780 E1000_WRITE_REG(&Adapter->shared, E1000_IMC, 0xffffffff);
4781 }
4782
4783 void
4784 e1000g_mask_tx_interrupt(struct e1000g *Adapter)
4785 {
4786 E1000_WRITE_REG(&Adapter->shared, E1000_IMS, E1000_IMS_TXDW);
4787 }
4788
4789 void
4790 e1000g_clear_tx_interrupt(struct e1000g *Adapter)
4791 {
4792 E1000_WRITE_REG(&Adapter->shared, E1000_IMC, E1000_IMS_TXDW);
4793 }
4794
4795 static void
4796 e1000g_smartspeed(struct e1000g *Adapter)
4797 {
4798 struct e1000_hw *hw = &Adapter->shared;
4799 uint16_t phy_status;
4800 uint16_t phy_ctrl;
4801
4802 /*
4803 * If we're not T-or-T, or we're not autoneg'ing, or we're not
4804 * advertising 1000Full, we don't even use the workaround
4805 */
4806 if ((hw->phy.type != e1000_phy_igp) ||
4807 !hw->mac.autoneg ||
4808 !(hw->phy.autoneg_advertised & ADVERTISE_1000_FULL))
4809 return;
4810
4811 /*
4812 * True if this is the first call of this function or after every
4813 * 30 seconds of not having link
4814 */
4815 if (Adapter->smartspeed == 0) {
4816 /*
4817 * If Master/Slave config fault is asserted twice, we
4818 * assume back-to-back
4819 */
4820 (void) e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
4821 if (!(phy_status & SR_1000T_MS_CONFIG_FAULT))
4822 return;
4823
4824 (void) e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
4825 if (!(phy_status & SR_1000T_MS_CONFIG_FAULT))
4826 return;
4827 /*
4828 		 * We're assuming back-to-back because our status register
4829 		 * insists there's a fault in the master/slave
4830 * relationship that was "negotiated"
4831 */
4832 (void) e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl);
4833 /*
4834 * Is the phy configured for manual configuration of
4835 * master/slave?
4836 */
4837 if (phy_ctrl & CR_1000T_MS_ENABLE) {
4838 /*
4839 * Yes. Then disable manual configuration (enable
4840 * auto configuration) of master/slave
4841 */
4842 phy_ctrl &= ~CR_1000T_MS_ENABLE;
4843 (void) e1000_write_phy_reg(hw,
4844 PHY_1000T_CTRL, phy_ctrl);
4845 /*
4846 * Effectively starting the clock
4847 */
4848 Adapter->smartspeed++;
4849 /*
4850 * Restart autonegotiation
4851 */
4852 if (!e1000_phy_setup_autoneg(hw) &&
4853 !e1000_read_phy_reg(hw, PHY_CONTROL, &phy_ctrl)) {
4854 phy_ctrl |= (MII_CR_AUTO_NEG_EN |
4855 MII_CR_RESTART_AUTO_NEG);
4856 (void) e1000_write_phy_reg(hw,
4857 PHY_CONTROL, phy_ctrl);
4858 }
4859 }
4860 return;
4861 /*
4862 * Has 6 seconds transpired still without link? Remember,
4863 * you should reset the smartspeed counter once you obtain
4864 * link
4865 */
4866 } else if (Adapter->smartspeed == E1000_SMARTSPEED_DOWNSHIFT) {
4867 /*
4868 * Yes. Remember, we did at the start determine that
4869 * there's a master/slave configuration fault, so we're
4870 * still assuming there's someone on the other end, but we
4871 * just haven't yet been able to talk to it. We then
4872 * re-enable auto configuration of master/slave to see if
4873 * we're running 2/3 pair cables.
4874 */
4875 /*
4876 * If still no link, perhaps using 2/3 pair cable
4877 */
4878 (void) e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl);
4879 phy_ctrl |= CR_1000T_MS_ENABLE;
4880 (void) e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_ctrl);
4881 /*
4882 * Restart autoneg with phy enabled for manual
4883 * configuration of master/slave
4884 */
4885 if (!e1000_phy_setup_autoneg(hw) &&
4886 !e1000_read_phy_reg(hw, PHY_CONTROL, &phy_ctrl)) {
4887 phy_ctrl |=
4888 (MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG);
4889 (void) e1000_write_phy_reg(hw, PHY_CONTROL, phy_ctrl);
4890 }
4891 /*
4892 * Hopefully, there are no more faults and we've obtained
4893 * link as a result.
4894 */
4895 }
4896 /*
4897 * Restart process after E1000_SMARTSPEED_MAX iterations (30
4898 * seconds)
4899 */
4900 if (Adapter->smartspeed++ == E1000_SMARTSPEED_MAX)
4901 Adapter->smartspeed = 0;
4902 }
4903
4904 static boolean_t
4905 is_valid_mac_addr(uint8_t *mac_addr)
4906 {
4907 const uint8_t addr_test1[6] = { 0, 0, 0, 0, 0, 0 };
4908 const uint8_t addr_test2[6] =
4909 { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
4910
4911 if (!(bcmp(addr_test1, mac_addr, ETHERADDRL)) ||
4912 !(bcmp(addr_test2, mac_addr, ETHERADDRL)))
4913 return (B_FALSE);
4914
4915 return (B_TRUE);
4916 }
4917
4918 /*
4919 * e1000g_stall_check - check for tx stall
4920 *
4921 * This function checks if the adapter is stalled (in transmit).
4922 *
4923 * It is called each time the watchdog timeout is invoked.
4924 * If the transmit descriptor reclaim continuously fails,
4925 * the watchdog value will increment by 1. If the watchdog
4926 * value exceeds the threshold, the adapter is assumed to
4927 * have stalled and need to be reset.
4928 */
4929 static boolean_t
4930 e1000g_stall_check(struct e1000g *Adapter)
4931 {
4932 e1000g_tx_ring_t *tx_ring;
4933
4934 tx_ring = Adapter->tx_ring;
4935
4936 if (Adapter->link_state != LINK_STATE_UP)
4937 return (B_FALSE);
4938
4939 (void) e1000g_recycle(tx_ring);
4940
4941 if (Adapter->stall_flag)
4942 return (B_TRUE);
4943
4944 return (B_FALSE);
4945 }
4946
4947 #ifdef E1000G_DEBUG
4948 static enum ioc_reply
4949 e1000g_pp_ioctl(struct e1000g *e1000gp, struct iocblk *iocp, mblk_t *mp)
4950 {
4951 void (*ppfn)(struct e1000g *e1000gp, e1000g_peekpoke_t *ppd);
4952 e1000g_peekpoke_t *ppd;
4953 uint64_t mem_va;
4954 uint64_t maxoff;
4955 boolean_t peek;
4956
4957 switch (iocp->ioc_cmd) {
4958
4959 case E1000G_IOC_REG_PEEK:
4960 peek = B_TRUE;
4961 break;
4962
4963 case E1000G_IOC_REG_POKE:
4964 peek = B_FALSE;
4965 break;
4966
4967 	default:
4968 E1000G_DEBUGLOG_1(e1000gp, E1000G_INFO_LEVEL,
4969 "e1000g_diag_ioctl: invalid ioctl command 0x%X\n",
4970 iocp->ioc_cmd);
4971 return (IOC_INVAL);
4972 }
4973
4974 /*
4975 * Validate format of ioctl
4976 */
4977 if (iocp->ioc_count != sizeof (e1000g_peekpoke_t))
4978 return (IOC_INVAL);
4979 if (mp->b_cont == NULL)
4980 return (IOC_INVAL);
4981
4982 ppd = (e1000g_peekpoke_t *)(uintptr_t)mp->b_cont->b_rptr;
4983
4984 /*
4985 * Validate request parameters
4986 */
4987 switch (ppd->pp_acc_space) {
4988
4989 default:
4990 E1000G_DEBUGLOG_1(e1000gp, E1000G_INFO_LEVEL,
4991 "e1000g_diag_ioctl: invalid access space 0x%X\n",
4992 ppd->pp_acc_space);
4993 return (IOC_INVAL);
4994
4995 case E1000G_PP_SPACE_REG:
4996 /*
4997 * Memory-mapped I/O space
4998 */
4999 ASSERT(ppd->pp_acc_size == 4);
5000 if (ppd->pp_acc_size != 4)
5001 return (IOC_INVAL);
5002
5003 if ((ppd->pp_acc_offset % ppd->pp_acc_size) != 0)
5004 return (IOC_INVAL);
5005
5006 mem_va = 0;
5007 maxoff = 0x10000;
5008 ppfn = peek ? e1000g_ioc_peek_reg : e1000g_ioc_poke_reg;
5009 break;
5010
5011 case E1000G_PP_SPACE_E1000G:
5012 /*
5013 * E1000g data structure!
5014 */
5015 mem_va = (uintptr_t)e1000gp;
5016 maxoff = sizeof (struct e1000g);
5017 ppfn = peek ? e1000g_ioc_peek_mem : e1000g_ioc_poke_mem;
5018 break;
5019
5020 }
5021
5022 if (ppd->pp_acc_offset >= maxoff)
5023 return (IOC_INVAL);
5024
5025 if (ppd->pp_acc_offset + ppd->pp_acc_size > maxoff)
5026 return (IOC_INVAL);
5027
5028 /*
5029 * All OK - go!
5030 */
5031 ppd->pp_acc_offset += mem_va;
5032 (*ppfn)(e1000gp, ppd);
5033 return (peek ? IOC_REPLY : IOC_ACK);
5034 }
5035
5036 static void
5037 e1000g_ioc_peek_reg(struct e1000g *e1000gp, e1000g_peekpoke_t *ppd)
5038 {
5039 ddi_acc_handle_t handle;
5040 uint32_t *regaddr;
5041
5042 handle = e1000gp->osdep.reg_handle;
5043 regaddr = (uint32_t *)((uintptr_t)e1000gp->shared.hw_addr +
5044 (uintptr_t)ppd->pp_acc_offset);
5045
5046 ppd->pp_acc_data = ddi_get32(handle, regaddr);
5047 }
5048
5049 static void
5050 e1000g_ioc_poke_reg(struct e1000g *e1000gp, e1000g_peekpoke_t *ppd)
5051 {
5052 ddi_acc_handle_t handle;
5053 uint32_t *regaddr;
5054 uint32_t value;
5055
5056 handle = e1000gp->osdep.reg_handle;
5057 regaddr = (uint32_t *)((uintptr_t)e1000gp->shared.hw_addr +
5058 (uintptr_t)ppd->pp_acc_offset);
5059 value = (uint32_t)ppd->pp_acc_data;
5060
5061 ddi_put32(handle, regaddr, value);
5062 }
5063
5064 static void
5065 e1000g_ioc_peek_mem(struct e1000g *e1000gp, e1000g_peekpoke_t *ppd)
5066 {
5067 uint64_t value;
5068 void *vaddr;
5069
5070 vaddr = (void *)(uintptr_t)ppd->pp_acc_offset;
5071
5072 switch (ppd->pp_acc_size) {
5073 case 1:
5074 value = *(uint8_t *)vaddr;
5075 break;
5076
5077 case 2:
5078 value = *(uint16_t *)vaddr;
5079 break;
5080
5081 case 4:
5082 value = *(uint32_t *)vaddr;
5083 break;
5084
5085 case 8:
5086 value = *(uint64_t *)vaddr;
5087 break;
5088 }
5089
5090 E1000G_DEBUGLOG_4(e1000gp, E1000G_INFO_LEVEL,
5091 "e1000g_ioc_peek_mem($%p, $%p) peeked 0x%llx from $%p\n",
5092 (void *)e1000gp, (void *)ppd, value, vaddr);
5093
5094 ppd->pp_acc_data = value;
5095 }
5096
5097 static void
5098 e1000g_ioc_poke_mem(struct e1000g *e1000gp, e1000g_peekpoke_t *ppd)
5099 {
5100 uint64_t value;
5101 void *vaddr;
5102
5103 vaddr = (void *)(uintptr_t)ppd->pp_acc_offset;
5104 value = ppd->pp_acc_data;
5105
5106 E1000G_DEBUGLOG_4(e1000gp, E1000G_INFO_LEVEL,
5107 "e1000g_ioc_poke_mem($%p, $%p) poking 0x%llx at $%p\n",
5108 (void *)e1000gp, (void *)ppd, value, vaddr);
5109
5110 switch (ppd->pp_acc_size) {
5111 case 1:
5112 *(uint8_t *)vaddr = (uint8_t)value;
5113 break;
5114
5115 case 2:
5116 *(uint16_t *)vaddr = (uint16_t)value;
5117 break;
5118
5119 case 4:
5120 *(uint32_t *)vaddr = (uint32_t)value;
5121 break;
5122
5123 case 8:
5124 *(uint64_t *)vaddr = (uint64_t)value;
5125 break;
5126 }
5127 }
5128 #endif
5129
5130 /*
5131 * Loopback Support
5132 */
5133 static lb_property_t lb_normal =
5134 { normal, "normal", E1000G_LB_NONE };
5135 static lb_property_t lb_external1000 =
5136 { external, "1000Mbps", E1000G_LB_EXTERNAL_1000 };
5137 static lb_property_t lb_external100 =
5138 { external, "100Mbps", E1000G_LB_EXTERNAL_100 };
5139 static lb_property_t lb_external10 =
5140 { external, "10Mbps", E1000G_LB_EXTERNAL_10 };
5141 static lb_property_t lb_phy =
5142 { internal, "PHY", E1000G_LB_INTERNAL_PHY };
5143
5144 static enum ioc_reply
5145 e1000g_loopback_ioctl(struct e1000g *Adapter, struct iocblk *iocp, mblk_t *mp)
5146 {
5147 lb_info_sz_t *lbsp;
5148 lb_property_t *lbpp;
5149 struct e1000_hw *hw;
5150 uint32_t *lbmp;
5151 uint32_t size;
5152 uint32_t value;
5153
5154 hw = &Adapter->shared;
5155
5156 if (mp->b_cont == NULL)
5157 return (IOC_INVAL);
5158
5159 if (!e1000g_check_loopback_support(hw)) {
5160 e1000g_log(NULL, CE_WARN,
5161 "Loopback is not supported on e1000g%d", Adapter->instance);
5162 return (IOC_INVAL);
5163 }
5164
5165 switch (iocp->ioc_cmd) {
5166 default:
5167 return (IOC_INVAL);
5168
5169 case LB_GET_INFO_SIZE:
5170 size = sizeof (lb_info_sz_t);
5171 if (iocp->ioc_count != size)
5172 return (IOC_INVAL);
5173
5174 rw_enter(&Adapter->chip_lock, RW_WRITER);
5175 e1000g_get_phy_state(Adapter);
5176
5177 /*
5178 * Workaround for hardware faults. In order to get a stable
5179 * state of phy, we will wait for a specific interval and
5180 * try again. The time delay is an experiential value based
5181 		 * try again. The time delay is an empirical value based
5182 */
5183 msec_delay(100);
5184 e1000g_get_phy_state(Adapter);
5185 rw_exit(&Adapter->chip_lock);
5186
5187 value = sizeof (lb_normal);
5188 if ((Adapter->phy_ext_status & IEEE_ESR_1000T_FD_CAPS) ||
5189 (Adapter->phy_ext_status & IEEE_ESR_1000X_FD_CAPS) ||
5190 (hw->phy.media_type == e1000_media_type_fiber) ||
5191 (hw->phy.media_type == e1000_media_type_internal_serdes)) {
5192 value += sizeof (lb_phy);
5193 switch (hw->mac.type) {
5194 case e1000_82571:
5195 case e1000_82572:
5196 case e1000_80003es2lan:
5197 value += sizeof (lb_external1000);
5198 break;
5199 }
5200 }
5201 if ((Adapter->phy_status & MII_SR_100X_FD_CAPS) ||
5202 (Adapter->phy_status & MII_SR_100T2_FD_CAPS))
5203 value += sizeof (lb_external100);
5204 if (Adapter->phy_status & MII_SR_10T_FD_CAPS)
5205 value += sizeof (lb_external10);
5206
5207 lbsp = (lb_info_sz_t *)(uintptr_t)mp->b_cont->b_rptr;
5208 *lbsp = value;
5209 break;
5210
5211 case LB_GET_INFO:
5212 value = sizeof (lb_normal);
5213 if ((Adapter->phy_ext_status & IEEE_ESR_1000T_FD_CAPS) ||
5214 (Adapter->phy_ext_status & IEEE_ESR_1000X_FD_CAPS) ||
5215 (hw->phy.media_type == e1000_media_type_fiber) ||
5216 (hw->phy.media_type == e1000_media_type_internal_serdes)) {
5217 value += sizeof (lb_phy);
5218 switch (hw->mac.type) {
5219 case e1000_82571:
5220 case e1000_82572:
5221 case e1000_80003es2lan:
5222 value += sizeof (lb_external1000);
5223 break;
5224 }
5225 }
5226 if ((Adapter->phy_status & MII_SR_100X_FD_CAPS) ||
5227 (Adapter->phy_status & MII_SR_100T2_FD_CAPS))
5228 value += sizeof (lb_external100);
5229 if (Adapter->phy_status & MII_SR_10T_FD_CAPS)
5230 value += sizeof (lb_external10);
5231
5232 size = value;
5233 if (iocp->ioc_count != size)
5234 return (IOC_INVAL);
5235
5236 value = 0;
5237 lbpp = (lb_property_t *)(uintptr_t)mp->b_cont->b_rptr;
5238 lbpp[value++] = lb_normal;
5239 if ((Adapter->phy_ext_status & IEEE_ESR_1000T_FD_CAPS) ||
5240 (Adapter->phy_ext_status & IEEE_ESR_1000X_FD_CAPS) ||
5241 (hw->phy.media_type == e1000_media_type_fiber) ||
5242 (hw->phy.media_type == e1000_media_type_internal_serdes)) {
5243 lbpp[value++] = lb_phy;
5244 switch (hw->mac.type) {
5245 case e1000_82571:
5246 case e1000_82572:
5247 case e1000_80003es2lan:
5248 lbpp[value++] = lb_external1000;
5249 break;
5250 }
5251 }
5252 if ((Adapter->phy_status & MII_SR_100X_FD_CAPS) ||
5253 (Adapter->phy_status & MII_SR_100T2_FD_CAPS))
5254 lbpp[value++] = lb_external100;
5255 if (Adapter->phy_status & MII_SR_10T_FD_CAPS)
5256 lbpp[value++] = lb_external10;
5257 break;
5258
5259 case LB_GET_MODE:
5260 size = sizeof (uint32_t);
5261 if (iocp->ioc_count != size)
5262 return (IOC_INVAL);
5263
5264 lbmp = (uint32_t *)(uintptr_t)mp->b_cont->b_rptr;
5265 *lbmp = Adapter->loopback_mode;
5266 break;
5267
5268 case LB_SET_MODE:
5269 size = 0;
5270 if (iocp->ioc_count != sizeof (uint32_t))
5271 return (IOC_INVAL);
5272
5273 lbmp = (uint32_t *)(uintptr_t)mp->b_cont->b_rptr;
5274 if (!e1000g_set_loopback_mode(Adapter, *lbmp))
5275 return (IOC_INVAL);
5276 break;
5277 }
5278
5279 iocp->ioc_count = size;
5280 iocp->ioc_error = 0;
5281
5282 if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) {
5283 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED);
5284 return (IOC_INVAL);
5285 }
5286
5287 return (IOC_REPLY);
5288 }
5289
5290 static boolean_t
5291 e1000g_check_loopback_support(struct e1000_hw *hw)
5292 {
5293 switch (hw->mac.type) {
5294 case e1000_82540:
5295 case e1000_82545:
5296 case e1000_82545_rev_3:
5297 case e1000_82546:
5298 case e1000_82546_rev_3:
5299 case e1000_82541:
5300 case e1000_82541_rev_2:
5301 case e1000_82547:
5302 case e1000_82547_rev_2:
5303 case e1000_82571:
5304 case e1000_82572:
5305 case e1000_82573:
5306 case e1000_82574:
5307 case e1000_80003es2lan:
5308 case e1000_ich9lan:
5309 case e1000_ich10lan:
5310 return (B_TRUE);
5311 }
5312 return (B_FALSE);
5313 }
5314
5315 static boolean_t
5316 e1000g_set_loopback_mode(struct e1000g *Adapter, uint32_t mode)
5317 {
5318 struct e1000_hw *hw;
5319 int i, times;
5320 boolean_t link_up;
5321
5322 if (mode == Adapter->loopback_mode)
5323 return (B_TRUE);
5324
5325 hw = &Adapter->shared;
5326 times = 0;
5327
5328 Adapter->loopback_mode = mode;
5329
5330 if (mode == E1000G_LB_NONE) {
5331 /* Reset the chip */
5332 hw->phy.autoneg_wait_to_complete = B_TRUE;
5333 (void) e1000g_reset_adapter(Adapter);
5334 hw->phy.autoneg_wait_to_complete = B_FALSE;
5335 return (B_TRUE);
5336 }
5337
5338 again:
5339
5340 rw_enter(&Adapter->chip_lock, RW_WRITER);
5341
5342 switch (mode) {
5343 default:
5344 rw_exit(&Adapter->chip_lock);
5345 return (B_FALSE);
5346
5347 case E1000G_LB_EXTERNAL_1000:
5348 e1000g_set_external_loopback_1000(Adapter);
5349 break;
5350
5351 case E1000G_LB_EXTERNAL_100:
5352 e1000g_set_external_loopback_100(Adapter);
5353 break;
5354
5355 case E1000G_LB_EXTERNAL_10:
5356 e1000g_set_external_loopback_10(Adapter);
5357 break;
5358
5359 case E1000G_LB_INTERNAL_PHY:
5360 e1000g_set_internal_loopback(Adapter);
5361 break;
5362 }
5363
5364 times++;
5365
5366 rw_exit(&Adapter->chip_lock);
5367
5368 /* Wait for link up */
5369 for (i = (PHY_FORCE_LIMIT * 2); i > 0; i--)
5370 msec_delay(100);
5371
5372 rw_enter(&Adapter->chip_lock, RW_WRITER);
5373
5374 link_up = e1000g_link_up(Adapter);
5375
5376 rw_exit(&Adapter->chip_lock);
5377
5378 if (!link_up) {
5379 E1000G_DEBUGLOG_0(Adapter, E1000G_INFO_LEVEL,
5380 "Failed to get the link up");
5381 if (times < 2) {
5382 /* Reset the link */
5383 E1000G_DEBUGLOG_0(Adapter, E1000G_INFO_LEVEL,
5384 "Reset the link ...");
5385 (void) e1000g_reset_adapter(Adapter);
5386 goto again;
5387 }
5388
5389 /*
5390 * Reset driver to loopback none when set loopback failed
5391 * for the second time.
5392 */
5393 Adapter->loopback_mode = E1000G_LB_NONE;
5394
5395 /* Reset the chip */
5396 hw->phy.autoneg_wait_to_complete = B_TRUE;
5397 (void) e1000g_reset_adapter(Adapter);
5398 hw->phy.autoneg_wait_to_complete = B_FALSE;
5399
5400 E1000G_DEBUGLOG_0(Adapter, E1000G_INFO_LEVEL,
5401 "Set loopback mode failed, reset to loopback none");
5402
5403 return (B_FALSE);
5404 }
5405
5406 return (B_TRUE);
5407 }
5408
5409 /*
5410 * The following loopback settings are from Intel's technical
5411 * document - "How To Loopback". All the register settings and
5412 * time delay values are directly inherited from the document
5413 * without more explanations available.
5414 */
5415 static void
5416 e1000g_set_internal_loopback(struct e1000g *Adapter)
5417 {
5418 struct e1000_hw *hw;
5419 uint32_t ctrl;
5420 uint32_t status;
5421 uint16_t phy_ctrl;
5422 uint16_t phy_reg;
5423 uint32_t txcw;
5424
5425 hw = &Adapter->shared;
5426
5427 /* Disable Smart Power Down */
5428 phy_spd_state(hw, B_FALSE);
5429
5430 (void) e1000_read_phy_reg(hw, PHY_CONTROL, &phy_ctrl);
5431 phy_ctrl &= ~(MII_CR_AUTO_NEG_EN | MII_CR_SPEED_100 | MII_CR_SPEED_10);
5432 phy_ctrl |= MII_CR_FULL_DUPLEX | MII_CR_SPEED_1000;
5433
5434 switch (hw->mac.type) {
5435 case e1000_82540:
5436 case e1000_82545:
5437 case e1000_82545_rev_3:
5438 case e1000_82546:
5439 case e1000_82546_rev_3:
5440 case e1000_82573:
5441 /* Auto-MDI/MDIX off */
5442 (void) e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, 0x0808);
5443 /* Reset PHY to update Auto-MDI/MDIX */
5444 (void) e1000_write_phy_reg(hw, PHY_CONTROL,
5445 phy_ctrl | MII_CR_RESET | MII_CR_AUTO_NEG_EN);
5446 /* Reset PHY to auto-neg off and force 1000 */
5447 (void) e1000_write_phy_reg(hw, PHY_CONTROL,
5448 phy_ctrl | MII_CR_RESET);
5449 /*
5450 * Disable PHY receiver for 82540/545/546 and 82573 Family.
5451 * See comments above e1000g_set_internal_loopback() for the
5452 * background.
5453 */
5454 (void) e1000_write_phy_reg(hw, 29, 0x001F);
5455 (void) e1000_write_phy_reg(hw, 30, 0x8FFC);
5456 (void) e1000_write_phy_reg(hw, 29, 0x001A);
5457 (void) e1000_write_phy_reg(hw, 30, 0x8FF0);
5458 break;
5459 case e1000_80003es2lan:
5460 /* Force Link Up */
5461 (void) e1000_write_phy_reg(hw, GG82563_PHY_KMRN_MODE_CTRL,
5462 0x1CC);
5463 /* Sets PCS loopback at 1Gbs */
5464 (void) e1000_write_phy_reg(hw, GG82563_PHY_MAC_SPEC_CTRL,
5465 0x1046);
5466 break;
5467 }
5468
5469 /*
5470 * The following registers should be set for e1000_phy_bm phy type.
5471 * e1000_82574, e1000_ich10lan and some e1000_ich9lan use this phy.
5472 * For others, we do not need to set these registers.
5473 */
5474 if (hw->phy.type == e1000_phy_bm) {
5475 /* Set Default MAC Interface speed to 1GB */
5476 (void) e1000_read_phy_reg(hw, PHY_REG(2, 21), &phy_reg);
5477 phy_reg &= ~0x0007;
5478 phy_reg |= 0x006;
5479 (void) e1000_write_phy_reg(hw, PHY_REG(2, 21), phy_reg);
5480 /* Assert SW reset for above settings to take effect */
5481 (void) e1000_phy_commit(hw);
5482 msec_delay(1);
5483 /* Force Full Duplex */
5484 (void) e1000_read_phy_reg(hw, PHY_REG(769, 16), &phy_reg);
5485 (void) e1000_write_phy_reg(hw, PHY_REG(769, 16),
5486 phy_reg | 0x000C);
5487 /* Set Link Up (in force link) */
5488 (void) e1000_read_phy_reg(hw, PHY_REG(776, 16), &phy_reg);
5489 (void) e1000_write_phy_reg(hw, PHY_REG(776, 16),
5490 phy_reg | 0x0040);
5491 /* Force Link */
5492 (void) e1000_read_phy_reg(hw, PHY_REG(769, 16), &phy_reg);
5493 (void) e1000_write_phy_reg(hw, PHY_REG(769, 16),
5494 phy_reg | 0x0040);
5495 /* Set Early Link Enable */
5496 (void) e1000_read_phy_reg(hw, PHY_REG(769, 20), &phy_reg);
5497 (void) e1000_write_phy_reg(hw, PHY_REG(769, 20),
5498 phy_reg | 0x0400);
5499 }
5500
5501 /* Set loopback */
5502 (void) e1000_write_phy_reg(hw, PHY_CONTROL, phy_ctrl | MII_CR_LOOPBACK);
5503
5504 msec_delay(250);
5505
5506 /* Now set up the MAC to the same speed/duplex as the PHY. */
5507 ctrl = E1000_READ_REG(hw, E1000_CTRL);
5508 ctrl &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */
5509 ctrl |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */
5510 E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */
5511 E1000_CTRL_SPD_1000 | /* Force Speed to 1000 */
5512 E1000_CTRL_FD); /* Force Duplex to FULL */
5513
5514 switch (hw->mac.type) {
5515 case e1000_82540:
5516 case e1000_82545:
5517 case e1000_82545_rev_3:
5518 case e1000_82546:
5519 case e1000_82546_rev_3:
5520 /*
5521 * For some serdes we'll need to commit the writes now
5522 * so that the status is updated on link
5523 */
5524 if (hw->phy.media_type == e1000_media_type_internal_serdes) {
5525 E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
5526 msec_delay(100);
5527 ctrl = E1000_READ_REG(hw, E1000_CTRL);
5528 }
5529
5530 if (hw->phy.media_type == e1000_media_type_copper) {
5531 /* Invert Loss of Signal */
5532 ctrl |= E1000_CTRL_ILOS;
5533 } else {
5534 /* Set ILOS on fiber nic if half duplex is detected */
5535 status = E1000_READ_REG(hw, E1000_STATUS);
5536 if ((status & E1000_STATUS_FD) == 0)
5537 ctrl |= E1000_CTRL_ILOS | E1000_CTRL_SLU;
5538 }
5539 break;
5540
5541 case e1000_82571:
5542 case e1000_82572:
5543 /*
5544 * The fiber/SerDes versions of this adapter do not contain an
5545 * accessible PHY. Therefore, loopback beyond MAC must be done
5546 * using SerDes analog loopback.
5547 */
5548 if (hw->phy.media_type != e1000_media_type_copper) {
5549 /* Disable autoneg by setting bit 31 of TXCW to zero */
5550 txcw = E1000_READ_REG(hw, E1000_TXCW);
5551 txcw &= ~((uint32_t)1 << 31);
5552 E1000_WRITE_REG(hw, E1000_TXCW, txcw);
5553
5554 /*
5555 * Write 0x410 to Serdes Control register
5556 * to enable Serdes analog loopback
5557 */
5558 E1000_WRITE_REG(hw, E1000_SCTL, 0x0410);
5559 msec_delay(10);
5560 }
5561
5562 status = E1000_READ_REG(hw, E1000_STATUS);
5563 /* Set ILOS on fiber nic if half duplex is detected */
5564 if ((hw->phy.media_type == e1000_media_type_fiber) &&
5565 ((status & E1000_STATUS_FD) == 0 ||
5566 (status & E1000_STATUS_LU) == 0))
5567 ctrl |= E1000_CTRL_ILOS | E1000_CTRL_SLU;
5568 else if (hw->phy.media_type == e1000_media_type_internal_serdes)
5569 ctrl |= E1000_CTRL_SLU;
5570 break;
5571
5572 case e1000_82573:
5573 ctrl |= E1000_CTRL_ILOS;
5574 break;
5575 case e1000_ich9lan:
5576 case e1000_ich10lan:
5577 ctrl |= E1000_CTRL_SLU;
5578 break;
5579 }
5580 if (hw->phy.type == e1000_phy_bm)
5581 ctrl |= E1000_CTRL_SLU | E1000_CTRL_ILOS;
5582
5583 E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
5584 }
5585
5586 static void
5587 e1000g_set_external_loopback_1000(struct e1000g *Adapter)
5588 {
5589 struct e1000_hw *hw;
5590 uint32_t rctl;
5591 uint32_t ctrl_ext;
5592 uint32_t ctrl;
5593 uint32_t status;
5594 uint32_t txcw;
5595 uint16_t phydata;
5596
5597 hw = &Adapter->shared;
5598
5599 /* Disable Smart Power Down */
5600 phy_spd_state(hw, B_FALSE);
5601
5602 switch (hw->mac.type) {
5603 case e1000_82571:
5604 case e1000_82572:
5605 switch (hw->phy.media_type) {
5606 case e1000_media_type_copper:
5607 /* Force link up (Must be done before the PHY writes) */
5608 ctrl = E1000_READ_REG(hw, E1000_CTRL);
5609 ctrl |= E1000_CTRL_SLU; /* Force Link Up */
5610 E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
5611
5612 rctl = E1000_READ_REG(hw, E1000_RCTL);
5613 rctl |= (E1000_RCTL_EN |
5614 E1000_RCTL_SBP |
5615 E1000_RCTL_UPE |
5616 E1000_RCTL_MPE |
5617 E1000_RCTL_LPE |
5618 E1000_RCTL_BAM); /* 0x803E */
5619 E1000_WRITE_REG(hw, E1000_RCTL, rctl);
5620
5621 ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
5622 ctrl_ext |= (E1000_CTRL_EXT_SDP4_DATA |
5623 E1000_CTRL_EXT_SDP6_DATA |
5624 E1000_CTRL_EXT_SDP3_DATA |
5625 E1000_CTRL_EXT_SDP4_DIR |
5626 E1000_CTRL_EXT_SDP6_DIR |
5627 E1000_CTRL_EXT_SDP3_DIR); /* 0x0DD0 */
5628 E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
5629
5630 /*
5631 			 * This sequence tunes the PHY's SDP; these are not
5632 			 * customer-settable values. For background, see comments above
5633 * e1000g_set_internal_loopback().
5634 */
5635 (void) e1000_write_phy_reg(hw, 0x0, 0x140);
5636 msec_delay(10);
5637 (void) e1000_write_phy_reg(hw, 0x9, 0x1A00);
5638 (void) e1000_write_phy_reg(hw, 0x12, 0xC10);
5639 (void) e1000_write_phy_reg(hw, 0x12, 0x1C10);
5640 (void) e1000_write_phy_reg(hw, 0x1F37, 0x76);
5641 (void) e1000_write_phy_reg(hw, 0x1F33, 0x1);
5642 (void) e1000_write_phy_reg(hw, 0x1F33, 0x0);
5643
5644 (void) e1000_write_phy_reg(hw, 0x1F35, 0x65);
5645 (void) e1000_write_phy_reg(hw, 0x1837, 0x3F7C);
5646 (void) e1000_write_phy_reg(hw, 0x1437, 0x3FDC);
5647 (void) e1000_write_phy_reg(hw, 0x1237, 0x3F7C);
5648 (void) e1000_write_phy_reg(hw, 0x1137, 0x3FDC);
5649
5650 msec_delay(50);
5651 break;
5652 case e1000_media_type_fiber:
5653 case e1000_media_type_internal_serdes:
5654 status = E1000_READ_REG(hw, E1000_STATUS);
5655 if (((status & E1000_STATUS_LU) == 0) ||
5656 (hw->phy.media_type ==
5657 e1000_media_type_internal_serdes)) {
5658 ctrl = E1000_READ_REG(hw, E1000_CTRL);
5659 ctrl |= E1000_CTRL_ILOS | E1000_CTRL_SLU;
5660 E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
5661 }
5662
5663 /* Disable autoneg by setting bit 31 of TXCW to zero */
5664 txcw = E1000_READ_REG(hw, E1000_TXCW);
5665 txcw &= ~((uint32_t)1 << 31);
5666 E1000_WRITE_REG(hw, E1000_TXCW, txcw);
5667
5668 /*
5669 * Write 0x410 to Serdes Control register
5670 * to enable Serdes analog loopback
5671 */
5672 E1000_WRITE_REG(hw, E1000_SCTL, 0x0410);
5673 msec_delay(10);
5674 break;
5675 default:
5676 break;
5677 }
5678 break;
5679 case e1000_82574:
5680 case e1000_80003es2lan:
5681 case e1000_ich9lan:
5682 case e1000_ich10lan:
5683 (void) e1000_read_phy_reg(hw, GG82563_REG(6, 16), &phydata);
5684 (void) e1000_write_phy_reg(hw, GG82563_REG(6, 16),
5685 phydata | (1 << 5));
5686 Adapter->param_adv_autoneg = 1;
5687 Adapter->param_adv_1000fdx = 1;
5688 (void) e1000g_reset_link(Adapter);
5689 break;
5690 }
5691 }
5692
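/*
 * e1000g_set_external_loopback_100 - force 100Mbps full duplex on the PHY
 * and MAC for external loopback testing
 */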
5693 static void
5694 e1000g_set_external_loopback_100(struct e1000g *Adapter)
5695 {
5696 struct e1000_hw *hw;
5697 uint32_t ctrl;
5698 uint16_t phy_ctrl;
5699
5700 hw = &Adapter->shared;
5701
5702 /* Disable Smart Power Down */
5703 phy_spd_state(hw, B_FALSE);
5704
5705 phy_ctrl = (MII_CR_FULL_DUPLEX |
5706 MII_CR_SPEED_100);
5707
5708 /* Force 100/FD, reset PHY */
5709 (void) e1000_write_phy_reg(hw, PHY_CONTROL,
5710 phy_ctrl | MII_CR_RESET); /* 0xA100 */
5711 msec_delay(10);
5712
5713 /* Force 100/FD */
5714 (void) e1000_write_phy_reg(hw, PHY_CONTROL,
5715 phy_ctrl); /* 0x2100 */
5716 msec_delay(10);
5717
5718 /* Now setup the MAC to the same speed/duplex as the PHY. */
5719 ctrl = E1000_READ_REG(hw, E1000_CTRL);
5720 ctrl &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */
5721 ctrl |= (E1000_CTRL_SLU | /* Force Link Up */
5722 E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */
5723 E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */
5724 E1000_CTRL_SPD_100 | /* Force Speed to 100 */
5725 E1000_CTRL_FD); /* Force Duplex to FULL */
5726
5727 E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
5728 }
5729
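/*
 * e1000g_set_external_loopback_10 - force 10Mbps full duplex on the PHY
 * and MAC for external loopback testing
 */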
5730 static void
5731 e1000g_set_external_loopback_10(struct e1000g *Adapter)
5732 {
5733 struct e1000_hw *hw;
5734 uint32_t ctrl;
5735 uint16_t phy_ctrl;
5736
5737 hw = &Adapter->shared;
5738
5739 /* Disable Smart Power Down */
5740 phy_spd_state(hw, B_FALSE);
5741
5742 phy_ctrl = (MII_CR_FULL_DUPLEX |
5743 MII_CR_SPEED_10);
5744
5745 /* Force 10/FD, reset PHY */
5746 (void) e1000_write_phy_reg(hw, PHY_CONTROL,
5747 phy_ctrl | MII_CR_RESET); /* 0x8100 */
5748 msec_delay(10);
5749
5750 /* Force 10/FD */
5751 (void) e1000_write_phy_reg(hw, PHY_CONTROL,
5752 phy_ctrl); /* 0x0100 */
5753 msec_delay(10);
5754
5755 /* Now setup the MAC to the same speed/duplex as the PHY. */
5756 ctrl = E1000_READ_REG(hw, E1000_CTRL);
5757 ctrl &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */
5758 ctrl |= (E1000_CTRL_SLU | /* Force Link Up */
5759 E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */
5760 E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */
5761 E1000_CTRL_SPD_10 | /* Force Speed to 10 */
5762 E1000_CTRL_FD); /* Force Duplex to FULL */
5763
5764 E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
5765 }
5766
5767 #ifdef __sparc
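/*
 * e1000g_find_mac_address - look up the MAC address properties that OBP
 * may have set ("local-mac-address", "local-mac-address?" and
 * "mac-address") and, if one is usable, install it in hw->mac.addr
 */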
5768 static boolean_t
5769 e1000g_find_mac_address(struct e1000g *Adapter)
5770 {
5771 struct e1000_hw *hw = &Adapter->shared;
5772 uchar_t *bytes;
5773 struct ether_addr sysaddr;
5774 uint_t nelts;
5775 int err;
5776 boolean_t found = B_FALSE;
5777
5778 /*
5779 * The "vendor's factory-set address" may already have
5780 * been extracted from the chip, but if the property
5781 * "local-mac-address" is set we use that instead.
5782 *
5783 * We check whether it looks like an array of 6
5784 * bytes (which it should, if OBP set it). If we can't
5785 * make sense of it this way, we'll ignore it.
5786 */
5787 err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, Adapter->dip,
5788 DDI_PROP_DONTPASS, "local-mac-address", &bytes, &nelts);
5789 if (err == DDI_PROP_SUCCESS) {
5790 if (nelts == ETHERADDRL) {
5791 while (nelts--)
5792 hw->mac.addr[nelts] = bytes[nelts];
5793 found = B_TRUE;
5794 }
5795 ddi_prop_free(bytes);
5796 }
5797
5798 /*
5799 * Look up the OBP property "local-mac-address?". If the user has set
5800 * 'local-mac-address? = false', use "the system address" instead.
5801 */
5802 if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, Adapter->dip, 0,
5803 "local-mac-address?", &bytes, &nelts) == DDI_PROP_SUCCESS) {
5804 if (strncmp("false", (caddr_t)bytes, (size_t)nelts) == 0) {
5805 if (localetheraddr(NULL, &sysaddr) != 0) {
5806 bcopy(&sysaddr, hw->mac.addr, ETHERADDRL);
5807 found = B_TRUE;
5808 }
5809 }
5810 ddi_prop_free(bytes);
5811 }
5812
5813 /*
5814 * Finally(!), if there's a valid "mac-address" property (created
5815 * if we netbooted from this interface), we must use this instead
5816 * of any of the above to ensure that the NFS/install server doesn't
5817 * get confused by the address changing as Solaris takes over!
5818 */
5819 err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, Adapter->dip,
5820 DDI_PROP_DONTPASS, "mac-address", &bytes, &nelts);
5821 if (err == DDI_PROP_SUCCESS) {
5822 if (nelts == ETHERADDRL) {
5823 while (nelts--)
5824 hw->mac.addr[nelts] = bytes[nelts];
5825 found = B_TRUE;
5826 }
5827 ddi_prop_free(bytes);
5828 }
5829
5830 if (found) {
5831 bcopy(hw->mac.addr, hw->mac.perm_addr,
5832 ETHERADDRL);
5833 }
5834
5835 return (found);
5836 }
5837 #endif
5838
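/*
 * e1000g_add_intrs - determine the supported interrupt types and register
 * either MSI or legacy (fixed) interrupts with the DDI framework
 */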
5839 static int
5840 e1000g_add_intrs(struct e1000g *Adapter)
5841 {
5842 dev_info_t *devinfo;
5843 int intr_types;
5844 int rc;
5845
5846 devinfo = Adapter->dip;
5847
5848 /* Get supported interrupt types */
5849 rc = ddi_intr_get_supported_types(devinfo, &intr_types);
5850
5851 if (rc != DDI_SUCCESS) {
5852 E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
5853 "Get supported interrupt types failed: %d\n", rc);
5854 return (DDI_FAILURE);
5855 }
5856
5857 	/*
5858 	 * Per Intel Technical Advisory document TA-160, some older Intel
5859 	 * PCI-X NICs may "advertise" to the OS that they support MSI, but
5860 	 * in fact have problems with it.
5861 	 * So we should only enable MSI for PCI-E NICs and keep MSI disabled
5862 	 * for the older PCI/PCI-X NICs.
5863 	 */
5864 if (Adapter->shared.mac.type < e1000_82571)
5865 Adapter->msi_enable = B_FALSE;
5866
5867 if ((intr_types & DDI_INTR_TYPE_MSI) && Adapter->msi_enable) {
5868 rc = e1000g_intr_add(Adapter, DDI_INTR_TYPE_MSI);
5869
5870 if (rc != DDI_SUCCESS) {
5871 /* EMPTY */
5872 E1000G_DEBUGLOG_0(Adapter, E1000G_WARN_LEVEL,
5873 "Add MSI failed, trying Legacy interrupts\n");
5874 } else {
5875 Adapter->intr_type = DDI_INTR_TYPE_MSI;
5876 }
5877 }
5878
5879 if ((Adapter->intr_type == 0) &&
5880 (intr_types & DDI_INTR_TYPE_FIXED)) {
5881 rc = e1000g_intr_add(Adapter, DDI_INTR_TYPE_FIXED);
5882
5883 if (rc != DDI_SUCCESS) {
5884 E1000G_DEBUGLOG_0(Adapter, E1000G_WARN_LEVEL,
5885 "Add Legacy interrupts failed\n");
5886 return (DDI_FAILURE);
5887 }
5888
5889 Adapter->intr_type = DDI_INTR_TYPE_FIXED;
5890 }
5891
5892 if (Adapter->intr_type == 0) {
5893 E1000G_DEBUGLOG_0(Adapter, E1000G_WARN_LEVEL,
5894 "No interrupts registered\n");
5895 return (DDI_FAILURE);
5896 }
5897
5898 return (DDI_SUCCESS);
5899 }
5900
5901 /*
5902 * e1000g_intr_add() handles MSI/Legacy interrupts
5903 */
5904 static int
5905 e1000g_intr_add(struct e1000g *Adapter, int intr_type)
5906 {
5907 dev_info_t *devinfo;
5908 int count, avail, actual;
5909 int x, y, rc, inum = 0;
5910 int flag;
5911 ddi_intr_handler_t *intr_handler;
5912
5913 devinfo = Adapter->dip;
5914
5915 /* get number of interrupts */
5916 rc = ddi_intr_get_nintrs(devinfo, intr_type, &count);
5917 if ((rc != DDI_SUCCESS) || (count == 0)) {
5918 E1000G_DEBUGLOG_2(Adapter, E1000G_WARN_LEVEL,
5919 "Get interrupt number failed. Return: %d, count: %d\n",
5920 rc, count);
5921 return (DDI_FAILURE);
5922 }
5923
5924 /* get number of available interrupts */
5925 rc = ddi_intr_get_navail(devinfo, intr_type, &avail);
5926 if ((rc != DDI_SUCCESS) || (avail == 0)) {
5927 E1000G_DEBUGLOG_2(Adapter, E1000G_WARN_LEVEL,
5928 "Get interrupt available number failed. "
5929 "Return: %d, available: %d\n", rc, avail);
5930 return (DDI_FAILURE);
5931 }
5932
5933 if (avail < count) {
5934 /* EMPTY */
5935 E1000G_DEBUGLOG_2(Adapter, E1000G_WARN_LEVEL,
5936 "Interrupts count: %d, available: %d\n",
5937 count, avail);
5938 }
5939
5940 /* Allocate an array of interrupt handles */
5941 Adapter->intr_size = count * sizeof (ddi_intr_handle_t);
5942 Adapter->htable = kmem_alloc(Adapter->intr_size, KM_SLEEP);
5943
5944 /* Set NORMAL behavior for both MSI and FIXED interrupt */
5945 flag = DDI_INTR_ALLOC_NORMAL;
5946
5947 /* call ddi_intr_alloc() */
5948 rc = ddi_intr_alloc(devinfo, Adapter->htable, intr_type, inum,
5949 count, &actual, flag);
5950
5951 if ((rc != DDI_SUCCESS) || (actual == 0)) {
5952 E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
5953 "Allocate interrupts failed: %d\n", rc);
5954
5955 kmem_free(Adapter->htable, Adapter->intr_size);
5956 return (DDI_FAILURE);
5957 }
5958
5959 if (actual < count) {
5960 /* EMPTY */
5961 E1000G_DEBUGLOG_2(Adapter, E1000G_WARN_LEVEL,
5962 "Interrupts requested: %d, received: %d\n",
5963 count, actual);
5964 }
5965
5966 Adapter->intr_cnt = actual;
5967
5968 /* Get priority for first msi, assume remaining are all the same */
5969 rc = ddi_intr_get_pri(Adapter->htable[0], &Adapter->intr_pri);
5970
5971 if (rc != DDI_SUCCESS) {
5972 E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
5973 "Get interrupt priority failed: %d\n", rc);
5974
5975 /* Free already allocated intr */
5976 for (y = 0; y < actual; y++)
5977 (void) ddi_intr_free(Adapter->htable[y]);
5978
5979 kmem_free(Adapter->htable, Adapter->intr_size);
5980 return (DDI_FAILURE);
5981 }
5982
5983 	/*
5984 	 * In legacy interrupt mode, PCI-Express adapters should use the
5985 	 * interrupt service routine e1000g_intr_pciexpress() to avoid
5986 	 * interrupt stealing when sharing an interrupt with other
5987 	 * devices.
5988 	 */
5989 if (Adapter->shared.mac.type < e1000_82571)
5990 intr_handler = (ddi_intr_handler_t *)e1000g_intr;
5991 else
5992 intr_handler = (ddi_intr_handler_t *)e1000g_intr_pciexpress;
5993
5994 /* Call ddi_intr_add_handler() */
5995 for (x = 0; x < actual; x++) {
5996 rc = ddi_intr_add_handler(Adapter->htable[x],
5997 intr_handler, (caddr_t)Adapter, NULL);
5998
5999 if (rc != DDI_SUCCESS) {
6000 E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
6001 "Add interrupt handler failed: %d\n", rc);
6002
6003 /* Remove already added handler */
6004 for (y = 0; y < x; y++)
6005 (void) ddi_intr_remove_handler(
6006 Adapter->htable[y]);
6007
6008 /* Free already allocated intr */
6009 for (y = 0; y < actual; y++)
6010 (void) ddi_intr_free(Adapter->htable[y]);
6011
6012 kmem_free(Adapter->htable, Adapter->intr_size);
6013 return (DDI_FAILURE);
6014 }
6015 }
6016
6017 rc = ddi_intr_get_cap(Adapter->htable[0], &Adapter->intr_cap);
6018
6019 if (rc != DDI_SUCCESS) {
6020 E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
6021 "Get interrupt cap failed: %d\n", rc);
6022
6023 /* Free already allocated intr */
6024 for (y = 0; y < actual; y++) {
6025 (void) ddi_intr_remove_handler(Adapter->htable[y]);
6026 (void) ddi_intr_free(Adapter->htable[y]);
6027 }
6028
6029 kmem_free(Adapter->htable, Adapter->intr_size);
6030 return (DDI_FAILURE);
6031 }
6032
6033 return (DDI_SUCCESS);
6034 }
6035
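/*
 * e1000g_rem_intrs - remove the interrupt handlers and free the allocated
 * interrupt handles
 */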
6036 static int
6037 e1000g_rem_intrs(struct e1000g *Adapter)
6038 {
6039 int x;
6040 int rc;
6041
6042 for (x = 0; x < Adapter->intr_cnt; x++) {
6043 rc = ddi_intr_remove_handler(Adapter->htable[x]);
6044 if (rc != DDI_SUCCESS) {
6045 E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
6046 "Remove intr handler failed: %d\n", rc);
6047 return (DDI_FAILURE);
6048 }
6049
6050 rc = ddi_intr_free(Adapter->htable[x]);
6051 if (rc != DDI_SUCCESS) {
6052 E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
6053 "Free intr failed: %d\n", rc);
6054 return (DDI_FAILURE);
6055 }
6056 }
6057
6058 kmem_free(Adapter->htable, Adapter->intr_size);
6059
6060 return (DDI_SUCCESS);
6061 }
6062
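/*
 * e1000g_enable_intrs - enable the allocated interrupts, using block enable
 * when the capability is present
 */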
6063 static int
6064 e1000g_enable_intrs(struct e1000g *Adapter)
6065 {
6066 int x;
6067 int rc;
6068
6069 /* Enable interrupts */
6070 if (Adapter->intr_cap & DDI_INTR_FLAG_BLOCK) {
6071 /* Call ddi_intr_block_enable() for MSI */
6072 rc = ddi_intr_block_enable(Adapter->htable,
6073 Adapter->intr_cnt);
6074 if (rc != DDI_SUCCESS) {
6075 E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
6076 "Enable block intr failed: %d\n", rc);
6077 return (DDI_FAILURE);
6078 }
6079 } else {
6080 /* Call ddi_intr_enable() for Legacy/MSI non block enable */
6081 for (x = 0; x < Adapter->intr_cnt; x++) {
6082 rc = ddi_intr_enable(Adapter->htable[x]);
6083 if (rc != DDI_SUCCESS) {
6084 E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
6085 "Enable intr failed: %d\n", rc);
6086 return (DDI_FAILURE);
6087 }
6088 }
6089 }
6090
6091 return (DDI_SUCCESS);
6092 }
6093
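/*
 * e1000g_disable_intrs - disable the allocated interrupts
 */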
6094 static int
6095 e1000g_disable_intrs(struct e1000g *Adapter)
6096 {
6097 int x;
6098 int rc;
6099
6100 /* Disable all interrupts */
6101 if (Adapter->intr_cap & DDI_INTR_FLAG_BLOCK) {
6102 rc = ddi_intr_block_disable(Adapter->htable,
6103 Adapter->intr_cnt);
6104 if (rc != DDI_SUCCESS) {
6105 E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
6106 "Disable block intr failed: %d\n", rc);
6107 return (DDI_FAILURE);
6108 }
6109 } else {
6110 for (x = 0; x < Adapter->intr_cnt; x++) {
6111 rc = ddi_intr_disable(Adapter->htable[x]);
6112 if (rc != DDI_SUCCESS) {
6113 E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
6114 "Disable intr failed: %d\n", rc);
6115 return (DDI_FAILURE);
6116 }
6117 }
6118 }
6119
6120 return (DDI_SUCCESS);
6121 }
6122
6123 /*
6124 * e1000g_get_phy_state - get the state of PHY registers, save in the adapter
6125 */
6126 static void
6127 e1000g_get_phy_state(struct e1000g *Adapter)
6128 {
6129 struct e1000_hw *hw = &Adapter->shared;
6130
6131 if (hw->phy.media_type == e1000_media_type_copper) {
6132 (void) e1000_read_phy_reg(hw, PHY_CONTROL, &Adapter->phy_ctrl);
6133 (void) e1000_read_phy_reg(hw, PHY_STATUS, &Adapter->phy_status);
6134 (void) e1000_read_phy_reg(hw, PHY_AUTONEG_ADV,
6135 &Adapter->phy_an_adv);
6136 (void) e1000_read_phy_reg(hw, PHY_AUTONEG_EXP,
6137 &Adapter->phy_an_exp);
6138 (void) e1000_read_phy_reg(hw, PHY_EXT_STATUS,
6139 &Adapter->phy_ext_status);
6140 (void) e1000_read_phy_reg(hw, PHY_1000T_CTRL,
6141 &Adapter->phy_1000t_ctrl);
6142 (void) e1000_read_phy_reg(hw, PHY_1000T_STATUS,
6143 &Adapter->phy_1000t_status);
6144 (void) e1000_read_phy_reg(hw, PHY_LP_ABILITY,
6145 &Adapter->phy_lp_able);
6146
6147 Adapter->param_autoneg_cap =
6148 (Adapter->phy_status & MII_SR_AUTONEG_CAPS) ? 1 : 0;
6149 Adapter->param_pause_cap =
6150 (Adapter->phy_an_adv & NWAY_AR_PAUSE) ? 1 : 0;
6151 Adapter->param_asym_pause_cap =
6152 (Adapter->phy_an_adv & NWAY_AR_ASM_DIR) ? 1 : 0;
6153 Adapter->param_1000fdx_cap =
6154 ((Adapter->phy_ext_status & IEEE_ESR_1000T_FD_CAPS) ||
6155 (Adapter->phy_ext_status & IEEE_ESR_1000X_FD_CAPS)) ? 1 : 0;
6156 Adapter->param_1000hdx_cap =
6157 ((Adapter->phy_ext_status & IEEE_ESR_1000T_HD_CAPS) ||
6158 (Adapter->phy_ext_status & IEEE_ESR_1000X_HD_CAPS)) ? 1 : 0;
6159 Adapter->param_100t4_cap =
6160 (Adapter->phy_status & MII_SR_100T4_CAPS) ? 1 : 0;
6161 Adapter->param_100fdx_cap =
6162 ((Adapter->phy_status & MII_SR_100X_FD_CAPS) ||
6163 (Adapter->phy_status & MII_SR_100T2_FD_CAPS)) ? 1 : 0;
6164 Adapter->param_100hdx_cap =
6165 ((Adapter->phy_status & MII_SR_100X_HD_CAPS) ||
6166 (Adapter->phy_status & MII_SR_100T2_HD_CAPS)) ? 1 : 0;
6167 Adapter->param_10fdx_cap =
6168 (Adapter->phy_status & MII_SR_10T_FD_CAPS) ? 1 : 0;
6169 Adapter->param_10hdx_cap =
6170 (Adapter->phy_status & MII_SR_10T_HD_CAPS) ? 1 : 0;
6171
6172 Adapter->param_adv_autoneg = hw->mac.autoneg;
6173 Adapter->param_adv_pause =
6174 (Adapter->phy_an_adv & NWAY_AR_PAUSE) ? 1 : 0;
6175 Adapter->param_adv_asym_pause =
6176 (Adapter->phy_an_adv & NWAY_AR_ASM_DIR) ? 1 : 0;
6177 Adapter->param_adv_1000hdx =
6178 (Adapter->phy_1000t_ctrl & CR_1000T_HD_CAPS) ? 1 : 0;
6179 Adapter->param_adv_100t4 =
6180 (Adapter->phy_an_adv & NWAY_AR_100T4_CAPS) ? 1 : 0;
6181 if (Adapter->param_adv_autoneg == 1) {
6182 Adapter->param_adv_1000fdx =
6183 (Adapter->phy_1000t_ctrl & CR_1000T_FD_CAPS)
6184 ? 1 : 0;
6185 Adapter->param_adv_100fdx =
6186 (Adapter->phy_an_adv & NWAY_AR_100TX_FD_CAPS)
6187 ? 1 : 0;
6188 Adapter->param_adv_100hdx =
6189 (Adapter->phy_an_adv & NWAY_AR_100TX_HD_CAPS)
6190 ? 1 : 0;
6191 Adapter->param_adv_10fdx =
6192 (Adapter->phy_an_adv & NWAY_AR_10T_FD_CAPS) ? 1 : 0;
6193 Adapter->param_adv_10hdx =
6194 (Adapter->phy_an_adv & NWAY_AR_10T_HD_CAPS) ? 1 : 0;
6195 }
6196
6197 Adapter->param_lp_autoneg =
6198 (Adapter->phy_an_exp & NWAY_ER_LP_NWAY_CAPS) ? 1 : 0;
6199 Adapter->param_lp_pause =
6200 (Adapter->phy_lp_able & NWAY_LPAR_PAUSE) ? 1 : 0;
6201 Adapter->param_lp_asym_pause =
6202 (Adapter->phy_lp_able & NWAY_LPAR_ASM_DIR) ? 1 : 0;
6203 Adapter->param_lp_1000fdx =
6204 (Adapter->phy_1000t_status & SR_1000T_LP_FD_CAPS) ? 1 : 0;
6205 Adapter->param_lp_1000hdx =
6206 (Adapter->phy_1000t_status & SR_1000T_LP_HD_CAPS) ? 1 : 0;
6207 Adapter->param_lp_100t4 =
6208 (Adapter->phy_lp_able & NWAY_LPAR_100T4_CAPS) ? 1 : 0;
6209 Adapter->param_lp_100fdx =
6210 (Adapter->phy_lp_able & NWAY_LPAR_100TX_FD_CAPS) ? 1 : 0;
6211 Adapter->param_lp_100hdx =
6212 (Adapter->phy_lp_able & NWAY_LPAR_100TX_HD_CAPS) ? 1 : 0;
6213 Adapter->param_lp_10fdx =
6214 (Adapter->phy_lp_able & NWAY_LPAR_10T_FD_CAPS) ? 1 : 0;
6215 Adapter->param_lp_10hdx =
6216 (Adapter->phy_lp_able & NWAY_LPAR_10T_HD_CAPS) ? 1 : 0;
6217 } else {
6218 		/*
6219 		 * A 1Gig fiber adapter only offers 1Gig full duplex, so it
6220 		 * can only work with a 1Gig full-duplex link partner.
6221 		 */
6222 Adapter->param_autoneg_cap = 0;
6223 Adapter->param_pause_cap = 1;
6224 Adapter->param_asym_pause_cap = 1;
6225 Adapter->param_1000fdx_cap = 1;
6226 Adapter->param_1000hdx_cap = 0;
6227 Adapter->param_100t4_cap = 0;
6228 Adapter->param_100fdx_cap = 0;
6229 Adapter->param_100hdx_cap = 0;
6230 Adapter->param_10fdx_cap = 0;
6231 Adapter->param_10hdx_cap = 0;
6232
6233 Adapter->param_adv_autoneg = 0;
6234 Adapter->param_adv_pause = 1;
6235 Adapter->param_adv_asym_pause = 1;
6236 Adapter->param_adv_1000fdx = 1;
6237 Adapter->param_adv_1000hdx = 0;
6238 Adapter->param_adv_100t4 = 0;
6239 Adapter->param_adv_100fdx = 0;
6240 Adapter->param_adv_100hdx = 0;
6241 Adapter->param_adv_10fdx = 0;
6242 Adapter->param_adv_10hdx = 0;
6243
6244 Adapter->param_lp_autoneg = 0;
6245 Adapter->param_lp_pause = 0;
6246 Adapter->param_lp_asym_pause = 0;
6247 Adapter->param_lp_1000fdx = 0;
6248 Adapter->param_lp_1000hdx = 0;
6249 Adapter->param_lp_100t4 = 0;
6250 Adapter->param_lp_100fdx = 0;
6251 Adapter->param_lp_100hdx = 0;
6252 Adapter->param_lp_10fdx = 0;
6253 Adapter->param_lp_10hdx = 0;
6254 }
6255 }
6256
6257 /*
6258 * FMA support
6259 */
6260
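/*
 * e1000g_check_acc_handle - check and clear the FMA error status of a
 * register access handle
 */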
6261 int
6262 e1000g_check_acc_handle(ddi_acc_handle_t handle)
6263 {
6264 ddi_fm_error_t de;
6265
6266 ddi_fm_acc_err_get(handle, &de, DDI_FME_VERSION);
6267 ddi_fm_acc_err_clear(handle, DDI_FME_VERSION);
6268 return (de.fme_status);
6269 }
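/*
 * e1000g_check_dma_handle - check the FMA error status of a DMA handle
 */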
6270
6271 int
6272 e1000g_check_dma_handle(ddi_dma_handle_t handle)
6273 {
6274 ddi_fm_error_t de;
6275
6276 ddi_fm_dma_err_get(handle, &de, DDI_FME_VERSION);
6277 return (de.fme_status);
6278 }
6279
6280 /*
6281 * The IO fault service error handling callback function
6282 */
6283 /* ARGSUSED2 */
6284 static int
6285 e1000g_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data)
6286 {
6287 	/*
6288 	 * As the driver can always deal with an error in any DMA or
6289 	 * access handle, we can just return the fme_status value.
6290 	 */
6291 pci_ereport_post(dip, err, NULL);
6292 return (err->fme_status);
6293 }
6294
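/*
 * e1000g_fm_init - register the driver's FMA capabilities with the
 * IO Fault Services framework
 */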
6295 static void
6296 e1000g_fm_init(struct e1000g *Adapter)
6297 {
6298 ddi_iblock_cookie_t iblk;
6299 int fma_dma_flag;
6300
6301 /* Only register with IO Fault Services if we have some capability */
6302 if (Adapter->fm_capabilities & DDI_FM_ACCCHK_CAPABLE) {
6303 e1000g_regs_acc_attr.devacc_attr_access = DDI_FLAGERR_ACC;
6304 } else {
6305 e1000g_regs_acc_attr.devacc_attr_access = DDI_DEFAULT_ACC;
6306 }
6307
6308 if (Adapter->fm_capabilities & DDI_FM_DMACHK_CAPABLE) {
6309 fma_dma_flag = 1;
6310 } else {
6311 fma_dma_flag = 0;
6312 }
6313
6314 (void) e1000g_set_fma_flags(fma_dma_flag);
6315
6316 if (Adapter->fm_capabilities) {
6317
6318 /* Register capabilities with IO Fault Services */
6319 ddi_fm_init(Adapter->dip, &Adapter->fm_capabilities, &iblk);
6320
6321 /*
6322 * Initialize pci ereport capabilities if ereport capable
6323 */
6324 if (DDI_FM_EREPORT_CAP(Adapter->fm_capabilities) ||
6325 DDI_FM_ERRCB_CAP(Adapter->fm_capabilities))
6326 pci_ereport_setup(Adapter->dip);
6327
6328 /*
6329 * Register error callback if error callback capable
6330 */
6331 if (DDI_FM_ERRCB_CAP(Adapter->fm_capabilities))
6332 ddi_fm_handler_register(Adapter->dip,
6333 e1000g_fm_error_cb, (void*) Adapter);
6334 }
6335 }
6336
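/*
 * e1000g_fm_fini - unregister the driver's FMA capabilities
 */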
6337 static void
6338 e1000g_fm_fini(struct e1000g *Adapter)
6339 {
6340 /* Only unregister FMA capabilities if we registered some */
6341 if (Adapter->fm_capabilities) {
6342
6343 /*
6344 * Release any resources allocated by pci_ereport_setup()
6345 */
6346 if (DDI_FM_EREPORT_CAP(Adapter->fm_capabilities) ||
6347 DDI_FM_ERRCB_CAP(Adapter->fm_capabilities))
6348 pci_ereport_teardown(Adapter->dip);
6349
6350 /*
6351 * Un-register error callback if error callback capable
6352 */
6353 if (DDI_FM_ERRCB_CAP(Adapter->fm_capabilities))
6354 ddi_fm_handler_unregister(Adapter->dip);
6355
6356 /* Unregister from IO Fault Services */
6357 mutex_enter(&e1000g_rx_detach_lock);
6358 ddi_fm_fini(Adapter->dip);
6359 if (Adapter->priv_dip != NULL) {
6360 DEVI(Adapter->priv_dip)->devi_fmhdl = NULL;
6361 }
6362 mutex_exit(&e1000g_rx_detach_lock);
6363 }
6364 }
6365
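/*
 * e1000g_fm_ereport - post a device ereport of the given subclass if the
 * driver is ereport-capable
 */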
6366 void
6367 e1000g_fm_ereport(struct e1000g *Adapter, char *detail)
6368 {
6369 uint64_t ena;
6370 char buf[FM_MAX_CLASS];
6371
6372 (void) snprintf(buf, FM_MAX_CLASS, "%s.%s", DDI_FM_DEVICE, detail);
6373 ena = fm_ena_generate(0, FM_ENA_FMT1);
6374 if (DDI_FM_EREPORT_CAP(Adapter->fm_capabilities)) {
6375 ddi_fm_ereport_post(Adapter->dip, buf, ena, DDI_NOSLEEP,
6376 FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0, NULL);
6377 }
6378 }
6379
6380 /*
6381 * quiesce(9E) entry point.
6382 *
6383 * This function is called when the system is single-threaded at high
6384  * PIL with preemption disabled. Therefore, this function must not
6385  * block.
6386 *
6387 * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
6388 * DDI_FAILURE indicates an error condition and should almost never happen.
6389 */
6390 static int
6391 e1000g_quiesce(dev_info_t *devinfo)
6392 {
6393 struct e1000g *Adapter;
6394
6395 Adapter = (struct e1000g *)ddi_get_driver_private(devinfo);
6396
6397 if (Adapter == NULL)
6398 return (DDI_FAILURE);
6399
6400 e1000g_clear_all_interrupts(Adapter);
6401
6402 (void) e1000_reset_hw(&Adapter->shared);
6403
6404 /* Setup our HW Tx Head & Tail descriptor pointers */
6405 E1000_WRITE_REG(&Adapter->shared, E1000_TDH(0), 0);
6406 E1000_WRITE_REG(&Adapter->shared, E1000_TDT(0), 0);
6407
6408 /* Setup our HW Rx Head & Tail descriptor pointers */
6409 E1000_WRITE_REG(&Adapter->shared, E1000_RDH(0), 0);
6410 E1000_WRITE_REG(&Adapter->shared, E1000_RDT(0), 0);
6411
6412 return (DDI_SUCCESS);
6413 }
6414
6415 /*
6416 * synchronize the adv* and en* parameters.
6417 *
6418 * See comments in <sys/dld.h> for details of the *_en_*
6419 * parameters. The usage of ndd for setting adv parameters will
6420 * synchronize all the en parameters with the e1000g parameters,
6421 * implicitly disabling any settings made via dladm.
6422 */
6423 static void
6424 e1000g_param_sync(struct e1000g *Adapter)
6425 {
6426 Adapter->param_en_1000fdx = Adapter->param_adv_1000fdx;
6427 Adapter->param_en_1000hdx = Adapter->param_adv_1000hdx;
6428 Adapter->param_en_100fdx = Adapter->param_adv_100fdx;
6429 Adapter->param_en_100hdx = Adapter->param_adv_100hdx;
6430 Adapter->param_en_10fdx = Adapter->param_adv_10fdx;
6431 Adapter->param_en_10hdx = Adapter->param_adv_10hdx;
6432 }
6433
6434 /*
6435 * e1000g_get_driver_control - tell manageability firmware that the driver
6436 * has control.
6437 */
6438 static void
6439 e1000g_get_driver_control(struct e1000_hw *hw)
6440 {
6441 uint32_t ctrl_ext;
6442 uint32_t swsm;
6443
6444 /* tell manageability firmware the driver has taken over */
6445 switch (hw->mac.type) {
6446 case e1000_82573:
6447 swsm = E1000_READ_REG(hw, E1000_SWSM);
6448 E1000_WRITE_REG(hw, E1000_SWSM, swsm | E1000_SWSM_DRV_LOAD);
6449 break;
6450 case e1000_82571:
6451 case e1000_82572:
6452 case e1000_82574:
6453 case e1000_80003es2lan:
6454 case e1000_ich8lan:
6455 case e1000_ich9lan:
6456 case e1000_ich10lan:
6457 case e1000_pchlan:
6458 case e1000_pch2lan:
6459 ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
6460 E1000_WRITE_REG(hw, E1000_CTRL_EXT,
6461 ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
6462 break;
6463 default:
6464 /* no manageability firmware: do nothing */
6465 break;
6466 }
6467 }
6468
6469 /*
6470 * e1000g_release_driver_control - tell manageability firmware that the driver
6471 * has released control.
6472 */
6473 static void
6474 e1000g_release_driver_control(struct e1000_hw *hw)
6475 {
6476 uint32_t ctrl_ext;
6477 uint32_t swsm;
6478
6479 /* tell manageability firmware the driver has released control */
6480 switch (hw->mac.type) {
6481 case e1000_82573:
6482 swsm = E1000_READ_REG(hw, E1000_SWSM);
6483 E1000_WRITE_REG(hw, E1000_SWSM, swsm & ~E1000_SWSM_DRV_LOAD);
6484 break;
6485 case e1000_82571:
6486 case e1000_82572:
6487 case e1000_82574:
6488 case e1000_80003es2lan:
6489 case e1000_ich8lan:
6490 case e1000_ich9lan:
6491 case e1000_ich10lan:
6492 case e1000_pchlan:
6493 case e1000_pch2lan:
6494 ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
6495 E1000_WRITE_REG(hw, E1000_CTRL_EXT,
6496 ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
6497 break;
6498 default:
6499 /* no manageability firmware: do nothing */
6500 break;
6501 }
6502 }
6503
6504 /*
6505 * Restore e1000g promiscuous mode.
6506 */
6507 static void
6508 e1000g_restore_promisc(struct e1000g *Adapter)
6509 {
6510 if (Adapter->e1000g_promisc) {
6511 uint32_t rctl;
6512
6513 rctl = E1000_READ_REG(&Adapter->shared, E1000_RCTL);
6514 rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_BAM);
6515 E1000_WRITE_REG(&Adapter->shared, E1000_RCTL, rctl);
6516 }
6517 }
6518