/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2010-2013, by Broadcom, Inc.
 * All Rights Reserved.
 */

/*
 * Copyright (c) 2002, 2010, Oracle and/or its affiliates.
 * All rights reserved.
 * Copyright 2016 Nexenta Systems, Inc. All rights reserved.
 */

#include "bge_impl.h"
#include <sys/sdt.h>
#include <sys/mac_provider.h>
#include <sys/mac.h>
#include <sys/mac_flow.h>


#ifndef STRINGIFY
#define	XSTRINGIFY(x)	#x
#define	STRINGIFY(x)	XSTRINGIFY(x)
#endif

/*
 * This is the string displayed by modinfo, etc.
 */
static char bge_ident[] = "Broadcom Gb Ethernet";

/*
 * Property names
 */
static char debug_propname[] = "bge-debug-flags";
static char clsize_propname[] = "cache-line-size";
static char latency_propname[] = "latency-timer";
static char localmac_boolname[] = "local-mac-address?";
static char localmac_propname[] = "local-mac-address";
static char macaddr_propname[] = "mac-address";
static char subdev_propname[] = "subsystem-id";
static char subven_propname[] = "subsystem-vendor-id";
static char rxrings_propname[] = "bge-rx-rings";
static char txrings_propname[] = "bge-tx-rings";
static char eee_propname[] = "bge-eee";
static char fm_cap[] = "fm-capable";
static char default_mtu[] = "default_mtu";

static int bge_add_intrs(bge_t *, int);
static void bge_rem_intrs(bge_t *);
static int bge_unicst_set(void *, const uint8_t *, int);
static int bge_addmac(void *, const uint8_t *);
static int bge_remmac(void *, const uint8_t *);

/*
 * Describes the chip's DMA engine
 */
static ddi_dma_attr_t dma_attr = {
	DMA_ATTR_V0,			/* dma_attr_version	*/
	0x0000000000000000ull,		/* dma_attr_addr_lo	*/
	0xFFFFFFFFFFFFFFFFull,		/* dma_attr_addr_hi	*/
	0x00000000FFFFFFFFull,		/* dma_attr_count_max	*/
	0x0000000000000001ull,		/* dma_attr_align	*/
	0x00000FFF,			/* dma_attr_burstsizes	*/
	0x00000001,			/* dma_attr_minxfer	*/
	0x000000000000FFFFull,		/* dma_attr_maxxfer	*/
	0x00000000FFFFFFFFull,		/* dma_attr_seg		*/
	1,				/* dma_attr_sgllen	*/
	0x00000001,			/* dma_attr_granular	*/
	DDI_DMA_FLAGERR			/* dma_attr_flags	*/
};
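
/*
 * Note: dma_attr_sgllen is 1, so any binding that uses these attributes
 * must resolve to a single (i.e. physically contiguous) DMA cookie.
 */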

/*
 * PIO access attributes for registers
 */
static ddi_device_acc_attr_t bge_reg_accattr = {
	DDI_DEVICE_ATTR_V1,
	DDI_NEVERSWAP_ACC,
	DDI_STRICTORDER_ACC,
	DDI_FLAGERR_ACC
};

/*
 * DMA access attributes for descriptors: NOT to be byte swapped.
 */
static ddi_device_acc_attr_t bge_desc_accattr = {
	DDI_DEVICE_ATTR_V0,
	DDI_NEVERSWAP_ACC,
	DDI_STRICTORDER_ACC
};

/*
 * DMA access attributes for data: NOT to be byte swapped.
 */
static ddi_device_acc_attr_t bge_data_accattr = {
	DDI_DEVICE_ATTR_V0,
	DDI_NEVERSWAP_ACC,
	DDI_STRICTORDER_ACC
};

static int		bge_m_start(void *);
static void		bge_m_stop(void *);
static int		bge_m_promisc(void *, boolean_t);
static int		bge_m_multicst(void *, boolean_t, const uint8_t *);
static void		bge_m_ioctl(void *, queue_t *, mblk_t *);
static boolean_t	bge_m_getcapab(void *, mac_capab_t, void *);
static int		bge_unicst_set(void *, const uint8_t *, int);
static int		bge_m_setprop(void *, const char *, mac_prop_id_t,
			    uint_t, const void *);
static int		bge_m_getprop(void *, const char *, mac_prop_id_t,
			    uint_t, void *);
static void		bge_m_propinfo(void *, const char *, mac_prop_id_t,
			    mac_prop_info_handle_t);
static int		bge_set_priv_prop(bge_t *, const char *, uint_t,
			    const void *);
static int		bge_get_priv_prop(bge_t *, const char *, uint_t,
			    void *);
static void		bge_priv_propinfo(const char *,
			    mac_prop_info_handle_t);

static mac_callbacks_t bge_m_callbacks = {
	.mc_callbacks = MC_IOCTL | MC_SETPROP | MC_GETPROP | MC_PROPINFO |
	    MC_GETCAPAB,
	.mc_getstat = bge_m_stat,
	.mc_start = bge_m_start,
	.mc_stop = bge_m_stop,
	.mc_setpromisc = bge_m_promisc,
	.mc_multicst = bge_m_multicst,
	.mc_tx = bge_m_tx,
	.mc_ioctl = bge_m_ioctl,
	.mc_getcapab = bge_m_getcapab,
	.mc_setprop = bge_m_setprop,
	.mc_getprop = bge_m_getprop,
	.mc_propinfo = bge_m_propinfo
};

char *bge_priv_prop[] = {
	"_adv_asym_pause_cap",
	"_adv_pause_cap",
	"_drain_max",
	"_msi_cnt",
	"_rx_intr_coalesce_blank_time",
	"_tx_intr_coalesce_blank_time",
	"_rx_intr_coalesce_pkt_cnt",
	"_tx_intr_coalesce_pkt_cnt",
	NULL
};
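
/*
 * These private properties are exposed through the MAC framework like
 * any other link property.  For example (illustrative usage only,
 * assuming an instance named bge0):
 *
 *	# dladm show-linkprop -p _drain_max bge0
 *	# dladm set-linkprop -p _drain_max=128 bge0
 */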

uint8_t zero_addr[6] = {0, 0, 0, 0, 0, 0};

/*
 * ========== Transmit and receive ring reinitialisation ==========
 */

/*
 * These <reinit> routines each reset the specified ring to an initial
 * state, assuming that the corresponding <init> routine has already
 * been called exactly once.
 */

static void
bge_reinit_send_ring(send_ring_t *srp)
{
	bge_queue_t *txbuf_queue;
	bge_queue_item_t *txbuf_head;
	sw_txbuf_t *txbuf;
	sw_sbd_t *ssbdp;
	uint32_t slot;

	/*
	 * Reinitialise control variables ...
	 */
	srp->tx_flow = 0;
	srp->tx_next = 0;
	srp->txfill_next = 0;
	srp->tx_free = srp->desc.nslots;
	ASSERT(mutex_owned(srp->tc_lock));
	srp->tc_next = 0;
	srp->txpkt_next = 0;
	srp->tx_block = 0;
	srp->tx_nobd = 0;
	srp->tx_nobuf = 0;

	/*
	 * Initialize the tx buffer push queue
	 */
	mutex_enter(srp->freetxbuf_lock);
	mutex_enter(srp->txbuf_lock);
	txbuf_queue = &srp->freetxbuf_queue;
	txbuf_queue->head = NULL;
	txbuf_queue->count = 0;
	txbuf_queue->lock = srp->freetxbuf_lock;
	srp->txbuf_push_queue = txbuf_queue;

	/*
	 * Initialize the tx buffer pop queue
	 */
	txbuf_queue = &srp->txbuf_queue;
	txbuf_queue->head = NULL;
	txbuf_queue->count = 0;
	txbuf_queue->lock = srp->txbuf_lock;
	srp->txbuf_pop_queue = txbuf_queue;
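
	/*
	 * Thread every tx buffer onto the pop queue to form the initial
	 * free list.  (In this scheme, buffers freed later are pushed
	 * onto the push queue under its own lock, and the two queues are
	 * swapped when the pop queue runs dry, which keeps producers and
	 * consumers off each other's locks most of the time.)
	 */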
	txbuf_head = srp->txbuf_head;
	txbuf = srp->txbuf;
	for (slot = 0; slot < srp->tx_buffers; ++slot) {
		txbuf_head->item = txbuf;
		txbuf_head->next = txbuf_queue->head;
		txbuf_queue->head = txbuf_head;
		txbuf_queue->count++;
		txbuf++;
		txbuf_head++;
	}
	mutex_exit(srp->txbuf_lock);
	mutex_exit(srp->freetxbuf_lock);

	/*
	 * Zero and sync all the h/w Send Buffer Descriptors
	 */
	DMA_ZERO(srp->desc);
	DMA_SYNC(srp->desc, DDI_DMA_SYNC_FORDEV);
	bzero(srp->pktp, BGE_SEND_BUF_MAX * sizeof (*srp->pktp));
	ssbdp = srp->sw_sbds;
	for (slot = 0; slot < srp->desc.nslots; ++ssbdp, ++slot)
		ssbdp->pbuf = NULL;
}

static void
bge_reinit_recv_ring(recv_ring_t *rrp)
{
	/*
	 * Reinitialise control variables ...
	 */
	rrp->rx_next = 0;
}

static void
bge_reinit_buff_ring(buff_ring_t *brp, uint32_t ring)
{
	bge_rbd_t *hw_rbd_p;
	sw_rbd_t *srbdp;
	uint32_t bufsize;
	uint32_t nslots;
	uint32_t slot;

	static uint16_t ring_type_flag[BGE_BUFF_RINGS_MAX] = {
		RBD_FLAG_STD_RING,
		RBD_FLAG_JUMBO_RING,
		RBD_FLAG_MINI_RING
	};

	/*
	 * Zero, initialise and sync all the h/w Receive Buffer Descriptors
	 * Note: all the remaining fields (<type>, <flags>, <ip_cksum>,
	 * <tcp_udp_cksum>, <error_flag>, <vlan_tag>, and <reserved>)
	 * should be zeroed, and so don't need to be set up specifically
	 * once the whole area has been cleared.
	 */
	DMA_ZERO(brp->desc);

	hw_rbd_p = DMA_VPTR(brp->desc);
	nslots = brp->desc.nslots;
	ASSERT(brp->buf[0].nslots == nslots/BGE_SPLIT);
	bufsize = brp->buf[0].size;
	srbdp = brp->sw_rbds;
	for (slot = 0; slot < nslots; ++hw_rbd_p, ++srbdp, ++slot) {
		hw_rbd_p->host_buf_addr = srbdp->pbuf.cookie.dmac_laddress;
		hw_rbd_p->index = (uint16_t)slot;
		hw_rbd_p->len = (uint16_t)bufsize;
		hw_rbd_p->opaque = srbdp->pbuf.token;
		hw_rbd_p->flags |= ring_type_flag[ring];
	}

	DMA_SYNC(brp->desc, DDI_DMA_SYNC_FORDEV);

	/*
	 * Finally, reinitialise the ring control variables ...
	 */
	brp->rf_next = (nslots != 0) ? (nslots-1) : 0;
}

/*
 * Reinitialize all rings
 */
static void
bge_reinit_rings(bge_t *bgep)
{
	uint32_t ring;

	ASSERT(mutex_owned(bgep->genlock));

	/*
	 * Send Rings ...
	 */
	for (ring = 0; ring < bgep->chipid.tx_rings; ++ring)
		bge_reinit_send_ring(&bgep->send[ring]);

	/*
	 * Receive Return Rings ...
	 */
	for (ring = 0; ring < bgep->chipid.rx_rings; ++ring)
		bge_reinit_recv_ring(&bgep->recv[ring]);

	/*
	 * Receive Producer Rings ...
	 */
	for (ring = 0; ring < BGE_BUFF_RINGS_USED; ++ring)
		bge_reinit_buff_ring(&bgep->buff[ring], ring);
}

/*
 * ========== Internal state management entry points ==========
 */

#undef	BGE_DBG
#define	BGE_DBG		BGE_DBG_NEMO	/* debug flag for this code	*/

/*
 * These routines provide all the functionality required by the
 * corresponding GLD entry points, but don't update the GLD state
 * so they can be called internally without disturbing our record
 * of what GLD thinks we should be doing ...
 */

/*
 *	bge_reset() -- reset h/w & rings to initial state
 */
static int
#ifdef BGE_IPMI_ASF
bge_reset(bge_t *bgep, uint_t asf_mode)
#else
bge_reset(bge_t *bgep)
#endif
{
	uint32_t ring;
	int retval;

	BGE_TRACE(("bge_reset($%p)", (void *)bgep));

	ASSERT(mutex_owned(bgep->genlock));

	/*
	 * Grab all the other mutexes in the world (this should
	 * ensure no other threads are manipulating driver state)
	 */
	for (ring = 0; ring < BGE_RECV_RINGS_MAX; ++ring)
		mutex_enter(bgep->recv[ring].rx_lock);
	for (ring = 0; ring < BGE_BUFF_RINGS_MAX; ++ring)
		mutex_enter(bgep->buff[ring].rf_lock);
	rw_enter(bgep->errlock, RW_WRITER);
	for (ring = 0; ring < BGE_SEND_RINGS_MAX; ++ring)
		mutex_enter(bgep->send[ring].tx_lock);
	for (ring = 0; ring < BGE_SEND_RINGS_MAX; ++ring)
		mutex_enter(bgep->send[ring].tc_lock);

#ifdef BGE_IPMI_ASF
	retval = bge_chip_reset(bgep, B_TRUE, asf_mode);
#else
	retval = bge_chip_reset(bgep, B_TRUE);
#endif
	bge_reinit_rings(bgep);

	/*
	 * Free the world ...
	 */
	for (ring = BGE_SEND_RINGS_MAX; ring-- > 0; )
		mutex_exit(bgep->send[ring].tc_lock);
	for (ring = 0; ring < BGE_SEND_RINGS_MAX; ++ring)
		mutex_exit(bgep->send[ring].tx_lock);
	rw_exit(bgep->errlock);
	for (ring = BGE_BUFF_RINGS_MAX; ring-- > 0; )
		mutex_exit(bgep->buff[ring].rf_lock);
	for (ring = BGE_RECV_RINGS_MAX; ring-- > 0; )
		mutex_exit(bgep->recv[ring].rx_lock);

	BGE_DEBUG(("bge_reset($%p) done", (void *)bgep));
	return (retval);
}

/*
 *	bge_stop() -- stop processing, don't reset h/w or rings
 */
static void
bge_stop(bge_t *bgep)
{
	BGE_TRACE(("bge_stop($%p)", (void *)bgep));

	ASSERT(mutex_owned(bgep->genlock));

#ifdef BGE_IPMI_ASF
	if (bgep->asf_enabled) {
		bgep->asf_pseudostop = B_TRUE;
	} else {
#endif
		bge_chip_stop(bgep, B_FALSE);
#ifdef BGE_IPMI_ASF
	}
#endif

	BGE_DEBUG(("bge_stop($%p) done", (void *)bgep));
}

/*
 *	bge_start() -- start transmitting/receiving
 */
static int
bge_start(bge_t *bgep, boolean_t reset_phys)
{
	int retval;

	BGE_TRACE(("bge_start($%p, %d)", (void *)bgep, reset_phys));

	ASSERT(mutex_owned(bgep->genlock));

	/*
	 * Start chip processing, including enabling interrupts
	 */
	retval = bge_chip_start(bgep, reset_phys);

	BGE_DEBUG(("bge_start($%p, %d) done", (void *)bgep, reset_phys));
	return (retval);
}

/*
 * bge_restart - restart transmitting/receiving after error or suspend
 */
int
bge_restart(bge_t *bgep, boolean_t reset_phys)
{
	int retval = DDI_SUCCESS;

	ASSERT(mutex_owned(bgep->genlock));

#ifdef BGE_IPMI_ASF
	if (bgep->asf_enabled) {
		if (bge_reset(bgep, ASF_MODE_POST_INIT) != DDI_SUCCESS)
			retval = DDI_FAILURE;
	} else
		if (bge_reset(bgep, ASF_MODE_NONE) != DDI_SUCCESS)
			retval = DDI_FAILURE;
#else
	if (bge_reset(bgep) != DDI_SUCCESS)
		retval = DDI_FAILURE;
#endif
	if (bgep->bge_mac_state == BGE_MAC_STARTED) {
		if (bge_start(bgep, reset_phys) != DDI_SUCCESS)
			retval = DDI_FAILURE;
		bgep->watchdog = 0;
		ddi_trigger_softintr(bgep->drain_id);
	}

	BGE_DEBUG(("bge_restart($%p, %d) done", (void *)bgep, reset_phys));
	return (retval);
}


/*
 * ========== Nemo-required management entry points ==========
 */

#undef	BGE_DBG
#define	BGE_DBG		BGE_DBG_NEMO	/* debug flag for this code	*/

/*
 *	bge_m_stop() -- stop transmitting/receiving
 */
static void
bge_m_stop(void *arg)
{
	bge_t *bgep = arg;		/* private device info	*/
	send_ring_t *srp;
	uint32_t ring;

	BGE_TRACE(("bge_m_stop($%p)", arg));

	/*
	 * Just stop processing, then record new GLD state
	 */
	mutex_enter(bgep->genlock);
	if (!(bgep->progress & PROGRESS_INTR)) {
		/* can happen during autorecovery */
		bgep->bge_chip_state = BGE_CHIP_STOPPED;
	} else
		bge_stop(bgep);

	bgep->link_state = LINK_STATE_UNKNOWN;
	mac_link_update(bgep->mh, bgep->link_state);

	/*
	 * Free any tx buffers allocated during transmit processing.
	 */
#ifdef BGE_IPMI_ASF
	if (!bgep->asf_pseudostop)
#endif
	{
		rw_enter(bgep->errlock, RW_WRITER);
		for (ring = 0; ring < bgep->chipid.tx_rings; ++ring) {
			srp = &bgep->send[ring];
			mutex_enter(srp->tx_lock);
			if (srp->tx_array > 1)
				bge_free_txbuf_arrays(srp);
			mutex_exit(srp->tx_lock);
		}
		rw_exit(bgep->errlock);
	}
	bgep->bge_mac_state = BGE_MAC_STOPPED;
	BGE_DEBUG(("bge_m_stop($%p) done", arg));
	if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK)
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_UNAFFECTED);
	mutex_exit(bgep->genlock);
}

/*
 *	bge_m_start() -- start transmitting/receiving
 */
static int
bge_m_start(void *arg)
{
	bge_t *bgep = arg;		/* private device info	*/

	BGE_TRACE(("bge_m_start($%p)", arg));

	/*
	 * Start processing and record new GLD state
	 */
	mutex_enter(bgep->genlock);
	if (!(bgep->progress & PROGRESS_INTR)) {
		/* can happen during autorecovery */
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
#ifdef BGE_IPMI_ASF
	if (bgep->asf_enabled) {
		if ((bgep->asf_status == ASF_STAT_RUN) &&
		    (bgep->asf_pseudostop)) {
			bgep->bge_mac_state = BGE_MAC_STARTED;
			/* force a mac link update here */
			bge_phys_check(bgep);
			bgep->link_state = (bgep->param_link_up) ?
			    LINK_STATE_UP : LINK_STATE_DOWN;
			mac_link_update(bgep->mh, bgep->link_state);
			mutex_exit(bgep->genlock);
			return (0);
		}
	}
	if (bge_reset(bgep, ASF_MODE_INIT) != DDI_SUCCESS) {
#else
	if (bge_reset(bgep) != DDI_SUCCESS) {
#endif
		(void) bge_check_acc_handle(bgep, bgep->cfg_handle);
		(void) bge_check_acc_handle(bgep, bgep->io_handle);
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
	if (bge_start(bgep, B_TRUE) != DDI_SUCCESS) {
		(void) bge_check_acc_handle(bgep, bgep->cfg_handle);
		(void) bge_check_acc_handle(bgep, bgep->io_handle);
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
	bgep->watchdog = 0;
	bgep->bge_mac_state = BGE_MAC_STARTED;
	BGE_DEBUG(("bge_m_start($%p) done", arg));

	if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
	if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
#ifdef BGE_IPMI_ASF
	if (bgep->asf_enabled) {
		if (bgep->asf_status != ASF_STAT_RUN) {
			/* start the ASF heartbeat */
			bgep->asf_timeout_id = timeout(bge_asf_heartbeat,
			    (void *)bgep,
			    drv_usectohz(BGE_ASF_HEARTBEAT_INTERVAL));
			bgep->asf_status = ASF_STAT_RUN;
		}
	}
#endif
	mutex_exit(bgep->genlock);

	return (0);
}

/*
 *	bge_unicst_set() -- set the physical network address
 */
static int
bge_unicst_set(void *arg, const uint8_t *macaddr, int slot)
{
	bge_t *bgep = arg;		/* private device info	*/

	BGE_TRACE(("bge_unicst_set($%p, %s)", arg,
	    ether_sprintf((void *)macaddr)));

	/*
	 * Remember the new current address in the driver state
	 * Sync the chip's idea of the address too ...
	 */
	mutex_enter(bgep->genlock);
	if (!(bgep->progress & PROGRESS_INTR)) {
		/* can happen during autorecovery */
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
	ethaddr_copy(macaddr, bgep->curr_addr[slot].addr);
#ifdef BGE_IPMI_ASF
	if (bge_chip_sync(bgep, B_FALSE) == DDI_FAILURE) {
#else
	if (bge_chip_sync(bgep) == DDI_FAILURE) {
#endif
		(void) bge_check_acc_handle(bgep, bgep->cfg_handle);
		(void) bge_check_acc_handle(bgep, bgep->io_handle);
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
#ifdef BGE_IPMI_ASF
	if (bgep->asf_enabled) {
		/*
		 * The above bge_chip_sync() call wrote the Ethernet MAC
		 * address registers, which destroyed the IPMI/ASF sideband.
		 * Here, we have to reset the chip to make the IPMI/ASF
		 * sideband work again.
		 */
		if (bgep->asf_status == ASF_STAT_RUN) {
			/*
			 * We must stop the ASF heartbeat before
			 * bge_chip_stop(); otherwise some systems (e.g. the
			 * IBM HS20 blade server) may crash.
			 */
			bge_asf_update_status(bgep);
			bge_asf_stop_timer(bgep);
			bgep->asf_status = ASF_STAT_STOP;

			bge_asf_pre_reset_operations(bgep, BGE_INIT_RESET);
		}
		bge_chip_stop(bgep, B_FALSE);

		if (bge_restart(bgep, B_FALSE) == DDI_FAILURE) {
			(void) bge_check_acc_handle(bgep, bgep->cfg_handle);
			(void) bge_check_acc_handle(bgep, bgep->io_handle);
			ddi_fm_service_impact(bgep->devinfo,
			    DDI_SERVICE_DEGRADED);
			mutex_exit(bgep->genlock);
			return (EIO);
		}

		/*
		 * Start our ASF heartbeat counter as soon as possible.
		 */
		if (bgep->asf_status != ASF_STAT_RUN) {
			/* start the ASF heartbeat */
			bgep->asf_timeout_id = timeout(bge_asf_heartbeat,
			    (void *)bgep,
			    drv_usectohz(BGE_ASF_HEARTBEAT_INTERVAL));
			bgep->asf_status = ASF_STAT_RUN;
		}
	}
#endif
	BGE_DEBUG(("bge_unicst_set($%p) done", arg));
	if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
	if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
	mutex_exit(bgep->genlock);

	return (0);
}

extern void bge_wake_factotum(bge_t *);

static boolean_t
bge_param_locked(mac_prop_id_t pr_num)
{
	/*
	 * All adv_* parameters are locked (read-only) while
	 * the device is in any sort of loopback mode ...
	 */
	switch (pr_num) {
	case MAC_PROP_ADV_1000FDX_CAP:
	case MAC_PROP_EN_1000FDX_CAP:
	case MAC_PROP_ADV_1000HDX_CAP:
	case MAC_PROP_EN_1000HDX_CAP:
	case MAC_PROP_ADV_100FDX_CAP:
	case MAC_PROP_EN_100FDX_CAP:
	case MAC_PROP_ADV_100HDX_CAP:
	case MAC_PROP_EN_100HDX_CAP:
	case MAC_PROP_ADV_10FDX_CAP:
	case MAC_PROP_EN_10FDX_CAP:
	case MAC_PROP_ADV_10HDX_CAP:
	case MAC_PROP_EN_10HDX_CAP:
	case MAC_PROP_AUTONEG:
	case MAC_PROP_FLOWCTRL:
		return (B_TRUE);
	}
	return (B_FALSE);
}
/*
 * Callback functions for set/get of properties
 */
static int
bge_m_setprop(void *barg, const char *pr_name, mac_prop_id_t pr_num,
    uint_t pr_valsize, const void *pr_val)
{
	bge_t *bgep = barg;
	int err = 0;
	uint32_t cur_mtu, new_mtu;
	link_flowctrl_t fl;

	mutex_enter(bgep->genlock);
	if (bgep->param_loop_mode != BGE_LOOP_NONE &&
	    bge_param_locked(pr_num)) {
		/*
		 * All adv_* parameters are locked (read-only)
		 * while the device is in any sort of loopback mode.
		 */
		mutex_exit(bgep->genlock);
		return (EBUSY);
	}
	if ((bgep->chipid.flags & CHIP_FLAG_SERDES) &&
	    ((pr_num == MAC_PROP_EN_100FDX_CAP) ||
	    (pr_num == MAC_PROP_EN_100HDX_CAP) ||
	    (pr_num == MAC_PROP_EN_10FDX_CAP) ||
	    (pr_num == MAC_PROP_EN_10HDX_CAP))) {
		/*
		 * These properties are read/write on copper,
		 * read-only and 0 on serdes.
		 */
		mutex_exit(bgep->genlock);
		return (ENOTSUP);
	}
	if (DEVICE_5906_SERIES_CHIPSETS(bgep) &&
	    ((pr_num == MAC_PROP_EN_1000FDX_CAP) ||
	    (pr_num == MAC_PROP_EN_1000HDX_CAP))) {
		mutex_exit(bgep->genlock);
		return (ENOTSUP);
	}

	switch (pr_num) {
	case MAC_PROP_EN_1000FDX_CAP:
		bgep->param_en_1000fdx = *(uint8_t *)pr_val;
		bgep->param_adv_1000fdx = *(uint8_t *)pr_val;
		goto reprogram;
	case MAC_PROP_EN_1000HDX_CAP:
		bgep->param_en_1000hdx = *(uint8_t *)pr_val;
		bgep->param_adv_1000hdx = *(uint8_t *)pr_val;
		goto reprogram;
	case MAC_PROP_EN_100FDX_CAP:
		bgep->param_en_100fdx = *(uint8_t *)pr_val;
		bgep->param_adv_100fdx = *(uint8_t *)pr_val;
		goto reprogram;
	case MAC_PROP_EN_100HDX_CAP:
		bgep->param_en_100hdx = *(uint8_t *)pr_val;
		bgep->param_adv_100hdx = *(uint8_t *)pr_val;
		goto reprogram;
	case MAC_PROP_EN_10FDX_CAP:
		bgep->param_en_10fdx = *(uint8_t *)pr_val;
		bgep->param_adv_10fdx = *(uint8_t *)pr_val;
		goto reprogram;
	case MAC_PROP_EN_10HDX_CAP:
		bgep->param_en_10hdx = *(uint8_t *)pr_val;
		bgep->param_adv_10hdx = *(uint8_t *)pr_val;
reprogram:
		if (err == 0 && bge_reprogram(bgep) == IOC_INVAL)
			err = EINVAL;
		break;
	case MAC_PROP_ADV_1000FDX_CAP:
	case MAC_PROP_ADV_1000HDX_CAP:
	case MAC_PROP_ADV_100FDX_CAP:
	case MAC_PROP_ADV_100HDX_CAP:
	case MAC_PROP_ADV_10FDX_CAP:
	case MAC_PROP_ADV_10HDX_CAP:
	case MAC_PROP_STATUS:
	case MAC_PROP_SPEED:
	case MAC_PROP_DUPLEX:
	case MAC_PROP_MEDIA:
		err = ENOTSUP;	/* read-only prop. Can't set this */
		break;
	case MAC_PROP_AUTONEG:
		bgep->param_adv_autoneg = *(uint8_t *)pr_val;
		if (bge_reprogram(bgep) == IOC_INVAL)
			err = EINVAL;
		break;
	case MAC_PROP_MTU:
		cur_mtu = bgep->chipid.default_mtu;
		bcopy(pr_val, &new_mtu, sizeof (new_mtu));

		if (new_mtu == cur_mtu) {
			err = 0;
			break;
		}
		if (new_mtu < BGE_DEFAULT_MTU || new_mtu > BGE_MAXIMUM_MTU) {
			err = EINVAL;
			break;
		}
		if ((new_mtu > BGE_DEFAULT_MTU) &&
		    (bgep->chipid.flags & CHIP_FLAG_NO_JUMBO)) {
			err = EINVAL;
			break;
		}
		if (bgep->bge_mac_state == BGE_MAC_STARTED) {
			err = EBUSY;
			break;
		}
		bgep->chipid.default_mtu = new_mtu;
		if (bge_chip_id_init(bgep)) {
			err = EINVAL;
			break;
		}
		bgep->bge_dma_error = B_TRUE;
		bgep->manual_reset = B_TRUE;
		bge_chip_stop(bgep, B_TRUE);
		bge_wake_factotum(bgep);
		err = 0;
		break;
	case MAC_PROP_FLOWCTRL:
		bcopy(pr_val, &fl, sizeof (fl));
		switch (fl) {
		default:
			err = ENOTSUP;
			break;
		case LINK_FLOWCTRL_NONE:
			bgep->param_adv_pause = 0;
			bgep->param_adv_asym_pause = 0;

			bgep->param_link_rx_pause = B_FALSE;
			bgep->param_link_tx_pause = B_FALSE;
			break;
		case LINK_FLOWCTRL_RX:
			bgep->param_adv_pause = 1;
			bgep->param_adv_asym_pause = 1;

			bgep->param_link_rx_pause = B_TRUE;
			bgep->param_link_tx_pause = B_FALSE;
			break;
		case LINK_FLOWCTRL_TX:
			bgep->param_adv_pause = 0;
			bgep->param_adv_asym_pause = 1;

			bgep->param_link_rx_pause = B_FALSE;
			bgep->param_link_tx_pause = B_TRUE;
			break;
		case LINK_FLOWCTRL_BI:
			bgep->param_adv_pause = 1;
			bgep->param_adv_asym_pause = 0;

			bgep->param_link_rx_pause = B_TRUE;
			bgep->param_link_tx_pause = B_TRUE;
			break;
		}

		if (err == 0) {
			if (bge_reprogram(bgep) == IOC_INVAL)
				err = EINVAL;
		}

		break;
	case MAC_PROP_PRIVATE:
		err = bge_set_priv_prop(bgep, pr_name, pr_valsize, pr_val);
		break;
	default:
		err = ENOTSUP;
		break;
	}
	mutex_exit(bgep->genlock);
	return (err);
}

/* ARGSUSED */
static int
bge_m_getprop(void *barg, const char *pr_name, mac_prop_id_t pr_num,
    uint_t pr_valsize, void *pr_val)
{
	bge_t *bgep = barg;
	int err = 0;

	switch (pr_num) {
	case MAC_PROP_DUPLEX:
		ASSERT(pr_valsize >= sizeof (link_duplex_t));
		bcopy(&bgep->param_link_duplex, pr_val,
		    sizeof (link_duplex_t));
		break;
	case MAC_PROP_SPEED: {
		uint64_t speed = bgep->param_link_speed * 1000000ull;

		ASSERT(pr_valsize >= sizeof (speed));
		bcopy(&speed, pr_val, sizeof (speed));
		break;
	}
	case MAC_PROP_STATUS:
		ASSERT(pr_valsize >= sizeof (link_state_t));
		bcopy(&bgep->link_state, pr_val,
		    sizeof (link_state_t));
		break;
	case MAC_PROP_MEDIA: {
		mac_ether_media_t media;

		ASSERT(pr_valsize >= sizeof (mac_ether_media_t));
		media = bge_phys_media(bgep);
		bcopy(&media, pr_val, sizeof (mac_ether_media_t));
		break;
	}
	case MAC_PROP_AUTONEG:
		*(uint8_t *)pr_val = bgep->param_adv_autoneg;
		break;
	case MAC_PROP_FLOWCTRL: {
		link_flowctrl_t fl;

		ASSERT(pr_valsize >= sizeof (fl));

		if (bgep->param_link_rx_pause &&
		    !bgep->param_link_tx_pause)
			fl = LINK_FLOWCTRL_RX;

		if (!bgep->param_link_rx_pause &&
		    !bgep->param_link_tx_pause)
			fl = LINK_FLOWCTRL_NONE;

		if (!bgep->param_link_rx_pause &&
		    bgep->param_link_tx_pause)
			fl = LINK_FLOWCTRL_TX;

		if (bgep->param_link_rx_pause &&
		    bgep->param_link_tx_pause)
			fl = LINK_FLOWCTRL_BI;
		bcopy(&fl, pr_val, sizeof (fl));
		break;
	}
	case MAC_PROP_ADV_1000FDX_CAP:
		*(uint8_t *)pr_val = bgep->param_adv_1000fdx;
		break;
	case MAC_PROP_EN_1000FDX_CAP:
		*(uint8_t *)pr_val = bgep->param_en_1000fdx;
		break;
	case MAC_PROP_ADV_1000HDX_CAP:
		*(uint8_t *)pr_val = bgep->param_adv_1000hdx;
		break;
	case MAC_PROP_EN_1000HDX_CAP:
		*(uint8_t *)pr_val = bgep->param_en_1000hdx;
		break;
	case MAC_PROP_ADV_100FDX_CAP:
		*(uint8_t *)pr_val = bgep->param_adv_100fdx;
		break;
	case MAC_PROP_EN_100FDX_CAP:
		*(uint8_t *)pr_val = bgep->param_en_100fdx;
		break;
	case MAC_PROP_ADV_100HDX_CAP:
		*(uint8_t *)pr_val = bgep->param_adv_100hdx;
		break;
	case MAC_PROP_EN_100HDX_CAP:
		*(uint8_t *)pr_val = bgep->param_en_100hdx;
		break;
	case MAC_PROP_ADV_10FDX_CAP:
		*(uint8_t *)pr_val = bgep->param_adv_10fdx;
		break;
	case MAC_PROP_EN_10FDX_CAP:
		*(uint8_t *)pr_val = bgep->param_en_10fdx;
		break;
	case MAC_PROP_ADV_10HDX_CAP:
		*(uint8_t *)pr_val = bgep->param_adv_10hdx;
		break;
	case MAC_PROP_EN_10HDX_CAP:
		*(uint8_t *)pr_val = bgep->param_en_10hdx;
		break;
	case MAC_PROP_ADV_100T4_CAP:
	case MAC_PROP_EN_100T4_CAP:
		*(uint8_t *)pr_val = 0;
		break;
	case MAC_PROP_PRIVATE:
		err = bge_get_priv_prop(bgep, pr_name,
		    pr_valsize, pr_val);
		return (err);
	default:
		return (ENOTSUP);
	}
	return (0);
}

static void
bge_m_propinfo(void *barg, const char *pr_name, mac_prop_id_t pr_num,
    mac_prop_info_handle_t prh)
{
	bge_t *bgep = barg;
	int flags = bgep->chipid.flags;

	/*
	 * By default permissions are read/write unless specified
	 * otherwise by the driver.
	 */

	switch (pr_num) {
	case MAC_PROP_DUPLEX:
	case MAC_PROP_SPEED:
	case MAC_PROP_STATUS:
	case MAC_PROP_ADV_1000FDX_CAP:
	case MAC_PROP_ADV_1000HDX_CAP:
	case MAC_PROP_ADV_100FDX_CAP:
	case MAC_PROP_ADV_100HDX_CAP:
	case MAC_PROP_ADV_10FDX_CAP:
	case MAC_PROP_ADV_10HDX_CAP:
	case MAC_PROP_ADV_100T4_CAP:
	case MAC_PROP_EN_100T4_CAP:
		mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
		break;

	case MAC_PROP_EN_1000FDX_CAP:
	case MAC_PROP_EN_1000HDX_CAP:
		if (DEVICE_5906_SERIES_CHIPSETS(bgep))
			mac_prop_info_set_default_uint8(prh, 0);
		else
			mac_prop_info_set_default_uint8(prh, 1);
		break;

	case MAC_PROP_EN_100FDX_CAP:
	case MAC_PROP_EN_100HDX_CAP:
	case MAC_PROP_EN_10FDX_CAP:
	case MAC_PROP_EN_10HDX_CAP:
		mac_prop_info_set_default_uint8(prh,
		    (flags & CHIP_FLAG_SERDES) ? 0 : 1);
		break;

	case MAC_PROP_AUTONEG:
		mac_prop_info_set_default_uint8(prh, 1);
		break;

	case MAC_PROP_FLOWCTRL:
		mac_prop_info_set_default_link_flowctrl(prh,
		    LINK_FLOWCTRL_BI);
		break;

	case MAC_PROP_MTU:
		mac_prop_info_set_range_uint32(prh, BGE_DEFAULT_MTU,
		    (flags & CHIP_FLAG_NO_JUMBO) ?
		    BGE_DEFAULT_MTU : BGE_MAXIMUM_MTU);
		break;

	case MAC_PROP_PRIVATE:
		bge_priv_propinfo(pr_name, prh);
		break;
	}

	mutex_enter(bgep->genlock);
	if ((bgep->param_loop_mode != BGE_LOOP_NONE &&
	    bge_param_locked(pr_num)) ||
	    ((bgep->chipid.flags & CHIP_FLAG_SERDES) &&
	    ((pr_num == MAC_PROP_EN_100FDX_CAP) ||
	    (pr_num == MAC_PROP_EN_100HDX_CAP) ||
	    (pr_num == MAC_PROP_EN_10FDX_CAP) ||
	    (pr_num == MAC_PROP_EN_10HDX_CAP))) ||
	    (DEVICE_5906_SERIES_CHIPSETS(bgep) &&
	    ((pr_num == MAC_PROP_EN_1000FDX_CAP) ||
	    (pr_num == MAC_PROP_EN_1000HDX_CAP))))
		mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
	mutex_exit(bgep->genlock);
}

/* ARGSUSED */
static int
bge_set_priv_prop(bge_t *bgep, const char *pr_name, uint_t pr_valsize,
    const void *pr_val)
{
	int err = 0;
	long result;

	if (strcmp(pr_name, "_adv_pause_cap") == 0) {
		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
		if (result > 1 || result < 0) {
			err = EINVAL;
		} else {
			bgep->param_adv_pause = (uint32_t)result;
			if (bge_reprogram(bgep) == IOC_INVAL)
				err = EINVAL;
		}
		return (err);
	}
	if (strcmp(pr_name, "_adv_asym_pause_cap") == 0) {
		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
		if (result > 1 || result < 0) {
			err = EINVAL;
		} else {
			bgep->param_adv_asym_pause = (uint32_t)result;
			if (bge_reprogram(bgep) == IOC_INVAL)
				err = EINVAL;
		}
		return (err);
	}
	if (strcmp(pr_name, "_drain_max") == 0) {
		/*
		 * On the Tx side, the h/w transmit producer register must
		 * be updated for queued packets actually to be transmitted.
		 * The drain_max parameter exists to reduce that register
		 * traffic: it is the maximum number of packets we will hold
		 * before updating the bge h/w to trigger transmission.  The
		 * bge chipset usually has a maximum of 512 Tx descriptors,
		 * so the upper bound on drain_max is 512.
		 */
		if (pr_val == NULL) {
			err = EINVAL;
			return (err);
		}
		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
		if (result > 512 || result < 1)
			err = EINVAL;
		else {
			bgep->param_drain_max = (uint32_t)result;
			if (bge_reprogram(bgep) == IOC_INVAL)
				err = EINVAL;
		}
		return (err);
	}
	if (strcmp(pr_name, "_msi_cnt") == 0) {
		if (pr_val == NULL) {
			err = EINVAL;
			return (err);
		}
		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
		if (result > 7 || result < 0)
			err = EINVAL;
		else {
			bgep->param_msi_cnt = (uint32_t)result;
			if (bge_reprogram(bgep) == IOC_INVAL)
				err = EINVAL;
		}
		return (err);
	}
	if (strcmp(pr_name, "_rx_intr_coalesce_blank_time") == 0) {
		if (ddi_strtol(pr_val, (char **)NULL, 0, &result) != 0)
			return (EINVAL);
		if (result < 0)
			err = EINVAL;
		else {
			bgep->chipid.rx_ticks_norm = (uint32_t)result;
			bge_chip_coalesce_update(bgep);
		}
		return (err);
	}

	if (strcmp(pr_name, "_rx_intr_coalesce_pkt_cnt") == 0) {
		if (ddi_strtol(pr_val, (char **)NULL, 0, &result) != 0)
			return (EINVAL);

		if (result < 0)
			err = EINVAL;
		else {
			bgep->chipid.rx_count_norm = (uint32_t)result;
			bge_chip_coalesce_update(bgep);
		}
		return (err);
	}
	if (strcmp(pr_name, "_tx_intr_coalesce_blank_time") == 0) {
		if (ddi_strtol(pr_val, (char **)NULL, 0, &result) != 0)
			return (EINVAL);
		if (result < 0)
			err = EINVAL;
		else {
			bgep->chipid.tx_ticks_norm = (uint32_t)result;
			bge_chip_coalesce_update(bgep);
		}
		return (err);
	}

	if (strcmp(pr_name, "_tx_intr_coalesce_pkt_cnt") == 0) {
		if (ddi_strtol(pr_val, (char **)NULL, 0, &result) != 0)
			return (EINVAL);

		if (result < 0)
			err = EINVAL;
		else {
			bgep->chipid.tx_count_norm = (uint32_t)result;
			bge_chip_coalesce_update(bgep);
		}
		return (err);
	}
	return (ENOTSUP);
}

static int
bge_get_priv_prop(bge_t *bge, const char *pr_name, uint_t pr_valsize,
    void *pr_val)
{
	int value;

	if (strcmp(pr_name, "_adv_pause_cap") == 0)
		value = bge->param_adv_pause;
	else if (strcmp(pr_name, "_adv_asym_pause_cap") == 0)
		value = bge->param_adv_asym_pause;
	else if (strcmp(pr_name, "_drain_max") == 0)
		value = bge->param_drain_max;
	else if (strcmp(pr_name, "_msi_cnt") == 0)
		value = bge->param_msi_cnt;
	else if (strcmp(pr_name, "_rx_intr_coalesce_blank_time") == 0)
		value = bge->chipid.rx_ticks_norm;
	else if (strcmp(pr_name, "_tx_intr_coalesce_blank_time") == 0)
		value = bge->chipid.tx_ticks_norm;
	else if (strcmp(pr_name, "_rx_intr_coalesce_pkt_cnt") == 0)
		value = bge->chipid.rx_count_norm;
	else if (strcmp(pr_name, "_tx_intr_coalesce_pkt_cnt") == 0)
		value = bge->chipid.tx_count_norm;
	else
		return (ENOTSUP);

	(void) snprintf(pr_val, pr_valsize, "%d", value);
	return (0);
}

static void
bge_priv_propinfo(const char *pr_name, mac_prop_info_handle_t mph)
{
	char valstr[64];
	int value;

	if (strcmp(pr_name, "_adv_pause_cap") == 0)
		value = 1;
	else if (strcmp(pr_name, "_adv_asym_pause_cap") == 0)
		value = 1;
	else if (strcmp(pr_name, "_drain_max") == 0)
		value = 64;
	else if (strcmp(pr_name, "_msi_cnt") == 0)
		value = 0;
	else if (strcmp(pr_name, "_rx_intr_coalesce_blank_time") == 0)
		value = bge_rx_ticks_norm;
	else if (strcmp(pr_name, "_tx_intr_coalesce_blank_time") == 0)
		value = bge_tx_ticks_norm;
	else if (strcmp(pr_name, "_rx_intr_coalesce_pkt_cnt") == 0)
		value = bge_rx_count_norm;
	else if (strcmp(pr_name, "_tx_intr_coalesce_pkt_cnt") == 0)
		value = bge_tx_count_norm;
	else
		return;

	(void) snprintf(valstr, sizeof (valstr), "%d", value);
	mac_prop_info_set_default_str(mph, valstr);
}

/*
 * Compute the index of the required bit in the multicast hash map.
 * This must mirror the way the hardware actually does it!
 * See Broadcom document 570X-PG102-R page 125.
 */
static uint32_t
bge_hash_index(const uint8_t *mca)
{
	uint32_t hash;

	CRC32(hash, mca, ETHERADDRL, -1U, crc32_table);

	return (hash);
}
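
/*
 * Worked example (illustrative arithmetic only, assuming a 128-entry
 * hash table, i.e. BGE_HASH_TABLE_SIZE == 128): a CRC of 0x2d950 yields
 * index 0x2d950 % 128 = 80, which the caller below splits into word
 * 80/32 = 2 and bit 1 << (80 % 32) = 1 << 16 of the hash map.
 */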

/*
 *	bge_m_multicst() -- enable/disable a multicast address
 */
static int
bge_m_multicst(void *arg, boolean_t add, const uint8_t *mca)
{
	bge_t *bgep = arg;		/* private device info	*/
	uint32_t hash;
	uint32_t index;
	uint32_t word;
	uint32_t bit;
	uint8_t *refp;

	BGE_TRACE(("bge_m_multicst($%p, %s, %s)", arg,
	    (add) ? "add" : "remove", ether_sprintf((void *)mca)));

	/*
	 * Precalculate all required masks, pointers etc ...
	 */
	hash = bge_hash_index(mca);
	index = hash % BGE_HASH_TABLE_SIZE;
	word = index/32u;
	bit = 1 << (index % 32u);
	refp = &bgep->mcast_refs[index];

	BGE_DEBUG(("bge_m_multicst: hash 0x%x index %d (%d:0x%x) = %d",
	    hash, index, word, bit, *refp));

	/*
	 * We must set the appropriate bit in the hash map (and the
	 * corresponding h/w register) when the refcount goes from 0
	 * to >0, and clear it when the last ref goes away (refcount
	 * goes from >0 back to 0). If we change the hash map, we
	 * must also update the chip's hardware map registers.
	 */
	mutex_enter(bgep->genlock);
	if (!(bgep->progress & PROGRESS_INTR)) {
		/* can happen during autorecovery */
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
	if (add) {
		if ((*refp)++ == 0) {
			bgep->mcast_hash[word] |= bit;
#ifdef BGE_IPMI_ASF
			if (bge_chip_sync(bgep, B_TRUE) == DDI_FAILURE) {
#else
			if (bge_chip_sync(bgep) == DDI_FAILURE) {
#endif
				(void) bge_check_acc_handle(bgep,
				    bgep->cfg_handle);
				(void) bge_check_acc_handle(bgep,
				    bgep->io_handle);
				ddi_fm_service_impact(bgep->devinfo,
				    DDI_SERVICE_DEGRADED);
				mutex_exit(bgep->genlock);
				return (EIO);
			}
		}
	} else {
		if (--(*refp) == 0) {
			bgep->mcast_hash[word] &= ~bit;
#ifdef BGE_IPMI_ASF
			if (bge_chip_sync(bgep, B_TRUE) == DDI_FAILURE) {
#else
			if (bge_chip_sync(bgep) == DDI_FAILURE) {
#endif
				(void) bge_check_acc_handle(bgep,
				    bgep->cfg_handle);
				(void) bge_check_acc_handle(bgep,
				    bgep->io_handle);
				ddi_fm_service_impact(bgep->devinfo,
				    DDI_SERVICE_DEGRADED);
				mutex_exit(bgep->genlock);
				return (EIO);
			}
		}
	}
	BGE_DEBUG(("bge_m_multicst($%p) done", arg));
	if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
	if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
	mutex_exit(bgep->genlock);

	return (0);
}

/*
 * bge_m_promisc() -- set or reset promiscuous mode on the board
 *
 *	Program the hardware to enable/disable promiscuous and/or
 *	receive-all-multicast modes.
 */
static int
bge_m_promisc(void *arg, boolean_t on)
{
	bge_t *bgep = arg;

	BGE_TRACE(("bge_m_promisc_set($%p, %d)", arg, on));

	/*
	 * Store MAC layer specified mode and pass to chip layer to update h/w
	 */
	mutex_enter(bgep->genlock);
	if (!(bgep->progress & PROGRESS_INTR)) {
		/* can happen during autorecovery */
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
	bgep->promisc = on;
#ifdef BGE_IPMI_ASF
	if (bge_chip_sync(bgep, B_TRUE) == DDI_FAILURE) {
#else
	if (bge_chip_sync(bgep) == DDI_FAILURE) {
#endif
		(void) bge_check_acc_handle(bgep, bgep->cfg_handle);
		(void) bge_check_acc_handle(bgep, bgep->io_handle);
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
	BGE_DEBUG(("bge_m_promisc_set($%p) done", arg));
	if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
	if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
	mutex_exit(bgep->genlock);
	return (0);
}

/*
 * Find the slot for the specified unicast address
 */
int
bge_unicst_find(bge_t *bgep, const uint8_t *mac_addr)
{
	int slot;

	ASSERT(mutex_owned(bgep->genlock));

	for (slot = 0; slot < bgep->unicst_addr_total; slot++) {
		if (bcmp(bgep->curr_addr[slot].addr, mac_addr,
		    ETHERADDRL) == 0)
			return (slot);
	}

	return (-1);
}

/*
 * The job of bge_addmac() is to set up everything in hardware for the mac
 * address indicated to map to the specified group.
 *
 * For this to make sense, we need to first understand how most of the bge
 * chips work. A given packet reaches a ring in two distinct logical steps:
 *
 *  1) The device must accept the packet.
 *  2) The device must steer an accepted packet to a specific ring.
 *
 * For step 1, the device has four global MAC address filtering registers. We
 * must either add the address here or put the device in promiscuous mode.
 * Because there are only four of these and up to four groups, each group is
 * only allowed to program a single entry. Note, this is not explicitly done
 * in the driver. Rather, it is implicitly done by how we implement step 2.
 * These registers start at 0x410 and are referred to as the 'EMAC MAC
 * Addresses' in the manuals.
 *
 * For step 2, the device has eight sets of rule registers that are used to
 * control how a packet in step 1 is mapped to a specific ring. Each set is
 * comprised of a control register and a mask register. These start at 0x480
 * and are referred to as the 'Receive Rules Control Registers' and 'Receive
 * Rules Value/Mask Registers'. These can be used to check for a 16-bit or
 * 32-bit value at an offset in the packet. In addition, two sets can be
 * combined to create a single conditional rule.
 *
 * For our purposes, we need to use this mechanism to steer a mac address to
 * a specific ring. This requires that we use two of the sets of registers
 * per MAC address that comes in here. The data about this is stored in the
 * 'mac_addr_rule' member of the 'recv_ring_t'.
 *
 * A reasonable question to ask is why we are storing this on the ring, when
 * it relates to the group. The answer is that the current implementation of
 * the driver assumes that each group is comprised of a single ring. While
 * some parts may support additional rings, the driver doesn't take advantage
 * of that.
 *
 * A result of all this is that the driver will support up to 4 groups today.
 * Each group has a single ring. We want to make sure that each group can
 * have a single MAC address programmed into it. This results in the check
 * for a rule being assigned in the 'mac_addr_rule' member of the recv_ring_t
 * below. If a future part were to support more global MAC address filters in
 * step 1 and more rule registers for step 2, then we could relax this
 * constraint and allow a group to have more than one MAC address assigned
 * to it.
 */
static int
bge_addmac(void *arg, const uint8_t *mac_addr)
{
	recv_ring_t *rrp = (recv_ring_t *)arg;
	bge_t *bgep = rrp->bgep;
	bge_recv_rule_t *rulep = bgep->recv_rules;
	bge_rule_info_t *rinfop = NULL;
	uint8_t ring = (uint8_t)(rrp - bgep->recv) + 1;
	int i;
	uint16_t tmp16;
	uint32_t tmp32;
	int slot;
	int err;

	mutex_enter(bgep->genlock);
	if (bgep->unicst_addr_avail == 0) {
		mutex_exit(bgep->genlock);
		return (ENOSPC);
	}

	/*
	 * The driver only supports a MAC address being programmed to be
	 * received by one ring in step 2. We check the global table of MAC
	 * addresses to see if this address has already been claimed by
	 * another group as a way to determine that.
	 */
	slot = bge_unicst_find(bgep, mac_addr);
	if (slot != -1) {
		mutex_exit(bgep->genlock);
		return (EEXIST);
	}

	/*
	 * Check to see if this group has already used its hardware resources
	 * for step 2. If so, we have to return ENOSPC to MAC to indicate
	 * that this group cannot handle an additional MAC address and that
	 * MAC will need to use software classification on the default group.
	 */
	if (rrp->mac_addr_rule != NULL) {
		mutex_exit(bgep->genlock);
		return (ENOSPC);
	}

	for (slot = 0; slot < bgep->unicst_addr_total; slot++) {
		if (!bgep->curr_addr[slot].set) {
			bgep->curr_addr[slot].set = B_TRUE;
			break;
		}
	}

	VERIFY3S(slot, <, bgep->unicst_addr_total);
	bgep->unicst_addr_avail--;
	mutex_exit(bgep->genlock);

	if ((err = bge_unicst_set(bgep, mac_addr, slot)) != 0)
		goto fail;

	/*
	 * Allocate a bge_rule_info_t to keep track of which rule slots
	 * are being used.
	 */
	rinfop = kmem_zalloc(sizeof (bge_rule_info_t), KM_NOSLEEP);
	if (rinfop == NULL) {
		err = ENOMEM;
		goto fail;
	}

	/*
	 * Look for the starting slot to place the rules.
	 * The two slots we reserve must be contiguous.
	 */
	for (i = 0; i + 1 < RECV_RULES_NUM_MAX; i++)
		if ((rulep[i].control & RECV_RULE_CTL_ENABLE) == 0 &&
		    (rulep[i+1].control & RECV_RULE_CTL_ENABLE) == 0)
			break;

	ASSERT(i + 1 < RECV_RULES_NUM_MAX);
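
	/*
	 * Program the rule pair: the first rule matches bytes 0-3 of the
	 * destination MAC address and is ANDed with the second, which
	 * matches bytes 4-5, so a frame is steered to <ring> only when
	 * the entire address matches.
	 */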

	bcopy(mac_addr, &tmp32, sizeof (tmp32));
	rulep[i].mask_value = ntohl(tmp32);
	rulep[i].control = RULE_DEST_MAC_1(ring) | RECV_RULE_CTL_AND;
	bge_reg_put32(bgep, RECV_RULE_MASK_REG(i), rulep[i].mask_value);
	bge_reg_put32(bgep, RECV_RULE_CONTROL_REG(i), rulep[i].control);

	bcopy(mac_addr + 4, &tmp16, sizeof (tmp16));
	rulep[i+1].mask_value = 0xffff0000 | ntohs(tmp16);
	rulep[i+1].control = RULE_DEST_MAC_2(ring);
	bge_reg_put32(bgep, RECV_RULE_MASK_REG(i+1), rulep[i+1].mask_value);
	bge_reg_put32(bgep, RECV_RULE_CONTROL_REG(i+1), rulep[i+1].control);
	rinfop->start = i;
	rinfop->count = 2;

	rrp->mac_addr_rule = rinfop;
	bcopy(mac_addr, rrp->mac_addr_val, ETHERADDRL);

	return (0);

fail:
	/* Clear the address just set */
	(void) bge_unicst_set(bgep, zero_addr, slot);
	mutex_enter(bgep->genlock);
	bgep->curr_addr[slot].set = B_FALSE;
	bgep->unicst_addr_avail++;
	mutex_exit(bgep->genlock);

	return (err);
}

/*
 * Stop classifying packets matching the MAC address to the specified ring.
 */
static int
bge_remmac(void *arg, const uint8_t *mac_addr)
{
	recv_ring_t *rrp = (recv_ring_t *)arg;
	bge_t *bgep = rrp->bgep;
	bge_recv_rule_t *rulep = bgep->recv_rules;
	bge_rule_info_t *rinfop = rrp->mac_addr_rule;
	int start;
	int slot;
	int err;

	/*
	 * Remove the MAC address from its slot.
	 */
	mutex_enter(bgep->genlock);
	slot = bge_unicst_find(bgep, mac_addr);
	if (slot == -1) {
		mutex_exit(bgep->genlock);
		return (EINVAL);
	}

	ASSERT(bgep->curr_addr[slot].set);
	mutex_exit(bgep->genlock);

	if ((err = bge_unicst_set(bgep, zero_addr, slot)) != 0)
		return (err);

	if (rinfop == NULL || ether_cmp(mac_addr, rrp->mac_addr_val) != 0)
		return (EINVAL);

	start = rinfop->start;
	rulep[start].mask_value = 0;
	rulep[start].control = 0;
	bge_reg_put32(bgep, RECV_RULE_MASK_REG(start),
	    rulep[start].mask_value);
	bge_reg_put32(bgep, RECV_RULE_CONTROL_REG(start),
	    rulep[start].control);
	start++;
	rulep[start].mask_value = 0;
	rulep[start].control = 0;
	bge_reg_put32(bgep, RECV_RULE_MASK_REG(start),
	    rulep[start].mask_value);
	bge_reg_put32(bgep, RECV_RULE_CONTROL_REG(start),
	    rulep[start].control);

	kmem_free(rinfop, sizeof (bge_rule_info_t));
	rrp->mac_addr_rule = NULL;
	bzero(rrp->mac_addr_val, ETHERADDRL);

	mutex_enter(bgep->genlock);
	bgep->curr_addr[slot].set = B_FALSE;
	bgep->unicst_addr_avail++;
	mutex_exit(bgep->genlock);

	return (0);
}

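/*
 * Polling support: the MAC layer calls the mi_enable/mi_disable entry
 * points below to switch a receive ring between interrupt and polling
 * mode; poll_flag simply records which mode the ring is in, for the
 * receive path to consult.
 */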
static int
bge_flag_intr_enable(mac_intr_handle_t ih)
{
	recv_ring_t *rrp = (recv_ring_t *)ih;
	bge_t *bgep = rrp->bgep;

	mutex_enter(bgep->genlock);
	rrp->poll_flag = 0;
	mutex_exit(bgep->genlock);

	return (0);
}

static int
bge_flag_intr_disable(mac_intr_handle_t ih)
{
	recv_ring_t *rrp = (recv_ring_t *)ih;
	bge_t *bgep = rrp->bgep;

	mutex_enter(bgep->genlock);
	rrp->poll_flag = 1;
	mutex_exit(bgep->genlock);

	return (0);
}

static int
bge_ring_start(mac_ring_driver_t rh, uint64_t mr_gen_num)
{
	recv_ring_t *rx_ring;

	rx_ring = (recv_ring_t *)rh;
	mutex_enter(rx_ring->rx_lock);
	rx_ring->ring_gen_num = mr_gen_num;
	mutex_exit(rx_ring->rx_lock);
	return (0);
}

/*
 * Callback function for the MAC layer to register all rings
 * for a given ring group, identified by rg_index.
 */
void
bge_fill_ring(void *arg, mac_ring_type_t rtype, const int rg_index,
    const int index, mac_ring_info_t *infop, mac_ring_handle_t rh)
{
	bge_t *bgep = arg;
	mac_intr_t *mintr;

	switch (rtype) {
	case MAC_RING_TYPE_RX: {
		recv_ring_t *rx_ring;
		ASSERT(rg_index >= 0 && rg_index < MIN(bgep->chipid.rx_rings,
		    MAC_ADDRESS_REGS_MAX) && index == 0);

		rx_ring = &bgep->recv[rg_index];
		rx_ring->ring_handle = rh;

		infop->mri_driver = (mac_ring_driver_t)rx_ring;
		infop->mri_start = bge_ring_start;
		infop->mri_stop = NULL;
		infop->mri_poll = bge_poll_ring;
		infop->mri_stat = bge_rx_ring_stat;

		mintr = &infop->mri_intr;
		mintr->mi_handle = (mac_intr_handle_t)rx_ring;
		mintr->mi_enable = bge_flag_intr_enable;
		mintr->mi_disable = bge_flag_intr_disable;

		break;
	}
	case MAC_RING_TYPE_TX:
	default:
		ASSERT(0);
		break;
	}
}

/*
 * Fill in the respective ring-group info in the infop passed as argument.
 * Each group has a single ring in it, so we keep it simple and use the
 * same internal handle for rings and groups.
 */
void
bge_fill_group(void *arg, mac_ring_type_t rtype, const int rg_index,
    mac_group_info_t *infop, mac_group_handle_t gh)
{
	bge_t *bgep = arg;

	switch (rtype) {
	case MAC_RING_TYPE_RX: {
		recv_ring_t *rx_ring;

		ASSERT(rg_index >= 0 && rg_index < MIN(bgep->chipid.rx_rings,
		    MAC_ADDRESS_REGS_MAX));
		rx_ring = &bgep->recv[rg_index];
		rx_ring->ring_group_handle = gh;

		infop->mgi_driver = (mac_group_driver_t)rx_ring;
		infop->mgi_start = NULL;
		infop->mgi_stop = NULL;
		infop->mgi_addmac = bge_addmac;
		infop->mgi_remmac = bge_remmac;
		infop->mgi_count = 1;
		break;
	}
	case MAC_RING_TYPE_TX:
	default:
		ASSERT(0);
		break;
	}
}

/*ARGSUSED*/
static boolean_t
bge_m_getcapab(void *arg, mac_capab_t cap, void *cap_data)
{
	bge_t *bgep = arg;
	mac_capab_rings_t *cap_rings;

	switch (cap) {
	case MAC_CAPAB_HCKSUM: {
		uint32_t *txflags = cap_data;

		*txflags = HCKSUM_INET_FULL_V4 | HCKSUM_IPHDRCKSUM;
		break;
	}

	case MAC_CAPAB_RINGS:
		cap_rings = (mac_capab_rings_t *)cap_data;

		/* Temporarily disable multiple tx rings. */
		if (cap_rings->mr_type != MAC_RING_TYPE_RX)
			return (B_FALSE);

		cap_rings->mr_group_type = MAC_GROUP_TYPE_STATIC;
		cap_rings->mr_rnum = cap_rings->mr_gnum =
		    MIN(bgep->chipid.rx_rings, MAC_ADDRESS_REGS_MAX);
		cap_rings->mr_rget = bge_fill_ring;
		cap_rings->mr_gget = bge_fill_group;
		break;

	default:
		return (B_FALSE);
	}
	return (B_TRUE);
}

#ifdef NOT_SUPPORTED_XXX

/*
 * Loopback ioctl code
 */

static lb_property_t loopmodes[] = {
	{ normal,	"normal",	BGE_LOOP_NONE		},
	{ external,	"1000Mbps",	BGE_LOOP_EXTERNAL_1000	},
	{ external,	"100Mbps",	BGE_LOOP_EXTERNAL_100	},
	{ external,	"10Mbps",	BGE_LOOP_EXTERNAL_10	},
	{ internal,	"PHY",		BGE_LOOP_INTERNAL_PHY	},
	{ internal,	"MAC",		BGE_LOOP_INTERNAL_MAC	}
};

static enum ioc_reply
bge_set_loop_mode(bge_t *bgep, uint32_t mode)
{
	/*
	 * If the mode isn't being changed, there's nothing to do ...
	 */
	if (mode == bgep->param_loop_mode)
		return (IOC_ACK);

	/*
	 * Validate the requested mode and prepare a suitable message
	 * to explain the link down/up cycle that the change will
	 * probably induce ...
	 */
	switch (mode) {
	default:
		return (IOC_INVAL);

	case BGE_LOOP_NONE:
	case BGE_LOOP_EXTERNAL_1000:
	case BGE_LOOP_EXTERNAL_100:
	case BGE_LOOP_EXTERNAL_10:
	case BGE_LOOP_INTERNAL_PHY:
	case BGE_LOOP_INTERNAL_MAC:
		break;
	}

	/*
	 * All OK; tell the caller to reprogram
	 * the PHY and/or MAC for the new mode ...
	 */
	bgep->param_loop_mode = mode;
	return (IOC_RESTART_ACK);
}

static enum ioc_reply
bge_loop_ioctl(bge_t *bgep, queue_t *wq, mblk_t *mp, struct iocblk *iocp)
{
	lb_info_sz_t *lbsp;
	lb_property_t *lbpp;
	uint32_t *lbmp;
	int cmd;

	_NOTE(ARGUNUSED(wq))

	/*
	 * Validate format of ioctl
	 */
	if (mp->b_cont == NULL)
		return (IOC_INVAL);

	cmd = iocp->ioc_cmd;
	switch (cmd) {
	default:
		/* NOTREACHED */
		bge_error(bgep, "bge_loop_ioctl: invalid cmd 0x%x", cmd);
		return (IOC_INVAL);

	case LB_GET_INFO_SIZE:
		if (iocp->ioc_count != sizeof (lb_info_sz_t))
			return (IOC_INVAL);
		lbsp = (void *)mp->b_cont->b_rptr;
		*lbsp = sizeof (loopmodes);
		return (IOC_REPLY);

	case LB_GET_INFO:
		if (iocp->ioc_count != sizeof (loopmodes))
			return (IOC_INVAL);
		lbpp = (void *)mp->b_cont->b_rptr;
		bcopy(loopmodes, lbpp, sizeof (loopmodes));
		return (IOC_REPLY);

	case LB_GET_MODE:
		if (iocp->ioc_count != sizeof (uint32_t))
			return (IOC_INVAL);
		lbmp = (void *)mp->b_cont->b_rptr;
		*lbmp = bgep->param_loop_mode;
		return (IOC_REPLY);

	case LB_SET_MODE:
		if (iocp->ioc_count != sizeof (uint32_t))
			return (IOC_INVAL);
		lbmp = (void *)mp->b_cont->b_rptr;
		return (bge_set_loop_mode(bgep, *lbmp));
	}
}

#endif /* NOT_SUPPORTED_XXX */
1913
1914 /*
1915 * Specific bge IOCTLs, the gld module handles the generic ones.
1916 */
1917 static void
1918 bge_m_ioctl(void *arg, queue_t *wq, mblk_t *mp)
1919 {
1920 bge_t *bgep = arg;
1921 struct iocblk *iocp;
1922 enum ioc_reply status;
1923 boolean_t need_privilege;
1924 int err;
1925 int cmd;
1926
1927 /*
1928 * Validate the command before bothering with the mutex ...
1929 */
1930 iocp = (void *)mp->b_rptr;
1931 iocp->ioc_error = 0;
1932 need_privilege = B_TRUE;
1933 cmd = iocp->ioc_cmd;
1934 switch (cmd) {
1935 default:
1936 miocnak(wq, mp, 0, EINVAL);
1937 return;
1938
1939 case BGE_MII_READ:
1940 case BGE_MII_WRITE:
1941 case BGE_SEE_READ:
1942 case BGE_SEE_WRITE:
1943 case BGE_FLASH_READ:
1944 case BGE_FLASH_WRITE:
1945 case BGE_DIAG:
1946 case BGE_PEEK:
1947 case BGE_POKE:
1948 case BGE_PHY_RESET:
1949 case BGE_SOFT_RESET:
1950 case BGE_HARD_RESET:
1951 break;
1952
1953 #ifdef NOT_SUPPORTED_XXX
1954 case LB_GET_INFO_SIZE:
1955 case LB_GET_INFO:
1956 case LB_GET_MODE:
1957 need_privilege = B_FALSE;
1958 /* FALLTHRU */
1959 case LB_SET_MODE:
1960 break;
1961 #endif
1962
1963 }
1964
1965 if (need_privilege) {
1966 /*
1967 * Check for specific net_config privilege on Solaris 10+.
1968 */
1969 err = secpolicy_net_config(iocp->ioc_cr, B_FALSE);
1970 if (err != 0) {
1971 miocnak(wq, mp, 0, err);
1972 return;
1973 }
1974 }
1975
1976 mutex_enter(bgep->genlock);
1977 if (!(bgep->progress & PROGRESS_INTR)) {
1978 /* can happen during autorecovery */
1979 mutex_exit(bgep->genlock);
1980 miocnak(wq, mp, 0, EIO);
1981 return;
1982 }
1983
1984 switch (cmd) {
1985 default:
1986 _NOTE(NOTREACHED)
1987 status = IOC_INVAL;
1988 break;
1989
1990 case BGE_MII_READ:
1991 case BGE_MII_WRITE:
1992 case BGE_SEE_READ:
1993 case BGE_SEE_WRITE:
1994 case BGE_FLASH_READ:
1995 case BGE_FLASH_WRITE:
1996 case BGE_DIAG:
1997 case BGE_PEEK:
1998 case BGE_POKE:
1999 case BGE_PHY_RESET:
2000 case BGE_SOFT_RESET:
2001 case BGE_HARD_RESET:
2002 status = bge_chip_ioctl(bgep, wq, mp, iocp);
2003 break;
2004
2005 #ifdef NOT_SUPPORTED_XXX
2006 case LB_GET_INFO_SIZE:
2007 case LB_GET_INFO:
2008 case LB_GET_MODE:
2009 case LB_SET_MODE:
2010 status = bge_loop_ioctl(bgep, wq, mp, iocp);
2011 break;
2012 #endif
2013
2014 }
2015
2016 /*
2017 * Do we need to reprogram the PHY and/or the MAC?
2018 * Do it now, while we still have the mutex.
2019 *
2020 * Note: update the PHY first, 'cos it controls the
2021 * speed/duplex parameters that the MAC code uses.
2022 */
2023 switch (status) {
2024 case IOC_RESTART_REPLY:
2025 case IOC_RESTART_ACK:
2026 if (bge_reprogram(bgep) == IOC_INVAL)
2027 status = IOC_INVAL;
2028 break;
2029 }
2030
2031 if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) {
2032 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
2033 status = IOC_INVAL;
2034 }
2035 if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) {
2036 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
2037 status = IOC_INVAL;
2038 }
2039 mutex_exit(bgep->genlock);
2040
2041 /*
2042 * Finally, decide how to reply
2043 */
2044 switch (status) {
2045 default:
2046 case IOC_INVAL:
2047 /*
2048 * Error, reply with a NAK and EINVAL or the specified error
2049 */
2050 miocnak(wq, mp, 0, iocp->ioc_error == 0 ?
2051 EINVAL : iocp->ioc_error);
2052 break;
2053
2054 case IOC_DONE:
2055 /*
2056 * OK, reply already sent
2057 */
2058 break;
2059
2060 case IOC_RESTART_ACK:
2061 case IOC_ACK:
2062 /*
2063 * OK, reply with an ACK
2064 */
2065 miocack(wq, mp, 0, 0);
2066 break;
2067
2068 case IOC_RESTART_REPLY:
2069 case IOC_REPLY:
2070 /*
2071 * OK, send prepared reply as ACK or NAK
2072 */
2073 mp->b_datap->db_type = iocp->ioc_error == 0 ?
2074 M_IOCACK : M_IOCNAK;
2075 qreply(wq, mp);
2076 break;
2077 }
2078 }
2079
2080 /*
2081 * ========== Per-instance setup/teardown code ==========
2082 */
2083
2084 #undef BGE_DBG
2085 #define BGE_DBG BGE_DBG_MEM /* debug flag for this code */
2086 /*
2087 * Allocate an area of memory and a DMA handle for accessing it
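 *
 * This is the standard three-step DDI DMA setup: allocate a handle
 * with ddi_dma_alloc_handle(9F), allocate memory for it with
 * ddi_dma_mem_alloc(9F), then bind the two together with
 * ddi_dma_addr_bind_handle(9F). The binding must produce exactly one
 * cookie, because the chip is programmed with a single base address
 * for each ring or buffer area.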
2088 */
2089 static int
2090 bge_alloc_dma_mem(bge_t *bgep, size_t memsize, ddi_device_acc_attr_t *attr_p,
2091 uint_t dma_flags, dma_area_t *dma_p)
2092 {
2093 caddr_t va;
2094 int err;
2095
2096 BGE_TRACE(("bge_alloc_dma_mem($%p, %ld, $%p, 0x%x, $%p)",
2097 (void *)bgep, memsize, attr_p, dma_flags, dma_p));
2098
2099 /*
2100 * Allocate handle
2101 */
2102 err = ddi_dma_alloc_handle(bgep->devinfo, &dma_attr,
2103 DDI_DMA_DONTWAIT, NULL, &dma_p->dma_hdl);
2104 if (err != DDI_SUCCESS)
2105 return (DDI_FAILURE);
2106
2107 /*
2108 * Allocate memory
2109 */
2110 err = ddi_dma_mem_alloc(dma_p->dma_hdl, memsize, attr_p,
2111 dma_flags, DDI_DMA_DONTWAIT, NULL, &va, &dma_p->alength,
2112 &dma_p->acc_hdl);
2113 if (err != DDI_SUCCESS)
2114 return (DDI_FAILURE);
2115
2116 /*
2117 * Bind the two together
2118 */
2119 dma_p->mem_va = va;
2120 err = ddi_dma_addr_bind_handle(dma_p->dma_hdl, NULL,
2121 va, dma_p->alength, dma_flags, DDI_DMA_DONTWAIT, NULL,
2122 &dma_p->cookie, &dma_p->ncookies);
2123
2124 BGE_DEBUG(("bge_alloc_dma_mem(): bind %d bytes; err %d, %d cookies",
2125 dma_p->alength, err, dma_p->ncookies));
2126
2127 if (err != DDI_DMA_MAPPED || dma_p->ncookies != 1)
2128 return (DDI_FAILURE);
2129
2130 dma_p->nslots = ~0U;
2131 dma_p->size = ~0U;
2132 dma_p->token = ~0U;
2133 dma_p->offset = 0;
2134 return (DDI_SUCCESS);
2135 }
2136
2137 /*
2138 * Free one allocated area of DMAable memory
2139 */
2140 static void
2141 bge_free_dma_mem(dma_area_t *dma_p)
2142 {
2143 if (dma_p->dma_hdl != NULL) {
2144 if (dma_p->ncookies) {
2145 (void) ddi_dma_unbind_handle(dma_p->dma_hdl);
2146 dma_p->ncookies = 0;
2147 }
2148 ddi_dma_free_handle(&dma_p->dma_hdl);
2149 dma_p->dma_hdl = NULL;
2150 }
2151
2152 if (dma_p->acc_hdl != NULL) {
2153 ddi_dma_mem_free(&dma_p->acc_hdl);
2154 dma_p->acc_hdl = NULL;
2155 }
2156 }
2157 /*
2158 * Utility routine to carve a slice off a chunk of allocated memory,
2159 * updating the chunk descriptor accordingly. The size of the slice
2160 * is given by the product of the <qty> and <size> parameters.
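 *
 * For example, carving a slice of 4 slots of 16 bytes each advances
 * the chunk's virtual address, DMA cookie address, and offset by 64
 * bytes and shrinks its remaining length by the same amount.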
2161 */
2162 static void
2163 bge_slice_chunk(dma_area_t *slice, dma_area_t *chunk,
2164 uint32_t qty, uint32_t size)
2165 {
2166 static uint32_t sequence = 0xbcd5704a;
2167 size_t totsize;
2168
2169 totsize = qty*size;
2170 ASSERT(totsize <= chunk->alength);
2171
2172 *slice = *chunk;
2173 slice->nslots = qty;
2174 slice->size = size;
2175 slice->alength = totsize;
2176 slice->token = ++sequence;
2177
2178 chunk->mem_va = (caddr_t)chunk->mem_va + totsize;
2179 chunk->alength -= totsize;
2180 chunk->offset += totsize;
2181 chunk->cookie.dmac_laddress += totsize;
2182 chunk->cookie.dmac_size -= totsize;
2183 }
2184
2185 /*
2186 * Initialise the specified Receive Producer (Buffer) Ring, using
2187 * the information in the <dma_area> descriptors that it contains
2188 * to set up all the other fields. This routine should be called
2189 * only once for each ring.
2190 */
2191 static void
2192 bge_init_buff_ring(bge_t *bgep, uint64_t ring)
2193 {
2194 buff_ring_t *brp;
2195 bge_status_t *bsp;
2196 sw_rbd_t *srbdp;
2197 dma_area_t pbuf;
2198 uint32_t bufsize;
2199 uint32_t nslots;
2200 uint32_t slot;
2201 uint32_t split;
2202
2203 static bge_regno_t nic_ring_addrs[BGE_BUFF_RINGS_MAX] = {
2204 NIC_MEM_SHADOW_BUFF_STD,
2205 NIC_MEM_SHADOW_BUFF_JUMBO,
2206 NIC_MEM_SHADOW_BUFF_MINI
2207 };
2208 static bge_regno_t mailbox_regs[BGE_BUFF_RINGS_MAX] = {
2209 RECV_STD_PROD_INDEX_REG,
2210 RECV_JUMBO_PROD_INDEX_REG,
2211 RECV_MINI_PROD_INDEX_REG
2212 };
2213 static bge_regno_t buff_cons_xref[BGE_BUFF_RINGS_MAX] = {
2214 STATUS_STD_BUFF_CONS_INDEX,
2215 STATUS_JUMBO_BUFF_CONS_INDEX,
2216 STATUS_MINI_BUFF_CONS_INDEX
2217 };
2218
2219 BGE_TRACE(("bge_init_buff_ring($%p, %d)",
2220 (void *)bgep, ring));
2221
2222 brp = &bgep->buff[ring];
2223 nslots = brp->desc.nslots;
2224 ASSERT(brp->buf[0].nslots == nslots/BGE_SPLIT);
2225 bufsize = brp->buf[0].size;
2226
2227 /*
2228 * Set up the copy of the h/w RCB
2229 *
2230 * Note: unlike Send & Receive Return Rings, (where the max_len
2231 * field holds the number of slots), in a Receive Buffer Ring
2232 * this field indicates the size of each buffer in the ring.
2233 */
2234 brp->hw_rcb.host_ring_addr = brp->desc.cookie.dmac_laddress;
2235 brp->hw_rcb.max_len = (uint16_t)bufsize;
2236 brp->hw_rcb.flags = nslots > 0 ? 0 : RCB_FLAG_RING_DISABLED;
2237 brp->hw_rcb.nic_ring_addr = nic_ring_addrs[ring];
2238
2239 /*
2240 * Other one-off initialisation of per-ring data
2241 */
2242 brp->bgep = bgep;
2243 bsp = DMA_VPTR(bgep->status_block);
2244 brp->cons_index_p = &bsp->buff_cons_index[buff_cons_xref[ring]];
2245 brp->chip_mbx_reg = mailbox_regs[ring];
2246 mutex_init(brp->rf_lock, NULL, MUTEX_DRIVER,
2247 DDI_INTR_PRI(bgep->intr_pri));
2248
2249 /*
2250 * Allocate the array of s/w Receive Buffer Descriptors
2251 */
2252 srbdp = kmem_zalloc(nslots*sizeof (*srbdp), KM_SLEEP);
2253 brp->sw_rbds = srbdp;
2254
2255 /*
2256 * Now initialise each array element once and for all
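	 * Each of the BGE_SPLIT parent chunks is carved into
	 * nslots/BGE_SPLIT single-buffer slices, one per s/w receive
	 * buffer descriptor.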
2257 */
2258 for (split = 0; split < BGE_SPLIT; ++split) {
2259 pbuf = brp->buf[split];
2260 for (slot = 0; slot < nslots/BGE_SPLIT; ++srbdp, ++slot)
2261 bge_slice_chunk(&srbdp->pbuf, &pbuf, 1, bufsize);
2262 ASSERT(pbuf.alength == 0);
2263 }
2264 }
2265
2266 /*
2267 * Clean up initialisation done above before the memory is freed
2268 */
2269 static void
2270 bge_fini_buff_ring(bge_t *bgep, uint64_t ring)
2271 {
2272 buff_ring_t *brp;
2273 sw_rbd_t *srbdp;
2274
2275 BGE_TRACE(("bge_fini_buff_ring($%p, %d)",
2276 (void *)bgep, ring));
2277
2278 brp = &bgep->buff[ring];
2279 srbdp = brp->sw_rbds;
2280 kmem_free(srbdp, brp->desc.nslots*sizeof (*srbdp));
2281
2282 mutex_destroy(brp->rf_lock);
2283 }
2284
2285 /*
2286 * Initialise the specified Receive (Return) Ring, using the
2287 * information in the <dma_area> descriptors that it contains
2288 * to set up all the other fields. This routine should be called
2289 * only once for each ring.
2290 */
2291 static void
2292 bge_init_recv_ring(bge_t *bgep, uint64_t ring)
2293 {
2294 recv_ring_t *rrp;
2295 bge_status_t *bsp;
2296 uint32_t nslots;
2297
2298 BGE_TRACE(("bge_init_recv_ring($%p, %d)",
2299 (void *)bgep, ring));
2300
2301 /*
2302 * The chip architecture requires that receive return rings have
2303 * 512 or 1024 or 2048 elements per ring. See 570X-PG108-R page 103.
2304 */
2305 rrp = &bgep->recv[ring];
2306 nslots = rrp->desc.nslots;
2307 ASSERT(nslots == 0 || nslots == 512 ||
2308 nslots == 1024 || nslots == 2048);
2309
2310 /*
2311 * Set up the copy of the h/w RCB
2312 */
2313 rrp->hw_rcb.host_ring_addr = rrp->desc.cookie.dmac_laddress;
2314 rrp->hw_rcb.max_len = (uint16_t)nslots;
2315 rrp->hw_rcb.flags = nslots > 0 ? 0 : RCB_FLAG_RING_DISABLED;
2316 rrp->hw_rcb.nic_ring_addr = 0;
2317
2318 /*
2319 * Other one-off initialisation of per-ring data
2320 */
2321 rrp->bgep = bgep;
2322 bsp = DMA_VPTR(bgep->status_block);
2323 rrp->prod_index_p = RECV_INDEX_P(bsp, ring);
2324 rrp->chip_mbx_reg = RECV_RING_CONS_INDEX_REG(ring);
2325 mutex_init(rrp->rx_lock, NULL, MUTEX_DRIVER,
2326 DDI_INTR_PRI(bgep->intr_pri));
2327 }
2328
2329
2330 /*
2331 * Clean up initialisation done above before the memory is freed
2332 */
2333 static void
2334 bge_fini_recv_ring(bge_t *bgep, uint64_t ring)
2335 {
2336 recv_ring_t *rrp;
2337
2338 BGE_TRACE(("bge_fini_recv_ring($%p, %d)",
2339 (void *)bgep, ring));
2340
2341 rrp = &bgep->recv[ring];
2342 if (rrp->rx_softint)
2343 ddi_remove_softintr(rrp->rx_softint);
2344 mutex_destroy(rrp->rx_lock);
2345 }
2346
2347 /*
2348 * Initialise the specified Send Ring, using the information in the
2349 * <dma_area> descriptors that it contains to set up all the other
2350 * fields. This routine should be called only once for each ring.
2351 */
2352 static void
2353 bge_init_send_ring(bge_t *bgep, uint64_t ring)
2354 {
2355 send_ring_t *srp;
2356 bge_status_t *bsp;
2357 sw_sbd_t *ssbdp;
2358 dma_area_t desc;
2359 dma_area_t pbuf;
2360 uint32_t nslots;
2361 uint32_t slot;
2362 uint32_t split;
2363 sw_txbuf_t *txbuf;
2364
2365 BGE_TRACE(("bge_init_send_ring($%p, %d)",
2366 (void *)bgep, ring));
2367
2368 /*
2369 * The chip architecture requires that host-based send rings
2370 * have 512 elements per ring. See 570X-PG102-R page 56.
2371 */
2372 srp = &bgep->send[ring];
2373 nslots = srp->desc.nslots;
2374 ASSERT(nslots == 0 || nslots == 512);
2375
2376 /*
2377 * Set up the copy of the h/w RCB
2378 */
2379 srp->hw_rcb.host_ring_addr = srp->desc.cookie.dmac_laddress;
2380 srp->hw_rcb.max_len = (uint16_t)nslots;
2381 srp->hw_rcb.flags = nslots > 0 ? 0 : RCB_FLAG_RING_DISABLED;
2382 srp->hw_rcb.nic_ring_addr = NIC_MEM_SHADOW_SEND_RING(ring, nslots);
2383
2384 /*
2385 * Other one-off initialisation of per-ring data
2386 */
2387 srp->bgep = bgep;
2388 bsp = DMA_VPTR(bgep->status_block);
2389 srp->cons_index_p = SEND_INDEX_P(bsp, ring);
2390 srp->chip_mbx_reg = SEND_RING_HOST_INDEX_REG(ring);
2391 mutex_init(srp->tx_lock, NULL, MUTEX_DRIVER,
2392 DDI_INTR_PRI(bgep->intr_pri));
2393 mutex_init(srp->txbuf_lock, NULL, MUTEX_DRIVER,
2394 DDI_INTR_PRI(bgep->intr_pri));
2395 mutex_init(srp->freetxbuf_lock, NULL, MUTEX_DRIVER,
2396 DDI_INTR_PRI(bgep->intr_pri));
2397 mutex_init(srp->tc_lock, NULL, MUTEX_DRIVER,
2398 DDI_INTR_PRI(bgep->intr_pri));
2399 if (nslots == 0)
2400 return;
2401
2402 /*
2403 * Allocate the array of s/w Send Buffer Descriptors
2404 */
2405 ssbdp = kmem_zalloc(nslots*sizeof (*ssbdp), KM_SLEEP);
2406 txbuf = kmem_zalloc(BGE_SEND_BUF_MAX*sizeof (*txbuf), KM_SLEEP);
2407 srp->txbuf_head =
2408 kmem_zalloc(BGE_SEND_BUF_MAX*sizeof (bge_queue_item_t), KM_SLEEP);
2409 srp->pktp = kmem_zalloc(BGE_SEND_BUF_MAX*sizeof (send_pkt_t), KM_SLEEP);
2410 srp->sw_sbds = ssbdp;
2411 srp->txbuf = txbuf;
2412 srp->tx_buffers = BGE_SEND_BUF_NUM;
2413 srp->tx_buffers_low = srp->tx_buffers / 4;
2414 if (bgep->chipid.snd_buff_size > BGE_SEND_BUFF_SIZE_DEFAULT)
2415 srp->tx_array_max = BGE_SEND_BUF_ARRAY_JUMBO;
2416 else
2417 srp->tx_array_max = BGE_SEND_BUF_ARRAY;
2418 srp->tx_array = 1;
2419
2420 /*
2421 * Chunk tx desc area
2422 */
2423 desc = srp->desc;
2424 for (slot = 0; slot < nslots; ++ssbdp, ++slot) {
2425 bge_slice_chunk(&ssbdp->desc, &desc, 1,
2426 sizeof (bge_sbd_t));
2427 }
2428 ASSERT(desc.alength == 0);
2429
2430 /*
2431 * Chunk tx buffer area
2432 */
2433 for (split = 0; split < BGE_SPLIT; ++split) {
2434 pbuf = srp->buf[0][split];
2435 for (slot = 0; slot < BGE_SEND_BUF_NUM/BGE_SPLIT; ++slot) {
2436 bge_slice_chunk(&txbuf->buf, &pbuf, 1,
2437 bgep->chipid.snd_buff_size);
2438 txbuf++;
2439 }
2440 ASSERT(pbuf.alength == 0);
2441 }
2442 }
2443
2444 /*
2445 * Clean up initialisation done above before the memory is freed
2446 */
2447 static void
2448 bge_fini_send_ring(bge_t *bgep, uint64_t ring)
2449 {
2450 send_ring_t *srp;
2451 uint32_t array;
2452 uint32_t split;
2453 uint32_t nslots;
2454
2455 BGE_TRACE(("bge_fini_send_ring($%p, %d)",
2456 (void *)bgep, ring));
2457
2458 srp = &bgep->send[ring];
2459 mutex_destroy(srp->tc_lock);
2460 mutex_destroy(srp->freetxbuf_lock);
2461 mutex_destroy(srp->txbuf_lock);
2462 mutex_destroy(srp->tx_lock);
2463 nslots = srp->desc.nslots;
2464 if (nslots == 0)
2465 return;
2466
2467 for (array = 1; array < srp->tx_array; ++array)
2468 for (split = 0; split < BGE_SPLIT; ++split)
2469 bge_free_dma_mem(&srp->buf[array][split]);
2470 kmem_free(srp->sw_sbds, nslots*sizeof (*srp->sw_sbds));
2471 kmem_free(srp->txbuf_head, BGE_SEND_BUF_MAX*sizeof (*srp->txbuf_head));
2472 kmem_free(srp->txbuf, BGE_SEND_BUF_MAX*sizeof (*srp->txbuf));
2473 kmem_free(srp->pktp, BGE_SEND_BUF_MAX*sizeof (*srp->pktp));
2474 srp->sw_sbds = NULL;
2475 srp->txbuf_head = NULL;
2476 srp->txbuf = NULL;
2477 srp->pktp = NULL;
2478 }
2479
2480 /*
2481 * Initialise all transmit, receive, and buffer rings.
2482 */
2483 void
2484 bge_init_rings(bge_t *bgep)
2485 {
2486 uint32_t ring;
2487
2488 BGE_TRACE(("bge_init_rings($%p)", (void *)bgep));
2489
2490 /*
2491 * Perform one-off initialisation of each ring ...
2492 */
2493 for (ring = 0; ring < BGE_SEND_RINGS_MAX; ++ring)
2494 bge_init_send_ring(bgep, ring);
2495 for (ring = 0; ring < BGE_RECV_RINGS_MAX; ++ring)
2496 bge_init_recv_ring(bgep, ring);
2497 for (ring = 0; ring < BGE_BUFF_RINGS_MAX; ++ring)
2498 bge_init_buff_ring(bgep, ring);
2499 }
2500
2501 /*
2502 * Undo the work of bge_init_rings() above before the memory is freed
2503 */
2504 void
2505 bge_fini_rings(bge_t *bgep)
2506 {
2507 uint32_t ring;
2508
2509 BGE_TRACE(("bge_fini_rings($%p)", (void *)bgep));
2510
2511 for (ring = 0; ring < BGE_BUFF_RINGS_MAX; ++ring)
2512 bge_fini_buff_ring(bgep, ring);
2513 for (ring = 0; ring < BGE_RECV_RINGS_MAX; ++ring)
2514 bge_fini_recv_ring(bgep, ring);
2515 for (ring = 0; ring < BGE_SEND_RINGS_MAX; ++ring)
2516 bge_fini_send_ring(bgep, ring);
2517 }
2518
2519 /*
 * Called from bge_m_stop() to free the tx buffers that were
 * allocated by the tx process.
2522 */
2523 void
2524 bge_free_txbuf_arrays(send_ring_t *srp)
2525 {
2526 uint32_t array;
2527 uint32_t split;
2528
2529 ASSERT(mutex_owned(srp->tx_lock));
2530
2531 /*
2532 * Free the extra tx buffer DMA area
2533 */
2534 for (array = 1; array < srp->tx_array; ++array)
2535 for (split = 0; split < BGE_SPLIT; ++split)
2536 bge_free_dma_mem(&srp->buf[array][split]);
2537
2538 /*
2539 * Restore initial tx buffer numbers
2540 */
2541 srp->tx_array = 1;
2542 srp->tx_buffers = BGE_SEND_BUF_NUM;
2543 srp->tx_buffers_low = srp->tx_buffers / 4;
2544 srp->tx_flow = 0;
2545 bzero(srp->pktp, BGE_SEND_BUF_MAX * sizeof (*srp->pktp));
2546 }
2547
2548 /*
 * Called from the tx process to allocate more tx buffers
2550 */
2551 bge_queue_item_t *
2552 bge_alloc_txbuf_array(bge_t *bgep, send_ring_t *srp)
2553 {
2554 bge_queue_t *txbuf_queue;
2555 bge_queue_item_t *txbuf_item_last;
2556 bge_queue_item_t *txbuf_item;
2557 bge_queue_item_t *txbuf_item_rtn;
2558 sw_txbuf_t *txbuf;
2559 dma_area_t area;
2560 size_t txbuffsize;
2561 uint32_t slot;
2562 uint32_t array;
2563 uint32_t split;
2564 uint32_t err;
2565
2566 ASSERT(mutex_owned(srp->tx_lock));
2567
2568 array = srp->tx_array;
2569 if (array >= srp->tx_array_max)
2570 return (NULL);
2571
2572 /*
2573 * Allocate memory & handles for TX buffers
2574 */
2575 txbuffsize = BGE_SEND_BUF_NUM*bgep->chipid.snd_buff_size;
2576 ASSERT((txbuffsize % BGE_SPLIT) == 0);
2577 for (split = 0; split < BGE_SPLIT; ++split) {
2578 err = bge_alloc_dma_mem(bgep, txbuffsize/BGE_SPLIT,
2579 &bge_data_accattr, DDI_DMA_WRITE | BGE_DMA_MODE,
2580 &srp->buf[array][split]);
2581 if (err != DDI_SUCCESS) {
			/* Free chunks already allocated, plus any partial one */
2583 for (slot = 0; slot <= split; ++slot)
2584 bge_free_dma_mem(&srp->buf[array][slot]);
2585 srp->tx_alloc_fail++;
2586 return (NULL);
2587 }
2588 }
2589
2590 /*
2591 * Chunk tx buffer area
2592 */
2593 txbuf = srp->txbuf + array*BGE_SEND_BUF_NUM;
2594 for (split = 0; split < BGE_SPLIT; ++split) {
2595 area = srp->buf[array][split];
2596 for (slot = 0; slot < BGE_SEND_BUF_NUM/BGE_SPLIT; ++slot) {
2597 bge_slice_chunk(&txbuf->buf, &area, 1,
2598 bgep->chipid.snd_buff_size);
2599 txbuf++;
2600 }
2601 }
2602
2603 /*
2604 * Add above buffers to the tx buffer pop queue
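	 * The first item is held back and returned to the caller rather
	 * than queued, so only BGE_SEND_BUF_NUM - 1 items are linked
	 * onto the queue (hence the count adjustment below).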
2605 */
2606 txbuf_item = srp->txbuf_head + array*BGE_SEND_BUF_NUM;
2607 txbuf = srp->txbuf + array*BGE_SEND_BUF_NUM;
2608 txbuf_item_last = NULL;
2609 for (slot = 0; slot < BGE_SEND_BUF_NUM; ++slot) {
2610 txbuf_item->item = txbuf;
2611 txbuf_item->next = txbuf_item_last;
2612 txbuf_item_last = txbuf_item;
2613 txbuf++;
2614 txbuf_item++;
2615 }
2616 txbuf_item = srp->txbuf_head + array*BGE_SEND_BUF_NUM;
2617 txbuf_item_rtn = txbuf_item;
2618 txbuf_item++;
2619 txbuf_queue = srp->txbuf_pop_queue;
2620 mutex_enter(txbuf_queue->lock);
2621 txbuf_item->next = txbuf_queue->head;
2622 txbuf_queue->head = txbuf_item_last;
2623 txbuf_queue->count += BGE_SEND_BUF_NUM - 1;
2624 mutex_exit(txbuf_queue->lock);
2625
2626 srp->tx_array++;
2627 srp->tx_buffers += BGE_SEND_BUF_NUM;
2628 srp->tx_buffers_low = srp->tx_buffers / 4;
2629
2630 return (txbuf_item_rtn);
2631 }
2632
2633 /*
 * This function allocates all the transmit and receive buffers
 * and descriptors, in several DMA areas: receive buffers, transmit
 * buffers, receive descriptor rings (return and producer), and the
 * transmit descriptors plus status block and statistics area.
2636 */
2637 int
2638 bge_alloc_bufs(bge_t *bgep)
2639 {
2640 dma_area_t area;
2641 size_t rxbuffsize;
2642 size_t txbuffsize;
2643 size_t rxbuffdescsize;
2644 size_t rxdescsize;
2645 size_t txdescsize;
2646 uint32_t ring;
2647 uint32_t rx_rings = bgep->chipid.rx_rings;
2648 uint32_t tx_rings = bgep->chipid.tx_rings;
2649 int split;
2650 int err;
2651
2652 BGE_TRACE(("bge_alloc_bufs($%p)",
2653 (void *)bgep));
2654
2655 rxbuffsize = BGE_STD_SLOTS_USED*bgep->chipid.std_buf_size;
2656 rxbuffsize += bgep->chipid.jumbo_slots*bgep->chipid.recv_jumbo_size;
2657 rxbuffsize += BGE_MINI_SLOTS_USED*BGE_MINI_BUFF_SIZE;
2658
2659 txbuffsize = BGE_SEND_BUF_NUM*bgep->chipid.snd_buff_size;
2660 txbuffsize *= tx_rings;
2661
2662 rxdescsize = rx_rings*bgep->chipid.recv_slots;
2663 rxdescsize *= sizeof (bge_rbd_t);
2664
2665 rxbuffdescsize = BGE_STD_SLOTS_USED;
2666 rxbuffdescsize += bgep->chipid.jumbo_slots;
2667 rxbuffdescsize += BGE_MINI_SLOTS_USED;
2668 rxbuffdescsize *= sizeof (bge_rbd_t);
2669
2670 txdescsize = tx_rings*BGE_SEND_SLOTS_USED;
2671 txdescsize *= sizeof (bge_sbd_t);
2672 txdescsize += sizeof (bge_statistics_t);
2673 txdescsize += sizeof (bge_status_t);
2674 txdescsize += BGE_STATUS_PADDING;
2675
2676 /*
2677 * Enable PCI relaxed ordering only for RX/TX data buffers
2678 */
2679 if (!(DEVICE_5717_SERIES_CHIPSETS(bgep) ||
2680 DEVICE_5725_SERIES_CHIPSETS(bgep) ||
2681 DEVICE_57765_SERIES_CHIPSETS(bgep))) {
2682 if (bge_relaxed_ordering)
2683 dma_attr.dma_attr_flags |= DDI_DMA_RELAXED_ORDERING;
2684 }
2685
2686 /*
2687 * Allocate memory & handles for RX buffers
2688 */
2689 ASSERT((rxbuffsize % BGE_SPLIT) == 0);
2690 for (split = 0; split < BGE_SPLIT; ++split) {
2691 err = bge_alloc_dma_mem(bgep, rxbuffsize/BGE_SPLIT,
2692 &bge_data_accattr, DDI_DMA_READ | BGE_DMA_MODE,
2693 &bgep->rx_buff[split]);
2694 if (err != DDI_SUCCESS)
2695 return (DDI_FAILURE);
2696 }
	BGE_DEBUG(("DMA ALLOC: allocated %d chunks of %d bytes for Rx Buffers "
	    "(rxbuffsize = %d)",
	    BGE_SPLIT,
	    rxbuffsize/BGE_SPLIT,
	    rxbuffsize));
2700
2701 /*
2702 * Allocate memory & handles for TX buffers
2703 */
2704 ASSERT((txbuffsize % BGE_SPLIT) == 0);
2705 for (split = 0; split < BGE_SPLIT; ++split) {
2706 err = bge_alloc_dma_mem(bgep, txbuffsize/BGE_SPLIT,
2707 &bge_data_accattr, DDI_DMA_WRITE | BGE_DMA_MODE,
2708 &bgep->tx_buff[split]);
2709 if (err != DDI_SUCCESS)
2710 return (DDI_FAILURE);
2711 }
	BGE_DEBUG(("DMA ALLOC: allocated %d chunks of %d bytes for Tx Buffers "
	    "(txbuffsize = %d)",
	    BGE_SPLIT,
	    txbuffsize/BGE_SPLIT,
	    txbuffsize));
2715
2716 if (!(DEVICE_5717_SERIES_CHIPSETS(bgep) ||
2717 DEVICE_5725_SERIES_CHIPSETS(bgep) ||
2718 DEVICE_57765_SERIES_CHIPSETS(bgep))) {
		/* descriptor rings must not use relaxed ordering */
2720 dma_attr.dma_attr_flags &= ~DDI_DMA_RELAXED_ORDERING;
2721 }
2722
2723 /*
2724 * Allocate memory & handles for receive return rings
2725 */
2726 ASSERT((rxdescsize % rx_rings) == 0);
2727 for (split = 0; split < rx_rings; ++split) {
2728 err = bge_alloc_dma_mem(bgep, rxdescsize/rx_rings,
2729 &bge_desc_accattr, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
2730 &bgep->rx_desc[split]);
2731 if (err != DDI_SUCCESS)
2732 return (DDI_FAILURE);
2733 }
	BGE_DEBUG(("DMA ALLOC: allocated %d chunks of %d bytes for Rx Descs "
	    "cons (rxdescsize = %d)",
	    rx_rings,
	    rxdescsize/rx_rings,
	    rxdescsize));
2738
2739 /*
2740 * Allocate memory & handles for buffer (producer) descriptor rings.
2741 * Note that split=rx_rings.
2742 */
2743 err = bge_alloc_dma_mem(bgep, rxbuffdescsize, &bge_desc_accattr,
2744 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, &bgep->rx_desc[split]);
2745 if (err != DDI_SUCCESS)
2746 return (DDI_FAILURE);
	BGE_DEBUG(("DMA ALLOC: allocated 1 chunk for Rx Descs prod "
	    "(rxbuffdescsize = %d)",
	    rxbuffdescsize));
2749
2750 /*
2751 * Allocate memory & handles for TX descriptor rings,
2752 * status block, and statistics area
2753 */
2754 err = bge_alloc_dma_mem(bgep, txdescsize, &bge_desc_accattr,
2755 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, &bgep->tx_desc);
2756 if (err != DDI_SUCCESS)
2757 return (DDI_FAILURE);
	BGE_DEBUG(("DMA ALLOC: allocated 1 chunk for Tx Descs / Status Block / "
	    "Stats (txdescsize = %d)",
	    txdescsize));
2760
2761 /*
2762 * Now carve up each of the allocated areas ...
2763 */
2764
2765 /* rx buffers */
2766 for (split = 0; split < BGE_SPLIT; ++split) {
2767 area = bgep->rx_buff[split];
2768
2769 BGE_DEBUG(("RXB CHNK %d INIT: va=%p alen=%d off=%d pa=%llx psz=%d",
2770 split,
2771 area.mem_va,
2772 area.alength,
2773 area.offset,
2774 area.cookie.dmac_laddress,
2775 area.cookie.dmac_size));
2776
2777 bge_slice_chunk(&bgep->buff[BGE_STD_BUFF_RING].buf[split],
2778 &area, BGE_STD_SLOTS_USED/BGE_SPLIT,
2779 bgep->chipid.std_buf_size);
2780
2781 BGE_DEBUG(("RXB SLCE %d STND: va=%p alen=%d off=%d pa=%llx psz=%d (nslots=%d slotlen=%d)",
2782 split,
2783 bgep->buff[BGE_STD_BUFF_RING].buf[split].mem_va,
2784 bgep->buff[BGE_STD_BUFF_RING].buf[split].alength,
2785 bgep->buff[BGE_STD_BUFF_RING].buf[split].offset,
2786 bgep->buff[BGE_STD_BUFF_RING].buf[split].cookie.dmac_laddress,
2787 bgep->buff[BGE_STD_BUFF_RING].buf[split].cookie.dmac_size,
2788 BGE_STD_SLOTS_USED/BGE_SPLIT,
2789 bgep->chipid.std_buf_size));
2790
2791 bge_slice_chunk(&bgep->buff[BGE_JUMBO_BUFF_RING].buf[split],
2792 &area, bgep->chipid.jumbo_slots/BGE_SPLIT,
2793 bgep->chipid.recv_jumbo_size);
2794
2795 if ((bgep->chipid.jumbo_slots / BGE_SPLIT) > 0)
2796 {
2797 BGE_DEBUG(("RXB SLCE %d JUMB: va=%p alen=%d off=%d pa=%llx psz=%d (nslots=%d slotlen=%d)",
2798 split,
2799 bgep->buff[BGE_JUMBO_BUFF_RING].buf[split].mem_va,
2800 bgep->buff[BGE_JUMBO_BUFF_RING].buf[split].alength,
2801 bgep->buff[BGE_JUMBO_BUFF_RING].buf[split].offset,
2802 bgep->buff[BGE_JUMBO_BUFF_RING].buf[split].cookie.dmac_laddress,
2803 bgep->buff[BGE_JUMBO_BUFF_RING].buf[split].cookie.dmac_size,
2804 bgep->chipid.jumbo_slots/BGE_SPLIT,
2805 bgep->chipid.recv_jumbo_size));
2806 }
2807
2808 bge_slice_chunk(&bgep->buff[BGE_MINI_BUFF_RING].buf[split],
2809 &area, BGE_MINI_SLOTS_USED/BGE_SPLIT,
2810 BGE_MINI_BUFF_SIZE);
2811
2812 if ((BGE_MINI_SLOTS_USED / BGE_SPLIT) > 0)
2813 {
2814 BGE_DEBUG(("RXB SLCE %d MINI: va=%p alen=%d off=%d pa=%llx psz=%d (nslots=%d slotlen=%d)",
2815 split,
2816 bgep->buff[BGE_MINI_BUFF_RING].buf[split].mem_va,
2817 bgep->buff[BGE_MINI_BUFF_RING].buf[split].alength,
2818 bgep->buff[BGE_MINI_BUFF_RING].buf[split].offset,
2819 bgep->buff[BGE_MINI_BUFF_RING].buf[split].cookie.dmac_laddress,
2820 bgep->buff[BGE_MINI_BUFF_RING].buf[split].cookie.dmac_size,
2821 BGE_MINI_SLOTS_USED/BGE_SPLIT,
2822 BGE_MINI_BUFF_SIZE));
2823 }
2824
2825 BGE_DEBUG(("RXB CHNK %d DONE: va=%p alen=%d off=%d pa=%llx psz=%d",
2826 split,
2827 area.mem_va,
2828 area.alength,
2829 area.offset,
2830 area.cookie.dmac_laddress,
2831 area.cookie.dmac_size));
2832 }
2833
2834 /* tx buffers */
2835 for (split = 0; split < BGE_SPLIT; ++split) {
2836 area = bgep->tx_buff[split];
2837
2838 BGE_DEBUG(("TXB CHNK %d INIT: va=%p alen=%d off=%d pa=%llx psz=%d",
2839 split,
2840 area.mem_va,
2841 area.alength,
2842 area.offset,
2843 area.cookie.dmac_laddress,
2844 area.cookie.dmac_size));
2845
2846 for (ring = 0; ring < tx_rings; ++ring) {
2847 bge_slice_chunk(&bgep->send[ring].buf[0][split],
2848 &area, BGE_SEND_BUF_NUM/BGE_SPLIT,
2849 bgep->chipid.snd_buff_size);
2850
2851 BGE_DEBUG(("TXB SLCE %d RING %d: va=%p alen=%d off=%d pa=%llx psz=%d (nslots=%d slotlen=%d)",
2852 split, ring,
2853 bgep->send[ring].buf[0][split].mem_va,
2854 bgep->send[ring].buf[0][split].alength,
2855 bgep->send[ring].buf[0][split].offset,
2856 bgep->send[ring].buf[0][split].cookie.dmac_laddress,
2857 bgep->send[ring].buf[0][split].cookie.dmac_size,
2858 BGE_SEND_BUF_NUM/BGE_SPLIT,
2859 bgep->chipid.snd_buff_size));
2860 }
2861
2862 for (; ring < BGE_SEND_RINGS_MAX; ++ring) {
2863 bge_slice_chunk(&bgep->send[ring].buf[0][split],
2864 &area, 0, bgep->chipid.snd_buff_size);
2865 }
2866
2867 BGE_DEBUG(("TXB CHNK %d DONE: va=%p alen=%d off=%d pa=%llx psz=%d",
2868 split,
2869 area.mem_va,
2870 area.alength,
2871 area.offset,
2872 area.cookie.dmac_laddress,
2873 area.cookie.dmac_size));
2874 }
2875
2876 for (ring = 0; ring < rx_rings; ++ring) {
2877 bge_slice_chunk(&bgep->recv[ring].desc, &bgep->rx_desc[ring],
2878 bgep->chipid.recv_slots, sizeof (bge_rbd_t));
2879
2880 BGE_DEBUG(("RXD CONS RING %d: va=%p alen=%d off=%d pa=%llx psz=%d (nslots=%d slotlen=%d)",
2881 ring,
2882 bgep->recv[ring].desc.mem_va,
2883 bgep->recv[ring].desc.alength,
2884 bgep->recv[ring].desc.offset,
2885 bgep->recv[ring].desc.cookie.dmac_laddress,
2886 bgep->recv[ring].desc.cookie.dmac_size,
2887 bgep->chipid.recv_slots,
	    sizeof (bge_rbd_t)));
2889 }
2890
	/* the rxbuffdescsize allocation lives at bgep->rx_desc[rx_rings] */
	area = bgep->rx_desc[rx_rings]; /* one element beyond the rings */
2893
2894 for (; ring < BGE_RECV_RINGS_MAX; ++ring) /* skip unused rings */
2895 bge_slice_chunk(&bgep->recv[ring].desc, &area,
2896 0, sizeof (bge_rbd_t));
2897
2898 BGE_DEBUG(("RXD PROD INIT: va=%p alen=%d off=%d pa=%llx psz=%d",
2899 area.mem_va,
2900 area.alength,
2901 area.offset,
2902 area.cookie.dmac_laddress,
2903 area.cookie.dmac_size));
2904
2905 bge_slice_chunk(&bgep->buff[BGE_STD_BUFF_RING].desc, &area,
2906 BGE_STD_SLOTS_USED, sizeof (bge_rbd_t));
2907 BGE_DEBUG(("RXD PROD STND: va=%p alen=%d off=%d pa=%llx psz=%d (nslots=%d slotlen=%d)",
2908 bgep->buff[BGE_STD_BUFF_RING].desc.mem_va,
2909 bgep->buff[BGE_STD_BUFF_RING].desc.alength,
2910 bgep->buff[BGE_STD_BUFF_RING].desc.offset,
2911 bgep->buff[BGE_STD_BUFF_RING].desc.cookie.dmac_laddress,
2912 bgep->buff[BGE_STD_BUFF_RING].desc.cookie.dmac_size,
2913 BGE_STD_SLOTS_USED,
	    sizeof (bge_rbd_t)));
2915
2916 bge_slice_chunk(&bgep->buff[BGE_JUMBO_BUFF_RING].desc, &area,
2917 bgep->chipid.jumbo_slots, sizeof (bge_rbd_t));
2918 BGE_DEBUG(("RXD PROD JUMB: va=%p alen=%d off=%d pa=%llx psz=%d (nslots=%d slotlen=%d)",
2919 bgep->buff[BGE_JUMBO_BUFF_RING].desc.mem_va,
2920 bgep->buff[BGE_JUMBO_BUFF_RING].desc.alength,
2921 bgep->buff[BGE_JUMBO_BUFF_RING].desc.offset,
2922 bgep->buff[BGE_JUMBO_BUFF_RING].desc.cookie.dmac_laddress,
2923 bgep->buff[BGE_JUMBO_BUFF_RING].desc.cookie.dmac_size,
2924 bgep->chipid.jumbo_slots,
	    sizeof (bge_rbd_t)));
2926
2927 bge_slice_chunk(&bgep->buff[BGE_MINI_BUFF_RING].desc, &area,
2928 BGE_MINI_SLOTS_USED, sizeof (bge_rbd_t));
2929 BGE_DEBUG(("RXD PROD MINI: va=%p alen=%d off=%d pa=%llx psz=%d (nslots=%d slotlen=%d)",
2930 bgep->buff[BGE_MINI_BUFF_RING].desc.mem_va,
2931 bgep->buff[BGE_MINI_BUFF_RING].desc.alength,
2932 bgep->buff[BGE_MINI_BUFF_RING].desc.offset,
2933 bgep->buff[BGE_MINI_BUFF_RING].desc.cookie.dmac_laddress,
2934 bgep->buff[BGE_MINI_BUFF_RING].desc.cookie.dmac_size,
2935 BGE_MINI_SLOTS_USED,
	    sizeof (bge_rbd_t)));
2937
2938 BGE_DEBUG(("RXD PROD DONE: va=%p alen=%d off=%d pa=%llx psz=%d",
2939 area.mem_va,
2940 area.alength,
2941 area.offset,
2942 area.cookie.dmac_laddress,
2943 area.cookie.dmac_size));
2944
2945 ASSERT(area.alength == 0);
2946
2947 area = bgep->tx_desc;
2948
2949 BGE_DEBUG(("TXD INIT: va=%p alen=%d off=%d pa=%llx psz=%d",
2950 area.mem_va,
2951 area.alength,
2952 area.offset,
2953 area.cookie.dmac_laddress,
2954 area.cookie.dmac_size));
2955
2956 for (ring = 0; ring < tx_rings; ++ring) {
2957 bge_slice_chunk(&bgep->send[ring].desc, &area,
2958 BGE_SEND_SLOTS_USED, sizeof (bge_sbd_t));
2959
2960 BGE_DEBUG(("TXD RING %d: va=%p alen=%d off=%d pa=%llx psz=%d (nslots=%d slotlen=%d)",
2961 ring,
2962 bgep->send[ring].desc.mem_va,
2963 bgep->send[ring].desc.alength,
2964 bgep->send[ring].desc.offset,
2965 bgep->send[ring].desc.cookie.dmac_laddress,
2966 bgep->send[ring].desc.cookie.dmac_size,
2967 BGE_SEND_SLOTS_USED,
	    sizeof (bge_sbd_t)));
2969 }
2970
2971 for (; ring < BGE_SEND_RINGS_MAX; ++ring) /* skip unused rings */
2972 bge_slice_chunk(&bgep->send[ring].desc, &area,
2973 0, sizeof (bge_sbd_t));
2974
2975 bge_slice_chunk(&bgep->statistics, &area, 1, sizeof (bge_statistics_t));
2976 BGE_DEBUG(("TXD STATISTICS: va=%p alen=%d off=%d pa=%llx psz=%d (nslots=%d slotlen=%d)",
2977 bgep->statistics.mem_va,
2978 bgep->statistics.alength,
2979 bgep->statistics.offset,
2980 bgep->statistics.cookie.dmac_laddress,
2981 bgep->statistics.cookie.dmac_size,
2982 1,
	    sizeof (bge_statistics_t)));
2984
2985 bge_slice_chunk(&bgep->status_block, &area, 1, sizeof (bge_status_t));
2986 BGE_DEBUG(("TXD STATUS BLOCK: va=%p alen=%d off=%d pa=%llx psz=%d (nslots=%d slotlen=%d)",
2987 bgep->status_block.mem_va,
2988 bgep->status_block.alength,
2989 bgep->status_block.offset,
2990 bgep->status_block.cookie.dmac_laddress,
2991 bgep->status_block.cookie.dmac_size,
2992 1,
	    sizeof (bge_status_t)));
2994
2995 BGE_DEBUG(("TXD DONE: va=%p alen=%d off=%d pa=%llx psz=%d",
2996 area.mem_va,
2997 area.alength,
2998 area.offset,
2999 area.cookie.dmac_laddress,
3000 area.cookie.dmac_size));
3001
3002 ASSERT(area.alength == BGE_STATUS_PADDING);
3003
3004 DMA_ZERO(bgep->status_block);
3005
3006 return (DDI_SUCCESS);
3007 }
3008
3009 #undef BGE_DBG
3010 #define BGE_DBG BGE_DBG_INIT /* debug flag for this code */
3011
3012 /*
3013 * This routine frees the transmit and receive buffers and descriptors.
3014 * Make sure the chip is stopped before calling it!
3015 */
3016 void
3017 bge_free_bufs(bge_t *bgep)
3018 {
3019 int split;
3020
3021 BGE_TRACE(("bge_free_bufs($%p)",
3022 (void *)bgep));
3023
3024 bge_free_dma_mem(&bgep->tx_desc);
3025 for (split = 0; split < BGE_RECV_RINGS_SPLIT; ++split)
3026 bge_free_dma_mem(&bgep->rx_desc[split]);
3027 for (split = 0; split < BGE_SPLIT; ++split)
3028 bge_free_dma_mem(&bgep->tx_buff[split]);
3029 for (split = 0; split < BGE_SPLIT; ++split)
3030 bge_free_dma_mem(&bgep->rx_buff[split]);
3031 }
3032
3033 /*
3034 * Determine (initial) MAC address ("BIA") to use for this interface
3035 */
3036
3037 static void
3038 bge_find_mac_address(bge_t *bgep, chip_id_t *cidp)
3039 {
3040 struct ether_addr sysaddr;
3041 char propbuf[8]; /* "true" or "false", plus NUL */
3042 uchar_t *bytes;
3043 int *ints;
3044 uint_t nelts;
3045 int err;
3046
3047 BGE_TRACE(("bge_find_mac_address($%p)",
3048 (void *)bgep));
3049
3050 BGE_DEBUG(("bge_find_mac_address: hw_mac_addr %012llx, => %s (%sset)",
3051 cidp->hw_mac_addr,
3052 ether_sprintf((void *)cidp->vendor_addr.addr),
3053 cidp->vendor_addr.set ? "" : "not "));
3054
3055 /*
3056 * The "vendor's factory-set address" may already have
3057 * been extracted from the chip, but if the property
3058 * "local-mac-address" is set we use that instead. It
3059 * will normally be set by OBP, but it could also be
3060 * specified in a .conf file(!)
3061 *
3062 * There doesn't seem to be a way to define byte-array
3063 * properties in a .conf, so we check whether it looks
3064 * like an array of 6 ints instead.
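	 * (A hypothetical bge.conf line of that form might read
	 * local-mac-address=0,16,24,18,52,86; -- six decimal ints.)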
3065 *
3066 * Then, we check whether it looks like an array of 6
3067 * bytes (which it should, if OBP set it). If we can't
3068 * make sense of it either way, we'll ignore it.
3069 */
3070 err = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, bgep->devinfo,
3071 DDI_PROP_DONTPASS, localmac_propname, &ints, &nelts);
3072 if (err == DDI_PROP_SUCCESS) {
3073 if (nelts == ETHERADDRL) {
3074 while (nelts--)
3075 cidp->vendor_addr.addr[nelts] = ints[nelts];
3076 cidp->vendor_addr.set = B_TRUE;
3077 }
3078 ddi_prop_free(ints);
3079 }
3080
3081 err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, bgep->devinfo,
3082 DDI_PROP_DONTPASS, localmac_propname, &bytes, &nelts);
3083 if (err == DDI_PROP_SUCCESS) {
3084 if (nelts == ETHERADDRL) {
3085 while (nelts--)
3086 cidp->vendor_addr.addr[nelts] = bytes[nelts];
3087 cidp->vendor_addr.set = B_TRUE;
3088 }
3089 ddi_prop_free(bytes);
3090 }
3091
3092 BGE_DEBUG(("bge_find_mac_address: +local %s (%sset)",
3093 ether_sprintf((void *)cidp->vendor_addr.addr),
3094 cidp->vendor_addr.set ? "" : "not "));
3095
3096 /*
3097 * Look up the OBP property "local-mac-address?". Note that even
3098 * though its value is a string (which should be "true" or "false"),
3099 * it can't be decoded by ddi_prop_lookup_string(9F). So, we zero
3100 * the buffer first and then fetch the property as an untyped array;
3101 * this may or may not include a final NUL, but since there will
3102 * always be one left at the end of the buffer we can now treat it
3103 * as a string anyway.
3104 */
3105 nelts = sizeof (propbuf);
3106 bzero(propbuf, nelts--);
3107 err = ddi_getlongprop_buf(DDI_DEV_T_ANY, bgep->devinfo,
3108 DDI_PROP_CANSLEEP, localmac_boolname, propbuf, (int *)&nelts);
3109
3110 /*
3111 * Now, if the address still isn't set from the hardware (SEEPROM)
3112 * or the OBP or .conf property, OR if the user has foolishly set
3113 * 'local-mac-address? = false', use "the system address" instead
3114 * (but only if it's non-null i.e. has been set from the IDPROM).
3115 */
3116 if (cidp->vendor_addr.set == B_FALSE || strcmp(propbuf, "false") == 0)
3117 if (localetheraddr(NULL, &sysaddr) != 0) {
3118 ethaddr_copy(&sysaddr, cidp->vendor_addr.addr);
3119 cidp->vendor_addr.set = B_TRUE;
3120 }
3121
3122 BGE_DEBUG(("bge_find_mac_address: +system %s (%sset)",
3123 ether_sprintf((void *)cidp->vendor_addr.addr),
3124 cidp->vendor_addr.set ? "" : "not "));
3125
3126 /*
3127 * Finally(!), if there's a valid "mac-address" property (created
3128 * if we netbooted from this interface), we must use this instead
3129 * of any of the above to ensure that the NFS/install server doesn't
3130 * get confused by the address changing as Solaris takes over!
3131 */
3132 err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, bgep->devinfo,
3133 DDI_PROP_DONTPASS, macaddr_propname, &bytes, &nelts);
3134 if (err == DDI_PROP_SUCCESS) {
3135 if (nelts == ETHERADDRL) {
3136 while (nelts--)
3137 cidp->vendor_addr.addr[nelts] = bytes[nelts];
3138 cidp->vendor_addr.set = B_TRUE;
3139 }
3140 ddi_prop_free(bytes);
3141 }
3142
3143 BGE_DEBUG(("bge_find_mac_address: =final %s (%sset)",
3144 ether_sprintf((void *)cidp->vendor_addr.addr),
3145 cidp->vendor_addr.set ? "" : "not "));
3146 }
3147
3148 /*ARGSUSED*/
3149 int
3150 bge_check_acc_handle(bge_t *bgep, ddi_acc_handle_t handle)
3151 {
3152 ddi_fm_error_t de;
3153
3154 ddi_fm_acc_err_get(handle, &de, DDI_FME_VERSION);
3155 ddi_fm_acc_err_clear(handle, DDI_FME_VERSION);
3156 return (de.fme_status);
3157 }
3158
3159 /*ARGSUSED*/
3160 int
3161 bge_check_dma_handle(bge_t *bgep, ddi_dma_handle_t handle)
3162 {
3163 ddi_fm_error_t de;
3164
3165 ASSERT(bgep->progress & PROGRESS_BUFS);
3166 ddi_fm_dma_err_get(handle, &de, DDI_FME_VERSION);
3167 return (de.fme_status);
3168 }
3169
3170 /*
3171 * The IO fault service error handling callback function
3172 */
3173 /*ARGSUSED*/
3174 static int
3175 bge_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data)
3176 {
3177 /*
3178 * as the driver can always deal with an error in any dma or
3179 * access handle, we can just return the fme_status value.
3180 */
3181 pci_ereport_post(dip, err, NULL);
3182 return (err->fme_status);
3183 }
3184
3185 static void
3186 bge_fm_init(bge_t *bgep)
3187 {
3188 ddi_iblock_cookie_t iblk;
3189
3190 /* Only register with IO Fault Services if we have some capability */
3191 if (bgep->fm_capabilities) {
3192 bge_reg_accattr.devacc_attr_access = DDI_FLAGERR_ACC;
3193 dma_attr.dma_attr_flags = DDI_DMA_FLAGERR;
3194
3195 /* Register capabilities with IO Fault Services */
3196 ddi_fm_init(bgep->devinfo, &bgep->fm_capabilities, &iblk);
3197
3198 /*
3199 * Initialize pci ereport capabilities if ereport capable
3200 */
3201 if (DDI_FM_EREPORT_CAP(bgep->fm_capabilities) ||
3202 DDI_FM_ERRCB_CAP(bgep->fm_capabilities))
3203 pci_ereport_setup(bgep->devinfo);
3204
3205 /*
3206 * Register error callback if error callback capable
3207 */
3208 if (DDI_FM_ERRCB_CAP(bgep->fm_capabilities))
3209 ddi_fm_handler_register(bgep->devinfo,
3210 bge_fm_error_cb, (void*) bgep);
3211 } else {
3212 /*
3213 * These fields have to be cleared of FMA if there are no
3214 * FMA capabilities at runtime.
3215 */
3216 bge_reg_accattr.devacc_attr_access = DDI_DEFAULT_ACC;
3217 dma_attr.dma_attr_flags = 0;
3218 }
3219 }
3220
3221 static void
3222 bge_fm_fini(bge_t *bgep)
3223 {
3224 /* Only unregister FMA capabilities if we registered some */
3225 if (bgep->fm_capabilities) {
3226
3227 /*
3228 * Release any resources allocated by pci_ereport_setup()
3229 */
3230 if (DDI_FM_EREPORT_CAP(bgep->fm_capabilities) ||
3231 DDI_FM_ERRCB_CAP(bgep->fm_capabilities))
3232 pci_ereport_teardown(bgep->devinfo);
3233
3234 /*
3235 * Un-register error callback if error callback capable
3236 */
3237 if (DDI_FM_ERRCB_CAP(bgep->fm_capabilities))
3238 ddi_fm_handler_unregister(bgep->devinfo);
3239
3240 /* Unregister from IO Fault Services */
3241 ddi_fm_fini(bgep->devinfo);
3242 }
3243 }
3244
3245 static void
3246 #ifdef BGE_IPMI_ASF
3247 bge_unattach(bge_t *bgep, uint_t asf_mode)
3248 #else
3249 bge_unattach(bge_t *bgep)
3250 #endif
3251 {
3252 BGE_TRACE(("bge_unattach($%p)",
3253 (void *)bgep));
3254
3255 /*
3256 * Flag that no more activity may be initiated
3257 */
3258 bgep->progress &= ~PROGRESS_READY;
3259
3260 /*
3261 * Quiesce the PHY and MAC (leave it reset but still powered).
3262 * Clean up and free all BGE data structures
3263 */
3264 if (bgep->periodic_id != NULL) {
3265 ddi_periodic_delete(bgep->periodic_id);
3266 bgep->periodic_id = NULL;
3267 }
3268
3269 if (bgep->progress & PROGRESS_KSTATS)
3270 bge_fini_kstats(bgep);
3271 if (bgep->progress & PROGRESS_PHY)
3272 bge_phys_reset(bgep);
3273 if (bgep->progress & PROGRESS_HWINT) {
3274 mutex_enter(bgep->genlock);
3275 #ifdef BGE_IPMI_ASF
3276 if (bge_chip_reset(bgep, B_FALSE, asf_mode) != DDI_SUCCESS)
3277 #else
3278 if (bge_chip_reset(bgep, B_FALSE) != DDI_SUCCESS)
3279 #endif
3280 ddi_fm_service_impact(bgep->devinfo,
3281 DDI_SERVICE_UNAFFECTED);
3282 #ifdef BGE_IPMI_ASF
3283 if (bgep->asf_enabled) {
3284 /*
3285 * This register has been overlaid. We restore its
3286 * initial value here.
3287 */
3288 bge_nic_put32(bgep, BGE_NIC_DATA_SIG_ADDR,
3289 BGE_NIC_DATA_SIG);
3290 }
3291 #endif
3292 if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK)
3293 ddi_fm_service_impact(bgep->devinfo,
3294 DDI_SERVICE_UNAFFECTED);
3295 if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK)
3296 ddi_fm_service_impact(bgep->devinfo,
3297 DDI_SERVICE_UNAFFECTED);
3298 mutex_exit(bgep->genlock);
3299 }
3300 if (bgep->progress & PROGRESS_INTR) {
3301 bge_intr_disable(bgep);
3302 bge_fini_rings(bgep);
3303 }
3304 if (bgep->progress & PROGRESS_HWINT) {
3305 bge_rem_intrs(bgep);
3306 rw_destroy(bgep->errlock);
3307 mutex_destroy(bgep->softintrlock);
3308 mutex_destroy(bgep->genlock);
3309 }
3310 if (bgep->progress & PROGRESS_FACTOTUM)
3311 ddi_remove_softintr(bgep->factotum_id);
3312 if (bgep->progress & PROGRESS_RESCHED)
3313 ddi_remove_softintr(bgep->drain_id);
3314 if (bgep->progress & PROGRESS_BUFS)
3315 bge_free_bufs(bgep);
3316 if (bgep->progress & PROGRESS_REGS) {
3317 ddi_regs_map_free(&bgep->io_handle);
3318 if (bgep->ape_enabled)
3319 ddi_regs_map_free(&bgep->ape_handle);
3320 }
3321 if (bgep->progress & PROGRESS_CFG)
3322 pci_config_teardown(&bgep->cfg_handle);
3323
3324 bge_fm_fini(bgep);
3325
3326 ddi_remove_minor_node(bgep->devinfo, NULL);
3327 kmem_free(bgep->pstats, sizeof (bge_statistics_reg_t));
3328 kmem_free(bgep, sizeof (*bgep));
3329 }
3330
3331 static int
3332 bge_resume(dev_info_t *devinfo)
3333 {
3334 bge_t *bgep; /* Our private data */
3335 chip_id_t *cidp;
3336 chip_id_t chipid;
3337
3338 bgep = ddi_get_driver_private(devinfo);
3339 if (bgep == NULL)
3340 return (DDI_FAILURE);
3341
3342 /*
3343 * Refuse to resume if the data structures aren't consistent
3344 */
3345 if (bgep->devinfo != devinfo)
3346 return (DDI_FAILURE);
3347
3348 #ifdef BGE_IPMI_ASF
3349 /*
	 * Power management is not currently supported in BGE. If you
	 * want to implement it, please add the ASF/IPMI related
	 * code here.
3353 */
3354
3355 #endif
3356
3357 /*
3358 * Read chip ID & set up config space command register(s)
3359 * Refuse to resume if the chip has changed its identity!
3360 */
3361 cidp = &bgep->chipid;
3362 mutex_enter(bgep->genlock);
3363 bge_chip_cfg_init(bgep, &chipid, B_FALSE);
3364 if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) {
3365 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST);
3366 mutex_exit(bgep->genlock);
3367 return (DDI_FAILURE);
3368 }
3369 mutex_exit(bgep->genlock);
3370 if (chipid.vendor != cidp->vendor)
3371 return (DDI_FAILURE);
3372 if (chipid.device != cidp->device)
3373 return (DDI_FAILURE);
3374 if (chipid.revision != cidp->revision)
3375 return (DDI_FAILURE);
3376 if (chipid.asic_rev != cidp->asic_rev)
3377 return (DDI_FAILURE);
3378
3379 /*
3380 * All OK, reinitialise h/w & kick off GLD scheduling
3381 */
3382 mutex_enter(bgep->genlock);
3383 if (bge_restart(bgep, B_TRUE) != DDI_SUCCESS) {
3384 (void) bge_check_acc_handle(bgep, bgep->cfg_handle);
3385 (void) bge_check_acc_handle(bgep, bgep->io_handle);
3386 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST);
3387 mutex_exit(bgep->genlock);
3388 return (DDI_FAILURE);
3389 }
3390 if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) {
3391 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST);
3392 mutex_exit(bgep->genlock);
3393 return (DDI_FAILURE);
3394 }
3395 if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) {
3396 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST);
3397 mutex_exit(bgep->genlock);
3398 return (DDI_FAILURE);
3399 }
3400 mutex_exit(bgep->genlock);
3401 return (DDI_SUCCESS);
3402 }
3403
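/*
 * A valid firmware image header at <offset> carries the 0x0c000000
 * signature in the top bits of its first word and a zero second word.
 */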
3404 static int
3405 bge_fw_img_is_valid(bge_t *bgep, uint32_t offset)
3406 {
3407 uint32_t val;
3408
3409 if (bge_nvmem_read32(bgep, offset, &val) ||
3410 (val & 0xfc000000) != 0x0c000000 ||
3411 bge_nvmem_read32(bgep, offset + 4, &val) ||
3412 val != 0)
3413 return (0);
3414
3415 return (1);
3416 }
3417
3418 static void
3419 bge_read_mgmtfw_ver(bge_t *bgep)
3420 {
3421 uint32_t val;
3422 uint32_t offset;
3423 uint32_t start;
3424 int i, vlen;
3425
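	/*
	 * Walk the NVM directory looking for the ASF init-code entry;
	 * the entry type is encoded in the top bits of each directory
	 * word (NVM_DIRTYPE_SHIFT / NVM_DIRTYPE_ASFINI).
	 */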
3426 for (offset = NVM_DIR_START;
3427 offset < NVM_DIR_END;
3428 offset += NVM_DIRENT_SIZE) {
3429 if (bge_nvmem_read32(bgep, offset, &val))
3430 return;
3431
3432 if ((val >> NVM_DIRTYPE_SHIFT) == NVM_DIRTYPE_ASFINI)
3433 break;
3434 }
3435
3436 if (offset == NVM_DIR_END)
3437 return;
3438
3439 if (bge_nvmem_read32(bgep, offset - 4, &start))
3440 return;
3441
3442 if (bge_nvmem_read32(bgep, offset + 4, &offset) ||
3443 !bge_fw_img_is_valid(bgep, offset) ||
3444 bge_nvmem_read32(bgep, offset + 8, &val))
3445 return;
3446
3447 offset += val - start;
3448
3449 vlen = strlen(bgep->fw_version);
3450
3451 bgep->fw_version[vlen++] = ',';
3452 bgep->fw_version[vlen++] = ' ';
3453
3454 for (i = 0; i < 4; i++) {
3455 uint32_t v;
3456
3457 if (bge_nvmem_read32(bgep, offset, &v))
3458 return;
3459
3460 v = BE_32(v);
3461
		offset += sizeof (v);
3463
		if (vlen > BGE_FW_VER_SIZE - sizeof (v)) {
			memcpy(&bgep->fw_version[vlen], &v,
			    BGE_FW_VER_SIZE - vlen);
			break;
		}

		memcpy(&bgep->fw_version[vlen], &v, sizeof (v));
		vlen += sizeof (v);
3471 }
3472 }
3473
3474 static void
3475 bge_read_dash_ver(bge_t *bgep)
3476 {
3477 int vlen;
3478 uint32_t apedata;
3479 char *fwtype;
3480
3481 if (!bgep->ape_enabled || !bgep->asf_enabled)
3482 return;
3483
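	/* the APE shared-memory signature must match and firmware be ready */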
3484 apedata = bge_ape_get32(bgep, BGE_APE_SEG_SIG);
3485 if (apedata != APE_SEG_SIG_MAGIC)
3486 return;
3487
3488 apedata = bge_ape_get32(bgep, BGE_APE_FW_STATUS);
3489 if (!(apedata & APE_FW_STATUS_READY))
3490 return;
3491
3492 apedata = bge_ape_get32(bgep, BGE_APE_FW_VERSION);
3493
3494 if (bge_ape_get32(bgep, BGE_APE_FW_FEATURES) &
3495 BGE_APE_FW_FEATURE_NCSI) {
3496 bgep->ape_has_ncsi = B_TRUE;
3497 fwtype = "NCSI";
3498 } else if ((bgep->chipid.device == DEVICE_ID_5725) ||
3499 (bgep->chipid.device == DEVICE_ID_5727)) {
3500 fwtype = "SMASH";
3501 } else {
3502 fwtype = "DASH";
3503 }
3504
3505 vlen = strlen(bgep->fw_version);
3506
	(void) snprintf(&bgep->fw_version[vlen], BGE_FW_VER_SIZE - vlen,
3508 " %s v%d.%d.%d.%d", fwtype,
3509 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
3510 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
3511 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
3512 (apedata & APE_FW_VERSION_BLDMSK));
3513 }
3514
3515 static void
3516 bge_read_bc_ver(bge_t *bgep)
3517 {
3518 uint32_t val;
3519 uint32_t offset;
3520 uint32_t start;
3521 uint32_t ver_offset;
3522 int i, dst_off;
3523 uint32_t major;
3524 uint32_t minor;
3525 boolean_t newver = B_FALSE;
3526
3527 if (bge_nvmem_read32(bgep, 0xc, &offset) ||
3528 bge_nvmem_read32(bgep, 0x4, &start))
3529 return;
3530
3531 if (bge_nvmem_read32(bgep, offset, &val))
3532 return;
3533
3534 if ((val & 0xfc000000) == 0x0c000000) {
3535 if (bge_nvmem_read32(bgep, offset + 4, &val))
3536 return;
3537
3538 if (val == 0)
3539 newver = B_TRUE;
3540 }
3541
3542 dst_off = strlen(bgep->fw_version);
3543
3544 if (newver) {
3545 if (((BGE_FW_VER_SIZE - dst_off) < 16) ||
3546 bge_nvmem_read32(bgep, offset + 8, &ver_offset))
3547 return;
3548
3549 offset = offset + ver_offset - start;
3550 for (i = 0; i < 16; i += 4) {
3551 if (bge_nvmem_read32(bgep, offset + i, &val))
3552 return;
3553 val = BE_32(val);
3554 memcpy(bgep->fw_version + dst_off + i, &val,
			    sizeof (val));
3556 }
3557 } else {
3558 if (bge_nvmem_read32(bgep, NVM_PTREV_BCVER, &ver_offset))
3559 return;
3560
3561 major = (ver_offset & NVM_BCVER_MAJMSK) >> NVM_BCVER_MAJSFT;
3562 minor = ver_offset & NVM_BCVER_MINMSK;
		(void) snprintf(&bgep->fw_version[dst_off], BGE_FW_VER_SIZE - dst_off,
3564 "v%d.%02d", major, minor);
3565 }
3566 }
3567
3568 static void
3569 bge_read_fw_ver(bge_t *bgep)
3570 {
3571 uint32_t val;
3572 uint32_t magic;
3573
3574 *bgep->fw_version = 0;
3575
3576 if ((bgep->chipid.nvtype == BGE_NVTYPE_NONE) ||
3577 (bgep->chipid.nvtype == BGE_NVTYPE_UNKNOWN)) {
		(void) snprintf(bgep->fw_version, sizeof (bgep->fw_version), "sb");
3579 return;
3580 }
3581
3582 mutex_enter(bgep->genlock);
3583
	if (bge_nvmem_read32(bgep, 0, &magic) != 0) {
		mutex_exit(bgep->genlock);
		return;
	}
3585
3586 if (magic == EEPROM_MAGIC) {
3587 bge_read_bc_ver(bgep);
3588 } else {
3589 /* ignore other configs for now */
3590 mutex_exit(bgep->genlock);
3591 return;
3592 }
3593
3594 if (bgep->ape_enabled) {
3595 if (bgep->asf_enabled) {
3596 bge_read_dash_ver(bgep);
3597 }
3598 } else if (bgep->asf_enabled) {
3599 bge_read_mgmtfw_ver(bgep);
3600 }
3601
3602 mutex_exit(bgep->genlock);
3603
3604 bgep->fw_version[BGE_FW_VER_SIZE - 1] = 0; /* safety */
3605 }
3606
3607 /*
3608 * attach(9E) -- Attach a device to the system
3609 *
3610 * Called once for each board successfully probed.
3611 */
3612 static int
3613 bge_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd)
3614 {
3615 bge_t *bgep; /* Our private data */
3616 mac_register_t *macp;
3617 chip_id_t *cidp;
3618 caddr_t regs;
3619 int instance;
3620 int err;
3621 int intr_types;
3622 int *props = NULL;
3623 uint_t numProps;
3624 uint32_t regval;
3625 uint32_t pci_state_reg;
3626 #ifdef BGE_IPMI_ASF
3627 uint32_t mhcrValue;
3628 #ifdef __sparc
3629 uint16_t value16;
3630 #endif
3631 #ifdef BGE_NETCONSOLE
3632 int retval;
3633 #endif
3634 #endif
3635
3636 instance = ddi_get_instance(devinfo);
3637
3638 BGE_GTRACE(("bge_attach($%p, %d) instance %d",
3639 (void *)devinfo, cmd, instance));
3640 BGE_BRKPT(NULL, "bge_attach");
3641
3642 switch (cmd) {
3643 default:
3644 return (DDI_FAILURE);
3645
3646 case DDI_RESUME:
3647 return (bge_resume(devinfo));
3648
3649 case DDI_ATTACH:
3650 break;
3651 }
3652
3653 bgep = kmem_zalloc(sizeof (*bgep), KM_SLEEP);
3654 bgep->pstats = kmem_zalloc(sizeof (bge_statistics_reg_t), KM_SLEEP);
3655 ddi_set_driver_private(devinfo, bgep);
3656 bgep->bge_guard = BGE_GUARD;
3657 bgep->devinfo = devinfo;
3658 bgep->param_drain_max = 64;
3659 bgep->param_msi_cnt = 0;
3660 bgep->param_loop_mode = 0;
3661
3662 /*
3663 * Initialize more fields in BGE private data
3664 */
3665 bgep->debug = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
3666 DDI_PROP_DONTPASS, debug_propname, bge_debug);
3667 (void) snprintf(bgep->ifname, sizeof (bgep->ifname), "%s%d",
3668 BGE_DRIVER_NAME, instance);
3669
3670 /*
3671 * Initialize for fma support
3672 */
3673 bgep->fm_capabilities = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
3674 DDI_PROP_DONTPASS, fm_cap,
3675 DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE |
3676 DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE);
3677 BGE_DEBUG(("bgep->fm_capabilities = %d", bgep->fm_capabilities));
3678 bge_fm_init(bgep);
3679
3680 /*
3681 * Look up the IOMMU's page size for DVMA mappings (must be
3682 * a power of 2) and convert to a mask. This can be used to
3683 * determine whether a message buffer crosses a page boundary.
3684 * Note: in 2s complement binary notation, if X is a power of
3685 * 2, then -X has the representation "11...1100...00".
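	 * For example, an 8KB (0x2000) page size yields the mask
	 * 0xFFFFFFFFFFFFE000 on a 64-bit kernel.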
3686 */
3687 bgep->pagemask = dvma_pagesize(devinfo);
3688 ASSERT(ddi_ffs(bgep->pagemask) == ddi_fls(bgep->pagemask));
3689 bgep->pagemask = -bgep->pagemask;
3690
3691 /*
3692 * Map config space registers
3693 * Read chip ID & set up config space command register(s)
3694 *
3695 * Note: this leaves the chip accessible by Memory Space
3696 * accesses, but with interrupts and Bus Mastering off.
3697 * This should ensure that nothing untoward will happen
3698 * if it has been left active by the (net-)bootloader.
3699 * We'll re-enable Bus Mastering once we've reset the chip,
3700 * and allow interrupts only when everything else is set up.
3701 */
3702 err = pci_config_setup(devinfo, &bgep->cfg_handle);
3703
3704 bgep->ape_enabled = B_FALSE;
3705 bgep->ape_regs = NULL;
3706
3707 cidp = &bgep->chipid;
3708 cidp->device = pci_config_get16(bgep->cfg_handle, PCI_CONF_DEVID);
3709 if (DEVICE_5717_SERIES_CHIPSETS(bgep) ||
3710 DEVICE_5725_SERIES_CHIPSETS(bgep)) {
3711 err = ddi_regs_map_setup(devinfo, BGE_PCI_APEREGS_RNUMBER,
		    &regs, 0, 0, &bge_reg_accattr, &bgep->ape_handle);
		if (err != DDI_SUCCESS) {
			/* io_handle has not been mapped yet; nothing to free */
			bge_problem(bgep, "ddi_regs_map_setup() failed");
			goto attach_fail;
		}
3718 bgep->ape_regs = regs;
3719 bgep->ape_enabled = B_TRUE;
3720
3721 /*
3722 * Allow reads and writes to the
3723 * APE register and memory space.
3724 */
3725
3726 pci_state_reg = pci_config_get32(bgep->cfg_handle,
3727 PCI_CONF_BGE_PCISTATE);
3728 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
3729 PCISTATE_ALLOW_APE_SHMEM_WR | PCISTATE_ALLOW_APE_PSPACE_WR;
3730 pci_config_put32(bgep->cfg_handle,
3731 PCI_CONF_BGE_PCISTATE, pci_state_reg);
3732 bge_ape_lock_init(bgep);
3733 }
3734
3735 #ifdef BGE_IPMI_ASF
3736 #ifdef __sparc
3737 /*
	 * We need to determine the type of chipset in order to access
	 * certain configuration registers. (This information will be
	 * used by bge_ind_put32, bge_ind_get32 and bge_nic_read32.)
3741 */
3742 value16 = pci_config_get16(bgep->cfg_handle, PCI_CONF_COMM);
3743 value16 = value16 | (PCI_COMM_MAE | PCI_COMM_ME);
3744 pci_config_put16(bgep->cfg_handle, PCI_CONF_COMM, value16);
3745 mhcrValue = MHCR_ENABLE_INDIRECT_ACCESS |
3746 MHCR_ENABLE_TAGGED_STATUS_MODE |
3747 MHCR_MASK_INTERRUPT_MODE |
3748 MHCR_MASK_PCI_INT_OUTPUT |
3749 MHCR_CLEAR_INTERRUPT_INTA |
3750 MHCR_ENABLE_ENDIAN_WORD_SWAP |
3751 MHCR_ENABLE_ENDIAN_BYTE_SWAP;
3752 /*
	 * For some chipsets (e.g., BCM5718), if MHCR_ENABLE_ENDIAN_BYTE_SWAP
	 * has already been set in PCI_CONF_BGE_MHCR, we need to write the
	 * byte-swapped value to it. So we just write zero first for
	 * simplicity.
3756 */
3757 if (DEVICE_5717_SERIES_CHIPSETS(bgep) ||
3758 DEVICE_5725_SERIES_CHIPSETS(bgep) ||
3759 DEVICE_57765_SERIES_CHIPSETS(bgep))
3760 pci_config_put32(bgep->cfg_handle, PCI_CONF_BGE_MHCR, 0);
3761 #else
3762 mhcrValue = MHCR_ENABLE_INDIRECT_ACCESS |
3763 MHCR_ENABLE_TAGGED_STATUS_MODE |
3764 MHCR_MASK_INTERRUPT_MODE |
3765 MHCR_MASK_PCI_INT_OUTPUT |
3766 MHCR_CLEAR_INTERRUPT_INTA;
3767 #endif
3768 pci_config_put32(bgep->cfg_handle, PCI_CONF_BGE_MHCR, mhcrValue);
3769 bge_ind_put32(bgep, MEMORY_ARBITER_MODE_REG,
3770 bge_ind_get32(bgep, MEMORY_ARBITER_MODE_REG) |
3771 MEMORY_ARBITER_ENABLE);
3772 if (mhcrValue & MHCR_ENABLE_ENDIAN_WORD_SWAP) {
3773 bgep->asf_wordswapped = B_TRUE;
3774 } else {
3775 bgep->asf_wordswapped = B_FALSE;
3776 }
3777 bge_asf_get_config(bgep);
3778 #endif
	if (err != DDI_SUCCESS) {
		bge_problem(bgep, "pci_config_setup() failed");
		goto attach_fail;
	}
	bgep->progress |= PROGRESS_CFG;
	bge_chip_cfg_init(bgep, cidp, B_FALSE);
	if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST);
		goto attach_fail;
	}

#ifdef BGE_IPMI_ASF
	if (DEVICE_5721_SERIES_CHIPSETS(bgep) ||
	    DEVICE_5714_SERIES_CHIPSETS(bgep)) {
		bgep->asf_newhandshake = B_TRUE;
	} else {
		bgep->asf_newhandshake = B_FALSE;
	}
#endif

	/*
	 * Update those parts of the chip ID derived from volatile
	 * registers with the values seen by OBP (in case the chip
	 * has been reset externally and therefore lost them).
	 */
	cidp->subven = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
	    DDI_PROP_DONTPASS, subven_propname, cidp->subven);
	cidp->subdev = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
	    DDI_PROP_DONTPASS, subdev_propname, cidp->subdev);
	cidp->clsize = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
	    DDI_PROP_DONTPASS, clsize_propname, cidp->clsize);
	cidp->latency = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
	    DDI_PROP_DONTPASS, latency_propname, cidp->latency);
	cidp->rx_rings = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
	    DDI_PROP_DONTPASS, rxrings_propname, cidp->rx_rings);
	cidp->tx_rings = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
	    DDI_PROP_DONTPASS, txrings_propname, cidp->tx_rings);
	cidp->eee = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
	    DDI_PROP_DONTPASS, eee_propname, cidp->eee);
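
	/*
	 * Clamp the configured default MTU to the supported range;
	 * values outside [BGE_DEFAULT_MTU, BGE_MAXIMUM_MTU] fall back
	 * to BGE_DEFAULT_MTU.
	 */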
	cidp->default_mtu = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
	    DDI_PROP_DONTPASS, default_mtu, BGE_DEFAULT_MTU);
	if ((cidp->default_mtu < BGE_DEFAULT_MTU) ||
	    (cidp->default_mtu > BGE_MAXIMUM_MTU)) {
		cidp->default_mtu = BGE_DEFAULT_MTU;
	}

	/*
	 * Map operating registers
	 */
	err = ddi_regs_map_setup(devinfo, BGE_PCI_OPREGS_RNUMBER,
	    &regs, 0, 0, &bge_reg_accattr, &bgep->io_handle);
	if (err != DDI_SUCCESS) {
		bge_problem(bgep, "ddi_regs_map_setup() failed");
		goto attach_fail;
	}
	bgep->io_regs = regs;

	bgep->progress |= PROGRESS_REGS;

	/*
	 * Characterise the device, so we know its requirements.
	 * Then allocate the appropriate TX and RX descriptors & buffers.
	 */
	if (bge_chip_id_init(bgep) == EIO) {
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST);
		goto attach_fail;
	}
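
	/*
	 * Extract our PCI bus/device/function numbers from the first
	 * entry of the "reg" property.
	 */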
	err = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, bgep->devinfo,
	    0, "reg", &props, &numProps);
	if ((err == DDI_PROP_SUCCESS) && (numProps > 0)) {
		bgep->pci_bus = PCI_REG_BUS_G(props[0]);
		bgep->pci_dev = PCI_REG_DEV_G(props[0]);
		bgep->pci_func = PCI_REG_FUNC_G(props[0]);
		ddi_prop_free(props);
	}
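
	/*
	 * On 5717/5725-family devices, read the function number from
	 * the CPMU status register rather than relying on the value
	 * derived from the "reg" property.
	 */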
	if (DEVICE_5717_SERIES_CHIPSETS(bgep) ||
	    DEVICE_5725_SERIES_CHIPSETS(bgep)) {
		regval = bge_reg_get32(bgep, CPMU_STATUS_REG);
		if ((bgep->chipid.device == DEVICE_ID_5719) ||
		    (bgep->chipid.device == DEVICE_ID_5720)) {
			bgep->pci_func =
			    ((regval & CPMU_STATUS_FUNC_NUM_5719) >>
			    CPMU_STATUS_FUNC_NUM_5719_SHIFT);
		} else {
			bgep->pci_func = ((regval & CPMU_STATUS_FUNC_NUM) >>
			    CPMU_STATUS_FUNC_NUM_SHIFT);
		}
	}

	err = bge_alloc_bufs(bgep);
	if (err != DDI_SUCCESS) {
		bge_problem(bgep, "DMA buffer allocation failed");
		goto attach_fail;
	}
	bgep->progress |= PROGRESS_BUFS;

	/*
	 * Add the softint handlers:
	 *
	 * Both of these handlers are used to avoid restrictions on the
	 * context and/or mutexes required for some operations. In
	 * particular, the hardware interrupt handler and its subfunctions
	 * can detect a number of conditions that we don't want to handle
	 * in that context or with that set of mutexes held. So, these
	 * softints are triggered instead:
	 *
	 * the <resched> softint is triggered if we have previously
	 * had to refuse to send a packet because of resource shortage
	 * (we've run out of transmit buffers), but the send completion
	 * interrupt handler has now detected that more buffers have
	 * become available.
	 *
	 * the <factotum> is triggered if the h/w interrupt handler
	 * sees the <link state changed> or <error> bits in the status
	 * block. It's also triggered periodically to poll the link
	 * state, just in case we aren't getting link status change
	 * interrupts ...
	 */
	err = ddi_add_softintr(devinfo, DDI_SOFTINT_LOW, &bgep->drain_id,
	    NULL, NULL, bge_send_drain, (caddr_t)bgep);
	if (err != DDI_SUCCESS) {
		bge_problem(bgep, "ddi_add_softintr() failed");
		goto attach_fail;
	}
	bgep->progress |= PROGRESS_RESCHED;
	err = ddi_add_softintr(devinfo, DDI_SOFTINT_LOW, &bgep->factotum_id,
	    NULL, NULL, bge_chip_factotum, (caddr_t)bgep);
	if (err != DDI_SUCCESS) {
		bge_problem(bgep, "ddi_add_softintr() failed");
		goto attach_fail;
	}
	bgep->progress |= PROGRESS_FACTOTUM;

	/* Get supported interrupt types */
	if (ddi_intr_get_supported_types(devinfo, &intr_types) != DDI_SUCCESS) {
		bge_error(bgep, "ddi_intr_get_supported_types failed\n");
		goto attach_fail;
	}

	BGE_DEBUG(("%s: ddi_intr_get_supported_types() returned: %x",
	    bgep->ifname, intr_types));

	if ((intr_types & DDI_INTR_TYPE_MSI) && bgep->chipid.msi_enabled) {
		if (bge_add_intrs(bgep, DDI_INTR_TYPE_MSI) != DDI_SUCCESS) {
			bge_error(bgep, "MSI registration failed, "
			    "trying FIXED interrupt type\n");
		} else {
			BGE_DEBUG(("%s: Using MSI interrupt type",
			    bgep->ifname));
			bgep->intr_type = DDI_INTR_TYPE_MSI;
			bgep->progress |= PROGRESS_HWINT;
		}
	}

	if (!(bgep->progress & PROGRESS_HWINT) &&
	    (intr_types & DDI_INTR_TYPE_FIXED)) {
		if (bge_add_intrs(bgep, DDI_INTR_TYPE_FIXED) != DDI_SUCCESS) {
			bge_error(bgep, "FIXED interrupt "
			    "registration failed\n");
			goto attach_fail;
		}

		BGE_DEBUG(("%s: Using FIXED interrupt type", bgep->ifname));

		bgep->intr_type = DDI_INTR_TYPE_FIXED;
		bgep->progress |= PROGRESS_HWINT;
	}

	if (!(bgep->progress & PROGRESS_HWINT)) {
		bge_error(bgep, "No interrupts registered\n");
		goto attach_fail;
	}

	/*
	 * Note that interrupts are not enabled yet, because the mutexes
	 * they depend on have not been initialized. Initialize them now.
	 */
	mutex_init(bgep->genlock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(bgep->intr_pri));
	mutex_init(bgep->softintrlock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(bgep->intr_pri));
	rw_init(bgep->errlock, NULL, RW_DRIVER,
	    DDI_INTR_PRI(bgep->intr_pri));

	/*
	 * Initialize rings.
	 */
	bge_init_rings(bgep);

	/*
	 * Now that mutex locks are initialized, enable interrupts.
	 */
	bge_intr_enable(bgep);
	bgep->progress |= PROGRESS_INTR;

	/*
	 * Initialise link state variables
	 * Stop, reset & reinitialise the chip.
	 * Initialise the (internal) PHY.
	 */
	bgep->link_state = LINK_STATE_UNKNOWN;

	mutex_enter(bgep->genlock);

	/*
	 * Reset chip & rings to initial state; also reset address
	 * filtering, promiscuity, loopback mode.
	 */
#ifdef BGE_IPMI_ASF
#ifdef BGE_NETCONSOLE
	if (bge_reset(bgep, ASF_MODE_INIT) != DDI_SUCCESS) {
#else
	if (bge_reset(bgep, ASF_MODE_SHUTDOWN) != DDI_SUCCESS) {
#endif
#else
	if (bge_reset(bgep) != DDI_SUCCESS) {
#endif
		(void) bge_check_acc_handle(bgep, bgep->cfg_handle);
		(void) bge_check_acc_handle(bgep, bgep->io_handle);
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST);
		mutex_exit(bgep->genlock);
		goto attach_fail;
	}

#ifdef BGE_IPMI_ASF
	if (bgep->asf_enabled) {
		bgep->asf_status = ASF_STAT_RUN_INIT;
	}
#endif
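
	/*
	 * Reset multicast filtering state: clear the hash table and
	 * reference counts, and disable promiscuous and loopback modes.
	 */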
	bzero(bgep->mcast_hash, sizeof (bgep->mcast_hash));
	bzero(bgep->mcast_refs, sizeof (bgep->mcast_refs));
	bgep->promisc = B_FALSE;
	bgep->param_loop_mode = BGE_LOOP_NONE;
	if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST);
		mutex_exit(bgep->genlock);
		goto attach_fail;
	}
	if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST);
		mutex_exit(bgep->genlock);
		goto attach_fail;
	}

	mutex_exit(bgep->genlock);

	if (bge_phys_init(bgep) == EIO) {
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST);
		goto attach_fail;
	}
	bgep->progress |= PROGRESS_PHY;

	/*
	 * Initialize the NDD-tweakable parameters
	 */
	if (bge_nd_init(bgep)) {
		bge_problem(bgep, "bge_nd_init() failed");
		goto attach_fail;
	}
	bgep->progress |= PROGRESS_NDD;

	/*
	 * Create & initialise named kstats
	 */
	bge_init_kstats(bgep, instance);
	bgep->progress |= PROGRESS_KSTATS;

	/*
	 * Determine whether to override the chip's own MAC address
	 */
	bge_find_mac_address(bgep, cidp);

	bge_read_fw_ver(bgep);
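
	/*
	 * The chip provides MAC_ADDRESS_REGS_MAX unicast-address slots;
	 * all of them are initially available.
	 */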
	bgep->unicst_addr_total = MAC_ADDRESS_REGS_MAX;
	bgep->unicst_addr_avail = MAC_ADDRESS_REGS_MAX;
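
	/*
	 * Describe this instance to the MAC layer: driver handle and
	 * callbacks, factory MAC address, SDU limits, VLAN margin,
	 * private properties, and level-1 virtualization capability.
	 */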
	if ((macp = mac_alloc(MAC_VERSION)) == NULL)
		goto attach_fail;
	macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
	macp->m_driver = bgep;
	macp->m_dip = devinfo;
	macp->m_src_addr = cidp->vendor_addr.addr;
	macp->m_callbacks = &bge_m_callbacks;
	macp->m_min_sdu = 0;
	macp->m_max_sdu = cidp->ethmax_size - sizeof (struct ether_header);
	macp->m_margin = VLAN_TAGSZ;
	macp->m_priv_props = bge_priv_prop;
	macp->m_v12n = MAC_VIRT_LEVEL1;

	/*
	 * Finally, we're ready to register ourselves with the MAC layer
	 * interface; if this succeeds, we're all ready to start()
	 */
	err = mac_register(macp, &bgep->mh);
	mac_free(macp);
	if (err != 0)
		goto attach_fail;

	mac_link_update(bgep->mh, LINK_STATE_UNKNOWN);

	/*
	 * Register a periodic handler; bge_chip_cyclic() is invoked
	 * in kernel context.
	 */
	bgep->periodic_id = ddi_periodic_add(bge_chip_cyclic, bgep,
	    BGE_CYCLIC_PERIOD, DDI_IPL_0);

	bgep->progress |= PROGRESS_READY;
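	/* Sanity-check that our soft state has not been overwritten */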
	ASSERT(bgep->bge_guard == BGE_GUARD);
#ifdef BGE_IPMI_ASF
#ifdef BGE_NETCONSOLE
	if (bgep->asf_enabled) {
		mutex_enter(bgep->genlock);
		retval = bge_chip_start(bgep, B_TRUE);
		mutex_exit(bgep->genlock);
		if (retval != DDI_SUCCESS)
			goto attach_fail;
	}
#endif
#endif

	ddi_report_dev(devinfo);

	return (DDI_SUCCESS);

attach_fail:
#ifdef BGE_IPMI_ASF
	bge_unattach(bgep, ASF_MODE_SHUTDOWN);
#else
	bge_unattach(bgep);
#endif
	return (DDI_FAILURE);
}

/*
 * bge_suspend() -- suspend transmit/receive for powerdown
 */
static int
bge_suspend(bge_t *bgep)
{
	/*
	 * Stop processing and idle (powerdown) the PHY ...
	 */
	mutex_enter(bgep->genlock);
#ifdef BGE_IPMI_ASF
	/*
	 * Power management is not currently supported by the BGE
	 * driver. If you implement it, add the ASF/IPMI-related
	 * code here.
	 */
#endif
	bge_stop(bgep);
	if (bge_phys_idle(bgep) != DDI_SUCCESS) {
		(void) bge_check_acc_handle(bgep, bgep->io_handle);
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (DDI_FAILURE);
	}
	if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (DDI_FAILURE);
	}
	mutex_exit(bgep->genlock);

	return (DDI_SUCCESS);
}

/*
 * quiesce(9E) entry point.
 *
 * This function is called when the system is single-threaded at high
 * PIL with preemption disabled; therefore it must not block.
 *
 * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
 * DDI_FAILURE indicates an error condition and should almost never happen.
 */
#ifdef __sparc
#define	bge_quiesce	ddi_quiesce_not_supported
#else
static int
bge_quiesce(dev_info_t *devinfo)
{
	bge_t *bgep = ddi_get_driver_private(devinfo);

	if (bgep == NULL)
		return (DDI_FAILURE);
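
	/*
	 * Mask further interrupt generation without taking any mutexes:
	 * for FIXED interrupts, mask the INTA output in the MHCR; for
	 * MSI, clear the MSI enable bit.
	 */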
	if (bgep->intr_type == DDI_INTR_TYPE_FIXED) {
		bge_reg_set32(bgep, PCI_CONF_BGE_MHCR,
		    MHCR_MASK_PCI_INT_OUTPUT);
	} else {
		bge_reg_clr32(bgep, MSI_MODE_REG, MSI_MSI_ENABLE);
	}

	/* Stop the chip */
	bge_chip_stop_nonblocking(bgep);

	return (DDI_SUCCESS);
}
#endif

/*
 * detach(9E) -- Detach a device from the system
 */
static int
bge_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd)
{
	bge_t *bgep;
#ifdef BGE_IPMI_ASF
	uint_t asf_mode;
	asf_mode = ASF_MODE_NONE;
#endif

	BGE_GTRACE(("bge_detach($%p, %d)", (void *)devinfo, cmd));

	bgep = ddi_get_driver_private(devinfo);

	switch (cmd) {
	default:
		return (DDI_FAILURE);

	case DDI_SUSPEND:
		return (bge_suspend(bgep));

	case DDI_DETACH:
		break;
	}

#ifdef BGE_IPMI_ASF
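	/*
	 * If ASF is running, stop its timer and perform the pre-reset
	 * handshake before the chip is shut down, and request a
	 * post-shutdown reset in bge_unattach().
	 */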
	mutex_enter(bgep->genlock);
	if (bgep->asf_enabled && ((bgep->asf_status == ASF_STAT_RUN) ||
	    (bgep->asf_status == ASF_STAT_RUN_INIT))) {
		bge_asf_update_status(bgep);
		if (bgep->asf_status == ASF_STAT_RUN) {
			bge_asf_stop_timer(bgep);
		}
		bgep->asf_status = ASF_STAT_STOP;

		bge_asf_pre_reset_operations(bgep, BGE_SHUTDOWN_RESET);

		if (bgep->asf_pseudostop) {
			bge_chip_stop(bgep, B_FALSE);
			bgep->bge_mac_state = BGE_MAC_STOPPED;
			bgep->asf_pseudostop = B_FALSE;
		}

		asf_mode = ASF_MODE_POST_SHUTDOWN;

		if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK)
			ddi_fm_service_impact(bgep->devinfo,
			    DDI_SERVICE_UNAFFECTED);
		if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK)
			ddi_fm_service_impact(bgep->devinfo,
			    DDI_SERVICE_UNAFFECTED);
	}
	mutex_exit(bgep->genlock);
#endif

	/*
	 * Unregister from the GLD subsystem. This can fail, in
	 * particular if there are DLPI style-2 streams still open -
	 * in which case we just return failure without shutting
	 * down chip operations.
	 */
	if (mac_unregister(bgep->mh) != 0)
		return (DDI_FAILURE);

	/*
	 * All activity stopped, so we can clean up & exit
	 */
#ifdef BGE_IPMI_ASF
	bge_unattach(bgep, asf_mode);
#else
	bge_unattach(bgep);
#endif
	return (DDI_SUCCESS);
}


/*
 * ========== Module Loading Data & Entry Points ==========
 */

#undef	BGE_DBG
#define	BGE_DBG		BGE_DBG_INIT	/* debug flag for this code */

DDI_DEFINE_STREAM_OPS(bge_dev_ops,
	nulldev,	/* identify */
	nulldev,	/* probe */
	bge_attach,	/* attach */
	bge_detach,	/* detach */
	nodev,		/* reset */
	NULL,		/* getinfo */
	D_MP,		/* cb_flag */
	NULL,		/* streamtab */
	bge_quiesce	/* quiesce */
);

static struct modldrv bge_modldrv = {
	&mod_driverops,	/* Type of module.  This one is a driver */
	bge_ident,	/* short description */
	&bge_dev_ops	/* driver specific ops */
};

static struct modlinkage modlinkage = {
	MODREV_1, (void *)&bge_modldrv, NULL
};


int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

int
_init(void)
{
	int status;

	mac_init_ops(&bge_dev_ops, "bge");
	status = mod_install(&modlinkage);
	if (status == DDI_SUCCESS)
		mutex_init(bge_log_mutex, NULL, MUTEX_DRIVER, NULL);
	else
		mac_fini_ops(&bge_dev_ops);
	return (status);
}

int
_fini(void)
{
	int status;

	status = mod_remove(&modlinkage);
	if (status == DDI_SUCCESS) {
		mac_fini_ops(&bge_dev_ops);
		mutex_destroy(bge_log_mutex);
	}
	return (status);
}


/*
 * bge_add_intrs:
 *
 * Register FIXED or MSI interrupts.
 */
static int
bge_add_intrs(bge_t *bgep, int intr_type)
{
	dev_info_t *dip = bgep->devinfo;
	int avail, actual, intr_size, count = 0;
	int i, flag, ret;

	BGE_DEBUG(("bge_add_intrs($%p, 0x%x)", (void *)bgep, intr_type));

	/* Get number of interrupts */
	ret = ddi_intr_get_nintrs(dip, intr_type, &count);
	if ((ret != DDI_SUCCESS) || (count == 0)) {
		bge_error(bgep, "ddi_intr_get_nintrs() failure, ret: %d, "
		    "count: %d", ret, count);
		return (DDI_FAILURE);
	}

	/* Get number of available interrupts */
	ret = ddi_intr_get_navail(dip, intr_type, &avail);
	if ((ret != DDI_SUCCESS) || (avail == 0)) {
		bge_error(bgep, "ddi_intr_get_navail() failure, "
		    "ret: %d, avail: %d\n", ret, avail);
		return (DDI_FAILURE);
	}

	if (avail < count) {
		BGE_DEBUG(("%s: nintrs() returned %d, navail returned %d",
		    bgep->ifname, count, avail));
	}

	/*
	 * BGE hardware generates only a single MSI even though it claims
	 * to support multiple MSIs, so hard-code the MSI count to 1 and
	 * require exactly that many (DDI_INTR_ALLOC_STRICT).
	 */
	if (intr_type == DDI_INTR_TYPE_MSI) {
		count = 1;
		flag = DDI_INTR_ALLOC_STRICT;
	} else {
		flag = DDI_INTR_ALLOC_NORMAL;
	}

	/* Allocate an array of interrupt handles */
	intr_size = count * sizeof (ddi_intr_handle_t);
	bgep->htable = kmem_alloc(intr_size, KM_SLEEP);

	/* Call ddi_intr_alloc() */
	ret = ddi_intr_alloc(dip, bgep->htable, intr_type, 0,
	    count, &actual, flag);
	if ((ret != DDI_SUCCESS) || (actual == 0)) {
		bge_error(bgep, "ddi_intr_alloc() failed %d\n", ret);
		kmem_free(bgep->htable, intr_size);
		return (DDI_FAILURE);
	}

	if (actual < count) {
		BGE_DEBUG(("%s: Requested: %d, Received: %d",
		    bgep->ifname, count, actual));
	}

	bgep->intr_cnt = actual;

	/*
	 * Get the priority of the first interrupt handle and assume
	 * all the others share it.
	 */
	if ((ret = ddi_intr_get_pri(bgep->htable[0], &bgep->intr_pri)) !=
	    DDI_SUCCESS) {
		bge_error(bgep, "ddi_intr_get_pri() failed %d\n", ret);

		/* Free already allocated intr */
		for (i = 0; i < actual; i++) {
			(void) ddi_intr_free(bgep->htable[i]);
		}

		kmem_free(bgep->htable, intr_size);
		return (DDI_FAILURE);
	}

	/* Call ddi_intr_add_handler() */
	for (i = 0; i < actual; i++) {
		if ((ret = ddi_intr_add_handler(bgep->htable[i], bge_intr,
		    (caddr_t)bgep, (caddr_t)(uintptr_t)i)) != DDI_SUCCESS) {
			bge_error(bgep, "ddi_intr_add_handler() "
			    "failed %d\n", ret);

			/*
			 * Remove the handlers registered so far before
			 * freeing the interrupt handles; a handle cannot
			 * be freed while a handler is still attached.
			 */
			while (--i >= 0)
				(void) ddi_intr_remove_handler(
				    bgep->htable[i]);
			for (i = 0; i < actual; i++)
				(void) ddi_intr_free(bgep->htable[i]);

			kmem_free(bgep->htable, intr_size);
			return (DDI_FAILURE);
		}
	}

	if ((ret = ddi_intr_get_cap(bgep->htable[0], &bgep->intr_cap))
	    != DDI_SUCCESS) {
		bge_error(bgep, "ddi_intr_get_cap() failed %d\n", ret);

		for (i = 0; i < actual; i++) {
			(void) ddi_intr_remove_handler(bgep->htable[i]);
			(void) ddi_intr_free(bgep->htable[i]);
		}

		kmem_free(bgep->htable, intr_size);
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

/*
 * bge_rem_intrs:
 *
 * Unregister FIXED or MSI interrupts
 */
static void
bge_rem_intrs(bge_t *bgep)
{
	int i;

	BGE_DEBUG(("bge_rem_intrs($%p)", (void *)bgep));

	/* Call ddi_intr_remove_handler() */
	for (i = 0; i < bgep->intr_cnt; i++) {
		(void) ddi_intr_remove_handler(bgep->htable[i]);
		(void) ddi_intr_free(bgep->htable[i]);
	}

	kmem_free(bgep->htable, bgep->intr_cnt * sizeof (ddi_intr_handle_t));
}


void
bge_intr_enable(bge_t *bgep)
{
	int i;

	if (bgep->intr_cap & DDI_INTR_FLAG_BLOCK) {
		/* Call ddi_intr_block_enable() for MSI interrupts */
		(void) ddi_intr_block_enable(bgep->htable, bgep->intr_cnt);
	} else {
		/* Call ddi_intr_enable for MSI or FIXED interrupts */
		for (i = 0; i < bgep->intr_cnt; i++) {
			(void) ddi_intr_enable(bgep->htable[i]);
		}
	}
}


void
bge_intr_disable(bge_t *bgep)
{
	int i;

	if (bgep->intr_cap & DDI_INTR_FLAG_BLOCK) {
		/* Call ddi_intr_block_disable() */
		(void) ddi_intr_block_disable(bgep->htable, bgep->intr_cnt);
	} else {
		for (i = 0; i < bgep->intr_cnt; i++) {
			(void) ddi_intr_disable(bgep->htable[i]);
		}
	}
}
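
/*
 * bge_reprogram() -- update the PHY and re-sync the chip's software
 * copies of its register settings after a parameter change; the
 * caller must already hold genlock.
 */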
int
bge_reprogram(bge_t *bgep)
{
	int status = 0;

	ASSERT(mutex_owned(bgep->genlock));

	if (bge_phys_update(bgep) != DDI_SUCCESS) {
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		status = IOC_INVAL;
	}
#ifdef BGE_IPMI_ASF
	if (bge_chip_sync(bgep, B_TRUE) == DDI_FAILURE) {
#else
	if (bge_chip_sync(bgep) == DDI_FAILURE) {
#endif
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		status = IOC_INVAL;
	}
	if (bgep->intr_type == DDI_INTR_TYPE_MSI)
		bge_chip_msi_trig(bgep);
	return (status);
}