/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

#include "rge.h"

/*
 * This is the string displayed by modinfo, etc.
 * Make sure you keep the version ID up to date!
 */
static char rge_ident[] = "Realtek 1Gb Ethernet";

/*
 * Used for buffers allocated by ddi_dma_mem_alloc()
 */
static ddi_dma_attr_t dma_attr_buf = {
    DMA_ATTR_V0,            /* dma_attr version */
    (uint32_t)0,            /* dma_attr_addr_lo */
    (uint32_t)0xFFFFFFFF,   /* dma_attr_addr_hi */
    (uint32_t)0xFFFFFFFF,   /* dma_attr_count_max */
    (uint32_t)16,           /* dma_attr_align */
    0xFFFFFFFF,             /* dma_attr_burstsizes */
    1,                      /* dma_attr_minxfer */
    (uint32_t)0xFFFFFFFF,   /* dma_attr_maxxfer */
    (uint32_t)0xFFFFFFFF,   /* dma_attr_seg */
    1,                      /* dma_attr_sgllen */
    1,                      /* dma_attr_granular */
    0,                      /* dma_attr_flags */
};
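
/*
 * Note: with dma_attr_sgllen fixed at 1 in both attribute structures,
 * every DMA bind must resolve to a single cookie; rge_alloc_dma_mem()
 * below depends on this when it rejects bindings with ncookies != 1.
 */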

/*
 * Used for BDs allocated by ddi_dma_mem_alloc()
 */
static ddi_dma_attr_t dma_attr_desc = {
    DMA_ATTR_V0,            /* dma_attr version */
    (uint32_t)0,            /* dma_attr_addr_lo */
    (uint32_t)0xFFFFFFFF,   /* dma_attr_addr_hi */
    (uint32_t)0xFFFFFFFF,   /* dma_attr_count_max */
    (uint32_t)256,          /* dma_attr_align */
    0xFFFFFFFF,             /* dma_attr_burstsizes */
    1,                      /* dma_attr_minxfer */
    (uint32_t)0xFFFFFFFF,   /* dma_attr_maxxfer */
    (uint32_t)0xFFFFFFFF,   /* dma_attr_seg */
    1,                      /* dma_attr_sgllen */
    1,                      /* dma_attr_granular */
    0,                      /* dma_attr_flags */
};

/*
 * PIO access attributes for registers
 */
static ddi_device_acc_attr_t rge_reg_accattr = {
    DDI_DEVICE_ATTR_V0,
    DDI_STRUCTURE_LE_ACC,
    DDI_STRICTORDER_ACC,
    DDI_DEFAULT_ACC
};

/*
 * DMA access attributes for descriptors
 */
static ddi_device_acc_attr_t rge_desc_accattr = {
    DDI_DEVICE_ATTR_V0,
    DDI_NEVERSWAP_ACC,
    DDI_STRICTORDER_ACC,
    DDI_DEFAULT_ACC
};

/*
 * DMA access attributes for data
 */
static ddi_device_acc_attr_t rge_buf_accattr = {
    DDI_DEVICE_ATTR_V0,
    DDI_NEVERSWAP_ACC,
    DDI_STRICTORDER_ACC,
    DDI_DEFAULT_ACC
};

/*
 * Property names
 */
static char debug_propname[] = "rge_debug_flags";
static char mtu_propname[] = "default_mtu";
static char msi_propname[] = "msi_enable";

static int rge_m_start(void *);
static void rge_m_stop(void *);
static int rge_m_promisc(void *, boolean_t);
static int rge_m_multicst(void *, boolean_t, const uint8_t *);
static int rge_m_unicst(void *, const uint8_t *);
static void rge_m_ioctl(void *, queue_t *, mblk_t *);
static boolean_t rge_m_getcapab(void *, mac_capab_t, void *);

#define RGE_M_CALLBACK_FLAGS (MC_IOCTL | MC_GETCAPAB)

static mac_callbacks_t rge_m_callbacks = {
    RGE_M_CALLBACK_FLAGS,
    rge_m_stat,
    rge_m_start,
    rge_m_stop,
    rge_m_promisc,
    rge_m_multicst,
    rge_m_unicst,
    rge_m_tx,
    NULL,
    rge_m_ioctl,
    rge_m_getcapab
};

/*
 * Allocate an area of memory and a DMA handle for accessing it
 */
static int
rge_alloc_dma_mem(rge_t *rgep, size_t memsize, ddi_dma_attr_t *dma_attr_p,
    ddi_device_acc_attr_t *acc_attr_p, uint_t dma_flags, dma_area_t *dma_p)
{
    caddr_t vaddr;
    int err;

    /*
     * Allocate handle
     */
    err = ddi_dma_alloc_handle(rgep->devinfo, dma_attr_p,
        DDI_DMA_SLEEP, NULL, &dma_p->dma_hdl);
    if (err != DDI_SUCCESS) {
        dma_p->dma_hdl = NULL;
        return (DDI_FAILURE);
    }

    /*
     * Allocate memory
     */
    err = ddi_dma_mem_alloc(dma_p->dma_hdl, memsize, acc_attr_p,
        dma_flags & (DDI_DMA_CONSISTENT | DDI_DMA_STREAMING),
        DDI_DMA_SLEEP, NULL, &vaddr, &dma_p->alength, &dma_p->acc_hdl);
    if (err != DDI_SUCCESS) {
        ddi_dma_free_handle(&dma_p->dma_hdl);
        dma_p->dma_hdl = NULL;
        dma_p->acc_hdl = NULL;
        return (DDI_FAILURE);
    }

    /*
     * Bind the two together
     */
    dma_p->mem_va = vaddr;
    err = ddi_dma_addr_bind_handle(dma_p->dma_hdl, NULL,
        vaddr, dma_p->alength, dma_flags, DDI_DMA_SLEEP, NULL,
        &dma_p->cookie, &dma_p->ncookies);
    if (err != DDI_DMA_MAPPED || dma_p->ncookies != 1) {
        ddi_dma_mem_free(&dma_p->acc_hdl);
        ddi_dma_free_handle(&dma_p->dma_hdl);
        dma_p->acc_hdl = NULL;
        dma_p->dma_hdl = NULL;
        return (DDI_FAILURE);
    }

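    /*
     * The area is handed back as one unstructured chunk: nslots, size
     * and token are set to sentinel values here and only get real
     * values when the chunk is carved up by rge_slice_chunk() below.
     */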
    dma_p->nslots = ~0U;
    dma_p->size = ~0U;
    dma_p->token = ~0U;
    dma_p->offset = 0;
    return (DDI_SUCCESS);
}

/*
 * Free one allocated area of DMAable memory
 */
static void
rge_free_dma_mem(dma_area_t *dma_p)
{
    if (dma_p->dma_hdl != NULL) {
        if (dma_p->ncookies) {
            (void) ddi_dma_unbind_handle(dma_p->dma_hdl);
            dma_p->ncookies = 0;
        }
        ddi_dma_free_handle(&dma_p->dma_hdl);
        dma_p->dma_hdl = NULL;
    }

    if (dma_p->acc_hdl != NULL) {
        ddi_dma_mem_free(&dma_p->acc_hdl);
        dma_p->acc_hdl = NULL;
    }
}

/*
 * Utility routine to carve a slice off a chunk of allocated memory,
 * updating the chunk descriptor accordingly. The size of the slice
 * is given by the product of the <qty> and <size> parameters.
 */
static void
rge_slice_chunk(dma_area_t *slice, dma_area_t *chunk,
    uint32_t qty, uint32_t size)
{
    static uint32_t sequence = 0xbcd5704a;
    size_t totsize;

    totsize = qty*size;
    ASSERT(totsize <= chunk->alength);

    *slice = *chunk;
    slice->nslots = qty;
    slice->size = size;
    slice->alength = totsize;
    slice->token = ++sequence;

    chunk->mem_va = (caddr_t)chunk->mem_va + totsize;
    chunk->alength -= totsize;
    chunk->offset += totsize;
    chunk->cookie.dmac_laddress += totsize;
    chunk->cookie.dmac_size -= totsize;
}
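
/*
 * Illustrative use (see rge_init_send_ring() below): carving one
 * buffer descriptor per Tx slot off the descriptor-ring chunk,
 *
 *    rge_slice_chunk(&ssbdp->desc, &desc, 1, sizeof (rge_bd_t));
 *
 * leaves <desc> advanced past the slice; after RGE_SEND_SLOTS calls
 * its remaining alength is expected to be zero.
 */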

static int
rge_alloc_bufs(rge_t *rgep)
{
    size_t txdescsize;
    size_t rxdescsize;
    int err;

    /*
     * Allocate memory & handle for packet statistics
     */
    err = rge_alloc_dma_mem(rgep,
        RGE_STATS_DUMP_SIZE,
        &dma_attr_desc,
        &rge_desc_accattr,
        DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
        &rgep->dma_area_stats);
    if (err != DDI_SUCCESS)
        return (DDI_FAILURE);
    rgep->hw_stats = DMA_VPTR(rgep->dma_area_stats);

    /*
     * Allocate memory & handle for Tx descriptor ring
     */
    txdescsize = RGE_SEND_SLOTS * sizeof (rge_bd_t);
    err = rge_alloc_dma_mem(rgep,
        txdescsize,
        &dma_attr_desc,
        &rge_desc_accattr,
        DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
        &rgep->dma_area_txdesc);
    if (err != DDI_SUCCESS)
        return (DDI_FAILURE);

    /*
     * Allocate memory & handle for Rx descriptor ring
     */
    rxdescsize = RGE_RECV_SLOTS * sizeof (rge_bd_t);
    err = rge_alloc_dma_mem(rgep,
        rxdescsize,
        &dma_attr_desc,
        &rge_desc_accattr,
        DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
        &rgep->dma_area_rxdesc);
    if (err != DDI_SUCCESS)
        return (DDI_FAILURE);

    return (DDI_SUCCESS);
}

/*
 * rge_free_bufs() -- free descriptors/buffers allocated for this
 * device instance.
 */
static void
rge_free_bufs(rge_t *rgep)
{
    rge_free_dma_mem(&rgep->dma_area_stats);
    rge_free_dma_mem(&rgep->dma_area_txdesc);
    rge_free_dma_mem(&rgep->dma_area_rxdesc);
}

/*
 * ========== Transmit and receive ring reinitialisation ==========
 */

/*
 * These <reinit> routines each reset the rx/tx rings to an initial
 * state, assuming that the corresponding <init> routine has already
 * been called exactly once.
 */
static void
rge_reinit_send_ring(rge_t *rgep)
{
    sw_sbd_t *ssbdp;
    rge_bd_t *bdp;
    uint32_t slot;

    /*
     * re-init send ring
     */
    DMA_ZERO(rgep->tx_desc);
    ssbdp = rgep->sw_sbds;
    bdp = rgep->tx_ring;
    for (slot = 0; slot < RGE_SEND_SLOTS; slot++) {
        bdp->host_buf_addr =
            RGE_BSWAP_32(ssbdp->pbuf.cookie.dmac_laddress);
        bdp->host_buf_addr_hi =
            RGE_BSWAP_32(ssbdp->pbuf.cookie.dmac_laddress >> 32);
        /* last BD in Tx ring */
        if (slot == (RGE_SEND_SLOTS - 1))
            bdp->flags_len = RGE_BSWAP_32(BD_FLAG_EOR);
        ssbdp++;
        bdp++;
    }
    DMA_SYNC(rgep->tx_desc, DDI_DMA_SYNC_FORDEV);
    rgep->tx_next = 0;
    rgep->tc_next = 0;
    rgep->tc_tail = 0;
    rgep->tx_flow = 0;
    rgep->tx_free = RGE_SEND_SLOTS;
}

static void
rge_reinit_recv_ring(rge_t *rgep)
{
    rge_bd_t *bdp;
    sw_rbd_t *srbdp;
    dma_area_t *pbuf;
    uint32_t slot;

    /*
     * re-init receive ring
     */
    DMA_ZERO(rgep->rx_desc);
    srbdp = rgep->sw_rbds;
    bdp = rgep->rx_ring;
    for (slot = 0; slot < RGE_RECV_SLOTS; slot++) {
        pbuf = &srbdp->rx_buf->pbuf;
        bdp->host_buf_addr =
            RGE_BSWAP_32(pbuf->cookie.dmac_laddress + rgep->head_room);
        bdp->host_buf_addr_hi =
            RGE_BSWAP_32(pbuf->cookie.dmac_laddress >> 32);
        bdp->flags_len = RGE_BSWAP_32(BD_FLAG_HW_OWN |
            (rgep->rxbuf_size - rgep->head_room));
        /* last BD in Rx ring */
        if (slot == (RGE_RECV_SLOTS - 1))
            bdp->flags_len |= RGE_BSWAP_32(BD_FLAG_EOR);
        srbdp++;
        bdp++;
    }
    DMA_SYNC(rgep->rx_desc, DDI_DMA_SYNC_FORDEV);
    rgep->watchdog = 0;
    rgep->rx_next = 0;
}

static void
rge_reinit_buf_ring(rge_t *rgep)
{

    if (rgep->chip_flags & CHIP_FLAG_FORCE_BCOPY)
        return;

    /*
     * If any buffers sent upstream haven't yet been returned to the
     * driver, fall back to bcopy() in the receive path.
     */
    if (rgep->rx_free != RGE_BUF_SLOTS)
        rgep->rx_bcopy = B_TRUE;
}

static void
rge_reinit_rings(rge_t *rgep)
{
    rge_reinit_send_ring(rgep);
    rge_reinit_recv_ring(rgep);
    rge_reinit_buf_ring(rgep);
}

static void
rge_fini_send_ring(rge_t *rgep)
{
    sw_sbd_t *ssbdp;
    uint32_t slot;

    ssbdp = rgep->sw_sbds;
    for (slot = 0; slot < RGE_SEND_SLOTS; ++slot) {
        rge_free_dma_mem(&ssbdp->pbuf);
        ssbdp++;
    }

    kmem_free(rgep->sw_sbds, RGE_SEND_SLOTS * sizeof (sw_sbd_t));
    rgep->sw_sbds = NULL;
}

static void
rge_fini_recv_ring(rge_t *rgep)
{
    sw_rbd_t *srbdp;
    uint32_t slot;

    srbdp = rgep->sw_rbds;
    for (slot = 0; slot < RGE_RECV_SLOTS; ++srbdp, ++slot) {
        if (srbdp->rx_buf) {
            if (srbdp->rx_buf->mp != NULL) {
                freemsg(srbdp->rx_buf->mp);
                srbdp->rx_buf->mp = NULL;
            }
            rge_free_dma_mem(&srbdp->rx_buf->pbuf);
            kmem_free(srbdp->rx_buf, sizeof (dma_buf_t));
            srbdp->rx_buf = NULL;
        }
    }

    kmem_free(rgep->sw_rbds, RGE_RECV_SLOTS * sizeof (sw_rbd_t));
    rgep->sw_rbds = NULL;
}

static void
rge_fini_buf_ring(rge_t *rgep)
{
    sw_rbd_t *srbdp;
    uint32_t slot;

    if (rgep->chip_flags & CHIP_FLAG_FORCE_BCOPY)
        return;

    ASSERT(rgep->rx_free == RGE_BUF_SLOTS);

    srbdp = rgep->free_srbds;
    for (slot = 0; slot < RGE_BUF_SLOTS; ++srbdp, ++slot) {
        if (srbdp->rx_buf != NULL) {
            if (srbdp->rx_buf->mp != NULL) {
                freemsg(srbdp->rx_buf->mp);
                srbdp->rx_buf->mp = NULL;
            }
            rge_free_dma_mem(&srbdp->rx_buf->pbuf);
            kmem_free(srbdp->rx_buf, sizeof (dma_buf_t));
            srbdp->rx_buf = NULL;
        }
    }

    kmem_free(rgep->free_srbds, RGE_BUF_SLOTS * sizeof (sw_rbd_t));
    rgep->free_srbds = NULL;
}

static void
rge_fini_rings(rge_t *rgep)
{
    rge_fini_send_ring(rgep);
    rge_fini_recv_ring(rgep);
    rge_fini_buf_ring(rgep);
}

static int
rge_init_send_ring(rge_t *rgep)
{
    uint32_t slot;
    sw_sbd_t *ssbdp;
    dma_area_t *pbuf;
    dma_area_t desc;
    int err;

    /*
     * Allocate the array of s/w Tx Buffer Descriptors
     */
    ssbdp = kmem_zalloc(RGE_SEND_SLOTS*sizeof (*ssbdp), KM_SLEEP);
    rgep->sw_sbds = ssbdp;

    /*
     * Init send ring
     */
    rgep->tx_desc = rgep->dma_area_txdesc;
    DMA_ZERO(rgep->tx_desc);
    rgep->tx_ring = rgep->tx_desc.mem_va;

    desc = rgep->tx_desc;
    for (slot = 0; slot < RGE_SEND_SLOTS; slot++) {
        rge_slice_chunk(&ssbdp->desc, &desc, 1, sizeof (rge_bd_t));

        /*
         * Allocate memory & handle for Tx buffers
         */
        pbuf = &ssbdp->pbuf;
        err = rge_alloc_dma_mem(rgep, rgep->txbuf_size,
            &dma_attr_buf, &rge_buf_accattr,
            DDI_DMA_WRITE | DDI_DMA_STREAMING, pbuf);
        if (err != DDI_SUCCESS) {
            rge_error(rgep,
                "rge_init_send_ring: alloc tx buffer failed");
            rge_fini_send_ring(rgep);
            return (DDI_FAILURE);
        }
        ssbdp++;
    }
    ASSERT(desc.alength == 0);

    DMA_SYNC(rgep->tx_desc, DDI_DMA_SYNC_FORDEV);
    return (DDI_SUCCESS);
}

static int
rge_init_recv_ring(rge_t *rgep)
{
    uint32_t slot;
    sw_rbd_t *srbdp;
    dma_buf_t *rx_buf;
    dma_area_t *pbuf;
    int err;

    /*
     * Allocate the array of s/w Rx Buffer Descriptors
     */
    srbdp = kmem_zalloc(RGE_RECV_SLOTS*sizeof (*srbdp), KM_SLEEP);
    rgep->sw_rbds = srbdp;

    /*
     * Init receive ring
     */
    rgep->rx_next = 0;
    rgep->rx_desc = rgep->dma_area_rxdesc;
    DMA_ZERO(rgep->rx_desc);
    rgep->rx_ring = rgep->rx_desc.mem_va;

    for (slot = 0; slot < RGE_RECV_SLOTS; slot++) {
        srbdp->rx_buf = rx_buf =
            kmem_zalloc(sizeof (dma_buf_t), KM_SLEEP);

        /*
         * Allocate memory & handle for Rx buffers
         */
        pbuf = &rx_buf->pbuf;
        err = rge_alloc_dma_mem(rgep, rgep->rxbuf_size,
            &dma_attr_buf, &rge_buf_accattr,
            DDI_DMA_READ | DDI_DMA_STREAMING, pbuf);
        if (err != DDI_SUCCESS) {
            rge_fini_recv_ring(rgep);
            rge_error(rgep,
                "rge_init_recv_ring: alloc rx buffer failed");
            return (DDI_FAILURE);
        }

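        /*
         * Reserve head_room bytes at the front of the buffer; the
         * chip is given the address just past this gap (see the BD
         * setup in rge_reinit_recv_ring()).
         */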
        pbuf->alength -= rgep->head_room;
        pbuf->offset += rgep->head_room;
        if (!(rgep->chip_flags & CHIP_FLAG_FORCE_BCOPY)) {
            rx_buf->rx_recycle.free_func = rge_rx_recycle;
            rx_buf->rx_recycle.free_arg = (caddr_t)rx_buf;
            rx_buf->private = (caddr_t)rgep;
            rx_buf->mp = desballoc(DMA_VPTR(rx_buf->pbuf),
                rgep->rxbuf_size, 0, &rx_buf->rx_recycle);
            if (rx_buf->mp == NULL) {
                rge_fini_recv_ring(rgep);
                rge_problem(rgep,
                    "rge_init_recv_ring: desballoc() failed");
                return (DDI_FAILURE);
            }
        }
        srbdp++;
    }
    DMA_SYNC(rgep->rx_desc, DDI_DMA_SYNC_FORDEV);
    return (DDI_SUCCESS);
}

static int
rge_init_buf_ring(rge_t *rgep)
{
    uint32_t slot;
    sw_rbd_t *free_srbdp;
    dma_buf_t *rx_buf;
    dma_area_t *pbuf;
    int err;

    if (rgep->chip_flags & CHIP_FLAG_FORCE_BCOPY) {
        rgep->rx_bcopy = B_TRUE;
        return (DDI_SUCCESS);
    }

    /*
     * Allocate the array of s/w free Buffer Descriptors
     */
    free_srbdp = kmem_zalloc(RGE_BUF_SLOTS*sizeof (*free_srbdp), KM_SLEEP);
    rgep->free_srbds = free_srbdp;

    /*
     * Init free buffer ring
     */
    rgep->rc_next = 0;
    rgep->rf_next = 0;
    rgep->rx_bcopy = B_FALSE;
    rgep->rx_free = RGE_BUF_SLOTS;
    for (slot = 0; slot < RGE_BUF_SLOTS; slot++) {
        free_srbdp->rx_buf = rx_buf =
            kmem_zalloc(sizeof (dma_buf_t), KM_SLEEP);

        /*
         * Allocate memory & handle for free Rx buffers
         */
        pbuf = &rx_buf->pbuf;
        err = rge_alloc_dma_mem(rgep, rgep->rxbuf_size,
            &dma_attr_buf, &rge_buf_accattr,
            DDI_DMA_READ | DDI_DMA_STREAMING, pbuf);
        if (err != DDI_SUCCESS) {
            rge_fini_buf_ring(rgep);
            rge_error(rgep,
                "rge_init_buf_ring: alloc rx free buffer failed");
            return (DDI_FAILURE);
        }
        pbuf->alength -= rgep->head_room;
        pbuf->offset += rgep->head_room;
        rx_buf->rx_recycle.free_func = rge_rx_recycle;
        rx_buf->rx_recycle.free_arg = (caddr_t)rx_buf;
        rx_buf->private = (caddr_t)rgep;
        rx_buf->mp = desballoc(DMA_VPTR(rx_buf->pbuf),
            rgep->rxbuf_size, 0, &rx_buf->rx_recycle);
        if (rx_buf->mp == NULL) {
            rge_fini_buf_ring(rgep);
            rge_problem(rgep,
                "rge_init_buf_ring: desballoc() failed");
            return (DDI_FAILURE);
        }
        free_srbdp++;
    }
    return (DDI_SUCCESS);
}

static int
rge_init_rings(rge_t *rgep)
{
    int err;

    err = rge_init_send_ring(rgep);
    if (err != DDI_SUCCESS)
        return (DDI_FAILURE);

    err = rge_init_recv_ring(rgep);
    if (err != DDI_SUCCESS) {
        rge_fini_send_ring(rgep);
        return (DDI_FAILURE);
    }

    err = rge_init_buf_ring(rgep);
    if (err != DDI_SUCCESS) {
        rge_fini_send_ring(rgep);
        rge_fini_recv_ring(rgep);
        return (DDI_FAILURE);
    }

    return (DDI_SUCCESS);
}

/*
 * ========== Internal state management entry points ==========
 */

#undef RGE_DBG
#define RGE_DBG RGE_DBG_NEMO /* debug flag for this code */

/*
 * These routines provide all the functionality required by the
 * corresponding MAC layer entry points, but don't update the
 * MAC state so they can be called internally without disturbing
 * our record of what NEMO thinks we should be doing ...
 */

/*
 * rge_reset() -- reset h/w & rings to initial state
 */
static void
rge_reset(rge_t *rgep)
{
    ASSERT(mutex_owned(rgep->genlock));

    /*
     * Grab all the other mutexes in the world (this should
     * ensure no other threads are manipulating driver state)
     */
    mutex_enter(rgep->rx_lock);
    mutex_enter(rgep->rc_lock);
    rw_enter(rgep->errlock, RW_WRITER);

    (void) rge_chip_reset(rgep);
    rge_reinit_rings(rgep);
    rge_chip_init(rgep);

    /*
     * Free the world ...
     */
    rw_exit(rgep->errlock);
    mutex_exit(rgep->rc_lock);
    mutex_exit(rgep->rx_lock);

    rgep->stats.rpackets = 0;
    rgep->stats.rbytes = 0;
    rgep->stats.opackets = 0;
    rgep->stats.obytes = 0;
    rgep->stats.tx_pre_ismax = B_FALSE;
    rgep->stats.tx_cur_ismax = B_FALSE;

    RGE_DEBUG(("rge_reset($%p) done", (void *)rgep));
}

/*
 * rge_stop() -- stop processing, don't reset h/w or rings
 */
static void
rge_stop(rge_t *rgep)
{
    ASSERT(mutex_owned(rgep->genlock));

    rge_chip_stop(rgep, B_FALSE);

    RGE_DEBUG(("rge_stop($%p) done", (void *)rgep));
}

/*
 * rge_start() -- start transmitting/receiving
 */
static void
rge_start(rge_t *rgep)
{
    ASSERT(mutex_owned(rgep->genlock));

    /*
     * Start chip processing, including enabling interrupts
     */
    rge_chip_start(rgep);
    rgep->watchdog = 0;
}

/*
 * rge_restart - restart transmitting/receiving after error or suspend
 */
void
rge_restart(rge_t *rgep)
{
    uint32_t i;

    ASSERT(mutex_owned(rgep->genlock));
    /*
     * Wait for posted buffers to be freed ...
     */
    if (!rgep->rx_bcopy) {
        for (i = 0; i < RXBUFF_FREE_LOOP; i++) {
            if (rgep->rx_free == RGE_BUF_SLOTS)
                break;
            drv_usecwait(1000);
            RGE_DEBUG(("rge_restart: waiting for rx buf free..."));
        }
    }
    rge_reset(rgep);
    rgep->stats.chip_reset++;
    if (rgep->rge_mac_state == RGE_MAC_STARTED) {
        rge_start(rgep);
        rgep->resched_needed = B_TRUE;
        (void) ddi_intr_trigger_softint(rgep->resched_hdl, NULL);
    }
}


/*
 * ========== Nemo-required management entry points ==========
 */

#undef RGE_DBG
#define RGE_DBG RGE_DBG_NEMO /* debug flag for this code */

/*
 * rge_m_stop() -- stop transmitting/receiving
 */
static void
rge_m_stop(void *arg)
{
    rge_t *rgep = arg; /* private device info */
    uint32_t i;

    /*
     * Just stop processing, then record new MAC state
     */
    mutex_enter(rgep->genlock);
    if (rgep->suspended) {
        ASSERT(rgep->rge_mac_state == RGE_MAC_STOPPED);
        mutex_exit(rgep->genlock);
        return;
    }
    rge_stop(rgep);
    /*
     * Wait for posted buffers to be freed ...
     */
    if (!rgep->rx_bcopy) {
        for (i = 0; i < RXBUFF_FREE_LOOP; i++) {
            if (rgep->rx_free == RGE_BUF_SLOTS)
                break;
            drv_usecwait(1000);
            RGE_DEBUG(("rge_m_stop: waiting for rx buf free..."));
        }
    }
    rgep->rge_mac_state = RGE_MAC_STOPPED;
    RGE_DEBUG(("rge_m_stop($%p) done", arg));
    mutex_exit(rgep->genlock);
}

/*
 * rge_m_start() -- start transmitting/receiving
 */
static int
rge_m_start(void *arg)
{
    rge_t *rgep = arg; /* private device info */

    mutex_enter(rgep->genlock);
    if (rgep->suspended) {
        mutex_exit(rgep->genlock);
        return (DDI_FAILURE);
    }
    /*
     * Clear hw/sw statistics
     */
    DMA_ZERO(rgep->dma_area_stats);
    bzero(&rgep->stats, sizeof (rge_stats_t));

    /*
     * Start processing and record new MAC state
     */
    rge_reset(rgep);
    rge_start(rgep);
    rgep->rge_mac_state = RGE_MAC_STARTED;
    RGE_DEBUG(("rge_m_start($%p) done", arg));

    mutex_exit(rgep->genlock);

    return (0);
}

/*
 * rge_m_unicst() -- set the physical network address
 */
static int
rge_m_unicst(void *arg, const uint8_t *macaddr)
{
    rge_t *rgep = arg; /* private device info */

    /*
     * Remember the new current address in the driver state
     * Sync the chip's idea of the address too ...
     */
    mutex_enter(rgep->genlock);
    bcopy(macaddr, rgep->netaddr, ETHERADDRL);

    if (rgep->suspended) {
        mutex_exit(rgep->genlock);
        return (DDI_SUCCESS);
    }

    rge_chip_sync(rgep, RGE_SET_MAC);
    mutex_exit(rgep->genlock);

    return (0);
}

/*
 * Compute the index of the required bit in the multicast hash map.
 * This must mirror the way the hardware actually does it!
 */
static uint32_t
rge_hash_index(const uint8_t *mca)
{
    uint32_t crc = (uint32_t)RGE_HASH_CRC;
    uint32_t const POLY = RGE_HASH_POLY;
    uint32_t msb;
    int bytes;
    uchar_t currentbyte;
    uint32_t index;
    int bit;

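    /*
     * Bitwise CRC over all six octets of the address, fed LSB first,
     * matching the CRC the chip computes in hardware; the six most
     * significant bits of the result select one of the 64 bits in
     * the hash filter.
     */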
    for (bytes = 0; bytes < ETHERADDRL; bytes++) {
        currentbyte = mca[bytes];
        for (bit = 0; bit < 8; bit++) {
            msb = crc >> 31;
            crc <<= 1;
            if (msb ^ (currentbyte & 1))
                crc ^= POLY;
            currentbyte >>= 1;
        }
    }
    index = crc >> 26;
    /* the index value is between 0 and 63 (0x3f) */

    return (index);
}

/*
 * rge_m_multicst() -- enable/disable a multicast address
 */
static int
rge_m_multicst(void *arg, boolean_t add, const uint8_t *mca)
{
    rge_t *rgep = arg; /* private device info */
    struct ether_addr *addr;
    uint32_t index;
    uint32_t reg;
    uint8_t *hashp;

    mutex_enter(rgep->genlock);
    hashp = rgep->mcast_hash;
    addr = (struct ether_addr *)mca;
    /*
     * Calculate the Multicast address hash index value
     * Normally, the position of MAR0-MAR7 is
     * MAR0: offset 0x08, ..., MAR7: offset 0x0F.
     *
     * For pcie chipset, the position of MAR0-MAR7 is
     * different from others:
     * MAR0: offset 0x0F, ..., MAR7: offset 0x08.
     */
    index = rge_hash_index(addr->ether_addr_octet);
    if (rgep->chipid.is_pcie)
        reg = (~(index / RGE_MCAST_NUM)) & 0x7;
    else
        reg = index / RGE_MCAST_NUM;
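
    /*
     * Illustrative example, assuming RGE_MCAST_NUM is 8 (64 filter
     * bits spread across eight 8-bit MAR registers): hash index 45
     * selects register 45 / 8 == 5 and bit 45 % 8 == 5 within it;
     * on PCIe chips the register order is simply reversed.
     */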

    if (add) {
        if (rgep->mcast_refs[index]++) {
            mutex_exit(rgep->genlock);
            return (0);
        }
        hashp[reg] |= 1 << (index % RGE_MCAST_NUM);
    } else {
        if (--rgep->mcast_refs[index]) {
            mutex_exit(rgep->genlock);
            return (0);
        }
        hashp[reg] &= ~(1 << (index % RGE_MCAST_NUM));
    }

    if (rgep->suspended) {
        mutex_exit(rgep->genlock);
        return (DDI_SUCCESS);
    }

    /*
     * Set multicast register
     */
    rge_chip_sync(rgep, RGE_SET_MUL);

    mutex_exit(rgep->genlock);
    return (0);
}

/*
 * rge_m_promisc() -- set or reset promiscuous mode on the board
 *
 * Program the hardware to enable/disable promiscuous and/or
 * receive-all-multicast modes.
 */
static int
rge_m_promisc(void *arg, boolean_t on)
{
    rge_t *rgep = arg;

    /*
     * Store MAC layer specified mode and pass to chip layer to update h/w
     */
    mutex_enter(rgep->genlock);

    if (rgep->promisc == on) {
        mutex_exit(rgep->genlock);
        return (0);
    }
    rgep->promisc = on;

    if (rgep->suspended) {
        mutex_exit(rgep->genlock);
        return (DDI_SUCCESS);
    }

    rge_chip_sync(rgep, RGE_SET_PROMISC);
    RGE_DEBUG(("rge_m_promisc_set($%p) done", arg));
    mutex_exit(rgep->genlock);
    return (0);
}

/*
 * Loopback ioctl code
 */

static lb_property_t loopmodes[] = {
    { normal,   "normal",   RGE_LOOP_NONE },
    { internal, "PHY",      RGE_LOOP_INTERNAL_PHY },
    { internal, "MAC",      RGE_LOOP_INTERNAL_MAC }
};
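
/*
 * The loopback modes above are what rge_loop_ioctl() reports through
 * the LB_GET_INFO_SIZE/LB_GET_INFO ioctls below.
 */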

static enum ioc_reply
rge_set_loop_mode(rge_t *rgep, uint32_t mode)
{
    /*
     * If the mode isn't being changed, there's nothing to do ...
     */
    if (mode == rgep->param_loop_mode)
        return (IOC_ACK);

    /*
     * Validate the requested mode and prepare a suitable message
     * to explain the link down/up cycle that the change will
     * probably induce ...
     */
    switch (mode) {
    default:
        return (IOC_INVAL);

    case RGE_LOOP_NONE:
    case RGE_LOOP_INTERNAL_PHY:
    case RGE_LOOP_INTERNAL_MAC:
        break;
    }

    /*
     * All OK; tell the caller to reprogram
     * the PHY and/or MAC for the new mode ...
     */
    rgep->param_loop_mode = mode;
    return (IOC_RESTART_ACK);
}

static enum ioc_reply
rge_loop_ioctl(rge_t *rgep, queue_t *wq, mblk_t *mp, struct iocblk *iocp)
{
    lb_info_sz_t *lbsp;
    lb_property_t *lbpp;
    uint32_t *lbmp;
    int cmd;

    _NOTE(ARGUNUSED(wq))

    /*
     * Validate format of ioctl
     */
    if (mp->b_cont == NULL)
        return (IOC_INVAL);

    cmd = iocp->ioc_cmd;
    switch (cmd) {
    default:
        /* NOTREACHED */
        rge_error(rgep, "rge_loop_ioctl: invalid cmd 0x%x", cmd);
        return (IOC_INVAL);

    case LB_GET_INFO_SIZE:
        if (iocp->ioc_count != sizeof (lb_info_sz_t))
            return (IOC_INVAL);
        lbsp = (lb_info_sz_t *)mp->b_cont->b_rptr;
        *lbsp = sizeof (loopmodes);
        return (IOC_REPLY);

    case LB_GET_INFO:
        if (iocp->ioc_count != sizeof (loopmodes))
            return (IOC_INVAL);
        lbpp = (lb_property_t *)mp->b_cont->b_rptr;
        bcopy(loopmodes, lbpp, sizeof (loopmodes));
        return (IOC_REPLY);

    case LB_GET_MODE:
        if (iocp->ioc_count != sizeof (uint32_t))
            return (IOC_INVAL);
        lbmp = (uint32_t *)mp->b_cont->b_rptr;
        *lbmp = rgep->param_loop_mode;
        return (IOC_REPLY);

    case LB_SET_MODE:
        if (iocp->ioc_count != sizeof (uint32_t))
            return (IOC_INVAL);
        lbmp = (uint32_t *)mp->b_cont->b_rptr;
        return (rge_set_loop_mode(rgep, *lbmp));
    }
}

/*
 * Specific rge IOCTLs, the MAC layer handles the generic ones.
 */
static void
rge_m_ioctl(void *arg, queue_t *wq, mblk_t *mp)
{
    rge_t *rgep = arg;
    struct iocblk *iocp;
    enum ioc_reply status;
    boolean_t need_privilege;
    int err;
    int cmd;

    /*
     * If suspended, we might actually be able to do some of
     * these ioctls, but it is harder to make sure they occur
     * without actually putting the hardware in an undesirable
     * state. So just NAK it.
     */
    mutex_enter(rgep->genlock);
    if (rgep->suspended) {
        miocnak(wq, mp, 0, EINVAL);
        mutex_exit(rgep->genlock);
        return;
    }
    mutex_exit(rgep->genlock);

    /*
     * Validate the command before bothering with the mutex ...
     */
    iocp = (struct iocblk *)mp->b_rptr;
    iocp->ioc_error = 0;
    need_privilege = B_TRUE;
    cmd = iocp->ioc_cmd;
    switch (cmd) {
    default:
        miocnak(wq, mp, 0, EINVAL);
        return;

    case RGE_MII_READ:
    case RGE_MII_WRITE:
    case RGE_DIAG:
    case RGE_PEEK:
    case RGE_POKE:
    case RGE_PHY_RESET:
    case RGE_SOFT_RESET:
    case RGE_HARD_RESET:
        break;

    case LB_GET_INFO_SIZE:
    case LB_GET_INFO:
    case LB_GET_MODE:
        need_privilege = B_FALSE;
        /* FALLTHRU */
    case LB_SET_MODE:
        break;

    case ND_GET:
        need_privilege = B_FALSE;
        /* FALLTHRU */
    case ND_SET:
        break;
    }

    if (need_privilege) {
        /*
         * Check for specific net_config privilege
         */
        err = secpolicy_net_config(iocp->ioc_cr, B_FALSE);
        if (err != 0) {
            miocnak(wq, mp, 0, err);
            return;
        }
    }

    mutex_enter(rgep->genlock);

    switch (cmd) {
    default:
        _NOTE(NOTREACHED)
        status = IOC_INVAL;
        break;

    case RGE_MII_READ:
    case RGE_MII_WRITE:
    case RGE_DIAG:
    case RGE_PEEK:
    case RGE_POKE:
    case RGE_PHY_RESET:
    case RGE_SOFT_RESET:
    case RGE_HARD_RESET:
        status = rge_chip_ioctl(rgep, wq, mp, iocp);
        break;

    case LB_GET_INFO_SIZE:
    case LB_GET_INFO:
    case LB_GET_MODE:
    case LB_SET_MODE:
        status = rge_loop_ioctl(rgep, wq, mp, iocp);
        break;

    case ND_GET:
    case ND_SET:
        status = rge_nd_ioctl(rgep, wq, mp, iocp);
        break;
    }

    /*
     * Do we need to reprogram the PHY and/or the MAC?
     * Do it now, while we still have the mutex.
     *
     * Note: update the PHY first, 'cos it controls the
     * speed/duplex parameters that the MAC code uses.
     */
    switch (status) {
    case IOC_RESTART_REPLY:
    case IOC_RESTART_ACK:
        rge_phy_update(rgep);
        break;
    }

    mutex_exit(rgep->genlock);

    /*
     * Finally, decide how to reply
     */
    switch (status) {
    default:
    case IOC_INVAL:
        /*
         * Error, reply with a NAK and EINVAL or the specified error
         */
        miocnak(wq, mp, 0, iocp->ioc_error == 0 ?
            EINVAL : iocp->ioc_error);
        break;

    case IOC_DONE:
        /*
         * OK, reply already sent
         */
        break;

    case IOC_RESTART_ACK:
    case IOC_ACK:
        /*
         * OK, reply with an ACK
         */
        miocack(wq, mp, 0, 0);
        break;

    case IOC_RESTART_REPLY:
    case IOC_REPLY:
        /*
         * OK, send prepared reply as ACK or NAK
         */
        mp->b_datap->db_type = iocp->ioc_error == 0 ?
            M_IOCACK : M_IOCNAK;
        qreply(wq, mp);
        break;
    }
}

/* ARGSUSED */
static boolean_t
rge_m_getcapab(void *arg, mac_capab_t cap, void *cap_data)
{
    rge_t *rgep = arg;

    switch (cap) {
    case MAC_CAPAB_HCKSUM: {
        uint32_t *hcksum_txflags = cap_data;
        switch (rgep->chipid.mac_ver) {
        case MAC_VER_8169:
        case MAC_VER_8169S_D:
        case MAC_VER_8169S_E:
        case MAC_VER_8169SB:
        case MAC_VER_8169SC:
        case MAC_VER_8168:
        case MAC_VER_8168B_B:
        case MAC_VER_8168B_C:
        case MAC_VER_8101E:
            *hcksum_txflags = HCKSUM_INET_FULL_V4 |
                HCKSUM_IPHDRCKSUM;
            break;
        case MAC_VER_8168C:
        case MAC_VER_8101E_B:
        case MAC_VER_8101E_C:
        default:
            *hcksum_txflags = 0;
            break;
        }
        break;
    }
    default:
        return (B_FALSE);
    }
    return (B_TRUE);
}

/*
 * ============ Init MSI/Fixed Interrupt routines ==============
 */

/*
 * rge_add_intrs:
 *
 * Register FIXED or MSI interrupts.
 */
static int
rge_add_intrs(rge_t *rgep, int intr_type)
{
    dev_info_t *dip = rgep->devinfo;
    int avail;
    int actual;
    int intr_size;
    int count;
    int i, j;
    int ret;

    /* Get number of interrupts */
    ret = ddi_intr_get_nintrs(dip, intr_type, &count);
    if ((ret != DDI_SUCCESS) || (count == 0)) {
        rge_error(rgep, "ddi_intr_get_nintrs() failure, ret: %d, "
            "count: %d", ret, count);
        return (DDI_FAILURE);
    }

    /* Get number of available interrupts */
    ret = ddi_intr_get_navail(dip, intr_type, &avail);
    if ((ret != DDI_SUCCESS) || (avail == 0)) {
        rge_error(rgep, "ddi_intr_get_navail() failure, "
            "ret: %d, avail: %d\n", ret, avail);
        return (DDI_FAILURE);
    }

    /* Allocate an array of interrupt handles */
    intr_size = count * sizeof (ddi_intr_handle_t);
    rgep->htable = kmem_alloc(intr_size, KM_SLEEP);
    rgep->intr_rqst = count;

    /* Call ddi_intr_alloc() */
    ret = ddi_intr_alloc(dip, rgep->htable, intr_type, 0,
        count, &actual, DDI_INTR_ALLOC_NORMAL);
    if (ret != DDI_SUCCESS || actual == 0) {
        rge_error(rgep, "ddi_intr_alloc() failed %d\n", ret);
        kmem_free(rgep->htable, intr_size);
        return (DDI_FAILURE);
    }
    if (actual < count) {
        rge_log(rgep, "ddi_intr_alloc() Requested: %d, Received: %d\n",
            count, actual);
    }
    rgep->intr_cnt = actual;

    /*
     * Get priority for first msi, assume remaining are all the same
     */
    if ((ret = ddi_intr_get_pri(rgep->htable[0], &rgep->intr_pri)) !=
        DDI_SUCCESS) {
        rge_error(rgep, "ddi_intr_get_pri() failed %d\n", ret);
        /* Free already allocated intr */
        for (i = 0; i < actual; i++) {
            (void) ddi_intr_free(rgep->htable[i]);
        }
        kmem_free(rgep->htable, intr_size);
        return (DDI_FAILURE);
    }

    /* Test for high level mutex */
    if (rgep->intr_pri >= ddi_intr_get_hilevel_pri()) {
        rge_error(rgep, "rge_add_intrs:"
            "Hi level interrupt not supported");
        for (i = 0; i < actual; i++)
            (void) ddi_intr_free(rgep->htable[i]);
        kmem_free(rgep->htable, intr_size);
        return (DDI_FAILURE);
    }

    /* Call ddi_intr_add_handler() */
    for (i = 0; i < actual; i++) {
        if ((ret = ddi_intr_add_handler(rgep->htable[i], rge_intr,
            (caddr_t)rgep, (caddr_t)(uintptr_t)i)) != DDI_SUCCESS) {
            rge_error(rgep, "ddi_intr_add_handler() "
                "failed %d\n", ret);
            /* Remove already added intr */
            for (j = 0; j < i; j++)
                (void) ddi_intr_remove_handler(rgep->htable[j]);
            /* Free already allocated intr */
            for (i = 0; i < actual; i++) {
                (void) ddi_intr_free(rgep->htable[i]);
            }
            kmem_free(rgep->htable, intr_size);
            return (DDI_FAILURE);
        }
    }

    if ((ret = ddi_intr_get_cap(rgep->htable[0], &rgep->intr_cap))
        != DDI_SUCCESS) {
        rge_error(rgep, "ddi_intr_get_cap() failed %d\n", ret);
        for (i = 0; i < actual; i++) {
            (void) ddi_intr_remove_handler(rgep->htable[i]);
            (void) ddi_intr_free(rgep->htable[i]);
        }
        kmem_free(rgep->htable, intr_size);
        return (DDI_FAILURE);
    }

    return (DDI_SUCCESS);
}

/*
 * rge_rem_intrs:
 *
 * Unregister FIXED or MSI interrupts
 */
static void
rge_rem_intrs(rge_t *rgep)
{
    int i;

    /* Disable all interrupts */
    if (rgep->intr_cap & DDI_INTR_FLAG_BLOCK) {
        /* Call ddi_intr_block_disable() */
        (void) ddi_intr_block_disable(rgep->htable, rgep->intr_cnt);
    } else {
        for (i = 0; i < rgep->intr_cnt; i++) {
            (void) ddi_intr_disable(rgep->htable[i]);
        }
    }

    /* Call ddi_intr_remove_handler() */
    for (i = 0; i < rgep->intr_cnt; i++) {
        (void) ddi_intr_remove_handler(rgep->htable[i]);
        (void) ddi_intr_free(rgep->htable[i]);
    }

    kmem_free(rgep->htable, rgep->intr_rqst * sizeof (ddi_intr_handle_t));
}

/*
 * ========== Per-instance setup/teardown code ==========
 */

#undef RGE_DBG
#define RGE_DBG RGE_DBG_INIT /* debug flag for this code */

static void
rge_unattach(rge_t *rgep)
{
    /*
     * Flag that no more activity may be initiated
     */
    rgep->progress &= ~PROGRESS_READY;
    rgep->rge_mac_state = RGE_MAC_UNATTACH;

    /*
     * Quiesce the PHY and MAC (leave it reset but still powered).
     * Clean up and free all RGE data structures
     */
    if (rgep->periodic_id != NULL) {
        ddi_periodic_delete(rgep->periodic_id);
        rgep->periodic_id = NULL;
    }

    if (rgep->progress & PROGRESS_KSTATS)
        rge_fini_kstats(rgep);

    if (rgep->progress & PROGRESS_PHY)
        (void) rge_phy_reset(rgep);

    if (rgep->progress & PROGRESS_INIT) {
        mutex_enter(rgep->genlock);
        (void) rge_chip_reset(rgep);
        mutex_exit(rgep->genlock);
        rge_fini_rings(rgep);
    }

    if (rgep->progress & PROGRESS_INTR) {
        rge_rem_intrs(rgep);
        mutex_destroy(rgep->rc_lock);
        mutex_destroy(rgep->rx_lock);
        mutex_destroy(rgep->tc_lock);
        mutex_destroy(rgep->tx_lock);
        rw_destroy(rgep->errlock);
        mutex_destroy(rgep->genlock);
    }

    if (rgep->progress & PROGRESS_FACTOTUM)
        (void) ddi_intr_remove_softint(rgep->factotum_hdl);

    if (rgep->progress & PROGRESS_RESCHED)
        (void) ddi_intr_remove_softint(rgep->resched_hdl);

    if (rgep->progress & PROGRESS_NDD)
        rge_nd_cleanup(rgep);

    rge_free_bufs(rgep);

    if (rgep->progress & PROGRESS_REGS)
        ddi_regs_map_free(&rgep->io_handle);

    if (rgep->progress & PROGRESS_CFG)
        pci_config_teardown(&rgep->cfg_handle);

    ddi_remove_minor_node(rgep->devinfo, NULL);
    kmem_free(rgep, sizeof (*rgep));
}

static int
rge_resume(dev_info_t *devinfo)
{
    rge_t *rgep;        /* Our private data */
    chip_id_t *cidp;
    chip_id_t chipid;

    rgep = ddi_get_driver_private(devinfo);

    /*
     * If there are state inconsistencies, this is bad. Returning
     * DDI_FAILURE here will eventually cause the machine to panic,
     * so it is best done here so that there is a possibility of
     * debugging the problem.
     */
    if (rgep == NULL)
        cmn_err(CE_PANIC,
            "rge: rgep returned from ddi_get_driver_private was NULL");

    /*
     * Refuse to resume if the data structures aren't consistent
     */
    if (rgep->devinfo != devinfo)
        cmn_err(CE_PANIC,
            "rge: passed devinfo not the same as saved devinfo");

    /*
     * Read chip ID & set up config space command register(s)
     * Refuse to resume if the chip has changed its identity!
     */
    cidp = &rgep->chipid;
    rge_chip_cfg_init(rgep, &chipid);
    if (chipid.vendor != cidp->vendor)
        return (DDI_FAILURE);
    if (chipid.device != cidp->device)
        return (DDI_FAILURE);
    if (chipid.revision != cidp->revision)
        return (DDI_FAILURE);

    mutex_enter(rgep->genlock);

    /*
     * This branch can be taken in only one case: the port was never
     * plumbed, so the device was never marked suspended.
     */
    if (rgep->suspended == B_FALSE) {
        mutex_exit(rgep->genlock);
        return (DDI_SUCCESS);
    }
    rgep->rge_mac_state = RGE_MAC_STARTED;
    /*
     * All OK, reinitialise h/w & kick off NEMO scheduling
     */
    rge_restart(rgep);
    rgep->suspended = B_FALSE;

    mutex_exit(rgep->genlock);

    return (DDI_SUCCESS);
}


/*
 * attach(9E) -- Attach a device to the system
 *
 * Called once for each board successfully probed.
 */
static int
rge_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd)
{
    rge_t *rgep;            /* Our private data */
    mac_register_t *macp;
    chip_id_t *cidp;
    int intr_types;
    caddr_t regs;
    int instance;
    int i;
    int err;

    /*
     * we don't support high level interrupts in the driver
     */
    if (ddi_intr_hilevel(devinfo, 0) != 0) {
        cmn_err(CE_WARN,
            "rge_attach -- unsupported high level interrupt");
        return (DDI_FAILURE);
    }

    instance = ddi_get_instance(devinfo);
    RGE_GTRACE(("rge_attach($%p, %d) instance %d",
        (void *)devinfo, cmd, instance));
    RGE_BRKPT(NULL, "rge_attach");

    switch (cmd) {
    default:
        return (DDI_FAILURE);

    case DDI_RESUME:
        return (rge_resume(devinfo));

    case DDI_ATTACH:
        break;
    }

    rgep = kmem_zalloc(sizeof (*rgep), KM_SLEEP);
    ddi_set_driver_private(devinfo, rgep);
    rgep->devinfo = devinfo;

    /*
     * Initialize more fields in RGE private data
     */
    rgep->rge_mac_state = RGE_MAC_ATTACH;
    rgep->debug = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
        DDI_PROP_DONTPASS, debug_propname, rge_debug);
    rgep->default_mtu = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
        DDI_PROP_DONTPASS, mtu_propname, ETHERMTU);
    rgep->msi_enable = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
        DDI_PROP_DONTPASS, msi_propname, B_TRUE);
    (void) snprintf(rgep->ifname, sizeof (rgep->ifname), "%s%d",
        RGE_DRIVER_NAME, instance);

    /*
     * Map config space registers
     * Read chip ID & set up config space command register(s)
     *
     * Note: this leaves the chip accessible by Memory Space
     * accesses, but with interrupts and Bus Mastering off.
     * This should ensure that nothing untoward will happen
     * if it has been left active by the (net-)bootloader.
     * We'll re-enable Bus Mastering once we've reset the chip,
     * and allow interrupts only when everything else is set up.
     */
    err = pci_config_setup(devinfo, &rgep->cfg_handle);
    if (err != DDI_SUCCESS) {
        rge_problem(rgep, "pci_config_setup() failed");
        goto attach_fail;
    }
    rgep->progress |= PROGRESS_CFG;
    cidp = &rgep->chipid;
    bzero(cidp, sizeof (*cidp));
    rge_chip_cfg_init(rgep, cidp);

    /*
     * Map operating registers
     */
    err = ddi_regs_map_setup(devinfo, 2, &regs,
        0, 0, &rge_reg_accattr, &rgep->io_handle);

    /*
     * The MMIO mapping will fail if the assigned address is above
     * 4GB; in that case, fall back to the I/O-space mapping.
     */
    if (err != DDI_SUCCESS) {
        err = ddi_regs_map_setup(devinfo, 1, &regs,
            0, 0, &rge_reg_accattr, &rgep->io_handle);
    }
    if (err != DDI_SUCCESS) {
        rge_problem(rgep, "ddi_regs_map_setup() failed");
        goto attach_fail;
    }
    rgep->io_regs = regs;
    rgep->progress |= PROGRESS_REGS;

    /*
     * Characterise the device, so we know its requirements.
     * Then allocate the appropriate TX and RX descriptors & buffers.
     */
    rge_chip_ident(rgep);
    err = rge_alloc_bufs(rgep);
    if (err != DDI_SUCCESS) {
        rge_problem(rgep, "DMA buffer allocation failed");
        goto attach_fail;
    }

    /*
     * Register NDD-tweakable parameters
     */
    if (rge_nd_init(rgep)) {
        rge_problem(rgep, "rge_nd_init() failed");
        goto attach_fail;
    }
    rgep->progress |= PROGRESS_NDD;

    /*
     * Add the softint handlers:
     *
     * Both of these handlers are used to avoid restrictions on the
     * context and/or mutexes required for some operations. In
     * particular, the hardware interrupt handler and its subfunctions
     * can detect a number of conditions that we don't want to handle
     * in that context or with that set of mutexes held. So, these
     * softints are triggered instead:
     *
     * the <resched> softint is triggered if we have previously
     * had to refuse to send a packet because of resource shortage
     * (we've run out of transmit buffers), but the send completion
     * interrupt handler has now detected that more buffers have
     * become available.
     *
     * the <factotum> is triggered if the h/w interrupt handler
     * sees the <link state changed> or <error> bits in the status
     * block. It's also triggered periodically to poll the link
     * state, just in case we aren't getting link status change
     * interrupts ...
     */
    err = ddi_intr_add_softint(devinfo, &rgep->resched_hdl,
        DDI_INTR_SOFTPRI_MIN, rge_reschedule, (caddr_t)rgep);
    if (err != DDI_SUCCESS) {
        rge_problem(rgep, "ddi_intr_add_softint() failed");
        goto attach_fail;
    }
    rgep->progress |= PROGRESS_RESCHED;
    err = ddi_intr_add_softint(devinfo, &rgep->factotum_hdl,
        DDI_INTR_SOFTPRI_MIN, rge_chip_factotum, (caddr_t)rgep);
    if (err != DDI_SUCCESS) {
        rge_problem(rgep, "ddi_intr_add_softint() failed");
        goto attach_fail;
    }
    rgep->progress |= PROGRESS_FACTOTUM;

    /*
     * Get supported interrupt types
     */
    if (ddi_intr_get_supported_types(devinfo, &intr_types)
        != DDI_SUCCESS) {
        rge_error(rgep, "ddi_intr_get_supported_types failed\n");
        goto attach_fail;
    }

    /*
     * Add the h/w interrupt handler and initialise mutexes.
     * The RTL8101E has been observed to have an MSI invalidation
     * issue after suspend/resume, so the FIXED interrupt type is
     * used for it instead.
     */
    if (rgep->chipid.mac_ver == MAC_VER_8101E)
        rgep->msi_enable = B_FALSE;
    if ((intr_types & DDI_INTR_TYPE_MSI) && rgep->msi_enable) {
        if (rge_add_intrs(rgep, DDI_INTR_TYPE_MSI) != DDI_SUCCESS) {
            rge_error(rgep, "MSI registration failed, "
                "trying FIXED interrupt type\n");
        } else {
            rge_log(rgep, "Using MSI interrupt type\n");
            rgep->intr_type = DDI_INTR_TYPE_MSI;
            rgep->progress |= PROGRESS_INTR;
        }
    }
    if (!(rgep->progress & PROGRESS_INTR) &&
        (intr_types & DDI_INTR_TYPE_FIXED)) {
        if (rge_add_intrs(rgep, DDI_INTR_TYPE_FIXED) != DDI_SUCCESS) {
            rge_error(rgep, "FIXED interrupt "
                "registration failed\n");
            goto attach_fail;
        }
        rge_log(rgep, "Using FIXED interrupt type\n");
        rgep->intr_type = DDI_INTR_TYPE_FIXED;
        rgep->progress |= PROGRESS_INTR;
    }
    if (!(rgep->progress & PROGRESS_INTR)) {
        rge_error(rgep, "No interrupts registered\n");
        goto attach_fail;
    }
    mutex_init(rgep->genlock, NULL, MUTEX_DRIVER,
        DDI_INTR_PRI(rgep->intr_pri));
    rw_init(rgep->errlock, NULL, RW_DRIVER,
        DDI_INTR_PRI(rgep->intr_pri));
    mutex_init(rgep->tx_lock, NULL, MUTEX_DRIVER,
        DDI_INTR_PRI(rgep->intr_pri));
    mutex_init(rgep->tc_lock, NULL, MUTEX_DRIVER,
        DDI_INTR_PRI(rgep->intr_pri));
    mutex_init(rgep->rx_lock, NULL, MUTEX_DRIVER,
        DDI_INTR_PRI(rgep->intr_pri));
    mutex_init(rgep->rc_lock, NULL, MUTEX_DRIVER,
        DDI_INTR_PRI(rgep->intr_pri));

    /*
     * Initialize rings
     */
    err = rge_init_rings(rgep);
    if (err != DDI_SUCCESS) {
        rge_problem(rgep, "rge_init_rings() failed");
        goto attach_fail;
    }
    rgep->progress |= PROGRESS_INIT;

    /*
     * Now that mutex locks are initialized, enable interrupts.
     */
    if (rgep->intr_cap & DDI_INTR_FLAG_BLOCK) {
        /* Call ddi_intr_block_enable() for MSI interrupts */
        (void) ddi_intr_block_enable(rgep->htable, rgep->intr_cnt);
    } else {
        /* Call ddi_intr_enable for MSI or FIXED interrupts */
        for (i = 0; i < rgep->intr_cnt; i++) {
            (void) ddi_intr_enable(rgep->htable[i]);
        }
    }

    /*
     * Initialise link state variables
     * Stop, reset & reinitialise the chip.
     * Initialise the (internal) PHY.
     */
    rgep->param_link_up = LINK_STATE_UNKNOWN;

    /*
     * Reset chip & rings to initial state; also reset address
     * filtering, promiscuity, loopback mode.
     */
    mutex_enter(rgep->genlock);
    (void) rge_chip_reset(rgep);
    rge_chip_sync(rgep, RGE_GET_MAC);
    bzero(rgep->mcast_hash, sizeof (rgep->mcast_hash));
    bzero(rgep->mcast_refs, sizeof (rgep->mcast_refs));
    rgep->promisc = B_FALSE;
    rgep->param_loop_mode = RGE_LOOP_NONE;
    mutex_exit(rgep->genlock);
    rge_phy_init(rgep);
    rgep->progress |= PROGRESS_PHY;

    /*
     * Create & initialise named kstats
     */
    rge_init_kstats(rgep, instance);
    rgep->progress |= PROGRESS_KSTATS;

    if ((macp = mac_alloc(MAC_VERSION)) == NULL)
        goto attach_fail;
    macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
    macp->m_driver = rgep;
    macp->m_dip = devinfo;
    macp->m_src_addr = rgep->netaddr;
    macp->m_callbacks = &rge_m_callbacks;
    macp->m_min_sdu = 0;
    macp->m_max_sdu = rgep->default_mtu;
    macp->m_margin = VLAN_TAGSZ;

    /*
     * Finally, we're ready to register ourselves with the MAC layer
     * interface; if this succeeds, we're all ready to start()
     */
    err = mac_register(macp, &rgep->mh);
    mac_free(macp);
    if (err != 0)
        goto attach_fail;

    /*
     * Register a periodic handler; rge_chip_cyclic() is invoked in
     * kernel context.
     */
    rgep->periodic_id = ddi_periodic_add(rge_chip_cyclic, rgep,
        RGE_CYCLIC_PERIOD, DDI_IPL_0);

    rgep->progress |= PROGRESS_READY;
    return (DDI_SUCCESS);

attach_fail:
    rge_unattach(rgep);
    return (DDI_FAILURE);
}

/*
 * rge_suspend() -- suspend transmit/receive for powerdown
 */
static int
rge_suspend(rge_t *rgep)
{
    /*
     * Stop processing and idle (powerdown) the PHY ...
     */
    mutex_enter(rgep->genlock);
    rw_enter(rgep->errlock, RW_WRITER);

    if (rgep->rge_mac_state != RGE_MAC_STARTED) {
        rw_exit(rgep->errlock);
        mutex_exit(rgep->genlock);
        return (DDI_SUCCESS);
    }

    rgep->suspended = B_TRUE;
    rge_stop(rgep);
    rgep->rge_mac_state = RGE_MAC_STOPPED;

    rw_exit(rgep->errlock);
    mutex_exit(rgep->genlock);

    return (DDI_SUCCESS);
}

/*
 * quiesce(9E) entry point.
 *
 * This function is called when the system is single-threaded at high
 * PIL with preemption disabled. Therefore, this function must not be
 * blocked.
 *
 * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
 * DDI_FAILURE indicates an error condition and should almost never happen.
 */
static int
rge_quiesce(dev_info_t *devinfo)
{
    rge_t *rgep = ddi_get_driver_private(devinfo);

    if (rgep == NULL)
        return (DDI_FAILURE);

    /*
     * Turn off debugging
     */
    rge_debug = 0;
    rgep->debug = 0;

    /* Stop the chip */
    rge_chip_stop(rgep, B_FALSE);

    return (DDI_SUCCESS);
}

/*
 * detach(9E) -- Detach a device from the system
 */
static int
rge_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd)
{
    rge_t *rgep;

    RGE_GTRACE(("rge_detach($%p, %d)", (void *)devinfo, cmd));

    rgep = ddi_get_driver_private(devinfo);

    switch (cmd) {
    default:
        return (DDI_FAILURE);

    case DDI_SUSPEND:
        return (rge_suspend(rgep));

    case DDI_DETACH:
        break;
    }

    /*
     * If any receive buffers are still posted upstream, refuse to
     * detach; the upper layers must release them first.
     */
    if (!(rgep->chip_flags & CHIP_FLAG_FORCE_BCOPY) &&
        rgep->rx_free != RGE_BUF_SLOTS)
        return (DDI_FAILURE);

    /*
     * Unregister from the MAC layer subsystem. This can fail, in
     * particular if there are DLPI style-2 streams still open -
     * in which case we just return failure without shutting
     * down chip operations.
     */
    if (mac_unregister(rgep->mh) != 0)
        return (DDI_FAILURE);

    /*
     * All activity stopped, so we can clean up & exit
     */
    rge_unattach(rgep);
    return (DDI_SUCCESS);
}


/*
 * ========== Module Loading Data & Entry Points ==========
 */

#undef RGE_DBG
#define RGE_DBG RGE_DBG_INIT /* debug flag for this code */

DDI_DEFINE_STREAM_OPS(rge_dev_ops, nulldev, nulldev, rge_attach, rge_detach,
    nodev, NULL, D_MP, NULL, rge_quiesce);

static struct modldrv rge_modldrv = {
    &mod_driverops,     /* Type of module. This one is a driver */
    rge_ident,          /* short description */
    &rge_dev_ops        /* driver specific ops */
};

static struct modlinkage modlinkage = {
    MODREV_1, (void *)&rge_modldrv, NULL
};


int
_info(struct modinfo *modinfop)
{
    return (mod_info(&modlinkage, modinfop));
}

int
_init(void)
{
    int status;

    mac_init_ops(&rge_dev_ops, "rge");
    status = mod_install(&modlinkage);
    if (status == DDI_SUCCESS)
        mutex_init(rge_log_mutex, NULL, MUTEX_DRIVER, NULL);
    else
        mac_fini_ops(&rge_dev_ops);

    return (status);
}

int
_fini(void)
{
    int status;

    status = mod_remove(&modlinkage);
    if (status == DDI_SUCCESS) {
        mac_fini_ops(&rge_dev_ops);
        mutex_destroy(rge_log_mutex);
    }
    return (status);
}