1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21 /*
22 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
23 * Use is subject to license terms.
24 * Copyright 2012 Milan Jurik. All rights reserved.
25 * Copyright 2016 OmniTI Computer Consulting, Inc. All rights reserved.
26 */
27
28 /*
29  * SunOS MT STREAMS Hydra 10Gb Ethernet Device Driver.
30 */
31 #include <hxge_impl.h>
32 #include <hxge_pfc.h>
33
34 /*
35 * PSARC/2007/453 MSI-X interrupt limit override
36 * (This PSARC case is limited to MSI-X vectors
37 * and SPARC platforms only).
38 */
39 uint32_t hxge_msi_enable = 2;
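/*
 * Interpretation of hxge_msi_enable (an assumption based on its use in
 * hxge_add_intrs()): 0 restricts the driver to fixed (legacy) interrupts,
 * while non-zero values allow MSI/MSI-X vectors to be requested when the
 * platform provides them.  The default of 2 reflects the PSARC override
 * noted above.
 */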
40
41 /*
42 * Globals: tunable parameters (/etc/system or adb)
43 *
44 */
45 uint32_t hxge_rbr_size = HXGE_RBR_RBB_DEFAULT;
46 uint32_t hxge_rbr_spare_size = 0;
47 uint32_t hxge_rcr_size = HXGE_RCR_DEFAULT;
48 uint32_t hxge_tx_ring_size = HXGE_TX_RING_DEFAULT;
49 uint32_t hxge_bcopy_thresh = TX_BCOPY_MAX;
50 uint32_t hxge_dvma_thresh = TX_FASTDVMA_MIN;
51 uint32_t hxge_dma_stream_thresh = TX_STREAM_MIN;
52 uint32_t hxge_jumbo_frame_size = MAX_FRAME_SIZE;
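/*
 * These tunables can be overridden at boot time from /etc/system with the
 * usual "set <module>:<variable> = <value>" syntax.  A minimal sketch (the
 * values below are only examples; pick sizes appropriate for the workload):
 *
 *	set hxge:hxge_rbr_size = 2048
 *	set hxge:hxge_jumbo_frame_size = 9216
 */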
53
54 static hxge_os_mutex_t hxgedebuglock;
55 static int hxge_debug_init = 0;
56
57 /*
58 * Debugging flags:
59  *	hxge_no_tx_lb	 : set to a non-zero value to disable transmit load balancing
60 * hxge_tx_lb_policy: 0 - TCP/UDP port (default)
61 * 1 - From the Stack
62 * 2 - Destination IP Address
63 */
64 uint32_t hxge_no_tx_lb = 0;
65 uint32_t hxge_tx_lb_policy = HXGE_TX_LB_TCPUDP;
66
67 /*
68 * Tunables to manage the receive buffer blocks.
69 *
70 * hxge_rx_threshold_hi: copy all buffers.
71  *	hxge_rx_buf_size_type: receive buffer block size type.
72 * hxge_rx_threshold_lo: copy only up to tunable block size type.
73 */
74 #if defined(__sparc)
75 hxge_rxbuf_threshold_t hxge_rx_threshold_hi = HXGE_RX_COPY_6;
76 hxge_rxbuf_threshold_t hxge_rx_threshold_lo = HXGE_RX_COPY_4;
77 #else
78 hxge_rxbuf_threshold_t hxge_rx_threshold_hi = HXGE_RX_COPY_NONE;
79 hxge_rxbuf_threshold_t hxge_rx_threshold_lo = HXGE_RX_COPY_NONE;
80 #endif
81 hxge_rxbuf_type_t hxge_rx_buf_size_type = RCR_PKTBUFSZ_0;
82
83 rtrace_t hpi_rtracebuf;
84
85 /*
86 * Function Prototypes
87 */
88 static int hxge_attach(dev_info_t *, ddi_attach_cmd_t);
89 static int hxge_detach(dev_info_t *, ddi_detach_cmd_t);
90 static void hxge_unattach(p_hxge_t);
91
92 static hxge_status_t hxge_setup_system_dma_pages(p_hxge_t);
93
94 static hxge_status_t hxge_setup_mutexes(p_hxge_t);
95 static void hxge_destroy_mutexes(p_hxge_t);
96
97 static hxge_status_t hxge_map_regs(p_hxge_t hxgep);
98 static void hxge_unmap_regs(p_hxge_t hxgep);
99
100 static hxge_status_t hxge_add_intrs(p_hxge_t hxgep);
101 static void hxge_remove_intrs(p_hxge_t hxgep);
102 static hxge_status_t hxge_add_intrs_adv(p_hxge_t hxgep);
103 static hxge_status_t hxge_add_intrs_adv_type(p_hxge_t, uint32_t);
104 static hxge_status_t hxge_add_intrs_adv_type_fix(p_hxge_t, uint32_t);
105 static void hxge_intrs_enable(p_hxge_t hxgep);
106 static void hxge_intrs_disable(p_hxge_t hxgep);
107 static void hxge_suspend(p_hxge_t);
108 static hxge_status_t hxge_resume(p_hxge_t);
109 static hxge_status_t hxge_setup_dev(p_hxge_t);
110 static void hxge_destroy_dev(p_hxge_t);
111 static hxge_status_t hxge_alloc_mem_pool(p_hxge_t);
112 static void hxge_free_mem_pool(p_hxge_t);
113 static hxge_status_t hxge_alloc_rx_mem_pool(p_hxge_t);
114 static void hxge_free_rx_mem_pool(p_hxge_t);
115 static hxge_status_t hxge_alloc_tx_mem_pool(p_hxge_t);
116 static void hxge_free_tx_mem_pool(p_hxge_t);
117 static hxge_status_t hxge_dma_mem_alloc(p_hxge_t, dma_method_t,
118 struct ddi_dma_attr *, size_t, ddi_device_acc_attr_t *, uint_t,
119 p_hxge_dma_common_t);
120 static void hxge_dma_mem_free(p_hxge_dma_common_t);
121 static hxge_status_t hxge_alloc_rx_buf_dma(p_hxge_t, uint16_t,
122 p_hxge_dma_common_t *, size_t, size_t, uint32_t *);
123 static void hxge_free_rx_buf_dma(p_hxge_t, p_hxge_dma_common_t, uint32_t);
124 static hxge_status_t hxge_alloc_rx_cntl_dma(p_hxge_t, uint16_t,
125 p_hxge_dma_common_t *, struct ddi_dma_attr *, size_t);
126 static void hxge_free_rx_cntl_dma(p_hxge_t, p_hxge_dma_common_t);
127 static hxge_status_t hxge_alloc_tx_buf_dma(p_hxge_t, uint16_t,
128 p_hxge_dma_common_t *, size_t, size_t, uint32_t *);
129 static void hxge_free_tx_buf_dma(p_hxge_t, p_hxge_dma_common_t, uint32_t);
130 static hxge_status_t hxge_alloc_tx_cntl_dma(p_hxge_t, uint16_t,
131 p_hxge_dma_common_t *, size_t);
132 static void hxge_free_tx_cntl_dma(p_hxge_t, p_hxge_dma_common_t);
133 static int hxge_init_common_dev(p_hxge_t);
134 static void hxge_uninit_common_dev(p_hxge_t);
135
136 /*
137 * The next declarations are for the GLDv3 interface.
138 */
139 static int hxge_m_start(void *);
140 static void hxge_m_stop(void *);
141 static int hxge_m_multicst(void *, boolean_t, const uint8_t *);
142 static int hxge_m_promisc(void *, boolean_t);
143 static void hxge_m_ioctl(void *, queue_t *, mblk_t *);
144 static hxge_status_t hxge_mac_register(p_hxge_t hxgep);
145
146 static boolean_t hxge_m_getcapab(void *, mac_capab_t, void *);
147 static boolean_t hxge_param_locked(mac_prop_id_t pr_num);
148 static int hxge_m_setprop(void *barg, const char *pr_name, mac_prop_id_t pr_num,
149 uint_t pr_valsize, const void *pr_val);
150 static int hxge_m_getprop(void *barg, const char *pr_name, mac_prop_id_t pr_num,
151 uint_t pr_valsize, void *pr_val);
152 static void hxge_m_propinfo(void *barg, const char *pr_name,
153 mac_prop_id_t pr_num, mac_prop_info_handle_t mph);
154 static int hxge_set_priv_prop(p_hxge_t hxgep, const char *pr_name,
155 uint_t pr_valsize, const void *pr_val);
156 static int hxge_get_priv_prop(p_hxge_t hxgep, const char *pr_name,
157 uint_t pr_valsize, void *pr_val);
158 static void hxge_link_poll(void *arg);
159 static void hxge_link_update(p_hxge_t hxge, link_state_t state);
160 static void hxge_msix_init(p_hxge_t hxgep);
161
162 char *hxge_priv_props[] = {
163 "_rxdma_intr_time",
164 "_rxdma_intr_pkts",
165 "_class_opt_ipv4_tcp",
166 "_class_opt_ipv4_udp",
167 "_class_opt_ipv4_ah",
168 "_class_opt_ipv4_sctp",
169 "_class_opt_ipv6_tcp",
170 "_class_opt_ipv6_udp",
171 "_class_opt_ipv6_ah",
172 "_class_opt_ipv6_sctp",
173 NULL
174 };
175
176 #define HXGE_MAX_PRIV_PROPS \
177 	(sizeof (hxge_priv_props) / sizeof (hxge_priv_props[0]))
178
179 #define HXGE_MAGIC 0x4E584745UL
180 #define MAX_DUMP_SZ 256
181
182 #define HXGE_M_CALLBACK_FLAGS \
183 (MC_IOCTL | MC_GETCAPAB | MC_SETPROP | MC_GETPROP | MC_PROPINFO)
184
185 extern hxge_status_t hxge_pfc_set_default_mac_addr(p_hxge_t hxgep);
186
187 static mac_callbacks_t hxge_m_callbacks = {
188 HXGE_M_CALLBACK_FLAGS,
189 hxge_m_stat,
190 hxge_m_start,
191 hxge_m_stop,
192 hxge_m_promisc,
193 hxge_m_multicst,
194 NULL,
195 NULL,
196 NULL,
197 hxge_m_ioctl,
198 hxge_m_getcapab,
199 NULL,
200 NULL,
201 hxge_m_setprop,
202 hxge_m_getprop,
203 hxge_m_propinfo
204 };
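/*
 * The NULL entries above appear to fall on the optional mc_unicst, mc_tx,
 * mc_reserved, mc_open and mc_close slots (assuming the mac_callbacks_t
 * field layout in <sys/mac_provider.h>); unicast-address and transmit
 * handling are presumably exported through the MAC_CAPAB_RINGS capability
 * in hxge_m_getcapab() instead.
 */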
205
206 /* PSARC/2007/453 MSI-X interrupt limit override. */
207 #define HXGE_MSIX_REQUEST_10G 8
208 static int hxge_create_msi_property(p_hxge_t);
209
210 /* Enable debug messages as necessary. */
211 uint64_t hxge_debug_level = 0;
212
213 /*
214 * This list contains the instance structures for the Hydra
215 * devices present in the system. The lock exists to guarantee
216 * mutually exclusive access to the list.
217 */
218 void *hxge_list = NULL;
219 void *hxge_hw_list = NULL;
220 hxge_os_mutex_t hxge_common_lock;
221
222 extern uint64_t hpi_debug_level;
223
224 extern hxge_status_t hxge_ldgv_init(p_hxge_t, int *, int *);
225 extern hxge_status_t hxge_ldgv_uninit(p_hxge_t);
226 extern hxge_status_t hxge_intr_ldgv_init(p_hxge_t);
227 extern void hxge_fm_init(p_hxge_t hxgep, ddi_device_acc_attr_t *reg_attr,
228 ddi_device_acc_attr_t *desc_attr, ddi_dma_attr_t *dma_attr);
229 extern void hxge_fm_fini(p_hxge_t hxgep);
230
231 /*
232 * Count used to maintain the number of buffers being used
233 * by Hydra instances and loaned up to the upper layers.
234 */
235 uint32_t hxge_mblks_pending = 0;
236
237 /*
238 * Device register access attributes for PIO.
239 */
240 static ddi_device_acc_attr_t hxge_dev_reg_acc_attr = {
241 DDI_DEVICE_ATTR_V0,
242 DDI_STRUCTURE_LE_ACC,
243 DDI_STRICTORDER_ACC,
244 };
245
246 /*
247 * Device descriptor access attributes for DMA.
248 */
249 static ddi_device_acc_attr_t hxge_dev_desc_dma_acc_attr = {
250 DDI_DEVICE_ATTR_V0,
251 DDI_STRUCTURE_LE_ACC,
252 DDI_STRICTORDER_ACC
253 };
254
255 /*
256 * Device buffer access attributes for DMA.
257 */
258 static ddi_device_acc_attr_t hxge_dev_buf_dma_acc_attr = {
259 DDI_DEVICE_ATTR_V0,
260 DDI_STRUCTURE_BE_ACC,
261 DDI_STRICTORDER_ACC
262 };
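
/*
 * Note on the three access attributes above: device registers and DMA
 * descriptors are declared little-endian (DDI_STRUCTURE_LE_ACC) while
 * packet data buffers are declared big-endian (DDI_STRUCTURE_BE_ACC), so
 * the DDI access framework performs any byte swapping needed on
 * big-endian (SPARC) hosts.
 */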
263
264 ddi_dma_attr_t hxge_rx_rcr_desc_dma_attr = {
265 DMA_ATTR_V0, /* version number. */
266 0, /* low address */
267 0xffffffffffffffff, /* high address */
268 0xffffffffffffffff, /* address counter max */
269 0x80000, /* alignment */
270 0xfc00fc, /* dlim_burstsizes */
271 0x1, /* minimum transfer size */
272 0xffffffffffffffff, /* maximum transfer size */
273 0xffffffffffffffff, /* maximum segment size */
274 1, /* scatter/gather list length */
275 (unsigned int)1, /* granularity */
276 0 /* attribute flags */
277 };
278
279 ddi_dma_attr_t hxge_tx_desc_dma_attr = {
280 DMA_ATTR_V0, /* version number. */
281 0, /* low address */
282 0xffffffffffffffff, /* high address */
283 0xffffffffffffffff, /* address counter max */
284 0x100000, /* alignment */
285 0xfc00fc, /* dlim_burstsizes */
286 0x1, /* minimum transfer size */
287 0xffffffffffffffff, /* maximum transfer size */
288 0xffffffffffffffff, /* maximum segment size */
289 1, /* scatter/gather list length */
290 (unsigned int)1, /* granularity */
291 0 /* attribute flags */
292 };
293
294 ddi_dma_attr_t hxge_rx_rbr_desc_dma_attr = {
295 DMA_ATTR_V0, /* version number. */
296 0, /* low address */
297 0xffffffffffffffff, /* high address */
298 0xffffffffffffffff, /* address counter max */
299 0x40000, /* alignment */
300 0xfc00fc, /* dlim_burstsizes */
301 0x1, /* minimum transfer size */
302 0xffffffffffffffff, /* maximum transfer size */
303 0xffffffffffffffff, /* maximum segment size */
304 1, /* scatter/gather list length */
305 (unsigned int)1, /* granularity */
306 0 /* attribute flags */
307 };
308
309 ddi_dma_attr_t hxge_rx_mbox_dma_attr = {
310 DMA_ATTR_V0, /* version number. */
311 0, /* low address */
312 0xffffffffffffffff, /* high address */
313 0xffffffffffffffff, /* address counter max */
314 #if defined(_BIG_ENDIAN)
315 0x2000, /* alignment */
316 #else
317 0x1000, /* alignment */
318 #endif
319 0xfc00fc, /* dlim_burstsizes */
320 0x1, /* minimum transfer size */
321 0xffffffffffffffff, /* maximum transfer size */
322 0xffffffffffffffff, /* maximum segment size */
323 5, /* scatter/gather list length */
324 (unsigned int)1, /* granularity */
325 0 /* attribute flags */
326 };
327
328 ddi_dma_attr_t hxge_tx_dma_attr = {
329 DMA_ATTR_V0, /* version number. */
330 0, /* low address */
331 0xffffffffffffffff, /* high address */
332 0xffffffffffffffff, /* address counter max */
333 #if defined(_BIG_ENDIAN)
334 0x2000, /* alignment */
335 #else
336 0x1000, /* alignment */
337 #endif
338 0xfc00fc, /* dlim_burstsizes */
339 0x1, /* minimum transfer size */
340 0xffffffffffffffff, /* maximum transfer size */
341 0xffffffffffffffff, /* maximum segment size */
342 5, /* scatter/gather list length */
343 (unsigned int)1, /* granularity */
344 0 /* attribute flags */
345 };
346
347 ddi_dma_attr_t hxge_rx_dma_attr = {
348 DMA_ATTR_V0, /* version number. */
349 0, /* low address */
350 0xffffffffffffffff, /* high address */
351 0xffffffffffffffff, /* address counter max */
352 0x10000, /* alignment */
353 0xfc00fc, /* dlim_burstsizes */
354 0x1, /* minimum transfer size */
355 0xffffffffffffffff, /* maximum transfer size */
356 0xffffffffffffffff, /* maximum segment size */
357 1, /* scatter/gather list length */
358 (unsigned int)1, /* granularity */
359 DDI_DMA_RELAXED_ORDERING /* attribute flags */
360 };
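
/*
 * The dma_attr_align values in hxge_rx_dma_attr and hxge_tx_dma_attr are
 * placeholders: hxge_setup_system_dma_pages() overrides them with the
 * system page size once it has been determined.
 */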
361
362 ddi_dma_lim_t hxge_dma_limits = {
363 (uint_t)0, /* dlim_addr_lo */
364 (uint_t)0xffffffff, /* dlim_addr_hi */
365 (uint_t)0xffffffff, /* dlim_cntr_max */
366 (uint_t)0xfc00fc, /* dlim_burstsizes for 32 and 64 bit xfers */
367 0x1, /* dlim_minxfer */
368 1024 /* dlim_speed */
369 };
370
371 dma_method_t hxge_force_dma = DVMA;
372
373 /*
374 * dma chunk sizes.
375 *
376  * Try to allocate the largest possible chunk size
377  * so that fewer DMA chunks need to be managed.
378 */
379 size_t alloc_sizes[] = {
380 0x1000, 0x2000, 0x4000, 0x8000,
381 0x10000, 0x20000, 0x40000, 0x80000,
382 0x100000, 0x200000, 0x400000, 0x800000, 0x1000000
383 };
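/*
 * hxge_alloc_rx_buf_dma() (below) walks this table to find the smallest
 * entry that covers the requested pool size (capping at the largest entry)
 * and falls back to smaller chunks when an allocation fails.  For example,
 * a 640 KB (0xA0000) request first tries a 1 MB (0x100000) chunk; if that
 * cannot be satisfied it retries with 512 KB chunks, and so on, until the
 * total is covered or HXGE_DMA_BLOCK chunks have been used.
 */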
384
385 /*
386  * hxge_attach - attach or resume a device instance.
387 */
388 static int
389 hxge_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
390 {
391 p_hxge_t hxgep = NULL;
392 int instance;
393 int status = DDI_SUCCESS;
394 int i;
395
396 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_attach"));
397
398 /*
399 	 * Get the device instance since we'll need to set up or retrieve a soft
400 * state for this instance.
401 */
402 instance = ddi_get_instance(dip);
403
404 switch (cmd) {
405 case DDI_ATTACH:
406 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing DDI_ATTACH"));
407 break;
408
409 case DDI_RESUME:
410 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing DDI_RESUME"));
411 hxgep = (p_hxge_t)ddi_get_soft_state(hxge_list, instance);
412 if (hxgep == NULL) {
413 status = DDI_FAILURE;
414 break;
415 }
416 if (hxgep->dip != dip) {
417 status = DDI_FAILURE;
418 break;
419 }
420 if (hxgep->suspended == DDI_PM_SUSPEND) {
421 status = ddi_dev_is_needed(hxgep->dip, 0, 1);
422 } else {
423 (void) hxge_resume(hxgep);
424 }
425 goto hxge_attach_exit;
426
427 case DDI_PM_RESUME:
428 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing DDI_PM_RESUME"));
429 hxgep = (p_hxge_t)ddi_get_soft_state(hxge_list, instance);
430 if (hxgep == NULL) {
431 status = DDI_FAILURE;
432 break;
433 }
434 if (hxgep->dip != dip) {
435 status = DDI_FAILURE;
436 break;
437 }
438 (void) hxge_resume(hxgep);
439 goto hxge_attach_exit;
440
441 default:
442 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing unknown"));
443 status = DDI_FAILURE;
444 goto hxge_attach_exit;
445 }
446
447 if (ddi_soft_state_zalloc(hxge_list, instance) == DDI_FAILURE) {
448 status = DDI_FAILURE;
449 HXGE_ERROR_MSG((hxgep, DDI_CTL,
450 "ddi_soft_state_zalloc failed"));
451 goto hxge_attach_exit;
452 }
453
454 hxgep = ddi_get_soft_state(hxge_list, instance);
455 if (hxgep == NULL) {
456 status = HXGE_ERROR;
457 HXGE_ERROR_MSG((hxgep, DDI_CTL,
458 "ddi_get_soft_state failed"));
459 goto hxge_attach_fail2;
460 }
461
462 hxgep->drv_state = 0;
463 hxgep->dip = dip;
464 hxgep->instance = instance;
465 hxgep->p_dip = ddi_get_parent(dip);
466 hxgep->hxge_debug_level = hxge_debug_level;
467 hpi_debug_level = hxge_debug_level;
468
469 /*
470 	 * Initialize MMAC structure.
471 */
472 (void) hxge_pfc_num_macs_get(hxgep, &hxgep->mmac.total);
473 hxgep->mmac.available = hxgep->mmac.total;
474 for (i = 0; i < hxgep->mmac.total; i++) {
475 hxgep->mmac.addrs[i].set = B_FALSE;
476 hxgep->mmac.addrs[i].primary = B_FALSE;
477 }
478
479 hxge_fm_init(hxgep, &hxge_dev_reg_acc_attr, &hxge_dev_desc_dma_acc_attr,
480 &hxge_rx_dma_attr);
481
482 status = hxge_map_regs(hxgep);
483 if (status != HXGE_OK) {
484 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "hxge_map_regs failed"));
485 goto hxge_attach_fail3;
486 }
487
488 status = hxge_init_common_dev(hxgep);
489 if (status != HXGE_OK) {
490 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
491 "hxge_init_common_dev failed"));
492 goto hxge_attach_fail4;
493 }
494
495 /*
496 * Setup the Ndd parameters for this instance.
497 */
498 hxge_init_param(hxgep);
499
500 /*
501 * Setup Register Tracing Buffer.
502 */
503 hpi_rtrace_buf_init((rtrace_t *)&hpi_rtracebuf);
504
505 /* init stats ptr */
506 hxge_init_statsp(hxgep);
507
508 status = hxge_setup_mutexes(hxgep);
509 if (status != HXGE_OK) {
510 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "set mutex failed"));
511 goto hxge_attach_fail;
512 }
513
514 /* Scrub the MSI-X memory */
515 hxge_msix_init(hxgep);
516
517 status = hxge_get_config_properties(hxgep);
518 if (status != HXGE_OK) {
519 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "get_hw create failed"));
520 goto hxge_attach_fail;
521 }
522
523 /*
524 * Setup the Kstats for the driver.
525 */
526 hxge_setup_kstats(hxgep);
527 hxge_setup_param(hxgep);
528
529 status = hxge_setup_system_dma_pages(hxgep);
530 if (status != HXGE_OK) {
531 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "set dma page failed"));
532 goto hxge_attach_fail;
533 }
534
535 hxge_hw_id_init(hxgep);
536 hxge_hw_init_niu_common(hxgep);
537
538 status = hxge_setup_dev(hxgep);
539 if (status != DDI_SUCCESS) {
540 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "set dev failed"));
541 goto hxge_attach_fail;
542 }
543
544 status = hxge_add_intrs(hxgep);
545 if (status != DDI_SUCCESS) {
546 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "add_intr failed"));
547 goto hxge_attach_fail;
548 }
549
550 /*
551 * Enable interrupts.
552 */
553 hxge_intrs_enable(hxgep);
554
555 if ((status = hxge_mac_register(hxgep)) != HXGE_OK) {
556 HXGE_DEBUG_MSG((hxgep, DDI_CTL,
557 "unable to register to mac layer (%d)", status));
558 goto hxge_attach_fail;
559 }
560 mac_link_update(hxgep->mach, LINK_STATE_UNKNOWN);
561
562 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "registered to mac (instance %d)",
563 instance));
564
565 goto hxge_attach_exit;
566
567 hxge_attach_fail:
568 hxge_unattach(hxgep);
569 goto hxge_attach_fail1;
570
571 hxge_attach_fail4:
572 if (hxgep->hxge_hw_p) {
573 hxge_uninit_common_dev(hxgep);
574 hxgep->hxge_hw_p = NULL;
575 }
576 hxge_attach_fail3:
577 /*
578 * Unmap the register setup.
579 */
580 hxge_unmap_regs(hxgep);
581
582 hxge_fm_fini(hxgep);
583
584 hxge_attach_fail2:
585 	ddi_soft_state_free(hxge_list, instance);
586
587 hxge_attach_fail1:
588 if (status != HXGE_OK)
589 status = (HXGE_ERROR | HXGE_DDI_FAILED);
590 hxgep = NULL;
591
592 hxge_attach_exit:
593 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_attach status = 0x%08x",
594 status));
595
596 return (status);
597 }
598
599 static int
600 hxge_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
601 {
602 int status = DDI_SUCCESS;
603 int instance;
604 p_hxge_t hxgep = NULL;
605
606 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_detach"));
607 instance = ddi_get_instance(dip);
608 hxgep = ddi_get_soft_state(hxge_list, instance);
609 if (hxgep == NULL) {
610 status = DDI_FAILURE;
611 goto hxge_detach_exit;
612 }
613
614 switch (cmd) {
615 case DDI_DETACH:
616 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing DDI_DETACH"));
617 break;
618
619 case DDI_PM_SUSPEND:
620 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing DDI_PM_SUSPEND"));
621 hxgep->suspended = DDI_PM_SUSPEND;
622 hxge_suspend(hxgep);
623 break;
624
625 case DDI_SUSPEND:
626 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing DDI_SUSPEND"));
627 if (hxgep->suspended != DDI_PM_SUSPEND) {
628 hxgep->suspended = DDI_SUSPEND;
629 hxge_suspend(hxgep);
630 }
631 break;
632
633 default:
634 status = DDI_FAILURE;
635 break;
636 }
637
638 if (cmd != DDI_DETACH)
639 goto hxge_detach_exit;
640
641 /*
642 * Stop the xcvr polling.
643 */
644 hxgep->suspended = cmd;
645
646 if (hxgep->mach && (status = mac_unregister(hxgep->mach)) != 0) {
647 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
648 "<== hxge_detach status = 0x%08X", status));
649 return (DDI_FAILURE);
650 }
651 HXGE_DEBUG_MSG((hxgep, DDI_CTL,
652 "<== hxge_detach (mac_unregister) status = 0x%08X", status));
653
654 hxge_unattach(hxgep);
655 hxgep = NULL;
656
657 hxge_detach_exit:
658 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_detach status = 0x%08X",
659 status));
660
661 return (status);
662 }
663
664 static void
665 hxge_unattach(p_hxge_t hxgep)
666 {
667 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_unattach"));
668
669 if (hxgep == NULL || hxgep->dev_regs == NULL) {
670 return;
671 }
672
673 if (hxgep->hxge_hw_p) {
674 hxge_uninit_common_dev(hxgep);
675 hxgep->hxge_hw_p = NULL;
676 }
677
678 if (hxgep->hxge_timerid) {
679 hxge_stop_timer(hxgep, hxgep->hxge_timerid);
680 hxgep->hxge_timerid = 0;
681 }
682
683 /* Stop interrupts. */
684 hxge_intrs_disable(hxgep);
685
686 /* Stop any further interrupts. */
687 hxge_remove_intrs(hxgep);
688
689 /* Stop the device and free resources. */
690 hxge_destroy_dev(hxgep);
691
692 /* Tear down the ndd parameters setup. */
693 hxge_destroy_param(hxgep);
694
695 /* Tear down the kstat setup. */
696 hxge_destroy_kstats(hxgep);
697
698 /*
699 * Remove the list of ndd parameters which were setup during attach.
700 */
701 if (hxgep->dip) {
702 HXGE_DEBUG_MSG((hxgep, OBP_CTL,
703 " hxge_unattach: remove all properties"));
704 (void) ddi_prop_remove_all(hxgep->dip);
705 }
706
707 /*
708 * Reset RDC, TDC, PFC, and VMAC blocks from PEU to clear any
709 * previous state before unmapping the registers.
710 */
711 HXGE_REG_WR32(hxgep->hpi_handle, BLOCK_RESET, 0x0000001E);
712 HXGE_DELAY(1000);
713
714 /*
715 * Unmap the register setup.
716 */
717 hxge_unmap_regs(hxgep);
718
719 hxge_fm_fini(hxgep);
720
721 /* Destroy all mutexes. */
722 hxge_destroy_mutexes(hxgep);
723
724 /*
725 * Free the soft state data structures allocated with this instance.
726 */
727 ddi_soft_state_free(hxge_list, hxgep->instance);
728
729 HXGE_DEBUG_MSG((NULL, DDI_CTL, "<== hxge_unattach"));
730 }
731
732 static hxge_status_t
733 hxge_map_regs(p_hxge_t hxgep)
734 {
735 int ddi_status = DDI_SUCCESS;
736 p_dev_regs_t dev_regs;
737
738 #ifdef HXGE_DEBUG
739 char *sysname;
740 #endif
741
742 off_t regsize;
743 hxge_status_t status = HXGE_OK;
744 int nregs;
745
746 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_map_regs"));
747
748 if (ddi_dev_nregs(hxgep->dip, &nregs) != DDI_SUCCESS)
749 return (HXGE_ERROR);
750
751 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "hxge_map_regs: nregs: %d", nregs));
752
753 hxgep->dev_regs = NULL;
754 dev_regs = KMEM_ZALLOC(sizeof (dev_regs_t), KM_SLEEP);
755 dev_regs->hxge_regh = NULL;
756 dev_regs->hxge_pciregh = NULL;
757 dev_regs->hxge_msix_regh = NULL;
758
759 	(void) ddi_dev_regsize(hxgep->dip, 0, &regsize);
760 HXGE_DEBUG_MSG((hxgep, DDI_CTL,
761 "hxge_map_regs: pci config size 0x%x", regsize));
762
763 ddi_status = ddi_regs_map_setup(hxgep->dip, 0,
764 (caddr_t *)&(dev_regs->hxge_pciregp), 0, 0,
765 &hxge_dev_reg_acc_attr, &dev_regs->hxge_pciregh);
766 if (ddi_status != DDI_SUCCESS) {
767 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
768 "ddi_map_regs, hxge bus config regs failed"));
769 goto hxge_map_regs_fail0;
770 }
771
772 HXGE_DEBUG_MSG((hxgep, DDI_CTL,
773 "hxge_map_reg: PCI config addr 0x%0llx handle 0x%0llx",
774 dev_regs->hxge_pciregp,
775 dev_regs->hxge_pciregh));
776
777 	(void) ddi_dev_regsize(hxgep->dip, 1, &regsize);
778 HXGE_DEBUG_MSG((hxgep, DDI_CTL,
779 "hxge_map_regs: pio size 0x%x", regsize));
780
781 /* set up the device mapped register */
782 ddi_status = ddi_regs_map_setup(hxgep->dip, 1,
783 (caddr_t *)&(dev_regs->hxge_regp), 0, 0,
784 &hxge_dev_reg_acc_attr, &dev_regs->hxge_regh);
785
786 if (ddi_status != DDI_SUCCESS) {
787 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
788 "ddi_map_regs for Hydra global reg failed"));
789 goto hxge_map_regs_fail1;
790 }
791
792 /* set up the msi/msi-x mapped register */
793 	(void) ddi_dev_regsize(hxgep->dip, 2, &regsize);
794 HXGE_DEBUG_MSG((hxgep, DDI_CTL,
795 "hxge_map_regs: msix size 0x%x", regsize));
796
797 ddi_status = ddi_regs_map_setup(hxgep->dip, 2,
798 (caddr_t *)&(dev_regs->hxge_msix_regp), 0, 0,
799 &hxge_dev_reg_acc_attr, &dev_regs->hxge_msix_regh);
800
801 if (ddi_status != DDI_SUCCESS) {
802 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
803 "ddi_map_regs for msi reg failed"));
804 goto hxge_map_regs_fail2;
805 }
806
807 hxgep->dev_regs = dev_regs;
808
809 HPI_PCI_ACC_HANDLE_SET(hxgep, dev_regs->hxge_pciregh);
810 HPI_PCI_ADD_HANDLE_SET(hxgep, (hpi_reg_ptr_t)dev_regs->hxge_pciregp);
811 HPI_MSI_ACC_HANDLE_SET(hxgep, dev_regs->hxge_msix_regh);
812 HPI_MSI_ADD_HANDLE_SET(hxgep, (hpi_reg_ptr_t)dev_regs->hxge_msix_regp);
813
814 HPI_ACC_HANDLE_SET(hxgep, dev_regs->hxge_regh);
815 HPI_ADD_HANDLE_SET(hxgep, (hpi_reg_ptr_t)dev_regs->hxge_regp);
816
817 HPI_REG_ACC_HANDLE_SET(hxgep, dev_regs->hxge_regh);
818 HPI_REG_ADD_HANDLE_SET(hxgep, (hpi_reg_ptr_t)dev_regs->hxge_regp);
819
820 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "hxge_map_reg: hardware addr 0x%0llx "
821 " handle 0x%0llx", dev_regs->hxge_regp, dev_regs->hxge_regh));
822
823 goto hxge_map_regs_exit;
824
825 hxge_map_regs_fail2:
826 if (dev_regs->hxge_regh) {
827 ddi_regs_map_free(&dev_regs->hxge_regh);
828 }
829
830 hxge_map_regs_fail1:
831 if (dev_regs->hxge_pciregh) {
832 ddi_regs_map_free(&dev_regs->hxge_pciregh);
833 }
834
835 hxge_map_regs_fail0:
836 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "Freeing register set memory"));
837 kmem_free(dev_regs, sizeof (dev_regs_t));
838
839 hxge_map_regs_exit:
840 if (ddi_status != DDI_SUCCESS)
841 status |= (HXGE_ERROR | HXGE_DDI_FAILED);
842 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_map_regs"));
843 return (status);
844 }
845
846 static void
847 hxge_unmap_regs(p_hxge_t hxgep)
848 {
849 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_unmap_regs"));
850 if (hxgep->dev_regs) {
851 if (hxgep->dev_regs->hxge_pciregh) {
852 HXGE_DEBUG_MSG((hxgep, DDI_CTL,
853 "==> hxge_unmap_regs: bus"));
854 ddi_regs_map_free(&hxgep->dev_regs->hxge_pciregh);
855 hxgep->dev_regs->hxge_pciregh = NULL;
856 }
857
858 if (hxgep->dev_regs->hxge_regh) {
859 HXGE_DEBUG_MSG((hxgep, DDI_CTL,
860 "==> hxge_unmap_regs: device registers"));
861 ddi_regs_map_free(&hxgep->dev_regs->hxge_regh);
862 hxgep->dev_regs->hxge_regh = NULL;
863 }
864
865 if (hxgep->dev_regs->hxge_msix_regh) {
866 HXGE_DEBUG_MSG((hxgep, DDI_CTL,
867 "==> hxge_unmap_regs: device interrupts"));
868 ddi_regs_map_free(&hxgep->dev_regs->hxge_msix_regh);
869 hxgep->dev_regs->hxge_msix_regh = NULL;
870 }
871 kmem_free(hxgep->dev_regs, sizeof (dev_regs_t));
872 hxgep->dev_regs = NULL;
873 }
874 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_unmap_regs"));
875 }
876
877 static hxge_status_t
878 hxge_setup_mutexes(p_hxge_t hxgep)
879 {
880 int ddi_status = DDI_SUCCESS;
881 hxge_status_t status = HXGE_OK;
882
883 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_setup_mutexes"));
884
885 /*
886 	 * Get the interrupt cookie so the mutexes can be initialized.
887 */
888 ddi_status = ddi_get_iblock_cookie(hxgep->dip, 0,
889 &hxgep->interrupt_cookie);
890
891 if (ddi_status != DDI_SUCCESS) {
892 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
893 "<== hxge_setup_mutexes: failed 0x%x", ddi_status));
894 goto hxge_setup_mutexes_exit;
895 }
896
897 /*
898 	 * Initialize the mutexes for this device.
899 */
900 MUTEX_INIT(hxgep->genlock, NULL,
901 MUTEX_DRIVER, (void *) hxgep->interrupt_cookie);
902 MUTEX_INIT(&hxgep->vmac_lock, NULL,
903 MUTEX_DRIVER, (void *) hxgep->interrupt_cookie);
904 MUTEX_INIT(&hxgep->ouraddr_lock, NULL,
905 MUTEX_DRIVER, (void *) hxgep->interrupt_cookie);
906 RW_INIT(&hxgep->filter_lock, NULL,
907 RW_DRIVER, (void *) hxgep->interrupt_cookie);
908 MUTEX_INIT(&hxgep->pio_lock, NULL,
909 MUTEX_DRIVER, (void *) hxgep->interrupt_cookie);
910 MUTEX_INIT(&hxgep->timeout.lock, NULL,
911 MUTEX_DRIVER, (void *) hxgep->interrupt_cookie);
912
913 hxge_setup_mutexes_exit:
914 HXGE_DEBUG_MSG((hxgep, DDI_CTL,
915 "<== hxge_setup_mutexes status = %x", status));
916
917 if (ddi_status != DDI_SUCCESS)
918 status |= (HXGE_ERROR | HXGE_DDI_FAILED);
919
920 return (status);
921 }
922
923 static void
924 hxge_destroy_mutexes(p_hxge_t hxgep)
925 {
926 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_destroy_mutexes"));
927 RW_DESTROY(&hxgep->filter_lock);
928 MUTEX_DESTROY(&hxgep->vmac_lock);
929 MUTEX_DESTROY(&hxgep->ouraddr_lock);
930 MUTEX_DESTROY(hxgep->genlock);
931 MUTEX_DESTROY(&hxgep->pio_lock);
932 MUTEX_DESTROY(&hxgep->timeout.lock);
933
934 if (hxge_debug_init == 1) {
935 MUTEX_DESTROY(&hxgedebuglock);
936 hxge_debug_init = 0;
937 }
938
939 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_destroy_mutexes"));
940 }
941
942 hxge_status_t
943 hxge_init(p_hxge_t hxgep)
944 {
945 hxge_status_t status = HXGE_OK;
946
947 HXGE_DEBUG_MSG((hxgep, STR_CTL, "==> hxge_init"));
948
949 if (hxgep->drv_state & STATE_HW_INITIALIZED) {
950 return (status);
951 }
952
953 /*
954 * Allocate system memory for the receive/transmit buffer blocks and
955 * receive/transmit descriptor rings.
956 */
957 status = hxge_alloc_mem_pool(hxgep);
958 if (status != HXGE_OK) {
959 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "alloc mem failed\n"));
960 goto hxge_init_fail1;
961 }
962
963 /*
964 * Initialize and enable TXDMA channels.
965 */
966 status = hxge_init_txdma_channels(hxgep);
967 if (status != HXGE_OK) {
968 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "init txdma failed\n"));
969 goto hxge_init_fail3;
970 }
971
972 /*
973 * Initialize and enable RXDMA channels.
974 */
975 status = hxge_init_rxdma_channels(hxgep);
976 if (status != HXGE_OK) {
977 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "init rxdma failed\n"));
978 goto hxge_init_fail4;
979 }
980
981 /*
982 * Initialize TCAM
983 */
984 status = hxge_classify_init(hxgep);
985 if (status != HXGE_OK) {
986 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "init classify failed\n"));
987 goto hxge_init_fail5;
988 }
989
990 /*
991 * Initialize the VMAC block.
992 */
993 status = hxge_vmac_init(hxgep);
994 if (status != HXGE_OK) {
995 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "init MAC failed\n"));
996 goto hxge_init_fail5;
997 }
998
999 	/* Bringup - this may be unnecessary when PXE and FCODE are available */
1000 status = hxge_pfc_set_default_mac_addr(hxgep);
1001 if (status != HXGE_OK) {
1002 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
1003 "Default Address Failure\n"));
1004 goto hxge_init_fail5;
1005 }
1006
1007 /*
1008 * Enable hardware interrupts.
1009 */
1010 hxge_intr_hw_enable(hxgep);
1011 hxgep->drv_state |= STATE_HW_INITIALIZED;
1012
1013 goto hxge_init_exit;
1014
1015 hxge_init_fail5:
1016 hxge_uninit_rxdma_channels(hxgep);
1017 hxge_init_fail4:
1018 hxge_uninit_txdma_channels(hxgep);
1019 hxge_init_fail3:
1020 hxge_free_mem_pool(hxgep);
1021 hxge_init_fail1:
1022 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
1023 "<== hxge_init status (failed) = 0x%08x", status));
1024 return (status);
1025
1026 hxge_init_exit:
1027
1028 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_init status = 0x%08x",
1029 status));
1030
1031 return (status);
1032 }
1033
1034 timeout_id_t
1035 hxge_start_timer(p_hxge_t hxgep, fptrv_t func, int msec)
1036 {
1037 if ((hxgep->suspended == 0) || (hxgep->suspended == DDI_RESUME)) {
1038 return (timeout(func, (caddr_t)hxgep,
1039 drv_usectohz(1000 * msec)));
1040 }
1041 return (NULL);
1042 }
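
/*
 * Typical usage, a sketch only ("hxge_check_hw_state" is a hypothetical
 * callback used for illustration, not necessarily defined in this driver):
 *
 *	hxgep->hxge_timerid =
 *	    hxge_start_timer(hxgep, hxge_check_hw_state, 1000);
 *
 * The timer is later cancelled with hxge_stop_timer(hxgep, timerid).
 */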
1043
1044 /*ARGSUSED*/
1045 void
1046 hxge_stop_timer(p_hxge_t hxgep, timeout_id_t timerid)
1047 {
1048 if (timerid) {
1049 (void) untimeout(timerid);
1050 }
1051 }
1052
1053 void
1054 hxge_uninit(p_hxge_t hxgep)
1055 {
1056 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_uninit"));
1057
1058 if (!(hxgep->drv_state & STATE_HW_INITIALIZED)) {
1059 HXGE_DEBUG_MSG((hxgep, DDI_CTL,
1060 "==> hxge_uninit: not initialized"));
1061 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_uninit"));
1062 return;
1063 }
1064
1065 /* Stop timer */
1066 if (hxgep->hxge_timerid) {
1067 hxge_stop_timer(hxgep, hxgep->hxge_timerid);
1068 hxgep->hxge_timerid = 0;
1069 }
1070
1071 (void) hxge_intr_hw_disable(hxgep);
1072
1073 /* Reset the receive VMAC side. */
1074 (void) hxge_rx_vmac_disable(hxgep);
1075
1076 /* Free classification resources */
1077 (void) hxge_classify_uninit(hxgep);
1078
1079 /* Reset the transmit/receive DMA side. */
1080 (void) hxge_txdma_hw_mode(hxgep, HXGE_DMA_STOP);
1081 (void) hxge_rxdma_hw_mode(hxgep, HXGE_DMA_STOP);
1082
1083 hxge_uninit_txdma_channels(hxgep);
1084 hxge_uninit_rxdma_channels(hxgep);
1085
1086 /* Reset the transmit VMAC side. */
1087 (void) hxge_tx_vmac_disable(hxgep);
1088
1089 hxge_free_mem_pool(hxgep);
1090
1091 hxgep->drv_state &= ~STATE_HW_INITIALIZED;
1092
1093 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_uninit"));
1094 }
1095
1096 /*ARGSUSED*/
1097 /*VARARGS*/
1098 void
1099 hxge_debug_msg(p_hxge_t hxgep, uint64_t level, char *fmt, ...)
1100 {
1101 char msg_buffer[1048];
1102 char prefix_buffer[32];
1103 int instance;
1104 uint64_t debug_level;
1105 int cmn_level = CE_CONT;
1106 va_list ap;
1107
1108 debug_level = (hxgep == NULL) ? hxge_debug_level :
1109 hxgep->hxge_debug_level;
1110
1111 if ((level & debug_level) || (level == HXGE_NOTE) ||
1112 (level == HXGE_ERR_CTL)) {
1113 /* do the msg processing */
1114 if (hxge_debug_init == 0) {
1115 MUTEX_INIT(&hxgedebuglock, NULL, MUTEX_DRIVER, NULL);
1116 hxge_debug_init = 1;
1117 }
1118
1119 MUTEX_ENTER(&hxgedebuglock);
1120
1121 if ((level & HXGE_NOTE)) {
1122 cmn_level = CE_NOTE;
1123 }
1124
1125 if (level & HXGE_ERR_CTL) {
1126 cmn_level = CE_WARN;
1127 }
1128
1129 va_start(ap, fmt);
1130 (void) vsprintf(msg_buffer, fmt, ap);
1131 va_end(ap);
1132
1133 if (hxgep == NULL) {
1134 instance = -1;
1135 (void) sprintf(prefix_buffer, "%s :", "hxge");
1136 } else {
1137 instance = hxgep->instance;
1138 (void) sprintf(prefix_buffer,
1139 "%s%d :", "hxge", instance);
1140 }
1141
1142 MUTEX_EXIT(&hxgedebuglock);
1143 cmn_err(cmn_level, "%s %s\n", prefix_buffer, msg_buffer);
1144 }
1145 }
1146
1147 char *
1148 hxge_dump_packet(char *addr, int size)
1149 {
1150 uchar_t *ap = (uchar_t *)addr;
1151 int i;
1152 static char etherbuf[1024];
1153 char *cp = etherbuf;
1154 char digits[] = "0123456789abcdef";
1155
1156 if (!size)
1157 size = 60;
1158
1159 if (size > MAX_DUMP_SZ) {
1160 /* Dump the leading bytes */
1161 for (i = 0; i < MAX_DUMP_SZ / 2; i++) {
1162 if (*ap > 0x0f)
1163 *cp++ = digits[*ap >> 4];
1164 *cp++ = digits[*ap++ & 0xf];
1165 *cp++ = ':';
1166 }
1167 for (i = 0; i < 20; i++)
1168 *cp++ = '.';
1169 /* Dump the last MAX_DUMP_SZ/2 bytes */
1170 ap = (uchar_t *)(addr + (size - MAX_DUMP_SZ / 2));
1171 for (i = 0; i < MAX_DUMP_SZ / 2; i++) {
1172 if (*ap > 0x0f)
1173 *cp++ = digits[*ap >> 4];
1174 *cp++ = digits[*ap++ & 0xf];
1175 *cp++ = ':';
1176 }
1177 } else {
1178 for (i = 0; i < size; i++) {
1179 if (*ap > 0x0f)
1180 *cp++ = digits[*ap >> 4];
1181 *cp++ = digits[*ap++ & 0xf];
1182 *cp++ = ':';
1183 }
1184 }
1185 *--cp = 0;
1186 return (etherbuf);
1187 }
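
/*
 * Example usage, a sketch only (assumes an mblk_t "mp", that <sys/strsun.h>
 * is available for MBLKL(), and that the TX_CTL debug level is defined):
 *
 *	HXGE_DEBUG_MSG((hxgep, TX_CTL, "packet: %s",
 *	    hxge_dump_packet((char *)mp->b_rptr, (int)MBLKL(mp))));
 */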
1188
1189 static void
1190 hxge_suspend(p_hxge_t hxgep)
1191 {
1192 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_suspend"));
1193
1194 /*
1195 * Stop the link status timer before hxge_intrs_disable() to avoid
1196 	 * accessing the MSIX table simultaneously. Note that the timer
1197 * routine polls for MSIX parity errors.
1198 */
1199 MUTEX_ENTER(&hxgep->timeout.lock);
1200 if (hxgep->timeout.id)
1201 (void) untimeout(hxgep->timeout.id);
1202 MUTEX_EXIT(&hxgep->timeout.lock);
1203
1204 hxge_intrs_disable(hxgep);
1205 hxge_destroy_dev(hxgep);
1206
1207 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_suspend"));
1208 }
1209
1210 static hxge_status_t
1211 hxge_resume(p_hxge_t hxgep)
1212 {
1213 hxge_status_t status = HXGE_OK;
1214
1215 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_resume"));
1216 hxgep->suspended = DDI_RESUME;
1217
1218 (void) hxge_rxdma_hw_mode(hxgep, HXGE_DMA_START);
1219 (void) hxge_txdma_hw_mode(hxgep, HXGE_DMA_START);
1220
1221 (void) hxge_rx_vmac_enable(hxgep);
1222 (void) hxge_tx_vmac_enable(hxgep);
1223
1224 hxge_intrs_enable(hxgep);
1225
1226 hxgep->suspended = 0;
1227
1228 /*
1229 * Resume the link status timer after hxge_intrs_enable to avoid
1230 * accessing MSIX table simultaneously.
1231 */
1232 MUTEX_ENTER(&hxgep->timeout.lock);
1233 hxgep->timeout.id = timeout(hxge_link_poll, (void *)hxgep,
1234 hxgep->timeout.ticks);
1235 MUTEX_EXIT(&hxgep->timeout.lock);
1236
1237 HXGE_DEBUG_MSG((hxgep, DDI_CTL,
1238 "<== hxge_resume status = 0x%x", status));
1239
1240 return (status);
1241 }
1242
1243 static hxge_status_t
1244 hxge_setup_dev(p_hxge_t hxgep)
1245 {
1246 hxge_status_t status = HXGE_OK;
1247
1248 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_setup_dev"));
1249
1250 status = hxge_link_init(hxgep);
1251 if (fm_check_acc_handle(hxgep->dev_regs->hxge_regh) != DDI_FM_OK) {
1252 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
1253 "Bad register acc handle"));
1254 status = HXGE_ERROR;
1255 }
1256
1257 if (status != HXGE_OK) {
1258 HXGE_DEBUG_MSG((hxgep, MAC_CTL,
1259 " hxge_setup_dev status (link init 0x%08x)", status));
1260 goto hxge_setup_dev_exit;
1261 }
1262
1263 hxge_setup_dev_exit:
1264 HXGE_DEBUG_MSG((hxgep, DDI_CTL,
1265 "<== hxge_setup_dev status = 0x%08x", status));
1266
1267 return (status);
1268 }
1269
1270 static void
1271 hxge_destroy_dev(p_hxge_t hxgep)
1272 {
1273 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_destroy_dev"));
1274
1275 (void) hxge_hw_stop(hxgep);
1276
1277 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_destroy_dev"));
1278 }
1279
1280 static hxge_status_t
1281 hxge_setup_system_dma_pages(p_hxge_t hxgep)
1282 {
1283 int ddi_status = DDI_SUCCESS;
1284 uint_t count;
1285 ddi_dma_cookie_t cookie;
1286 uint_t iommu_pagesize;
1287 hxge_status_t status = HXGE_OK;
1288
1289 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_setup_system_dma_pages"));
1290
1291 hxgep->sys_page_sz = ddi_ptob(hxgep->dip, (ulong_t)1);
1292 iommu_pagesize = dvma_pagesize(hxgep->dip);
1293
1294 HXGE_DEBUG_MSG((hxgep, DDI_CTL,
1295 " hxge_setup_system_dma_pages: page %d (ddi_ptob %d) "
1296 " default_block_size %d iommu_pagesize %d",
1297 hxgep->sys_page_sz, ddi_ptob(hxgep->dip, (ulong_t)1),
1298 hxgep->rx_default_block_size, iommu_pagesize));
1299
1300 if (iommu_pagesize != 0) {
1301 if (hxgep->sys_page_sz == iommu_pagesize) {
1302 			/* Hydra supports up to 8K pages */
1303 if (iommu_pagesize > 0x2000)
1304 hxgep->sys_page_sz = 0x2000;
1305 } else {
1306 if (hxgep->sys_page_sz > iommu_pagesize)
1307 hxgep->sys_page_sz = iommu_pagesize;
1308 }
1309 }
1310
1311 hxgep->sys_page_mask = ~(hxgep->sys_page_sz - 1);
1312
1313 HXGE_DEBUG_MSG((hxgep, DDI_CTL,
1314 "==> hxge_setup_system_dma_pages: page %d (ddi_ptob %d) "
1315 "default_block_size %d page mask %d",
1316 hxgep->sys_page_sz, ddi_ptob(hxgep->dip, (ulong_t)1),
1317 hxgep->rx_default_block_size, hxgep->sys_page_mask));
1318
1319 switch (hxgep->sys_page_sz) {
1320 default:
1321 hxgep->sys_page_sz = 0x1000;
1322 hxgep->sys_page_mask = ~(hxgep->sys_page_sz - 1);
1323 hxgep->rx_default_block_size = 0x1000;
1324 hxgep->rx_bksize_code = RBR_BKSIZE_4K;
1325 break;
1326 case 0x1000:
1327 hxgep->rx_default_block_size = 0x1000;
1328 hxgep->rx_bksize_code = RBR_BKSIZE_4K;
1329 break;
1330 case 0x2000:
1331 hxgep->rx_default_block_size = 0x2000;
1332 hxgep->rx_bksize_code = RBR_BKSIZE_8K;
1333 break;
1334 }
1335
1336 hxge_rx_dma_attr.dma_attr_align = hxgep->sys_page_sz;
1337 hxge_tx_dma_attr.dma_attr_align = hxgep->sys_page_sz;
1338
1339 /*
1340 * Get the system DMA burst size.
1341 */
1342 ddi_status = ddi_dma_alloc_handle(hxgep->dip, &hxge_tx_dma_attr,
1343 DDI_DMA_DONTWAIT, 0, &hxgep->dmasparehandle);
1344 if (ddi_status != DDI_SUCCESS) {
1345 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
1346 "ddi_dma_alloc_handle: failed status 0x%x", ddi_status));
1347 goto hxge_get_soft_properties_exit;
1348 }
1349
1350 ddi_status = ddi_dma_addr_bind_handle(hxgep->dmasparehandle, NULL,
1351 (caddr_t)hxgep->dmasparehandle, sizeof (hxgep->dmasparehandle),
1352 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT, 0,
1353 &cookie, &count);
1354 if (ddi_status != DDI_DMA_MAPPED) {
1355 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
1356 "Binding spare handle to find system burstsize failed."));
1357 ddi_status = DDI_FAILURE;
1358 goto hxge_get_soft_properties_fail1;
1359 }
1360
1361 hxgep->sys_burst_sz = ddi_dma_burstsizes(hxgep->dmasparehandle);
1362 (void) ddi_dma_unbind_handle(hxgep->dmasparehandle);
1363
1364 hxge_get_soft_properties_fail1:
1365 ddi_dma_free_handle(&hxgep->dmasparehandle);
1366
1367 hxge_get_soft_properties_exit:
1368
1369 if (ddi_status != DDI_SUCCESS)
1370 status |= (HXGE_ERROR | HXGE_DDI_FAILED);
1371
1372 HXGE_DEBUG_MSG((hxgep, DDI_CTL,
1373 "<== hxge_setup_system_dma_pages status = 0x%08x", status));
1374
1375 return (status);
1376 }
1377
1378 static hxge_status_t
1379 hxge_alloc_mem_pool(p_hxge_t hxgep)
1380 {
1381 hxge_status_t status = HXGE_OK;
1382
1383 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_alloc_mem_pool"));
1384
1385 status = hxge_alloc_rx_mem_pool(hxgep);
1386 if (status != HXGE_OK) {
1387 return (HXGE_ERROR);
1388 }
1389
1390 status = hxge_alloc_tx_mem_pool(hxgep);
1391 if (status != HXGE_OK) {
1392 hxge_free_rx_mem_pool(hxgep);
1393 return (HXGE_ERROR);
1394 }
1395
1396 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_alloc_mem_pool"));
1397 return (HXGE_OK);
1398 }
1399
1400 static void
1401 hxge_free_mem_pool(p_hxge_t hxgep)
1402 {
1403 HXGE_DEBUG_MSG((hxgep, MEM_CTL, "==> hxge_free_mem_pool"));
1404
1405 hxge_free_rx_mem_pool(hxgep);
1406 hxge_free_tx_mem_pool(hxgep);
1407
1408 HXGE_DEBUG_MSG((hxgep, MEM_CTL, "<== hxge_free_mem_pool"));
1409 }
1410
1411 static hxge_status_t
1412 hxge_alloc_rx_mem_pool(p_hxge_t hxgep)
1413 {
1414 int i, j;
1415 uint32_t ndmas, st_rdc;
1416 p_hxge_dma_pt_cfg_t p_all_cfgp;
1417 p_hxge_hw_pt_cfg_t p_cfgp;
1418 p_hxge_dma_pool_t dma_poolp;
1419 p_hxge_dma_common_t *dma_buf_p;
1420 p_hxge_dma_pool_t dma_rbr_cntl_poolp;
1421 p_hxge_dma_common_t *dma_rbr_cntl_p;
1422 p_hxge_dma_pool_t dma_rcr_cntl_poolp;
1423 p_hxge_dma_common_t *dma_rcr_cntl_p;
1424 p_hxge_dma_pool_t dma_mbox_cntl_poolp;
1425 p_hxge_dma_common_t *dma_mbox_cntl_p;
1426 size_t rx_buf_alloc_size;
1427 size_t rx_rbr_cntl_alloc_size;
1428 size_t rx_rcr_cntl_alloc_size;
1429 size_t rx_mbox_cntl_alloc_size;
1430 uint32_t *num_chunks; /* per dma */
1431 hxge_status_t status = HXGE_OK;
1432
1433 uint32_t hxge_port_rbr_size;
1434 uint32_t hxge_port_rbr_spare_size;
1435 uint32_t hxge_port_rcr_size;
1436
1437 HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_alloc_rx_mem_pool"));
1438
1439 p_all_cfgp = (p_hxge_dma_pt_cfg_t)&hxgep->pt_config;
1440 p_cfgp = (p_hxge_hw_pt_cfg_t)&p_all_cfgp->hw_config;
1441 st_rdc = p_cfgp->start_rdc;
1442 ndmas = p_cfgp->max_rdcs;
1443
1444 HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1445 " hxge_alloc_rx_mem_pool st_rdc %d ndmas %d", st_rdc, ndmas));
1446
1447 /*
1448 * Allocate memory for each receive DMA channel.
1449 */
1450 dma_poolp = (p_hxge_dma_pool_t)KMEM_ZALLOC(sizeof (hxge_dma_pool_t),
1451 KM_SLEEP);
1452 dma_buf_p = (p_hxge_dma_common_t *)KMEM_ZALLOC(
1453 sizeof (p_hxge_dma_common_t) * ndmas, KM_SLEEP);
1454
1455 dma_rbr_cntl_poolp = (p_hxge_dma_pool_t)
1456 KMEM_ZALLOC(sizeof (hxge_dma_pool_t), KM_SLEEP);
1457 dma_rbr_cntl_p = (p_hxge_dma_common_t *)KMEM_ZALLOC(
1458 sizeof (p_hxge_dma_common_t) * ndmas, KM_SLEEP);
1459 dma_rcr_cntl_poolp = (p_hxge_dma_pool_t)
1460 KMEM_ZALLOC(sizeof (hxge_dma_pool_t), KM_SLEEP);
1461 dma_rcr_cntl_p = (p_hxge_dma_common_t *)KMEM_ZALLOC(
1462 sizeof (p_hxge_dma_common_t) * ndmas, KM_SLEEP);
1463 dma_mbox_cntl_poolp = (p_hxge_dma_pool_t)
1464 KMEM_ZALLOC(sizeof (hxge_dma_pool_t), KM_SLEEP);
1465 dma_mbox_cntl_p = (p_hxge_dma_common_t *)KMEM_ZALLOC(
1466 sizeof (p_hxge_dma_common_t) * ndmas, KM_SLEEP);
1467
1468 num_chunks = (uint32_t *)KMEM_ZALLOC(sizeof (uint32_t) * ndmas,
1469 KM_SLEEP);
1470
1471 /*
1472 * Assume that each DMA channel will be configured with default block
1473 	 * size.  RBR block counts are rounded up to a multiple of the post batch count (16).
1474 */
1475 hxge_port_rbr_size = p_all_cfgp->rbr_size;
1476 hxge_port_rcr_size = p_all_cfgp->rcr_size;
1477
1478 if (!hxge_port_rbr_size) {
1479 hxge_port_rbr_size = HXGE_RBR_RBB_DEFAULT;
1480 }
1481
1482 if (hxge_port_rbr_size % HXGE_RXDMA_POST_BATCH) {
1483 hxge_port_rbr_size = (HXGE_RXDMA_POST_BATCH *
1484 (hxge_port_rbr_size / HXGE_RXDMA_POST_BATCH + 1));
1485 }
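	/*
	 * For example, a requested RBR size of 1000 is not a multiple of
	 * the post batch count (16), so it is rounded up to
	 * 16 * (1000 / 16 + 1) = 1008 blocks.
	 */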
1486
1487 p_all_cfgp->rbr_size = hxge_port_rbr_size;
1488 hxge_port_rbr_spare_size = hxge_rbr_spare_size;
1489
1490 if (hxge_port_rbr_spare_size % HXGE_RXDMA_POST_BATCH) {
1491 hxge_port_rbr_spare_size = (HXGE_RXDMA_POST_BATCH *
1492 (hxge_port_rbr_spare_size / HXGE_RXDMA_POST_BATCH + 1));
1493 }
1494
1495 rx_buf_alloc_size = (hxgep->rx_default_block_size *
1496 (hxge_port_rbr_size + hxge_port_rbr_spare_size));
1497
1498 /*
1499 * Addresses of receive block ring, receive completion ring and the
1500 * mailbox must be all cache-aligned (64 bytes).
1501 */
1502 rx_rbr_cntl_alloc_size = hxge_port_rbr_size + hxge_port_rbr_spare_size;
1503 rx_rbr_cntl_alloc_size *= sizeof (rx_desc_t);
1504 rx_rcr_cntl_alloc_size = sizeof (rcr_entry_t) * hxge_port_rcr_size;
1505 rx_mbox_cntl_alloc_size = sizeof (rxdma_mailbox_t);
1506
1507 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_alloc_rx_mem_pool: "
1508 "hxge_port_rbr_size = %d hxge_port_rbr_spare_size = %d "
1509 "hxge_port_rcr_size = %d rx_cntl_alloc_size = %d",
1510 hxge_port_rbr_size, hxge_port_rbr_spare_size,
1511 hxge_port_rcr_size, rx_cntl_alloc_size));
1512
1513 hxgep->hxge_port_rbr_size = hxge_port_rbr_size;
1514 hxgep->hxge_port_rcr_size = hxge_port_rcr_size;
1515
1516 /*
1517 * Allocate memory for receive buffers and descriptor rings. Replace
1518 * allocation functions with interface functions provided by the
1519 * partition manager when it is available.
1520 */
1521 /*
1522 * Allocate memory for the receive buffer blocks.
1523 */
1524 for (i = 0; i < ndmas; i++) {
1525 HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
1526 " hxge_alloc_rx_mem_pool to alloc mem: "
1527 " dma %d dma_buf_p %llx &dma_buf_p %llx",
1528 i, dma_buf_p[i], &dma_buf_p[i]));
1529
1530 num_chunks[i] = 0;
1531
1532 status = hxge_alloc_rx_buf_dma(hxgep, st_rdc, &dma_buf_p[i],
1533 rx_buf_alloc_size, hxgep->rx_default_block_size,
1534 &num_chunks[i]);
1535 if (status != HXGE_OK) {
1536 break;
1537 }
1538
1539 st_rdc++;
1540 HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
1541 " hxge_alloc_rx_mem_pool DONE alloc mem: "
1542 "dma %d dma_buf_p %llx &dma_buf_p %llx", i,
1543 dma_buf_p[i], &dma_buf_p[i]));
1544 }
1545
1546 if (i < ndmas) {
1547 goto hxge_alloc_rx_mem_fail1;
1548 }
1549
1550 /*
1551 * Allocate memory for descriptor rings and mailbox.
1552 */
1553 st_rdc = p_cfgp->start_rdc;
1554 for (j = 0; j < ndmas; j++) {
1555 if ((status = hxge_alloc_rx_cntl_dma(hxgep, st_rdc,
1556 &dma_rbr_cntl_p[j], &hxge_rx_rbr_desc_dma_attr,
1557 rx_rbr_cntl_alloc_size)) != HXGE_OK) {
1558 break;
1559 }
1560
1561 if ((status = hxge_alloc_rx_cntl_dma(hxgep, st_rdc,
1562 &dma_rcr_cntl_p[j], &hxge_rx_rcr_desc_dma_attr,
1563 rx_rcr_cntl_alloc_size)) != HXGE_OK) {
1564 break;
1565 }
1566
1567 if ((status = hxge_alloc_rx_cntl_dma(hxgep, st_rdc,
1568 &dma_mbox_cntl_p[j], &hxge_rx_mbox_dma_attr,
1569 rx_mbox_cntl_alloc_size)) != HXGE_OK) {
1570 break;
1571 }
1572 st_rdc++;
1573 }
1574
1575 if (j < ndmas) {
1576 goto hxge_alloc_rx_mem_fail2;
1577 }
1578
1579 dma_poolp->ndmas = ndmas;
1580 dma_poolp->num_chunks = num_chunks;
1581 dma_poolp->buf_allocated = B_TRUE;
1582 hxgep->rx_buf_pool_p = dma_poolp;
1583 dma_poolp->dma_buf_pool_p = dma_buf_p;
1584
1585 dma_rbr_cntl_poolp->ndmas = ndmas;
1586 dma_rbr_cntl_poolp->buf_allocated = B_TRUE;
1587 hxgep->rx_rbr_cntl_pool_p = dma_rbr_cntl_poolp;
1588 dma_rbr_cntl_poolp->dma_buf_pool_p = dma_rbr_cntl_p;
1589
1590 dma_rcr_cntl_poolp->ndmas = ndmas;
1591 dma_rcr_cntl_poolp->buf_allocated = B_TRUE;
1592 hxgep->rx_rcr_cntl_pool_p = dma_rcr_cntl_poolp;
1593 dma_rcr_cntl_poolp->dma_buf_pool_p = dma_rcr_cntl_p;
1594
1595 dma_mbox_cntl_poolp->ndmas = ndmas;
1596 dma_mbox_cntl_poolp->buf_allocated = B_TRUE;
1597 hxgep->rx_mbox_cntl_pool_p = dma_mbox_cntl_poolp;
1598 dma_mbox_cntl_poolp->dma_buf_pool_p = dma_mbox_cntl_p;
1599
1600 goto hxge_alloc_rx_mem_pool_exit;
1601
1602 hxge_alloc_rx_mem_fail2:
1603 /* Free control buffers */
1604 HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1605 "==> hxge_alloc_rx_mem_pool: freeing control bufs (%d)", j));
1606 for (; j >= 0; j--) {
1607 hxge_free_rx_cntl_dma(hxgep,
1608 (p_hxge_dma_common_t)dma_rbr_cntl_p[j]);
1609 hxge_free_rx_cntl_dma(hxgep,
1610 (p_hxge_dma_common_t)dma_rcr_cntl_p[j]);
1611 hxge_free_rx_cntl_dma(hxgep,
1612 (p_hxge_dma_common_t)dma_mbox_cntl_p[j]);
1613 HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1614 "==> hxge_alloc_rx_mem_pool: control bufs freed (%d)", j));
1615 }
1616 HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1617 "==> hxge_alloc_rx_mem_pool: control bufs freed (%d)", j));
1618
1619 hxge_alloc_rx_mem_fail1:
1620 /* Free data buffers */
1621 i--;
1622 HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1623 "==> hxge_alloc_rx_mem_pool: freeing data bufs (%d)", i));
1624 for (; i >= 0; i--) {
1625 hxge_free_rx_buf_dma(hxgep, (p_hxge_dma_common_t)dma_buf_p[i],
1626 num_chunks[i]);
1627 }
1628 HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1629 "==> hxge_alloc_rx_mem_pool: data bufs freed (%d)", i));
1630
1631 KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas);
1632 KMEM_FREE(dma_poolp, sizeof (hxge_dma_pool_t));
1633 KMEM_FREE(dma_buf_p, ndmas * sizeof (p_hxge_dma_common_t));
1634 KMEM_FREE(dma_rbr_cntl_poolp, sizeof (hxge_dma_pool_t));
1635 KMEM_FREE(dma_rbr_cntl_p, ndmas * sizeof (p_hxge_dma_common_t));
1636 KMEM_FREE(dma_rcr_cntl_poolp, sizeof (hxge_dma_pool_t));
1637 KMEM_FREE(dma_rcr_cntl_p, ndmas * sizeof (p_hxge_dma_common_t));
1638 KMEM_FREE(dma_mbox_cntl_poolp, sizeof (hxge_dma_pool_t));
1639 KMEM_FREE(dma_mbox_cntl_p, ndmas * sizeof (p_hxge_dma_common_t));
1640
1641 hxge_alloc_rx_mem_pool_exit:
1642 HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1643 "<== hxge_alloc_rx_mem_pool:status 0x%08x", status));
1644
1645 return (status);
1646 }
1647
1648 static void
1649 hxge_free_rx_mem_pool(p_hxge_t hxgep)
1650 {
1651 uint32_t i, ndmas;
1652 p_hxge_dma_pool_t dma_poolp;
1653 p_hxge_dma_common_t *dma_buf_p;
1654 p_hxge_dma_pool_t dma_rbr_cntl_poolp;
1655 p_hxge_dma_common_t *dma_rbr_cntl_p;
1656 p_hxge_dma_pool_t dma_rcr_cntl_poolp;
1657 p_hxge_dma_common_t *dma_rcr_cntl_p;
1658 p_hxge_dma_pool_t dma_mbox_cntl_poolp;
1659 p_hxge_dma_common_t *dma_mbox_cntl_p;
1660 uint32_t *num_chunks;
1661
1662 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_free_rx_mem_pool"));
1663
1664 dma_poolp = hxgep->rx_buf_pool_p;
1665 if (dma_poolp == NULL || (!dma_poolp->buf_allocated)) {
1666 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "<== hxge_free_rx_mem_pool "
1667 "(null rx buf pool or buf not allocated"));
1668 return;
1669 }
1670
1671 dma_rbr_cntl_poolp = hxgep->rx_rbr_cntl_pool_p;
1672 if (dma_rbr_cntl_poolp == NULL ||
1673 (!dma_rbr_cntl_poolp->buf_allocated)) {
1674 HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
1675 "<== hxge_free_rx_mem_pool "
1676 "(null rbr cntl buf pool or rbr cntl buf not allocated"));
1677 return;
1678 }
1679
1680 dma_rcr_cntl_poolp = hxgep->rx_rcr_cntl_pool_p;
1681 if (dma_rcr_cntl_poolp == NULL ||
1682 (!dma_rcr_cntl_poolp->buf_allocated)) {
1683 HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
1684 "<== hxge_free_rx_mem_pool "
1685 "(null rcr cntl buf pool or rcr cntl buf not allocated"));
1686 return;
1687 }
1688
1689 dma_mbox_cntl_poolp = hxgep->rx_mbox_cntl_pool_p;
1690 if (dma_mbox_cntl_poolp == NULL ||
1691 (!dma_mbox_cntl_poolp->buf_allocated)) {
1692 HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
1693 "<== hxge_free_rx_mem_pool "
1694 "(null mbox cntl buf pool or mbox cntl buf not allocated"));
1695 return;
1696 }
1697
1698 dma_buf_p = dma_poolp->dma_buf_pool_p;
1699 num_chunks = dma_poolp->num_chunks;
1700
1701 dma_rbr_cntl_p = dma_rbr_cntl_poolp->dma_buf_pool_p;
1702 dma_rcr_cntl_p = dma_rcr_cntl_poolp->dma_buf_pool_p;
1703 dma_mbox_cntl_p = dma_mbox_cntl_poolp->dma_buf_pool_p;
1704 ndmas = dma_rbr_cntl_poolp->ndmas;
1705
1706 for (i = 0; i < ndmas; i++) {
1707 hxge_free_rx_buf_dma(hxgep, dma_buf_p[i], num_chunks[i]);
1708 }
1709
1710 for (i = 0; i < ndmas; i++) {
1711 hxge_free_rx_cntl_dma(hxgep, dma_rbr_cntl_p[i]);
1712 hxge_free_rx_cntl_dma(hxgep, dma_rcr_cntl_p[i]);
1713 hxge_free_rx_cntl_dma(hxgep, dma_mbox_cntl_p[i]);
1714 }
1715
1716 for (i = 0; i < ndmas; i++) {
1717 KMEM_FREE(dma_buf_p[i],
1718 sizeof (hxge_dma_common_t) * HXGE_DMA_BLOCK);
1719 KMEM_FREE(dma_rbr_cntl_p[i], sizeof (hxge_dma_common_t));
1720 KMEM_FREE(dma_rcr_cntl_p[i], sizeof (hxge_dma_common_t));
1721 KMEM_FREE(dma_mbox_cntl_p[i], sizeof (hxge_dma_common_t));
1722 }
1723
1724 KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas);
1725 KMEM_FREE(dma_rbr_cntl_p, ndmas * sizeof (p_hxge_dma_common_t));
1726 KMEM_FREE(dma_rbr_cntl_poolp, sizeof (hxge_dma_pool_t));
1727 KMEM_FREE(dma_rcr_cntl_p, ndmas * sizeof (p_hxge_dma_common_t));
1728 KMEM_FREE(dma_rcr_cntl_poolp, sizeof (hxge_dma_pool_t));
1729 KMEM_FREE(dma_mbox_cntl_p, ndmas * sizeof (p_hxge_dma_common_t));
1730 KMEM_FREE(dma_mbox_cntl_poolp, sizeof (hxge_dma_pool_t));
1731 KMEM_FREE(dma_buf_p, ndmas * sizeof (p_hxge_dma_common_t));
1732 KMEM_FREE(dma_poolp, sizeof (hxge_dma_pool_t));
1733
1734 hxgep->rx_buf_pool_p = NULL;
1735 hxgep->rx_rbr_cntl_pool_p = NULL;
1736 hxgep->rx_rcr_cntl_pool_p = NULL;
1737 hxgep->rx_mbox_cntl_pool_p = NULL;
1738
1739 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "<== hxge_free_rx_mem_pool"));
1740 }
1741
1742 static hxge_status_t
1743 hxge_alloc_rx_buf_dma(p_hxge_t hxgep, uint16_t dma_channel,
1744 p_hxge_dma_common_t *dmap,
1745 size_t alloc_size, size_t block_size, uint32_t *num_chunks)
1746 {
1747 p_hxge_dma_common_t rx_dmap;
1748 hxge_status_t status = HXGE_OK;
1749 size_t total_alloc_size;
1750 size_t allocated = 0;
1751 int i, size_index, array_size;
1752
1753 HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_alloc_rx_buf_dma"));
1754
1755 rx_dmap = (p_hxge_dma_common_t)
1756 KMEM_ZALLOC(sizeof (hxge_dma_common_t) * HXGE_DMA_BLOCK, KM_SLEEP);
1757
1758 HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
1759 " alloc_rx_buf_dma rdc %d asize %x bsize %x bbuf %llx ",
1760 dma_channel, alloc_size, block_size, dmap));
1761
1762 total_alloc_size = alloc_size;
1763
1764 i = 0;
1765 size_index = 0;
1766 array_size = sizeof (alloc_sizes) / sizeof (size_t);
1767 while ((size_index < array_size) &&
1768 (alloc_sizes[size_index] < alloc_size))
1769 size_index++;
1770 if (size_index >= array_size) {
1771 size_index = array_size - 1;
1772 }
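	/*
	 * At this point size_index selects the smallest chunk size that
	 * covers alloc_size (or the largest available chunk if alloc_size
	 * exceeds every table entry).  The loop below allocates chunks of
	 * that size and, when an allocation fails, retries with the next
	 * smaller size until the request is satisfied, the sizes are
	 * exhausted, or HXGE_DMA_BLOCK chunks have been used.
	 */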
1773
1774 while ((allocated < total_alloc_size) &&
1775 (size_index >= 0) && (i < HXGE_DMA_BLOCK)) {
1776 rx_dmap[i].dma_chunk_index = i;
1777 rx_dmap[i].block_size = block_size;
1778 rx_dmap[i].alength = alloc_sizes[size_index];
1779 rx_dmap[i].orig_alength = rx_dmap[i].alength;
1780 rx_dmap[i].nblocks = alloc_sizes[size_index] / block_size;
1781 rx_dmap[i].dma_channel = dma_channel;
1782 rx_dmap[i].contig_alloc_type = B_FALSE;
1783
1784 HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
1785 "alloc_rx_buf_dma rdc %d chunk %d bufp %llx size %x "
1786 "i %d nblocks %d alength %d",
1787 dma_channel, i, &rx_dmap[i], block_size,
1788 i, rx_dmap[i].nblocks, rx_dmap[i].alength));
1789 status = hxge_dma_mem_alloc(hxgep, hxge_force_dma,
1790 &hxge_rx_dma_attr, rx_dmap[i].alength,
1791 &hxge_dev_buf_dma_acc_attr,
1792 DDI_DMA_READ | DDI_DMA_STREAMING,
1793 (p_hxge_dma_common_t)(&rx_dmap[i]));
1794 if (status != HXGE_OK) {
1795 HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1796 " hxge_alloc_rx_buf_dma: Alloc Failed: "
1797 " for size: %d", alloc_sizes[size_index]));
1798 size_index--;
1799 } else {
1800 HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1801 " alloc_rx_buf_dma allocated rdc %d "
1802 "chunk %d size %x dvma %x bufp %llx ",
1803 dma_channel, i, rx_dmap[i].alength,
1804 rx_dmap[i].ioaddr_pp, &rx_dmap[i]));
1805 i++;
1806 allocated += alloc_sizes[size_index];
1807 }
1808 }
1809
1810 if (allocated < total_alloc_size) {
1811 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
1812 " hxge_alloc_rx_buf_dma failed due to"
1813 " allocated(%d) < required(%d)",
1814 allocated, total_alloc_size));
1815 goto hxge_alloc_rx_mem_fail1;
1816 }
1817
1818 HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1819 " alloc_rx_buf_dma rdc %d allocated %d chunks", dma_channel, i));
1820
1821 *num_chunks = i;
1822 *dmap = rx_dmap;
1823
1824 goto hxge_alloc_rx_mem_exit;
1825
1826 hxge_alloc_rx_mem_fail1:
1827 KMEM_FREE(rx_dmap, sizeof (hxge_dma_common_t) * HXGE_DMA_BLOCK);
1828
1829 hxge_alloc_rx_mem_exit:
1830 HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1831 "<== hxge_alloc_rx_buf_dma status 0x%08x", status));
1832
1833 return (status);
1834 }
1835
1836 /*ARGSUSED*/
1837 static void
1838 hxge_free_rx_buf_dma(p_hxge_t hxgep, p_hxge_dma_common_t dmap,
1839 uint32_t num_chunks)
1840 {
1841 int i;
1842
1843 HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
1844 "==> hxge_free_rx_buf_dma: # of chunks %d", num_chunks));
1845
1846 for (i = 0; i < num_chunks; i++) {
1847 HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
1848 "==> hxge_free_rx_buf_dma: chunk %d dmap 0x%llx", i, dmap));
1849 hxge_dma_mem_free(dmap++);
1850 }
1851
1852 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "<== hxge_free_rx_buf_dma"));
1853 }
1854
1855 /*ARGSUSED*/
1856 static hxge_status_t
1857 hxge_alloc_rx_cntl_dma(p_hxge_t hxgep, uint16_t dma_channel,
1858 p_hxge_dma_common_t *dmap, struct ddi_dma_attr *attr, size_t size)
1859 {
1860 p_hxge_dma_common_t rx_dmap;
1861 hxge_status_t status = HXGE_OK;
1862
1863 HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_alloc_rx_cntl_dma"));
1864
1865 rx_dmap = (p_hxge_dma_common_t)
1866 KMEM_ZALLOC(sizeof (hxge_dma_common_t), KM_SLEEP);
1867
1868 rx_dmap->contig_alloc_type = B_FALSE;
1869
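	/*
	 * Control areas (RBR/RCR descriptors and the mailbox) use a single
	 * DMA-consistent allocation with the attributes supplied by the
	 * caller; no chunking is needed here.
	 */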
1870 status = hxge_dma_mem_alloc(hxgep, hxge_force_dma,
1871 attr, size, &hxge_dev_desc_dma_acc_attr,
1872 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, rx_dmap);
1873 if (status != HXGE_OK) {
1874 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
1875 " hxge_alloc_rx_cntl_dma: Alloc Failed: "
1876 " for size: %d", size));
1877 goto hxge_alloc_rx_cntl_dma_fail1;
1878 }
1879
1880 *dmap = rx_dmap;
1881
1882 goto hxge_alloc_rx_cntl_dma_exit;
1883
1884 hxge_alloc_rx_cntl_dma_fail1:
1885 KMEM_FREE(rx_dmap, sizeof (hxge_dma_common_t));
1886
1887 hxge_alloc_rx_cntl_dma_exit:
1888 HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1889 "<== hxge_alloc_rx_cntl_dma status 0x%08x", status));
1890
1891 return (status);
1892 }
1893
1894 /*ARGSUSED*/
1895 static void
1896 hxge_free_rx_cntl_dma(p_hxge_t hxgep, p_hxge_dma_common_t dmap)
1897 {
1898 HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_free_rx_cntl_dma"));
1899
1900 hxge_dma_mem_free(dmap);
1901
1902 HXGE_DEBUG_MSG((hxgep, DMA_CTL, "<== hxge_free_rx_cntl_dma"));
1903 }
1904
1905 static hxge_status_t
1906 hxge_alloc_tx_mem_pool(p_hxge_t hxgep)
1907 {
1908 hxge_status_t status = HXGE_OK;
1909 int i, j;
1910 uint32_t ndmas, st_tdc;
1911 p_hxge_dma_pt_cfg_t p_all_cfgp;
1912 p_hxge_hw_pt_cfg_t p_cfgp;
1913 p_hxge_dma_pool_t dma_poolp;
1914 p_hxge_dma_common_t *dma_buf_p;
1915 p_hxge_dma_pool_t dma_cntl_poolp;
1916 p_hxge_dma_common_t *dma_cntl_p;
1917 size_t tx_buf_alloc_size;
1918 size_t tx_cntl_alloc_size;
1919 uint32_t *num_chunks; /* per dma */
1920
1921 HXGE_DEBUG_MSG((hxgep, MEM_CTL, "==> hxge_alloc_tx_mem_pool"));
1922
1923 p_all_cfgp = (p_hxge_dma_pt_cfg_t)&hxgep->pt_config;
1924 p_cfgp = (p_hxge_hw_pt_cfg_t)&p_all_cfgp->hw_config;
1925 st_tdc = p_cfgp->start_tdc;
1926 ndmas = p_cfgp->max_tdcs;
1927
1928 HXGE_DEBUG_MSG((hxgep, MEM_CTL, "==> hxge_alloc_tx_mem_pool: "
1929 "p_cfgp 0x%016llx start_tdc %d ndmas %d hxgep->max_tdcs %d",
1930 p_cfgp, p_cfgp->start_tdc, p_cfgp->max_tdcs, hxgep->max_tdcs));
1931 /*
1932 * Allocate memory for each transmit DMA channel.
1933 */
1934 dma_poolp = (p_hxge_dma_pool_t)KMEM_ZALLOC(sizeof (hxge_dma_pool_t),
1935 KM_SLEEP);
1936 dma_buf_p = (p_hxge_dma_common_t *)KMEM_ZALLOC(
1937 sizeof (p_hxge_dma_common_t) * ndmas, KM_SLEEP);
1938
1939 dma_cntl_poolp = (p_hxge_dma_pool_t)
1940 KMEM_ZALLOC(sizeof (hxge_dma_pool_t), KM_SLEEP);
1941 dma_cntl_p = (p_hxge_dma_common_t *)KMEM_ZALLOC(
1942 sizeof (p_hxge_dma_common_t) * ndmas, KM_SLEEP);
1943
1944 hxgep->hxge_port_tx_ring_size = hxge_tx_ring_size;
1945
1946 /*
1947 * Assume that each DMA channel will be configured with default
1948 * transmit buffer size for copying transmit data. (For packet payload
1949 * over this limit, packets will not be copied.)
1950 */
1951 tx_buf_alloc_size = (hxge_bcopy_thresh * hxge_tx_ring_size);
1952
1953 /*
1954 * Addresses of the transmit descriptor ring and the mailbox must
1955 * all be cache-aligned (64 bytes).
1956 */
1957 tx_cntl_alloc_size = hxge_tx_ring_size;
1958 tx_cntl_alloc_size *= (sizeof (tx_desc_t));
1959 tx_cntl_alloc_size += sizeof (txdma_mailbox_t);
1960
1961 num_chunks = (uint32_t *)KMEM_ZALLOC(sizeof (uint32_t) * ndmas,
1962 KM_SLEEP);
1963
1964 /*
1965 * Allocate memory for transmit buffers and descriptor rings. Replace
1966 * allocation functions with interface functions provided by the
1967 * partition manager when it is available.
1968 *
1969 * Allocate memory for the transmit buffer pool.
1970 */
1971 for (i = 0; i < ndmas; i++) {
1972 num_chunks[i] = 0;
1973 status = hxge_alloc_tx_buf_dma(hxgep, st_tdc, &dma_buf_p[i],
1974 tx_buf_alloc_size, hxge_bcopy_thresh, &num_chunks[i]);
1975 if (status != HXGE_OK) {
1976 break;
1977 }
1978 st_tdc++;
1979 }
1980
1981 if (i < ndmas) {
1982 goto hxge_alloc_tx_mem_pool_fail1;
1983 }
1984
1985 st_tdc = p_cfgp->start_tdc;
1986
1987 /*
1988 * Allocate memory for descriptor rings and mailbox.
1989 */
1990 for (j = 0; j < ndmas; j++) {
1991 status = hxge_alloc_tx_cntl_dma(hxgep, st_tdc, &dma_cntl_p[j],
1992 tx_cntl_alloc_size);
1993 if (status != HXGE_OK) {
1994 break;
1995 }
1996 st_tdc++;
1997 }
1998
1999 if (j < ndmas) {
2000 goto hxge_alloc_tx_mem_pool_fail2;
2001 }
2002
2003 dma_poolp->ndmas = ndmas;
2004 dma_poolp->num_chunks = num_chunks;
2005 dma_poolp->buf_allocated = B_TRUE;
2006 dma_poolp->dma_buf_pool_p = dma_buf_p;
2007 hxgep->tx_buf_pool_p = dma_poolp;
2008
2009 dma_cntl_poolp->ndmas = ndmas;
2010 dma_cntl_poolp->buf_allocated = B_TRUE;
2011 dma_cntl_poolp->dma_buf_pool_p = dma_cntl_p;
2012 hxgep->tx_cntl_pool_p = dma_cntl_poolp;
2013
2014 HXGE_DEBUG_MSG((hxgep, MEM_CTL,
2015 "==> hxge_alloc_tx_mem_pool: start_tdc %d "
2016 "ndmas %d poolp->ndmas %d", st_tdc, ndmas, dma_poolp->ndmas));
2017
2018 goto hxge_alloc_tx_mem_pool_exit;
2019
2020 hxge_alloc_tx_mem_pool_fail2:
2021 /* Free control buffers */
2022 j--;
2023 for (; j >= 0; j--) {
2024 hxge_free_tx_cntl_dma(hxgep,
2025 (p_hxge_dma_common_t)dma_cntl_p[j]);
2026 }
2027
2028 hxge_alloc_tx_mem_pool_fail1:
2029 /* Free data buffers */
2030 i--;
2031 for (; i >= 0; i--) {
2032 hxge_free_tx_buf_dma(hxgep, (p_hxge_dma_common_t)dma_buf_p[i],
2033 num_chunks[i]);
2034 }
2035
2036 KMEM_FREE(dma_poolp, sizeof (hxge_dma_pool_t));
2037 KMEM_FREE(dma_buf_p, ndmas * sizeof (p_hxge_dma_common_t));
2038 KMEM_FREE(dma_cntl_poolp, sizeof (hxge_dma_pool_t));
2039 KMEM_FREE(dma_cntl_p, ndmas * sizeof (p_hxge_dma_common_t));
2040 KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas);
2041
2042 hxge_alloc_tx_mem_pool_exit:
2043 HXGE_DEBUG_MSG((hxgep, MEM_CTL,
2044 "<== hxge_alloc_tx_mem_pool:status 0x%08x", status));
2045
2046 return (status);
2047 }
2048
2049 static hxge_status_t
2050 hxge_alloc_tx_buf_dma(p_hxge_t hxgep, uint16_t dma_channel,
2051 p_hxge_dma_common_t *dmap, size_t alloc_size,
2052 size_t block_size, uint32_t *num_chunks)
2053 {
2054 p_hxge_dma_common_t tx_dmap;
2055 hxge_status_t status = HXGE_OK;
2056 size_t total_alloc_size;
2057 size_t allocated = 0;
2058 int i, size_index, array_size;
2059
2060 HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_alloc_tx_buf_dma"));
2061
2062 tx_dmap = (p_hxge_dma_common_t)
2063 KMEM_ZALLOC(sizeof (hxge_dma_common_t) * HXGE_DMA_BLOCK, KM_SLEEP);
2064
2065 total_alloc_size = alloc_size;
2066 i = 0;
2067 size_index = 0;
2068 array_size = sizeof (alloc_sizes) / sizeof (size_t);
2069 while ((size_index < array_size) &&
2070 (alloc_sizes[size_index] < alloc_size))
2071 size_index++;
2072 if (size_index >= array_size) {
2073 size_index = array_size - 1;
2074 }
2075
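	/*
	 * Same chunked-allocation strategy as on the receive side: try the
	 * largest standard size first and drop to smaller sizes on failure,
	 * up to HXGE_DMA_BLOCK chunks per channel.
	 */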
2076 while ((allocated < total_alloc_size) &&
2077 (size_index >= 0) && (i < HXGE_DMA_BLOCK)) {
2078 tx_dmap[i].dma_chunk_index = i;
2079 tx_dmap[i].block_size = block_size;
2080 tx_dmap[i].alength = alloc_sizes[size_index];
2081 tx_dmap[i].orig_alength = tx_dmap[i].alength;
2082 tx_dmap[i].nblocks = alloc_sizes[size_index] / block_size;
2083 tx_dmap[i].dma_channel = dma_channel;
2084 tx_dmap[i].contig_alloc_type = B_FALSE;
2085
2086 status = hxge_dma_mem_alloc(hxgep, hxge_force_dma,
2087 &hxge_tx_dma_attr, tx_dmap[i].alength,
2088 &hxge_dev_buf_dma_acc_attr,
2089 DDI_DMA_WRITE | DDI_DMA_STREAMING,
2090 (p_hxge_dma_common_t)(&tx_dmap[i]));
2091 if (status != HXGE_OK) {
2092 HXGE_DEBUG_MSG((hxgep, DMA_CTL,
2093 " hxge_alloc_tx_buf_dma: Alloc Failed: "
2094 " for size: %d", alloc_sizes[size_index]));
2095 size_index--;
2096 } else {
2097 i++;
2098 allocated += alloc_sizes[size_index];
2099 }
2100 }
2101
2102 if (allocated < total_alloc_size) {
2103 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2104 " hxge_alloc_tx_buf_dma: failed due to"
2105 " allocated(%d) < required(%d)",
2106 allocated, total_alloc_size));
2107 goto hxge_alloc_tx_mem_fail1;
2108 }
2109
2110 *num_chunks = i;
2111 *dmap = tx_dmap;
2112 HXGE_DEBUG_MSG((hxgep, DMA_CTL,
2113 "==> hxge_alloc_tx_buf_dma dmap 0x%016llx num chunks %d",
2114 *dmap, i));
2115 goto hxge_alloc_tx_mem_exit;
2116
2117 hxge_alloc_tx_mem_fail1:
2118 KMEM_FREE(tx_dmap, sizeof (hxge_dma_common_t) * HXGE_DMA_BLOCK);
2119
2120 hxge_alloc_tx_mem_exit:
2121 HXGE_DEBUG_MSG((hxgep, DMA_CTL,
2122 "<== hxge_alloc_tx_buf_dma status 0x%08x", status));
2123
2124 return (status);
2125 }
2126
2127 /*ARGSUSED*/
2128 static void
2129 hxge_free_tx_buf_dma(p_hxge_t hxgep, p_hxge_dma_common_t dmap,
2130 uint32_t num_chunks)
2131 {
2132 int i;
2133
2134 HXGE_DEBUG_MSG((hxgep, MEM_CTL, "==> hxge_free_tx_buf_dma"));
2135
2136 for (i = 0; i < num_chunks; i++) {
2137 hxge_dma_mem_free(dmap++);
2138 }
2139
2140 HXGE_DEBUG_MSG((hxgep, MEM_CTL, "<== hxge_free_tx_buf_dma"));
2141 }
2142
2143 /*ARGSUSED*/
2144 static hxge_status_t
2145 hxge_alloc_tx_cntl_dma(p_hxge_t hxgep, uint16_t dma_channel,
2146 p_hxge_dma_common_t *dmap, size_t size)
2147 {
2148 p_hxge_dma_common_t tx_dmap;
2149 hxge_status_t status = HXGE_OK;
2150
2151 HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_alloc_tx_cntl_dma"));
2152
2153 tx_dmap = (p_hxge_dma_common_t)KMEM_ZALLOC(sizeof (hxge_dma_common_t),
2154 KM_SLEEP);
2155
2156 tx_dmap->contig_alloc_type = B_FALSE;
2157
2158 status = hxge_dma_mem_alloc(hxgep, hxge_force_dma,
2159 &hxge_tx_desc_dma_attr, size, &hxge_dev_desc_dma_acc_attr,
2160 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, tx_dmap);
2161 if (status != HXGE_OK) {
2162 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2163 " hxge_alloc_tx_cntl_dma: Alloc Failed: "
2164 " for size: %d", size));
2165 goto hxge_alloc_tx_cntl_dma_fail1;
2166 }
2167
2168 *dmap = tx_dmap;
2169
2170 goto hxge_alloc_tx_cntl_dma_exit;
2171
2172 hxge_alloc_tx_cntl_dma_fail1:
2173 KMEM_FREE(tx_dmap, sizeof (hxge_dma_common_t));
2174
2175 hxge_alloc_tx_cntl_dma_exit:
2176 HXGE_DEBUG_MSG((hxgep, DMA_CTL,
2177 "<== hxge_alloc_tx_cntl_dma status 0x%08x", status));
2178
2179 return (status);
2180 }
2181
2182 /*ARGSUSED*/
2183 static void
2184 hxge_free_tx_cntl_dma(p_hxge_t hxgep, p_hxge_dma_common_t dmap)
2185 {
2186 HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_free_tx_cntl_dma"));
2187
2188 hxge_dma_mem_free(dmap);
2189
2190 HXGE_DEBUG_MSG((hxgep, DMA_CTL, "<== hxge_free_tx_cntl_dma"));
2191 }
2192
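/*
 * Release all transmit buffer and control DMA memory along with the
 * bookkeeping arrays allocated by hxge_alloc_tx_mem_pool().
 */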
2193 static void
2194 hxge_free_tx_mem_pool(p_hxge_t hxgep)
2195 {
2196 uint32_t i, ndmas;
2197 p_hxge_dma_pool_t dma_poolp;
2198 p_hxge_dma_common_t *dma_buf_p;
2199 p_hxge_dma_pool_t dma_cntl_poolp;
2200 p_hxge_dma_common_t *dma_cntl_p;
2201 uint32_t *num_chunks;
2202
2203 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_free_tx_mem_pool"));
2204
2205 dma_poolp = hxgep->tx_buf_pool_p;
2206 if (dma_poolp == NULL || (!dma_poolp->buf_allocated)) {
2207 HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
2208 "<== hxge_free_tx_mem_pool "
2209 "(null rx buf pool or buf not allocated"));
2210 return;
2211 }
2212
2213 dma_cntl_poolp = hxgep->tx_cntl_pool_p;
2214 if (dma_cntl_poolp == NULL || (!dma_cntl_poolp->buf_allocated)) {
2215 HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
2216 "<== hxge_free_tx_mem_pool "
2217 "(null tx cntl buf pool or cntl buf not allocated"));
2218 return;
2219 }
2220
2221 dma_buf_p = dma_poolp->dma_buf_pool_p;
2222 num_chunks = dma_poolp->num_chunks;
2223
2224 dma_cntl_p = dma_cntl_poolp->dma_buf_pool_p;
2225 ndmas = dma_cntl_poolp->ndmas;
2226
2227 for (i = 0; i < ndmas; i++) {
2228 hxge_free_tx_buf_dma(hxgep, dma_buf_p[i], num_chunks[i]);
2229 }
2230
2231 for (i = 0; i < ndmas; i++) {
2232 hxge_free_tx_cntl_dma(hxgep, dma_cntl_p[i]);
2233 }
2234
2235 for (i = 0; i < ndmas; i++) {
2236 KMEM_FREE(dma_buf_p[i],
2237 sizeof (hxge_dma_common_t) * HXGE_DMA_BLOCK);
2238 KMEM_FREE(dma_cntl_p[i], sizeof (hxge_dma_common_t));
2239 }
2240
2241 KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas);
2242 KMEM_FREE(dma_cntl_p, ndmas * sizeof (p_hxge_dma_common_t));
2243 KMEM_FREE(dma_cntl_poolp, sizeof (hxge_dma_pool_t));
2244 KMEM_FREE(dma_buf_p, ndmas * sizeof (p_hxge_dma_common_t));
2245 KMEM_FREE(dma_poolp, sizeof (hxge_dma_pool_t));
2246
2247 hxgep->tx_buf_pool_p = NULL;
2248 hxgep->tx_cntl_pool_p = NULL;
2249
2250 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "<== hxge_free_tx_mem_pool"));
2251 }
2252
2253 /*ARGSUSED*/
2254 static hxge_status_t
2255 hxge_dma_mem_alloc(p_hxge_t hxgep, dma_method_t method,
2256 struct ddi_dma_attr *dma_attrp,
2257 size_t length, ddi_device_acc_attr_t *acc_attr_p, uint_t xfer_flags,
2258 p_hxge_dma_common_t dma_p)
2259 {
2260 caddr_t kaddrp;
2261 int ddi_status = DDI_SUCCESS;
2262
2263 dma_p->dma_handle = NULL;
2264 dma_p->acc_handle = NULL;
2265 dma_p->kaddrp = NULL;
2266
2267 ddi_status = ddi_dma_alloc_handle(hxgep->dip, dma_attrp,
2268 DDI_DMA_DONTWAIT, NULL, &dma_p->dma_handle);
2269 if (ddi_status != DDI_SUCCESS) {
2270 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2271 "hxge_dma_mem_alloc:ddi_dma_alloc_handle failed."));
2272 return (HXGE_ERROR | HXGE_DDI_FAILED);
2273 }
2274
2275 ddi_status = ddi_dma_mem_alloc(dma_p->dma_handle, length, acc_attr_p,
2276 xfer_flags, DDI_DMA_DONTWAIT, 0, &kaddrp, &dma_p->alength,
2277 &dma_p->acc_handle);
2278 if (ddi_status != DDI_SUCCESS) {
2279 /* The caller will decide whether it is fatal */
2280 HXGE_DEBUG_MSG((hxgep, DMA_CTL,
2281 "hxge_dma_mem_alloc:ddi_dma_mem_alloc failed"));
2282 ddi_dma_free_handle(&dma_p->dma_handle);
2283 dma_p->dma_handle = NULL;
2284 return (HXGE_ERROR | HXGE_DDI_FAILED);
2285 }
2286
2287 if (dma_p->alength < length) {
2288 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2289 "hxge_dma_mem_alloc:ddi_dma_mem_alloc < length."));
2290 ddi_dma_mem_free(&dma_p->acc_handle);
2291 ddi_dma_free_handle(&dma_p->dma_handle);
2292 dma_p->acc_handle = NULL;
2293 dma_p->dma_handle = NULL;
2294 return (HXGE_ERROR);
2295 }
2296
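	/*
	 * Bind the allocation to a DMA address. The hardware requires a
	 * physically contiguous region, so the bind must produce exactly
	 * one cookie (verified below).
	 */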
2297 ddi_status = ddi_dma_addr_bind_handle(dma_p->dma_handle, NULL,
2298 kaddrp, dma_p->alength, xfer_flags, DDI_DMA_DONTWAIT, 0,
2299 &dma_p->dma_cookie, &dma_p->ncookies);
2300 if (ddi_status != DDI_DMA_MAPPED) {
2301 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2302 "hxge_dma_mem_alloc:di_dma_addr_bind failed "
2303 "(staus 0x%x ncookies %d.)", ddi_status, dma_p->ncookies));
2304 if (dma_p->acc_handle) {
2305 ddi_dma_mem_free(&dma_p->acc_handle);
2306 dma_p->acc_handle = NULL;
2307 }
2308 ddi_dma_free_handle(&dma_p->dma_handle);
2309 dma_p->dma_handle = NULL;
2310 return (HXGE_ERROR | HXGE_DDI_FAILED);
2311 }
2312
2313 if (dma_p->ncookies != 1) {
2314 HXGE_DEBUG_MSG((hxgep, DMA_CTL,
2315 "hxge_dma_mem_alloc:ddi_dma_addr_bind > 1 cookie"
2316 "(staus 0x%x ncookies %d.)", ddi_status, dma_p->ncookies));
2317 if (dma_p->acc_handle) {
2318 ddi_dma_mem_free(&dma_p->acc_handle);
2319 dma_p->acc_handle = NULL;
2320 }
2321 (void) ddi_dma_unbind_handle(dma_p->dma_handle);
2322 ddi_dma_free_handle(&dma_p->dma_handle);
2323 dma_p->dma_handle = NULL;
2324 return (HXGE_ERROR);
2325 }
2326
2327 dma_p->kaddrp = kaddrp;
2328 dma_p->ioaddr_pp = (unsigned char *) dma_p->dma_cookie.dmac_laddress;
2329
2330 HPI_DMA_ACC_HANDLE_SET(dma_p, dma_p->acc_handle);
2331
2332 HXGE_DEBUG_MSG((hxgep, DMA_CTL, "<== hxge_dma_mem_alloc: "
2333 "dma buffer allocated: dma_p $%p "
2334 "return dmac_ladress from cookie $%p dmac_size %d "
2335 "dma_p->ioaddr_p $%p "
2336 "dma_p->orig_ioaddr_p $%p "
2337 "orig_vatopa $%p "
2338 "alength %d (0x%x) "
2339 "kaddrp $%p "
2340 "length %d (0x%x)",
2341 dma_p,
2342 dma_p->dma_cookie.dmac_laddress,
2343 dma_p->dma_cookie.dmac_size,
2344 dma_p->ioaddr_pp,
2345 dma_p->orig_ioaddr_pp,
2346 dma_p->orig_vatopa,
2347 dma_p->alength, dma_p->alength,
2348 kaddrp,
2349 length, length));
2350
2351 return (HXGE_OK);
2352 }
2353
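/*
 * Undo hxge_dma_mem_alloc(): unbind the DMA handle, free the mapped
 * memory and handles, and clear the bookkeeping fields.
 */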
2354 static void
2355 hxge_dma_mem_free(p_hxge_dma_common_t dma_p)
2356 {
2357 if (dma_p == NULL)
2358 return;
2359
2360 if (dma_p->dma_handle != NULL) {
2361 if (dma_p->ncookies) {
2362 (void) ddi_dma_unbind_handle(dma_p->dma_handle);
2363 dma_p->ncookies = 0;
2364 }
2365 ddi_dma_free_handle(&dma_p->dma_handle);
2366 dma_p->dma_handle = NULL;
2367 }
2368
2369 if (dma_p->acc_handle != NULL) {
2370 ddi_dma_mem_free(&dma_p->acc_handle);
2371 dma_p->acc_handle = NULL;
2372 HPI_DMA_ACC_HANDLE_SET(dma_p, NULL);
2373 }
2374
2375 dma_p->kaddrp = NULL;
2376 dma_p->alength = 0;
2377 }
2378
2379 /*
2380 * hxge_m_start() -- start transmitting and receiving.
2381 *
2382 * This function is called by the MAC layer when the first
2383 * stream is opened, to prepare the hardware for sending
2384 * and receiving packets.
2385 */
2386 static int
2387 hxge_m_start(void *arg)
2388 {
2389 p_hxge_t hxgep = (p_hxge_t)arg;
2390
2391 HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "==> hxge_m_start"));
2392
2393 MUTEX_ENTER(hxgep->genlock);
2394
2395 if (hxge_init(hxgep) != DDI_SUCCESS) {
2396 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2397 "<== hxge_m_start: initialization failed"));
2398 MUTEX_EXIT(hxgep->genlock);
2399 return (EIO);
2400 }
2401
2402 if (hxgep->hxge_mac_state != HXGE_MAC_STARTED) {
2403 /*
2404 * Start a timer to check for system errors and tx hangs.
2405 */
2406 hxgep->hxge_timerid = hxge_start_timer(hxgep,
2407 hxge_check_hw_state, HXGE_CHECK_TIMER);
2408
2409 hxgep->hxge_mac_state = HXGE_MAC_STARTED;
2410
2411 hxgep->timeout.link_status = 0;
2412 hxgep->timeout.report_link_status = B_TRUE;
2413 hxgep->timeout.ticks = drv_usectohz(2 * 1000000);
2414
2415 /* Start the link status timer to check the link status */
2416 MUTEX_ENTER(&hxgep->timeout.lock);
2417 hxgep->timeout.id = timeout(hxge_link_poll, (void *)hxgep,
2418 hxgep->timeout.ticks);
2419 MUTEX_EXIT(&hxgep->timeout.lock);
2420 }
2421
2422 MUTEX_EXIT(hxgep->genlock);
2423
2424 HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "<== hxge_m_start"));
2425
2426 return (0);
2427 }
2428
2429 /*
2430 * hxge_m_stop(): stop transmitting and receiving.
2431 */
2432 static void
2433 hxge_m_stop(void *arg)
2434 {
2435 p_hxge_t hxgep = (p_hxge_t)arg;
2436
2437 HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "==> hxge_m_stop"));
2438
2439 if (hxgep->hxge_timerid) {
2440 hxge_stop_timer(hxgep, hxgep->hxge_timerid);
2441 hxgep->hxge_timerid = 0;
2442 }
2443
2444 /* Stop the link status timer before unregistering */
2445 MUTEX_ENTER(&hxgep->timeout.lock);
2446 if (hxgep->timeout.id) {
2447 (void) untimeout(hxgep->timeout.id);
2448 hxgep->timeout.id = 0;
2449 }
2450 hxge_link_update(hxgep, LINK_STATE_DOWN);
2451 MUTEX_EXIT(&hxgep->timeout.lock);
2452
2453 MUTEX_ENTER(hxgep->genlock);
2454
2455 hxge_uninit(hxgep);
2456
2457 hxgep->hxge_mac_state = HXGE_MAC_STOPPED;
2458
2459 MUTEX_EXIT(hxgep->genlock);
2460
2461 HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "<== hxge_m_stop"));
2462 }
2463
2464 static int
2465 hxge_m_multicst(void *arg, boolean_t add, const uint8_t *mca)
2466 {
2467 p_hxge_t hxgep = (p_hxge_t)arg;
2468 struct ether_addr addrp;
2469
2470 HXGE_DEBUG_MSG((hxgep, MAC_CTL, "==> hxge_m_multicst: add %d", add));
2471
2472 bcopy(mca, (uint8_t *)&addrp, ETHERADDRL);
2473
2474 if (add) {
2475 if (hxge_add_mcast_addr(hxgep, &addrp)) {
2476 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2477 "<== hxge_m_multicst: add multicast failed"));
2478 return (EINVAL);
2479 }
2480 } else {
2481 if (hxge_del_mcast_addr(hxgep, &addrp)) {
2482 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2483 "<== hxge_m_multicst: del multicast failed"));
2484 return (EINVAL);
2485 }
2486 }
2487
2488 HXGE_DEBUG_MSG((hxgep, MAC_CTL, "<== hxge_m_multicst"));
2489
2490 return (0);
2491 }
2492
2493 static int
2494 hxge_m_promisc(void *arg, boolean_t on)
2495 {
2496 p_hxge_t hxgep = (p_hxge_t)arg;
2497
2498 HXGE_DEBUG_MSG((hxgep, MAC_CTL, "==> hxge_m_promisc: on %d", on));
2499
2500 if (hxge_set_promisc(hxgep, on)) {
2501 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2502 "<== hxge_m_promisc: set promisc failed"));
2503 return (EINVAL);
2504 }
2505
2506 HXGE_DEBUG_MSG((hxgep, MAC_CTL, "<== hxge_m_promisc: on %d", on));
2507
2508 return (0);
2509 }
2510
2511 static void
2512 hxge_m_ioctl(void *arg, queue_t *wq, mblk_t *mp)
2513 {
2514 p_hxge_t hxgep = (p_hxge_t)arg;
2515 struct iocblk *iocp = (struct iocblk *)mp->b_rptr;
2516 boolean_t need_privilege;
2517 int err;
2518 int cmd;
2519
2520 HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "==> hxge_m_ioctl"));
2521
2522 iocp = (struct iocblk *)mp->b_rptr;
2523 iocp->ioc_error = 0;
2524 need_privilege = B_TRUE;
2525 cmd = iocp->ioc_cmd;
2526
2527 HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "==> hxge_m_ioctl: cmd 0x%08x", cmd));
2528 switch (cmd) {
2529 default:
2530 miocnak(wq, mp, 0, EINVAL);
2531 HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "<== hxge_m_ioctl: invalid"));
2532 return;
2533
2534 case LB_GET_INFO_SIZE:
2535 case LB_GET_INFO:
2536 case LB_GET_MODE:
2537 need_privilege = B_FALSE;
2538 break;
2539
2540 case LB_SET_MODE:
2541 break;
2542
2543 case ND_GET:
2544 need_privilege = B_FALSE;
2545 break;
2546 case ND_SET:
2547 break;
2548
2549 case HXGE_GET_TX_RING_SZ:
2550 case HXGE_GET_TX_DESC:
2551 case HXGE_TX_SIDE_RESET:
2552 case HXGE_RX_SIDE_RESET:
2553 case HXGE_GLOBAL_RESET:
2554 case HXGE_RESET_MAC:
2555 case HXGE_PUT_TCAM:
2556 case HXGE_GET_TCAM:
2557 case HXGE_RTRACE:
2558
2559 need_privilege = B_FALSE;
2560 break;
2561 }
2562
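	/* Commands not exempted above require network configuration privilege. */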
2563 if (need_privilege) {
2564 err = secpolicy_net_config(iocp->ioc_cr, B_FALSE);
2565 if (err != 0) {
2566 miocnak(wq, mp, 0, err);
2567 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2568 "<== hxge_m_ioctl: no priv"));
2569 return;
2570 }
2571 }
2572
2573 switch (cmd) {
2574 case ND_GET:
2575 HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "ND_GET command"));
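		/* FALLTHROUGH */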
2576 case ND_SET:
2577 HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "ND_SET command"));
2578 hxge_param_ioctl(hxgep, wq, mp, iocp);
2579 break;
2580
2581 case LB_GET_MODE:
2582 case LB_SET_MODE:
2583 case LB_GET_INFO_SIZE:
2584 case LB_GET_INFO:
2585 hxge_loopback_ioctl(hxgep, wq, mp, iocp);
2586 break;
2587
2588 case HXGE_PUT_TCAM:
2589 case HXGE_GET_TCAM:
2590 case HXGE_GET_TX_RING_SZ:
2591 case HXGE_GET_TX_DESC:
2592 case HXGE_TX_SIDE_RESET:
2593 case HXGE_RX_SIDE_RESET:
2594 case HXGE_GLOBAL_RESET:
2595 case HXGE_RESET_MAC:
2596 HXGE_DEBUG_MSG((hxgep, NEMO_CTL,
2597 "==> hxge_m_ioctl: cmd 0x%x", cmd));
2598 hxge_hw_ioctl(hxgep, wq, mp, iocp);
2599 break;
2600 }
2601
2602 HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "<== hxge_m_ioctl"));
2603 }
2604
2605 /*ARGSUSED*/
2606 static int
2607 hxge_tx_ring_start(mac_ring_driver_t rdriver, uint64_t mr_gen_num)
2608 {
2609 p_hxge_ring_handle_t rhp = (p_hxge_ring_handle_t)rdriver;
2610 p_hxge_t hxgep;
2611 p_tx_ring_t ring;
2612
2613 ASSERT(rhp != NULL);
2614 ASSERT((rhp->index >= 0) && (rhp->index < HXGE_MAX_TDCS));
2615
2616 hxgep = rhp->hxgep;
2617
2618 /*
2619 * Get the ring pointer.
2620 */
2621 ring = hxgep->tx_rings->rings[rhp->index];
2622
2623 /*
2624 * Fill in the handle for the transmit.
2625 */
2626 MUTEX_ENTER(&ring->lock);
2627 rhp->started = B_TRUE;
2628 ring->ring_handle = rhp->ring_handle;
2629 MUTEX_EXIT(&ring->lock);
2630
2631 return (0);
2632 }
2633
2634 static void
2635 hxge_tx_ring_stop(mac_ring_driver_t rdriver)
2636 {
2637 p_hxge_ring_handle_t rhp = (p_hxge_ring_handle_t)rdriver;
2638 p_hxge_t hxgep;
2639 p_tx_ring_t ring;
2640
2641 ASSERT(rhp != NULL);
2642 ASSERT((rhp->index >= 0) && (rhp->index < HXGE_MAX_TDCS));
2643
2644 hxgep = rhp->hxgep;
2645 ring = hxgep->tx_rings->rings[rhp->index];
2646
2647 MUTEX_ENTER(&ring->lock);
2648 ring->ring_handle = (mac_ring_handle_t)NULL;
2649 rhp->started = B_FALSE;
2650 MUTEX_EXIT(&ring->lock);
2651 }
2652
2653 static int
2654 hxge_rx_ring_start(mac_ring_driver_t rdriver, uint64_t mr_gen_num)
2655 {
2656 p_hxge_ring_handle_t rhp = (p_hxge_ring_handle_t)rdriver;
2657 p_hxge_t hxgep;
2658 p_rx_rcr_ring_t ring;
2659 int i;
2660
2661 ASSERT(rhp != NULL);
2662 ASSERT((rhp->index >= 0) && (rhp->index < HXGE_MAX_RDCS));
2663
2664 hxgep = rhp->hxgep;
2665
2666 /*
2667 * Get pointer to ring.
2668 */
2669 ring = hxgep->rx_rcr_rings->rcr_rings[rhp->index];
2670
2671 MUTEX_ENTER(&ring->lock);
2672
2673 if (rhp->started) {
2674 MUTEX_EXIT(&ring->lock);
2675 return (0);
2676 }
2677
2678 /*
2679 * Set the ldvp and ldgp pointers to enable/disable
2680 * polling.
2681 */
2682 for (i = 0; i < hxgep->ldgvp->maxldvs; i++) {
2683 if ((hxgep->ldgvp->ldvp[i].is_rxdma == 1) &&
2684 (hxgep->ldgvp->ldvp[i].channel == rhp->index)) {
2685 ring->ldvp = &hxgep->ldgvp->ldvp[i];
2686 ring->ldgp = hxgep->ldgvp->ldvp[i].ldgp;
2687 break;
2688 }
2689 }
2690
2691 rhp->started = B_TRUE;
2692 ring->rcr_mac_handle = rhp->ring_handle;
2693 ring->rcr_gen_num = mr_gen_num;
2694 MUTEX_EXIT(&ring->lock);
2695
2696 return (0);
2697 }
2698
2699 static void
2700 hxge_rx_ring_stop(mac_ring_driver_t rdriver)
2701 {
2702 p_hxge_ring_handle_t rhp = (p_hxge_ring_handle_t)rdriver;
2703 p_hxge_t hxgep;
2704 p_rx_rcr_ring_t ring;
2705
2706 ASSERT(rhp != NULL);
2707 ASSERT((rhp->index >= 0) && (rhp->index < HXGE_MAX_RDCS));
2708
2709 hxgep = rhp->hxgep;
2710 ring = hxgep->rx_rcr_rings->rcr_rings[rhp->index];
2711
2712 MUTEX_ENTER(&ring->lock);
2713 rhp->started = B_FALSE;
2714 ring->rcr_mac_handle = NULL;
2715 ring->ldvp = NULL;
2716 ring->ldgp = NULL;
2717 MUTEX_EXIT(&ring->lock);
2718 }
2719
2720 static int
2721 hxge_rx_group_start(mac_group_driver_t gdriver)
2722 {
2723 hxge_ring_group_t *group = (hxge_ring_group_t *)gdriver;
2724
2725 ASSERT(group->hxgep != NULL);
2726 ASSERT(group->hxgep->hxge_mac_state == HXGE_MAC_STARTED);
2727
2728 MUTEX_ENTER(group->hxgep->genlock);
2729 group->started = B_TRUE;
2730 MUTEX_EXIT(group->hxgep->genlock);
2731
2732 return (0);
2733 }
2734
2735 static void
2736 hxge_rx_group_stop(mac_group_driver_t gdriver)
2737 {
2738 hxge_ring_group_t *group = (hxge_ring_group_t *)gdriver;
2739
2740 ASSERT(group->hxgep != NULL);
2741 ASSERT(group->hxgep->hxge_mac_state == HXGE_MAC_STARTED);
2742 ASSERT(group->started == B_TRUE);
2743
2744 MUTEX_ENTER(group->hxgep->genlock);
2745 group->started = B_FALSE;
2746 MUTEX_EXIT(group->hxgep->genlock);
2747 }
2748
2749 static int
2750 hxge_mmac_get_slot(p_hxge_t hxgep, int *slot)
2751 {
2752 int i;
2753
2754 /*
2755 * Find an open slot.
2756 */
2757 for (i = 0; i < hxgep->mmac.total; i++) {
2758 if (!hxgep->mmac.addrs[i].set) {
2759 *slot = i;
2760 return (0);
2761 }
2762 }
2763
2764 return (ENXIO);
2765 }
2766
2767 static int
2768 hxge_mmac_set_addr(p_hxge_t hxgep, int slot, const uint8_t *addr)
2769 {
2770 struct ether_addr eaddr;
2771 hxge_status_t status = HXGE_OK;
2772
2773 bcopy(addr, (uint8_t *)&eaddr, ETHERADDRL);
2774
2775 /*
2776 * Set new interface local address and re-init device.
2777 * This is destructive to any other streams attached
2778 * to this device.
2779 */
2780 RW_ENTER_WRITER(&hxgep->filter_lock);
2781 status = hxge_pfc_set_mac_address(hxgep, slot, &eaddr);
2782 RW_EXIT(&hxgep->filter_lock);
2783 if (status != HXGE_OK)
2784 return (status);
2785
2786 hxgep->mmac.addrs[slot].set = B_TRUE;
2787 bcopy(addr, hxgep->mmac.addrs[slot].addr, ETHERADDRL);
2788 hxgep->mmac.available--;
2789 if (slot == HXGE_MAC_DEFAULT_ADDR_SLOT)
2790 hxgep->mmac.addrs[slot].primary = B_TRUE;
2791
2792 return (0);
2793 }
2794
2795 static int
2796 hxge_mmac_find_addr(p_hxge_t hxgep, const uint8_t *addr, int *slot)
2797 {
2798 int i, result;
2799
2800 for (i = 0; i < hxgep->mmac.total; i++) {
2801 if (hxgep->mmac.addrs[i].set) {
2802 result = memcmp(hxgep->mmac.addrs[i].addr,
2803 addr, ETHERADDRL);
2804 if (result == 0) {
2805 *slot = i;
2806 return (0);
2807 }
2808 }
2809 }
2810
2811 return (EINVAL);
2812 }
2813
2814 static int
2815 hxge_mmac_unset_addr(p_hxge_t hxgep, int slot)
2816 {
2817 hxge_status_t status;
2818 int i;
2819
2820 status = hxge_pfc_clear_mac_address(hxgep, slot);
2821 if (status != HXGE_OK)
2822 return (status);
2823
2824 for (i = 0; i < ETHERADDRL; i++)
2825 hxgep->mmac.addrs[slot].addr[i] = 0;
2826
2827 hxgep->mmac.addrs[slot].set = B_FALSE;
2828 if (slot == HXGE_MAC_DEFAULT_ADDR_SLOT)
2829 hxgep->mmac.addrs[slot].primary = B_FALSE;
2830 hxgep->mmac.available++;
2831
2832 return (0);
2833 }
2834
2835 static int
2836 hxge_rx_group_add_mac(void *arg, const uint8_t *mac_addr)
2837 {
2838 hxge_ring_group_t *group = arg;
2839 p_hxge_t hxgep = group->hxgep;
2840 int slot = 0;
2841
2842 ASSERT(group->type == MAC_RING_TYPE_RX);
2843
2844 MUTEX_ENTER(hxgep->genlock);
2845
2846 /*
2847 * Find a slot for the address.
2848 */
2849 if (hxge_mmac_get_slot(hxgep, &slot) != 0) {
2850 MUTEX_EXIT(hxgep->genlock);
2851 return (ENOSPC);
2852 }
2853
2854 /*
2855 * Program the MAC address.
2856 */
2857 if (hxge_mmac_set_addr(hxgep, slot, mac_addr) != 0) {
2858 MUTEX_EXIT(hxgep->genlock);
2859 return (ENOSPC);
2860 }
2861
2862 MUTEX_EXIT(hxgep->genlock);
2863 return (0);
2864 }
2865
2866 static int
2867 hxge_rx_group_rem_mac(void *arg, const uint8_t *mac_addr)
2868 {
2869 hxge_ring_group_t *group = arg;
2870 p_hxge_t hxgep = group->hxgep;
2871 int rv, slot;
2872
2873 ASSERT(group->type == MAC_RING_TYPE_RX);
2874
2875 MUTEX_ENTER(hxgep->genlock);
2876
2877 if ((rv = hxge_mmac_find_addr(hxgep, mac_addr, &slot)) != 0) {
2878 MUTEX_EXIT(hxgep->genlock);
2879 return (rv);
2880 }
2881
2882 if ((rv = hxge_mmac_unset_addr(hxgep, slot)) != 0) {
2883 MUTEX_EXIT(hxgep->genlock);
2884 return (rv);
2885 }
2886
2887 MUTEX_EXIT(hxgep->genlock);
2888 return (0);
2889 }
2890
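/*
 * GLDv3 group callback: describe an RX ring group and register its
 * start/stop and MAC address add/remove entry points.
 */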
2891 static void
2892 hxge_group_get(void *arg, mac_ring_type_t type, int groupid,
2893 mac_group_info_t *infop, mac_group_handle_t gh)
2894 {
2895 p_hxge_t hxgep = arg;
2896 hxge_ring_group_t *group;
2897
2898 ASSERT(type == MAC_RING_TYPE_RX);
2899
2900 switch (type) {
2901 case MAC_RING_TYPE_RX:
2902 group = &hxgep->rx_groups[groupid];
2903 group->hxgep = hxgep;
2904 group->ghandle = gh;
2905 group->index = groupid;
2906 group->type = type;
2907
2908 infop->mgi_driver = (mac_group_driver_t)group;
2909 infop->mgi_start = hxge_rx_group_start;
2910 infop->mgi_stop = hxge_rx_group_stop;
2911 infop->mgi_addmac = hxge_rx_group_add_mac;
2912 infop->mgi_remmac = hxge_rx_group_rem_mac;
2913 infop->mgi_count = HXGE_MAX_RDCS;
2914 break;
2915
2916 case MAC_RING_TYPE_TX:
2917 default:
2918 break;
2919 }
2920 }
2921
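/*
 * Map a DMA channel of the given ring type to the interrupt handle
 * table index of the logical device group servicing it, or -1 if no
 * match is found.
 */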
2922 static int
2923 hxge_ring_get_htable_idx(p_hxge_t hxgep, mac_ring_type_t type, uint32_t channel)
2924 {
2925 int i;
2926
2927 ASSERT(hxgep->ldgvp != NULL);
2928
2929 switch (type) {
2930 case MAC_RING_TYPE_RX:
2931 for (i = 0; i < hxgep->ldgvp->maxldvs; i++) {
2932 if ((hxgep->ldgvp->ldvp[i].is_rxdma) &&
2933 (hxgep->ldgvp->ldvp[i].channel == channel)) {
2934 return ((int)
2935 hxgep->ldgvp->ldvp[i].ldgp->htable_idx);
2936 }
2937 }
2938 break;
2939
2940 case MAC_RING_TYPE_TX:
2941 for (i = 0; i < hxgep->ldgvp->maxldvs; i++) {
2942 if ((hxgep->ldgvp->ldvp[i].is_txdma) &&
2943 (hxgep->ldgvp->ldvp[i].channel == channel)) {
2944 return ((int)
2945 hxgep->ldgvp->ldvp[i].ldgp->htable_idx);
2946 }
2947 }
2948 break;
2949
2950 default:
2951 break;
2952 }
2953
2954 return (-1);
2955 }
2956
2957 /*
2958 * Callback function for the GLDv3 layer to register all rings.
2959 */
2960 /*ARGSUSED*/
2961 static void
2962 hxge_fill_ring(void *arg, mac_ring_type_t type, const int rg_index,
2963 const int index, mac_ring_info_t *infop, mac_ring_handle_t rh)
2964 {
2965 p_hxge_t hxgep = arg;
2966
2967 ASSERT(hxgep != NULL);
2968 ASSERT(infop != NULL);
2969
2970 switch (type) {
2971 case MAC_RING_TYPE_TX: {
2972 p_hxge_ring_handle_t rhp;
2973 mac_intr_t *mintr = &infop->mri_intr;
2974 p_hxge_intr_t intrp;
2975 int htable_idx;
2976
2977 ASSERT((index >= 0) && (index < HXGE_MAX_TDCS));
2978 rhp = &hxgep->tx_ring_handles[index];
2979 rhp->hxgep = hxgep;
2980 rhp->index = index;
2981 rhp->ring_handle = rh;
2982 infop->mri_driver = (mac_ring_driver_t)rhp;
2983 infop->mri_start = hxge_tx_ring_start;
2984 infop->mri_stop = hxge_tx_ring_stop;
2985 infop->mri_tx = hxge_tx_ring_send;
2986 infop->mri_stat = hxge_tx_ring_stat;
2987
2988 intrp = (p_hxge_intr_t)&hxgep->hxge_intr_type;
2989 htable_idx = hxge_ring_get_htable_idx(hxgep, type, index);
2990 if (htable_idx >= 0)
2991 mintr->mi_ddi_handle = intrp->htable[htable_idx];
2992 else
2993 mintr->mi_ddi_handle = NULL;
2994 break;
2995 }
2996
2997 case MAC_RING_TYPE_RX: {
2998 p_hxge_ring_handle_t rhp;
2999 mac_intr_t hxge_mac_intr;
3000 p_hxge_intr_t intrp;
3001 int htable_idx;
3002
3003 ASSERT((index >= 0) && (index < HXGE_MAX_RDCS));
3004 rhp = &hxgep->rx_ring_handles[index];
3005 rhp->hxgep = hxgep;
3006 rhp->index = index;
3007 rhp->ring_handle = rh;
3008
3009 /*
3010 * Entrypoint to enable interrupt (disable poll) and
3011 * disable interrupt (enable poll).
3012 */
3013 hxge_mac_intr.mi_handle = (mac_intr_handle_t)rhp;
3014 hxge_mac_intr.mi_enable = (mac_intr_enable_t)hxge_disable_poll;
3015 hxge_mac_intr.mi_disable = (mac_intr_disable_t)hxge_enable_poll;
3016
3017 intrp = (p_hxge_intr_t)&hxgep->hxge_intr_type;
3018 htable_idx = hxge_ring_get_htable_idx(hxgep, type, index);
3019 if (htable_idx >= 0)
3020 hxge_mac_intr.mi_ddi_handle = intrp->htable[htable_idx];
3021 else
3022 hxge_mac_intr.mi_ddi_handle = NULL;
3023
3024 infop->mri_driver = (mac_ring_driver_t)rhp;
3025 infop->mri_start = hxge_rx_ring_start;
3026 infop->mri_stop = hxge_rx_ring_stop;
3027 infop->mri_intr = hxge_mac_intr;
3028 infop->mri_poll = hxge_rx_poll;
3029 infop->mri_stat = hxge_rx_ring_stat;
3030 break;
3031 }
3032
3033 default:
3034 break;
3035 }
3036 }
3037
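/*
 * GLDv3 capability callback: advertise partial hardware checksum
 * support and the static RX/TX ring and group layout.
 */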
3038 /*ARGSUSED*/
3039 boolean_t
3040 hxge_m_getcapab(void *arg, mac_capab_t cap, void *cap_data)
3041 {
3042 p_hxge_t hxgep = arg;
3043
3044 switch (cap) {
3045 case MAC_CAPAB_HCKSUM: {
3046 uint32_t *txflags = cap_data;
3047
3048 *txflags = HCKSUM_INET_PARTIAL;
3049 break;
3050 }
3051
3052 case MAC_CAPAB_RINGS: {
3053 mac_capab_rings_t *cap_rings = cap_data;
3054
3055 MUTEX_ENTER(hxgep->genlock);
3056 if (cap_rings->mr_type == MAC_RING_TYPE_RX) {
3057 cap_rings->mr_group_type = MAC_GROUP_TYPE_STATIC;
3058 cap_rings->mr_rnum = HXGE_MAX_RDCS;
3059 cap_rings->mr_rget = hxge_fill_ring;
3060 cap_rings->mr_gnum = HXGE_MAX_RX_GROUPS;
3061 cap_rings->mr_gget = hxge_group_get;
3062 cap_rings->mr_gaddring = NULL;
3063 cap_rings->mr_gremring = NULL;
3064 } else {
3065 cap_rings->mr_group_type = MAC_GROUP_TYPE_STATIC;
3066 cap_rings->mr_rnum = HXGE_MAX_TDCS;
3067 cap_rings->mr_rget = hxge_fill_ring;
3068 cap_rings->mr_gnum = 0;
3069 cap_rings->mr_gget = NULL;
3070 cap_rings->mr_gaddring = NULL;
3071 cap_rings->mr_gremring = NULL;
3072 }
3073 MUTEX_EXIT(hxgep->genlock);
3074 break;
3075 }
3076
3077 default:
3078 return (B_FALSE);
3079 }
3080 return (B_TRUE);
3081 }
3082
3083 static boolean_t
3084 hxge_param_locked(mac_prop_id_t pr_num)
3085 {
3086 /*
3087 * All adv_* parameters are locked (read-only) while
3088 * the device is in any sort of loopback mode ...
3089 */
3090 switch (pr_num) {
3091 case MAC_PROP_ADV_1000FDX_CAP:
3092 case MAC_PROP_EN_1000FDX_CAP:
3093 case MAC_PROP_ADV_1000HDX_CAP:
3094 case MAC_PROP_EN_1000HDX_CAP:
3095 case MAC_PROP_ADV_100FDX_CAP:
3096 case MAC_PROP_EN_100FDX_CAP:
3097 case MAC_PROP_ADV_100HDX_CAP:
3098 case MAC_PROP_EN_100HDX_CAP:
3099 case MAC_PROP_ADV_10FDX_CAP:
3100 case MAC_PROP_EN_10FDX_CAP:
3101 case MAC_PROP_ADV_10HDX_CAP:
3102 case MAC_PROP_EN_10HDX_CAP:
3103 case MAC_PROP_AUTONEG:
3104 case MAC_PROP_FLOWCTRL:
3105 return (B_TRUE);
3106 }
3107 return (B_FALSE);
3108 }
3109
3110 /*
3111 * callback functions for set/get of properties
3112 */
3113 static int
3114 hxge_m_setprop(void *barg, const char *pr_name, mac_prop_id_t pr_num,
3115 uint_t pr_valsize, const void *pr_val)
3116 {
3117 hxge_t *hxgep = barg;
3118 p_hxge_stats_t statsp;
3119 int err = 0;
3120 uint32_t new_mtu, old_framesize, new_framesize;
3121
3122 HXGE_DEBUG_MSG((hxgep, DLADM_CTL, "==> hxge_m_setprop"));
3123
3124 statsp = hxgep->statsp;
3125 MUTEX_ENTER(hxgep->genlock);
3126 if (statsp->port_stats.lb_mode != hxge_lb_normal &&
3127 hxge_param_locked(pr_num)) {
3128 /*
3129 * All adv_* parameters are locked (read-only)
3130 * while the device is in any sort of loopback mode.
3131 */
3132 HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3133 "==> hxge_m_setprop: loopback mode: read only"));
3134 MUTEX_EXIT(hxgep->genlock);
3135 return (EBUSY);
3136 }
3137
3138 switch (pr_num) {
3139 /*
3140 * These properties either do not exist or are read-only.
3141 */
3142 case MAC_PROP_EN_1000FDX_CAP:
3143 case MAC_PROP_EN_100FDX_CAP:
3144 case MAC_PROP_EN_10FDX_CAP:
3145 case MAC_PROP_EN_1000HDX_CAP:
3146 case MAC_PROP_EN_100HDX_CAP:
3147 case MAC_PROP_EN_10HDX_CAP:
3148 case MAC_PROP_ADV_1000FDX_CAP:
3149 case MAC_PROP_ADV_1000HDX_CAP:
3150 case MAC_PROP_ADV_100FDX_CAP:
3151 case MAC_PROP_ADV_100HDX_CAP:
3152 case MAC_PROP_ADV_10FDX_CAP:
3153 case MAC_PROP_ADV_10HDX_CAP:
3154 case MAC_PROP_STATUS:
3155 case MAC_PROP_SPEED:
3156 case MAC_PROP_DUPLEX:
3157 case MAC_PROP_AUTONEG:
3158 /*
3159 * Flow control is handled in the shared domain and
3160 * it is read-only here.
3161 */
3162 case MAC_PROP_FLOWCTRL:
3163 err = EINVAL;
3164 HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3165 "==> hxge_m_setprop: read only property %d",
3166 pr_num));
3167 break;
3168
3169 case MAC_PROP_MTU:
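		/*
		 * An MTU change is only allowed while the MAC is stopped;
		 * convert the MTU to a frame size, validate the range, then
		 * reprogram the VMAC.
		 */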
3170 bcopy(pr_val, &new_mtu, sizeof (new_mtu));
3171 HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3172 "==> hxge_m_setprop: set MTU: %d", new_mtu));
3173
3174 new_framesize = new_mtu + MTU_TO_FRAME_SIZE;
3175 if (new_framesize == hxgep->vmac.maxframesize) {
3176 err = 0;
3177 break;
3178 }
3179
3180 if (hxgep->hxge_mac_state == HXGE_MAC_STARTED) {
3181 err = EBUSY;
3182 break;
3183 }
3184
3185 if (new_framesize < MIN_FRAME_SIZE ||
3186 new_framesize > MAX_FRAME_SIZE) {
3187 err = EINVAL;
3188 break;
3189 }
3190
3191 old_framesize = hxgep->vmac.maxframesize;
3192 hxgep->vmac.maxframesize = (uint16_t)new_framesize;
3193
3194 if (hxge_vmac_set_framesize(hxgep)) {
3195 hxgep->vmac.maxframesize =
3196 (uint16_t)old_framesize;
3197 err = EINVAL;
3198 break;
3199 }
3200
3201 err = mac_maxsdu_update(hxgep->mach, new_mtu);
3202 if (err) {
3203 hxgep->vmac.maxframesize =
3204 (uint16_t)old_framesize;
3205 (void) hxge_vmac_set_framesize(hxgep);
3206 }
3207
3208 HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3209 "==> hxge_m_setprop: set MTU: %d maxframe %d",
3210 new_mtu, hxgep->vmac.maxframesize));
3211 break;
3212
3213 case MAC_PROP_PRIVATE:
3214 HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3215 "==> hxge_m_setprop: private property"));
3216 err = hxge_set_priv_prop(hxgep, pr_name, pr_valsize,
3217 pr_val);
3218 break;
3219
3220 default:
3221 err = ENOTSUP;
3222 break;
3223 }
3224
3225 MUTEX_EXIT(hxgep->genlock);
3226
3227 HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3228 "<== hxge_m_setprop (return %d)", err));
3229
3230 return (err);
3231 }
3232
3233 static int
3234 hxge_m_getprop(void *barg, const char *pr_name, mac_prop_id_t pr_num,
3235 uint_t pr_valsize, void *pr_val)
3236 {
3237 hxge_t *hxgep = barg;
3238 p_hxge_stats_t statsp = hxgep->statsp;
3239 int err = 0;
3240 link_flowctrl_t fl;
3241 uint64_t tmp = 0;
3242 link_state_t ls;
3243
3244 HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3245 "==> hxge_m_getprop: pr_num %d", pr_num));
3246
3247 switch (pr_num) {
3248 case MAC_PROP_DUPLEX:
3249 *(uint8_t *)pr_val = statsp->mac_stats.link_duplex;
3250 HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3251 "==> hxge_m_getprop: duplex mode %d",
3252 *(uint8_t *)pr_val));
3253 break;
3254
3255 case MAC_PROP_SPEED:
3256 ASSERT(pr_valsize >= sizeof (uint64_t));
3257 tmp = statsp->mac_stats.link_speed * 1000000ull;
3258 bcopy(&tmp, pr_val, sizeof (tmp));
3259 break;
3260
3261 case MAC_PROP_STATUS:
3262 ASSERT(pr_valsize >= sizeof (link_state_t));
3263 if (!statsp->mac_stats.link_up)
3264 ls = LINK_STATE_DOWN;
3265 else
3266 ls = LINK_STATE_UP;
3267 bcopy(&ls, pr_val, sizeof (ls));
3268 break;
3269
3270 case MAC_PROP_FLOWCTRL:
3271 /*
3272 * Flow control is supported by the shared domain and
3273 * it is currently transmit only
3274 */
3275 ASSERT(pr_valsize >= sizeof (link_flowctrl_t));
3276 fl = LINK_FLOWCTRL_TX;
3277 bcopy(&fl, pr_val, sizeof (fl));
3278 break;
3279 case MAC_PROP_AUTONEG:
3280 /* 10G link only and it is not negotiable */
3281 *(uint8_t *)pr_val = 0;
3282 break;
3283 case MAC_PROP_ADV_1000FDX_CAP:
3284 case MAC_PROP_ADV_100FDX_CAP:
3285 case MAC_PROP_ADV_10FDX_CAP:
3286 case MAC_PROP_ADV_1000HDX_CAP:
3287 case MAC_PROP_ADV_100HDX_CAP:
3288 case MAC_PROP_ADV_10HDX_CAP:
3289 case MAC_PROP_EN_1000FDX_CAP:
3290 case MAC_PROP_EN_100FDX_CAP:
3291 case MAC_PROP_EN_10FDX_CAP:
3292 case MAC_PROP_EN_1000HDX_CAP:
3293 case MAC_PROP_EN_100HDX_CAP:
3294 case MAC_PROP_EN_10HDX_CAP:
3295 err = ENOTSUP;
3296 break;
3297
3298 case MAC_PROP_PRIVATE:
3299 err = hxge_get_priv_prop(hxgep, pr_name, pr_valsize,
3300 pr_val);
3301 break;
3302
3303 default:
3304 err = ENOTSUP;
3305 break;
3306 }
3307
3308 HXGE_DEBUG_MSG((hxgep, DLADM_CTL, "<== hxge_m_getprop"));
3309
3310 return (err);
3311 }
3312
3313 static void
3314 hxge_m_propinfo(void *arg, const char *pr_name,
3315 mac_prop_id_t pr_num, mac_prop_info_handle_t prh)
3316 {
3317 _NOTE(ARGUNUSED(arg));
3318 switch (pr_num) {
3319 case MAC_PROP_DUPLEX:
3320 case MAC_PROP_SPEED:
3321 case MAC_PROP_STATUS:
3322 case MAC_PROP_AUTONEG:
3323 case MAC_PROP_FLOWCTRL:
3324 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
3325 break;
3326
3327 case MAC_PROP_MTU:
3328 mac_prop_info_set_range_uint32(prh,
3329 MIN_FRAME_SIZE - MTU_TO_FRAME_SIZE,
3330 MAX_FRAME_SIZE - MTU_TO_FRAME_SIZE);
3331 break;
3332
3333 case MAC_PROP_PRIVATE: {
3334 char valstr[MAXNAMELEN];
3335
3336 bzero(valstr, sizeof (valstr));
3337
3338 /* Receive Interrupt Blanking Parameters */
3339 if (strcmp(pr_name, "_rxdma_intr_time") == 0) {
3340 (void) snprintf(valstr, sizeof (valstr), "%d",
3341 RXDMA_RCR_TO_DEFAULT);
3342 } else if (strcmp(pr_name, "_rxdma_intr_pkts") == 0) {
3343 (void) snprintf(valstr, sizeof (valstr), "%d",
3344 RXDMA_RCR_PTHRES_DEFAULT);
3345
3346 /* Classification and Load Distribution Configuration */
3347 } else if (strcmp(pr_name, "_class_opt_ipv4_tcp") == 0 ||
3348 strcmp(pr_name, "_class_opt_ipv4_udp") == 0 ||
3349 strcmp(pr_name, "_class_opt_ipv4_ah") == 0 ||
3350 strcmp(pr_name, "_class_opt_ipv4_sctp") == 0 ||
3351 strcmp(pr_name, "_class_opt_ipv6_tcp") == 0 ||
3352 strcmp(pr_name, "_class_opt_ipv6_udp") == 0 ||
3353 strcmp(pr_name, "_class_opt_ipv6_ah") == 0 ||
3354 strcmp(pr_name, "_class_opt_ipv6_sctp") == 0) {
3355 (void) snprintf(valstr, sizeof (valstr), "%d",
3356 HXGE_CLASS_TCAM_LOOKUP);
3357 }
3358
3359 if (strlen(valstr) > 0)
3360 mac_prop_info_set_default_str(prh, valstr);
3361 break;
3362 }
3363 }
3364 }
3365
3366
3367 /* ARGSUSED */
3368 static int
3369 hxge_set_priv_prop(p_hxge_t hxgep, const char *pr_name, uint_t pr_valsize,
3370 const void *pr_val)
3371 {
3372 p_hxge_param_t param_arr = hxgep->param_arr;
3373 int err = 0;
3374
3375 HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3376 "==> hxge_set_priv_prop: name %s (value %s)", pr_name, pr_val));
3377
3378 if (pr_val == NULL) {
3379 return (EINVAL);
3380 }
3381
3382 /* Blanking */
3383 if (strcmp(pr_name, "_rxdma_intr_time") == 0) {
3384 err = hxge_param_rx_intr_time(hxgep, NULL, NULL,
3385 (char *)pr_val, (caddr_t)&param_arr[param_rxdma_intr_time]);
3386 } else if (strcmp(pr_name, "_rxdma_intr_pkts") == 0) {
3387 err = hxge_param_rx_intr_pkts(hxgep, NULL, NULL,
3388 (char *)pr_val, (caddr_t)&param_arr[param_rxdma_intr_pkts]);
3389
3390 /* Classification */
3391 } else if (strcmp(pr_name, "_class_opt_ipv4_tcp") == 0) {
3392 err = hxge_param_set_ip_opt(hxgep, NULL, NULL, (char *)pr_val,
3393 (caddr_t)&param_arr[param_class_opt_ipv4_tcp]);
3394 } else if (strcmp(pr_name, "_class_opt_ipv4_udp") == 0) {
3395 err = hxge_param_set_ip_opt(hxgep, NULL, NULL, (char *)pr_val,
3396 (caddr_t)&param_arr[param_class_opt_ipv4_udp]);
3397 } else if (strcmp(pr_name, "_class_opt_ipv4_ah") == 0) {
3398 err = hxge_param_set_ip_opt(hxgep, NULL, NULL, (char *)pr_val,
3399 (caddr_t)&param_arr[param_class_opt_ipv4_ah]);
3400 } else if (strcmp(pr_name, "_class_opt_ipv4_sctp") == 0) {
3401 err = hxge_param_set_ip_opt(hxgep, NULL, NULL, (char *)pr_val,
3402 (caddr_t)&param_arr[param_class_opt_ipv4_sctp]);
3403 } else if (strcmp(pr_name, "_class_opt_ipv6_tcp") == 0) {
3404 err = hxge_param_set_ip_opt(hxgep, NULL, NULL, (char *)pr_val,
3405 (caddr_t)&param_arr[param_class_opt_ipv6_tcp]);
3406 } else if (strcmp(pr_name, "_class_opt_ipv6_udp") == 0) {
3407 err = hxge_param_set_ip_opt(hxgep, NULL, NULL, (char *)pr_val,
3408 (caddr_t)&param_arr[param_class_opt_ipv6_udp]);
3409 } else if (strcmp(pr_name, "_class_opt_ipv6_ah") == 0) {
3410 err = hxge_param_set_ip_opt(hxgep, NULL, NULL, (char *)pr_val,
3411 (caddr_t)&param_arr[param_class_opt_ipv6_ah]);
3412 } else if (strcmp(pr_name, "_class_opt_ipv6_sctp") == 0) {
3413 err = hxge_param_set_ip_opt(hxgep, NULL, NULL, (char *)pr_val,
3414 (caddr_t)&param_arr[param_class_opt_ipv6_sctp]);
3415 } else {
3416 err = ENOTSUP;
3417 }
3418
3419 HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3420 "<== hxge_set_priv_prop: err %d", err));
3421
3422 return (err);
3423 }
3424
3425 static int
3426 hxge_get_priv_prop(p_hxge_t hxgep, const char *pr_name, uint_t pr_valsize,
3427 void *pr_val)
3428 {
3429 p_hxge_param_t param_arr = hxgep->param_arr;
3430 char valstr[MAXNAMELEN];
3431 int err = 0;
3432 uint_t strsize;
3433 int value = 0;
3434
3435 HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3436 "==> hxge_get_priv_prop: property %s", pr_name));
3437
3438 /* Receive Interrupt Blanking Parameters */
3439 if (strcmp(pr_name, "_rxdma_intr_time") == 0) {
3440 value = hxgep->intr_timeout;
3441 } else if (strcmp(pr_name, "_rxdma_intr_pkts") == 0) {
3442 value = hxgep->intr_threshold;
3443
3444 /* Classification and Load Distribution Configuration */
3445 } else if (strcmp(pr_name, "_class_opt_ipv4_tcp") == 0) {
3446 err = hxge_param_get_ip_opt(hxgep, NULL, NULL,
3447 (caddr_t)&param_arr[param_class_opt_ipv4_tcp]);
3448
3449 value = (int)param_arr[param_class_opt_ipv4_tcp].value;
3450 } else if (strcmp(pr_name, "_class_opt_ipv4_udp") == 0) {
3451 err = hxge_param_get_ip_opt(hxgep, NULL, NULL,
3452 (caddr_t)&param_arr[param_class_opt_ipv4_udp]);
3453
3454 value = (int)param_arr[param_class_opt_ipv4_udp].value;
3455 } else if (strcmp(pr_name, "_class_opt_ipv4_ah") == 0) {
3456 err = hxge_param_get_ip_opt(hxgep, NULL, NULL,
3457 (caddr_t)&param_arr[param_class_opt_ipv4_ah]);
3458
3459 value = (int)param_arr[param_class_opt_ipv4_ah].value;
3460 } else if (strcmp(pr_name, "_class_opt_ipv4_sctp") == 0) {
3461 err = hxge_param_get_ip_opt(hxgep, NULL, NULL,
3462 (caddr_t)&param_arr[param_class_opt_ipv4_sctp]);
3463
3464 value = (int)param_arr[param_class_opt_ipv4_sctp].value;
3465 } else if (strcmp(pr_name, "_class_opt_ipv6_tcp") == 0) {
3466 err = hxge_param_get_ip_opt(hxgep, NULL, NULL,
3467 (caddr_t)&param_arr[param_class_opt_ipv6_tcp]);
3468
3469 value = (int)param_arr[param_class_opt_ipv6_tcp].value;
3470 } else if (strcmp(pr_name, "_class_opt_ipv6_udp") == 0) {
3471 err = hxge_param_get_ip_opt(hxgep, NULL, NULL,
3472 (caddr_t)&param_arr[param_class_opt_ipv6_udp]);
3473
3474 value = (int)param_arr[param_class_opt_ipv6_udp].value;
3475 } else if (strcmp(pr_name, "_class_opt_ipv6_ah") == 0) {
3476 err = hxge_param_get_ip_opt(hxgep, NULL, NULL,
3477 (caddr_t)&param_arr[param_class_opt_ipv6_ah]);
3478
3479 value = (int)param_arr[param_class_opt_ipv6_ah].value;
3480 } else if (strcmp(pr_name, "_class_opt_ipv6_sctp") == 0) {
3481 err = hxge_param_get_ip_opt(hxgep, NULL, NULL,
3482 (caddr_t)&param_arr[param_class_opt_ipv6_sctp]);
3483
3484 value = (int)param_arr[param_class_opt_ipv6_sctp].value;
3485 } else {
3486 err = ENOTSUP;
3487 }
3488
3489 if (err == 0) {
3490 (void) snprintf(valstr, sizeof (valstr), "0x%x", value);
3491
3492 strsize = (uint_t)strlen(valstr);
3493 if (pr_valsize < strsize) {
3494 err = ENOBUFS;
3495 } else {
3496 (void) strlcpy(pr_val, valstr, pr_valsize);
3497 }
3498 }
3499
3500 HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3501 "<== hxge_get_priv_prop: return %d", err));
3502
3503 return (err);
3504 }
3505 /*
3506 * Module loading and removing entry points.
3507 */
3508 DDI_DEFINE_STREAM_OPS(hxge_dev_ops, nulldev, nulldev, hxge_attach, hxge_detach,
3509 nodev, NULL, D_MP, NULL, NULL);
3510
3511 extern struct mod_ops mod_driverops;
3512
3513 #define HXGE_DESC_VER "HXGE 10Gb Ethernet Driver"
3514
3515 /*
3516 * Module linkage information for the kernel.
3517 */
3518 static struct modldrv hxge_modldrv = {
3519 &mod_driverops,
3520 HXGE_DESC_VER,
3521 &hxge_dev_ops
3522 };
3523
3524 static struct modlinkage modlinkage = {
3525 MODREV_1, (void *) &hxge_modldrv, NULL
3526 };
3527
3528 int
3529 _init(void)
3530 {
3531 int status;
3532
3533 HXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _init"));
3534 mac_init_ops(&hxge_dev_ops, "hxge");
3535 status = ddi_soft_state_init(&hxge_list, sizeof (hxge_t), 0);
3536 if (status != 0) {
3537 HXGE_ERROR_MSG((NULL, HXGE_ERR_CTL,
3538 "failed to init device soft state"));
3539 mac_fini_ops(&hxge_dev_ops);
3540 goto _init_exit;
3541 }
3542
3543 status = mod_install(&modlinkage);
3544 if (status != 0) {
3545 ddi_soft_state_fini(&hxge_list);
3546 HXGE_ERROR_MSG((NULL, HXGE_ERR_CTL, "Mod install failed"));
3547 goto _init_exit;
3548 }
3549
3550 MUTEX_INIT(&hxge_common_lock, NULL, MUTEX_DRIVER, NULL);
3551
3552 _init_exit:
3553 HXGE_DEBUG_MSG((NULL, MOD_CTL, "_init status = 0x%X", status));
3554
3555 return (status);
3556 }
3557
3558 int
3559 _fini(void)
3560 {
3561 int status;
3562
3563 HXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _fini"));
3564
3565 HXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _fini: mod_remove"));
3566
3567 if (hxge_mblks_pending)
3568 return (EBUSY);
3569
3570 status = mod_remove(&modlinkage);
3571 if (status != DDI_SUCCESS) {
3572 HXGE_DEBUG_MSG((NULL, MOD_CTL,
3573 "Module removal failed 0x%08x", status));
3574 goto _fini_exit;
3575 }
3576
3577 mac_fini_ops(&hxge_dev_ops);
3578
3579 ddi_soft_state_fini(&hxge_list);
3580
3581 MUTEX_DESTROY(&hxge_common_lock);
3582
3583 _fini_exit:
3584 HXGE_DEBUG_MSG((NULL, MOD_CTL, "_fini status = 0x%08x", status));
3585
3586 return (status);
3587 }
3588
3589 int
3590 _info(struct modinfo *modinfop)
3591 {
3592 int status;
3593
3594 HXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _info"));
3595 status = mod_info(&modlinkage, modinfop);
3596 HXGE_DEBUG_MSG((NULL, MOD_CTL, " _info status = 0x%X", status));
3597
3598 return (status);
3599 }
3600
3601 /*ARGSUSED*/
3602 static hxge_status_t
3603 hxge_add_intrs(p_hxge_t hxgep)
3604 {
3605 int intr_types;
3606 int type = 0;
3607 int ddi_status = DDI_SUCCESS;
3608 hxge_status_t status = HXGE_OK;
3609
3610 HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs"));
3611
3612 hxgep->hxge_intr_type.intr_registered = B_FALSE;
3613 hxgep->hxge_intr_type.intr_enabled = B_FALSE;
3614 hxgep->hxge_intr_type.msi_intx_cnt = 0;
3615 hxgep->hxge_intr_type.intr_added = 0;
3616 hxgep->hxge_intr_type.niu_msi_enable = B_FALSE;
3617 hxgep->hxge_intr_type.intr_type = 0;
3618
3619 if (hxge_msi_enable) {
3620 hxgep->hxge_intr_type.niu_msi_enable = B_TRUE;
3621 }
3622
3623 /* Get the supported interrupt types */
3624 if ((ddi_status = ddi_intr_get_supported_types(hxgep->dip, &intr_types))
3625 != DDI_SUCCESS) {
3626 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "<== hxge_add_intrs: "
3627 "ddi_intr_get_supported_types failed: status 0x%08x",
3628 ddi_status));
3629 return (HXGE_ERROR | HXGE_DDI_FAILED);
3630 }
3631
3632 hxgep->hxge_intr_type.intr_types = intr_types;
3633
3634 HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs: "
3635 "ddi_intr_get_supported_types: 0x%08x", intr_types));
3636
3637 /*
3638 * Pick the interrupt type (MSI-X, MSI, or INTx) based on hxge_msi_enable:
3639 * 1 - MSI
3640 * 2 - MSI-X
3641 * others - FIXED
3642 */
3643 switch (hxge_msi_enable) {
3644 default:
3645 type = DDI_INTR_TYPE_FIXED;
3646 HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs: "
3647 "use fixed (intx emulation) type %08x", type));
3648 break;
3649
3650 case 2:
3651 HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs: "
3652 "ddi_intr_get_supported_types: 0x%08x", intr_types));
3653 if (intr_types & DDI_INTR_TYPE_MSIX) {
3654 type = DDI_INTR_TYPE_MSIX;
3655 HXGE_DEBUG_MSG((hxgep, INT_CTL,
3656 "==> hxge_add_intrs: "
3657 "ddi_intr_get_supported_types: MSIX 0x%08x", type));
3658 } else if (intr_types & DDI_INTR_TYPE_MSI) {
3659 type = DDI_INTR_TYPE_MSI;
3660 HXGE_DEBUG_MSG((hxgep, INT_CTL,
3661 "==> hxge_add_intrs: "
3662 "ddi_intr_get_supported_types: MSI 0x%08x", type));
3663 } else if (intr_types & DDI_INTR_TYPE_FIXED) {
3664 type = DDI_INTR_TYPE_FIXED;
3665 HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs: "
3666 "ddi_intr_get_supported_types: MSXED0x%08x", type));
3667 }
3668 break;
3669
3670 case 1:
3671 if (intr_types & DDI_INTR_TYPE_MSI) {
3672 type = DDI_INTR_TYPE_MSI;
3673 HXGE_DEBUG_MSG((hxgep, INT_CTL,
3674 "==> hxge_add_intrs: "
3675 "ddi_intr_get_supported_types: MSI 0x%08x", type));
3676 } else if (intr_types & DDI_INTR_TYPE_MSIX) {
3677 type = DDI_INTR_TYPE_MSIX;
3678 HXGE_DEBUG_MSG((hxgep, INT_CTL,
3679 "==> hxge_add_intrs: "
3680 "ddi_intr_get_supported_types: MSIX 0x%08x", type));
3681 } else if (intr_types & DDI_INTR_TYPE_FIXED) {
3682 type = DDI_INTR_TYPE_FIXED;
3683 HXGE_DEBUG_MSG((hxgep, INT_CTL,
3684 "==> hxge_add_intrs: "
3685 "ddi_intr_get_supported_types: MSXED0x%08x", type));
3686 }
3687 }
3688
3689 hxgep->hxge_intr_type.intr_type = type;
	if ((type == DDI_INTR_TYPE_MSIX || type == DDI_INTR_TYPE_MSI ||
	    type == DDI_INTR_TYPE_FIXED) &&
	    hxgep->hxge_intr_type.niu_msi_enable) {
		if ((status = hxge_add_intrs_adv(hxgep)) != DDI_SUCCESS) {
			HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
			    " hxge_add_intrs: "
			    " hxge_add_intrs_adv failed: status 0x%08x",
			    status));
			return (status);
		} else {
			HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_add_intrs: "
			    "interrupts registered : type %d", type));
			hxgep->hxge_intr_type.intr_registered = B_TRUE;

			HXGE_DEBUG_MSG((hxgep, DDI_CTL,
			    "\nAdded advanced hxge add_intr_adv "
			    "intr type 0x%x\n", type));

			return (status);
		}
	}

	if (!hxgep->hxge_intr_type.intr_registered) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "==> hxge_add_intrs: failed to register interrupts"));
		return (HXGE_ERROR | HXGE_DDI_FAILED);
	}

	HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_add_intrs"));

	return (status);
}

/*ARGSUSED*/
static hxge_status_t
hxge_add_intrs_adv(p_hxge_t hxgep)
{
	int intr_type;
	p_hxge_intr_t intrp;
	hxge_status_t status;

	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs_adv"));

	intrp = (p_hxge_intr_t)&hxgep->hxge_intr_type;
	intr_type = intrp->intr_type;

	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs_adv: type 0x%x",
	    intr_type));

	switch (intr_type) {
	case DDI_INTR_TYPE_MSI:		/* 0x2 */
	case DDI_INTR_TYPE_MSIX:	/* 0x4 */
		status = hxge_add_intrs_adv_type(hxgep, intr_type);
		break;

	case DDI_INTR_TYPE_FIXED:	/* 0x1 */
		status = hxge_add_intrs_adv_type_fix(hxgep, intr_type);
		break;

	default:
		status = HXGE_ERROR;
		break;
	}

	HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_add_intrs_adv"));

	return (status);
}

/*ARGSUSED*/
static hxge_status_t
hxge_add_intrs_adv_type(p_hxge_t hxgep, uint32_t int_type)
{
	dev_info_t *dip = hxgep->dip;
	p_hxge_ldg_t ldgp;
	p_hxge_intr_t intrp;
	uint_t *inthandler;
	void *arg1, *arg2;
	int behavior;
	int nintrs, navail;
	int nactual, nrequired, nrequest;
	int inum = 0;
	int loop = 0;
	int x, y;
	int ddi_status = DDI_SUCCESS;
	hxge_status_t status = HXGE_OK;

	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs_adv_type"));

	intrp = (p_hxge_intr_t)&hxgep->hxge_intr_type;

	ddi_status = ddi_intr_get_nintrs(dip, int_type, &nintrs);
	if ((ddi_status != DDI_SUCCESS) || (nintrs == 0)) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3784 "ddi_intr_get_nintrs() failed, status: 0x%x%, "
3785 "nintrs: %d", ddi_status, nintrs));
3786 return (HXGE_ERROR | HXGE_DDI_FAILED);
3787 }
3788
3789 ddi_status = ddi_intr_get_navail(dip, int_type, &navail);
3790 if ((ddi_status != DDI_SUCCESS) || (navail == 0)) {
3791 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3792 "ddi_intr_get_navail() failed, status: 0x%x%, "
3793 "nintrs: %d", ddi_status, navail));
		return (HXGE_ERROR | HXGE_DDI_FAILED);
	}

	HXGE_DEBUG_MSG((hxgep, INT_CTL,
	    "ddi_intr_get_navail() returned: intr type %d nintrs %d, navail %d",
	    int_type, nintrs, navail));

	/* PSARC/2007/453 MSI-X interrupt limit override */
	if (int_type == DDI_INTR_TYPE_MSIX) {
		nrequest = hxge_create_msi_property(hxgep);
		if (nrequest < navail) {
			navail = nrequest;
			HXGE_DEBUG_MSG((hxgep, INT_CTL,
			    "hxge_add_intrs_adv_type: nintrs %d "
			    "navail %d (nrequest %d)",
			    nintrs, navail, nrequest));
		}
	}

	if (int_type == DDI_INTR_TYPE_MSI && !ISP2(navail)) {
		/* MSI must be power of 2 */
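		/*
		 * Round the request down to a power of two (at most 16),
		 * since MSI allows only power-of-two vector allocations.
		 */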
		if ((navail & 16) == 16) {
			navail = 16;
		} else if ((navail & 8) == 8) {
			navail = 8;
		} else if ((navail & 4) == 4) {
			navail = 4;
		} else if ((navail & 2) == 2) {
			navail = 2;
		} else {
			navail = 1;
		}
		HXGE_DEBUG_MSG((hxgep, INT_CTL,
		    "ddi_intr_get_navail(): (msi power of 2) nintrs %d, "
		    "navail %d", nintrs, navail));
	}

	HXGE_DEBUG_MSG((hxgep, INT_CTL,
	    "requesting: intr type %d nintrs %d, navail %d",
	    int_type, nintrs, navail));

	behavior = ((int_type == DDI_INTR_TYPE_FIXED) ? DDI_INTR_ALLOC_STRICT :
	    DDI_INTR_ALLOC_NORMAL);
	intrp->intr_size = navail * sizeof (ddi_intr_handle_t);
	intrp->htable = kmem_zalloc(intrp->intr_size, KM_SLEEP);

	ddi_status = ddi_intr_alloc(dip, intrp->htable, int_type, inum,
	    navail, &nactual, behavior);
	if (ddi_status != DDI_SUCCESS || nactual == 0) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    " ddi_intr_alloc() failed: %d", ddi_status));
		kmem_free(intrp->htable, intrp->intr_size);
		return (HXGE_ERROR | HXGE_DDI_FAILED);
	}

	HXGE_DEBUG_MSG((hxgep, INT_CTL,
	    "ddi_intr_alloc() returned: navail %d nactual %d",
	    navail, nactual));

	if ((ddi_status = ddi_intr_get_pri(intrp->htable[0],
	    (uint_t *)&intrp->pri)) != DDI_SUCCESS) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    " ddi_intr_get_pri() failed: %d", ddi_status));
		/* Free already allocated interrupts */
		for (y = 0; y < nactual; y++) {
			(void) ddi_intr_free(intrp->htable[y]);
		}

		kmem_free(intrp->htable, intrp->intr_size);
		return (HXGE_ERROR | HXGE_DDI_FAILED);
	}

	nrequired = 0;
	status = hxge_ldgv_init(hxgep, &nactual, &nrequired);
	if (status != HXGE_OK) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "hxge_add_intrs_adv_typ:hxge_ldgv_init "
		    "failed: 0x%x", status));
		/* Free already allocated interrupts */
		for (y = 0; y < nactual; y++) {
			(void) ddi_intr_free(intrp->htable[y]);
		}

		kmem_free(intrp->htable, intrp->intr_size);
		return (status);
	}

	ldgp = hxgep->ldgvp->ldgp;
	HXGE_DEBUG_MSG((hxgep, INT_CTL,
	    "After hxge_ldgv_init(): nreq %d nactual %d", nrequired, nactual));

	if (nactual < nrequired)
		loop = nactual;
	else
		loop = nrequired;

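	/*
	 * Attach a handler to each vector in use: a group with a single
	 * logical device gets its dedicated handler, while a group with
	 * several logical devices gets the shared system handler.
	 */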
	for (x = 0; x < loop; x++, ldgp++) {
		ldgp->vector = (uint8_t)x;
		arg1 = ldgp->ldvp;
		arg2 = hxgep;
		if (ldgp->nldvs == 1) {
			inthandler = (uint_t *)ldgp->ldvp->ldv_intr_handler;
			HXGE_DEBUG_MSG((hxgep, INT_CTL,
			    "hxge_add_intrs_adv_type: arg1 0x%x arg2 0x%x: "
			    "1-1 int handler (entry %d)\n",
			    arg1, arg2, x));
		} else if (ldgp->nldvs > 1) {
			inthandler = (uint_t *)ldgp->sys_intr_handler;
			HXGE_DEBUG_MSG((hxgep, INT_CTL,
			    "hxge_add_intrs_adv_type: arg1 0x%x arg2 0x%x: "
			    "nldevs %d int handler (entry %d)\n",
			    arg1, arg2, ldgp->nldvs, x));
		}
		HXGE_DEBUG_MSG((hxgep, INT_CTL,
		    "==> hxge_add_intrs_adv_type: ddi_add_intr(inum) #%d "
		    "htable 0x%llx", x, intrp->htable[x]));

		if ((ddi_status = ddi_intr_add_handler(intrp->htable[x],
		    (ddi_intr_handler_t *)inthandler, arg1, arg2)) !=
		    DDI_SUCCESS) {
			HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
			    "==> hxge_add_intrs_adv_type: failed #%d "
			    "status 0x%x", x, ddi_status));
			for (y = 0; y < intrp->intr_added; y++) {
				(void) ddi_intr_remove_handler(
				    intrp->htable[y]);
			}

			/* Free already allocated intr */
			for (y = 0; y < nactual; y++) {
				(void) ddi_intr_free(intrp->htable[y]);
			}
			kmem_free(intrp->htable, intrp->intr_size);

			(void) hxge_ldgv_uninit(hxgep);

			return (HXGE_ERROR | HXGE_DDI_FAILED);
		}

		ldgp->htable_idx = x;
		intrp->intr_added++;
	}
	intrp->msi_intx_cnt = nactual;

	HXGE_DEBUG_MSG((hxgep, INT_CTL,
	    "Requested: %d, Allowed: %d msi_intx_cnt %d intr_added %d",
	    navail, nactual, intrp->msi_intx_cnt, intrp->intr_added));

	(void) ddi_intr_get_cap(intrp->htable[0], &intrp->intr_cap);
	(void) hxge_intr_ldgv_init(hxgep);

	HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_add_intrs_adv_type"));

	return (status);
}

/*ARGSUSED*/
static hxge_status_t
hxge_add_intrs_adv_type_fix(p_hxge_t hxgep, uint32_t int_type)
{
	dev_info_t *dip = hxgep->dip;
	p_hxge_ldg_t ldgp;
	p_hxge_intr_t intrp;
	uint_t *inthandler;
	void *arg1, *arg2;
	int behavior;
	int nintrs, navail;
	int nactual, nrequired;
	int inum = 0;
	int x, y;
	int ddi_status = DDI_SUCCESS;
	hxge_status_t status = HXGE_OK;

	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs_adv_type_fix"));
	intrp = (p_hxge_intr_t)&hxgep->hxge_intr_type;

	ddi_status = ddi_intr_get_nintrs(dip, int_type, &nintrs);
	if ((ddi_status != DDI_SUCCESS) || (nintrs == 0)) {
		HXGE_DEBUG_MSG((hxgep, INT_CTL,
		    "ddi_intr_get_nintrs() failed, status: 0x%x, "
		    "nintrs: %d", ddi_status, nintrs));
		return (HXGE_ERROR | HXGE_DDI_FAILED);
	}

	ddi_status = ddi_intr_get_navail(dip, int_type, &navail);
	if ((ddi_status != DDI_SUCCESS) || (navail == 0)) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3981 "ddi_intr_get_navail() failed, status: 0x%x%, "
3982 "nintrs: %d", ddi_status, navail));
		return (HXGE_ERROR | HXGE_DDI_FAILED);
	}

	HXGE_DEBUG_MSG((hxgep, INT_CTL,
3987 "ddi_intr_get_navail() returned: nintrs %d, naavail %d",
	    nintrs, navail));

	behavior = ((int_type == DDI_INTR_TYPE_FIXED) ? DDI_INTR_ALLOC_STRICT :
	    DDI_INTR_ALLOC_NORMAL);
	intrp->intr_size = navail * sizeof (ddi_intr_handle_t);
	intrp->htable = kmem_alloc(intrp->intr_size, KM_SLEEP);
	ddi_status = ddi_intr_alloc(dip, intrp->htable, int_type, inum,
	    navail, &nactual, behavior);
	if (ddi_status != DDI_SUCCESS || nactual == 0) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    " ddi_intr_alloc() failed: %d", ddi_status));
		kmem_free(intrp->htable, intrp->intr_size);
		return (HXGE_ERROR | HXGE_DDI_FAILED);
	}

	if ((ddi_status = ddi_intr_get_pri(intrp->htable[0],
	    (uint_t *)&intrp->pri)) != DDI_SUCCESS) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    " ddi_intr_get_pri() failed: %d", ddi_status));
		/* Free already allocated interrupts */
		for (y = 0; y < nactual; y++) {
			(void) ddi_intr_free(intrp->htable[y]);
		}

		kmem_free(intrp->htable, intrp->intr_size);
		return (HXGE_ERROR | HXGE_DDI_FAILED);
	}

	nrequired = 0;
	status = hxge_ldgv_init(hxgep, &nactual, &nrequired);
	if (status != HXGE_OK) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "hxge_add_intrs_adv_type_fix:hxge_ldgv_init "
		    "failed: 0x%x", status));
		/* Free already allocated interrupts */
		for (y = 0; y < nactual; y++) {
			(void) ddi_intr_free(intrp->htable[y]);
		}

		kmem_free(intrp->htable, intrp->intr_size);
		return (status);
	}

	ldgp = hxgep->ldgvp->ldgp;
	for (x = 0; x < nrequired; x++, ldgp++) {
		ldgp->vector = (uint8_t)x;
		arg1 = ldgp->ldvp;
		arg2 = hxgep;
		if (ldgp->nldvs == 1) {
			inthandler = (uint_t *)ldgp->ldvp->ldv_intr_handler;
			HXGE_DEBUG_MSG((hxgep, INT_CTL,
			    "hxge_add_intrs_adv_type_fix: "
			    "1-1 int handler(%d) ldg %d ldv %d "
			    "arg1 $%p arg2 $%p\n",
			    x, ldgp->ldg, ldgp->ldvp->ldv, arg1, arg2));
		} else if (ldgp->nldvs > 1) {
			inthandler = (uint_t *)ldgp->sys_intr_handler;
			HXGE_DEBUG_MSG((hxgep, INT_CTL,
			    "hxge_add_intrs_adv_type_fix: "
			    "shared ldv %d int handler(%d) ldv %d ldg %d"
			    "arg1 0x%016llx arg2 0x%016llx\n",
			    x, ldgp->nldvs, ldgp->ldg, ldgp->ldvp->ldv,
			    arg1, arg2));
		}

		if ((ddi_status = ddi_intr_add_handler(intrp->htable[x],
		    (ddi_intr_handler_t *)inthandler, arg1, arg2)) !=
		    DDI_SUCCESS) {
			HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
			    "==> hxge_add_intrs_adv_type_fix: failed #%d "
			    "status 0x%x", x, ddi_status));
			for (y = 0; y < intrp->intr_added; y++) {
				(void) ddi_intr_remove_handler(
				    intrp->htable[y]);
			}
			for (y = 0; y < nactual; y++) {
				(void) ddi_intr_free(intrp->htable[y]);
			}
			/* Free already allocated intr */
			kmem_free(intrp->htable, intrp->intr_size);

			(void) hxge_ldgv_uninit(hxgep);

			return (HXGE_ERROR | HXGE_DDI_FAILED);
		}
		intrp->intr_added++;
	}

	intrp->msi_intx_cnt = nactual;

	(void) ddi_intr_get_cap(intrp->htable[0], &intrp->intr_cap);

	status = hxge_intr_ldgv_init(hxgep);

	HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_add_intrs_adv_type_fix"));

	return (status);
}

/*ARGSUSED*/
static void
hxge_remove_intrs(p_hxge_t hxgep)
{
	int i, inum;
	p_hxge_intr_t intrp;

	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_remove_intrs"));
	intrp = (p_hxge_intr_t)&hxgep->hxge_intr_type;
	if (!intrp->intr_registered) {
		HXGE_DEBUG_MSG((hxgep, INT_CTL,
		    "<== hxge_remove_intrs: interrupts not registered"));
		return;
	}

	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_remove_intrs:advanced"));

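	/*
	 * Tear down in the reverse order of setup: disable delivery,
	 * remove the handlers, free the vectors, then release the
	 * handle table.
	 */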
	if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) {
		(void) ddi_intr_block_disable(intrp->htable,
		    intrp->intr_added);
	} else {
		for (i = 0; i < intrp->intr_added; i++) {
			(void) ddi_intr_disable(intrp->htable[i]);
		}
	}

	for (inum = 0; inum < intrp->intr_added; inum++) {
		if (intrp->htable[inum]) {
			(void) ddi_intr_remove_handler(intrp->htable[inum]);
		}
	}

	for (inum = 0; inum < intrp->msi_intx_cnt; inum++) {
		if (intrp->htable[inum]) {
			HXGE_DEBUG_MSG((hxgep, DDI_CTL,
			    "hxge_remove_intrs: ddi_intr_free inum %d "
			    "msi_intx_cnt %d intr_added %d",
			    inum, intrp->msi_intx_cnt, intrp->intr_added));

			(void) ddi_intr_free(intrp->htable[inum]);
		}
	}

	kmem_free(intrp->htable, intrp->intr_size);
	intrp->intr_registered = B_FALSE;
	intrp->intr_enabled = B_FALSE;
	intrp->msi_intx_cnt = 0;
	intrp->intr_added = 0;

	(void) hxge_ldgv_uninit(hxgep);

	HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_remove_intrs"));
}

/*ARGSUSED*/
static void
hxge_intrs_enable(p_hxge_t hxgep)
{
	p_hxge_intr_t intrp;
	int i;
	int status;

	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_intrs_enable"));

	intrp = (p_hxge_intr_t)&hxgep->hxge_intr_type;

	if (!intrp->intr_registered) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "<== hxge_intrs_enable: "
		    "interrupts are not registered"));
		return;
	}

	if (intrp->intr_enabled) {
		HXGE_DEBUG_MSG((hxgep, INT_CTL,
		    "<== hxge_intrs_enable: already enabled"));
		return;
	}

	if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) {
		status = ddi_intr_block_enable(intrp->htable,
		    intrp->intr_added);
		HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_intrs_enable "
		    "block enable - status 0x%x total inums #%d\n",
		    status, intrp->intr_added));
	} else {
		for (i = 0; i < intrp->intr_added; i++) {
			status = ddi_intr_enable(intrp->htable[i]);
			HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_intrs_enable "
			    "ddi_intr_enable:enable - status 0x%x "
			    "total inums %d enable inum #%d\n",
			    status, intrp->intr_added, i));
			if (status == DDI_SUCCESS) {
				intrp->intr_enabled = B_TRUE;
			}
		}
	}

	HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_intrs_enable"));
}

/*ARGSUSED*/
static void
hxge_intrs_disable(p_hxge_t hxgep)
{
	p_hxge_intr_t intrp;
	int i;

	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_intrs_disable"));

	intrp = (p_hxge_intr_t)&hxgep->hxge_intr_type;

	if (!intrp->intr_registered) {
		HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_intrs_disable: "
		    "interrupts are not registered"));
		return;
	}

	if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) {
		(void) ddi_intr_block_disable(intrp->htable,
		    intrp->intr_added);
	} else {
		for (i = 0; i < intrp->intr_added; i++) {
			(void) ddi_intr_disable(intrp->htable[i]);
		}
	}

	intrp->intr_enabled = B_FALSE;
	HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_intrs_disable"));
}

static hxge_status_t
hxge_mac_register(p_hxge_t hxgep)
{
	mac_register_t *macp;
	int status;

	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_mac_register"));

	if ((macp = mac_alloc(MAC_VERSION)) == NULL)
		return (HXGE_ERROR);

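	/*
	 * Fill in the GLDv3 registration: driver handle, callbacks, MAC
	 * address, and SDU range (the maximum SDU is the configured frame
	 * size less the link-layer overhead accounted for by
	 * MTU_TO_FRAME_SIZE).
	 */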
	macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
	macp->m_driver = hxgep;
	macp->m_dip = hxgep->dip;
	macp->m_src_addr = hxgep->ouraddr.ether_addr_octet;
	macp->m_callbacks = &hxge_m_callbacks;
	macp->m_min_sdu = 0;
	macp->m_max_sdu = hxgep->vmac.maxframesize - MTU_TO_FRAME_SIZE;
	macp->m_margin = VLAN_TAGSZ;
	macp->m_priv_props = hxge_priv_props;
	macp->m_v12n = MAC_VIRT_LEVEL1;

	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
	    "hxge_mac_register: ether addr is %x:%x:%x:%x:%x:%x",
	    macp->m_src_addr[0],
	    macp->m_src_addr[1],
	    macp->m_src_addr[2],
	    macp->m_src_addr[3],
	    macp->m_src_addr[4],
	    macp->m_src_addr[5]));

	status = mac_register(macp, &hxgep->mach);
	mac_free(macp);

	if (status != 0) {
		cmn_err(CE_WARN,
		    "hxge_mac_register failed (status %d instance %d)",
		    status, hxgep->instance);
		return (HXGE_ERROR);
	}

	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_mac_register success "
	    "(instance %d)", hxgep->instance));

	return (HXGE_OK);
}

static int
hxge_init_common_dev(p_hxge_t hxgep)
{
	p_hxge_hw_list_t hw_p;
	dev_info_t *p_dip;

	HXGE_DEBUG_MSG((hxgep, MOD_CTL, "==> hxge_init_common_dev"));

	p_dip = hxgep->p_dip;
	MUTEX_ENTER(&hxge_common_lock);

	/*
	 * Loop through existing per Hydra hardware list.
	 */
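	/*
	 * Each hxge_hw_list entry is keyed by the parent devinfo node;
	 * instances that share a parent share the entry (and its
	 * configuration, TCAM and VLAN locks), with ndevs counting the
	 * attached instances.
	 */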
	for (hw_p = hxge_hw_list; hw_p; hw_p = hw_p->next) {
		HXGE_DEBUG_MSG((hxgep, MOD_CTL,
		    "==> hxge_init_common_dev: hw_p $%p parent dip $%p",
		    hw_p, p_dip));
		if (hw_p->parent_devp == p_dip) {
			hxgep->hxge_hw_p = hw_p;
			hw_p->ndevs++;
			hw_p->hxge_p = hxgep;
			HXGE_DEBUG_MSG((hxgep, MOD_CTL,
4287 "==> hxge_init_common_device: "
4288 "hw_p $%p parent dip $%p ndevs %d (found)",
4289 hw_p, p_dip, hw_p->ndevs));
4290 break;
4291 }
4292 }
4293
4294 if (hw_p == NULL) {
4295 HXGE_DEBUG_MSG((hxgep, MOD_CTL,
4296 "==> hxge_init_common_dev: parent dip $%p (new)", p_dip));
4297 hw_p = kmem_zalloc(sizeof (hxge_hw_list_t), KM_SLEEP);
4298 hw_p->parent_devp = p_dip;
4299 hw_p->magic = HXGE_MAGIC;
4300 hxgep->hxge_hw_p = hw_p;
4301 hw_p->ndevs++;
4302 hw_p->hxge_p = hxgep;
4303 hw_p->next = hxge_hw_list;
4304
4305 MUTEX_INIT(&hw_p->hxge_cfg_lock, NULL, MUTEX_DRIVER, NULL);
4306 MUTEX_INIT(&hw_p->hxge_tcam_lock, NULL, MUTEX_DRIVER, NULL);
4307 MUTEX_INIT(&hw_p->hxge_vlan_lock, NULL, MUTEX_DRIVER, NULL);
4308
4309 hxge_hw_list = hw_p;
4310 }
4311 MUTEX_EXIT(&hxge_common_lock);
4312 HXGE_DEBUG_MSG((hxgep, MOD_CTL,
4313 "==> hxge_init_common_dev (hxge_hw_list) $%p", hxge_hw_list));
4314 HXGE_DEBUG_MSG((hxgep, MOD_CTL, "<== hxge_init_common_dev"));
4315
4316 return (HXGE_OK);
4317 }
4318
4319 static void
hxge_uninit_common_dev(p_hxge_t hxgep)4320 hxge_uninit_common_dev(p_hxge_t hxgep)
4321 {
4322 p_hxge_hw_list_t hw_p, h_hw_p;
4323 dev_info_t *p_dip;
4324
4325 HXGE_DEBUG_MSG((hxgep, MOD_CTL, "==> hxge_uninit_common_dev"));
4326 if (hxgep->hxge_hw_p == NULL) {
4327 HXGE_DEBUG_MSG((hxgep, MOD_CTL,
4328 "<== hxge_uninit_common_dev (no common)"));
4329 return;
4330 }
4331
4332 MUTEX_ENTER(&hxge_common_lock);
4333 h_hw_p = hxge_hw_list;
4334 for (hw_p = hxge_hw_list; hw_p; hw_p = hw_p->next) {
4335 p_dip = hw_p->parent_devp;
4336 if (hxgep->hxge_hw_p == hw_p && p_dip == hxgep->p_dip &&
4337 hxgep->hxge_hw_p->magic == HXGE_MAGIC &&
4338 hw_p->magic == HXGE_MAGIC) {
4339 HXGE_DEBUG_MSG((hxgep, MOD_CTL,
4340 "==> hxge_uninit_common_dev: "
4341 "hw_p $%p parent dip $%p ndevs %d (found)",
4342 hw_p, p_dip, hw_p->ndevs));
4343
4344 hxgep->hxge_hw_p = NULL;
4345 if (hw_p->ndevs) {
4346 hw_p->ndevs--;
4347 }
4348 hw_p->hxge_p = NULL;
4349 if (!hw_p->ndevs) {
4350 MUTEX_DESTROY(&hw_p->hxge_vlan_lock);
4351 MUTEX_DESTROY(&hw_p->hxge_tcam_lock);
4352 MUTEX_DESTROY(&hw_p->hxge_cfg_lock);
4353 HXGE_DEBUG_MSG((hxgep, MOD_CTL,
4354 "==> hxge_uninit_common_dev: "
4355 "hw_p $%p parent dip $%p ndevs %d (last)",
4356 hw_p, p_dip, hw_p->ndevs));
4357
4358 if (hw_p == hxge_hw_list) {
4359 HXGE_DEBUG_MSG((hxgep, MOD_CTL,
4360 "==> hxge_uninit_common_dev:"
4361 "remove head "
4362 "hw_p $%p parent dip $%p "
4363 "ndevs %d (head)",
4364 hw_p, p_dip, hw_p->ndevs));
4365 hxge_hw_list = hw_p->next;
4366 } else {
4367 HXGE_DEBUG_MSG((hxgep, MOD_CTL,
4368 "==> hxge_uninit_common_dev:"
4369 "remove middle "
4370 "hw_p $%p parent dip $%p "
4371 "ndevs %d (middle)",
4372 hw_p, p_dip, hw_p->ndevs));
4373 h_hw_p->next = hw_p->next;
4374 }
4375
4376 KMEM_FREE(hw_p, sizeof (hxge_hw_list_t));
4377 }
4378 break;
4379 } else {
4380 h_hw_p = hw_p;
4381 }
4382 }
4383
4384 MUTEX_EXIT(&hxge_common_lock);
4385 HXGE_DEBUG_MSG((hxgep, MOD_CTL,
4386 "==> hxge_uninit_common_dev (hxge_hw_list) $%p", hxge_hw_list));
4387
	HXGE_DEBUG_MSG((hxgep, MOD_CTL, "<== hxge_uninit_common_dev"));
}

#define	HXGE_MSIX_ENTRIES		32
#define	HXGE_MSIX_WAIT_COUNT		10
#define	HXGE_MSIX_PARITY_CHECK_COUNT	30

static void
hxge_link_poll(void *arg)
{
	p_hxge_t hxgep = (p_hxge_t)arg;
	hpi_handle_t handle;
	cip_link_stat_t link_stat;
	hxge_timeout *to = &hxgep->timeout;

	handle = HXGE_DEV_HPI_HANDLE(hxgep);
	HXGE_REG_RD32(handle, CIP_LINK_STAT, &link_stat.value);

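	/*
	 * Notify the MAC layer only when the XPCS link state has changed
	 * or when a report has been explicitly requested.
	 */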
	if (to->report_link_status ||
	    (to->link_status != link_stat.bits.xpcs0_link_up)) {
		to->link_status = link_stat.bits.xpcs0_link_up;
		to->report_link_status = B_FALSE;

		if (link_stat.bits.xpcs0_link_up) {
			hxge_link_update(hxgep, LINK_STATE_UP);
		} else {
			hxge_link_update(hxgep, LINK_STATE_DOWN);
		}
	}

	/* Restart the link status timer to check the link status */
	MUTEX_ENTER(&to->lock);
	to->id = timeout(hxge_link_poll, arg, to->ticks);
	MUTEX_EXIT(&to->lock);
}

static void
hxge_link_update(p_hxge_t hxgep, link_state_t state)
{
	p_hxge_stats_t statsp = (p_hxge_stats_t)hxgep->statsp;

	mac_link_update(hxgep->mach, state);
	if (state == LINK_STATE_UP) {
		statsp->mac_stats.link_speed = 10000;
		statsp->mac_stats.link_duplex = 2;
		statsp->mac_stats.link_up = 1;
	} else {
		statsp->mac_stats.link_speed = 0;
		statsp->mac_stats.link_duplex = 0;
		statsp->mac_stats.link_up = 0;
	}
}

static void
hxge_msix_init(p_hxge_t hxgep)
{
	uint32_t data0;
	uint32_t data1;
	uint32_t data2;
	int i;
	uint32_t msix_entry0;
	uint32_t msix_entry1;
	uint32_t msix_entry2;
	uint32_t msix_entry3;

	/* Change to use MSIx bar instead of indirect access */
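	/*
	 * Each MSI-X table entry is 16 bytes (address low, address high,
	 * data, vector control), hence the i * 16 offsets below; the
	 * entries are written through the MSI-X BAR and then read back.
	 */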
	for (i = 0; i < HXGE_MSIX_ENTRIES; i++) {
		data0 = 0xffffffff - i;
		data1 = 0xffffffff - i - 1;
		data2 = 0xffffffff - i - 2;

		HXGE_REG_WR32(hxgep->hpi_msi_handle, i * 16, data0);
		HXGE_REG_WR32(hxgep->hpi_msi_handle, i * 16 + 4, data1);
		HXGE_REG_WR32(hxgep->hpi_msi_handle, i * 16 + 8, data2);
		HXGE_REG_WR32(hxgep->hpi_msi_handle, i * 16 + 12, 0);
	}

	/* Initialize ram data out buffer. */
	for (i = 0; i < HXGE_MSIX_ENTRIES; i++) {
		HXGE_REG_RD32(hxgep->hpi_msi_handle, i * 16, &msix_entry0);
		HXGE_REG_RD32(hxgep->hpi_msi_handle, i * 16 + 4, &msix_entry1);
		HXGE_REG_RD32(hxgep->hpi_msi_handle, i * 16 + 8, &msix_entry2);
		HXGE_REG_RD32(hxgep->hpi_msi_handle, i * 16 + 12, &msix_entry3);
	}
}

/*
 * The following function is to support
 * PSARC/2007/453 MSI-X interrupt limit override.
 */
static int
hxge_create_msi_property(p_hxge_t hxgep)
{
	int nmsi;
	extern int ncpus;

	HXGE_DEBUG_MSG((hxgep, MOD_CTL, "==>hxge_create_msi_property"));

	(void) ddi_prop_create(DDI_DEV_T_NONE, hxgep->dip,
	    DDI_PROP_CANSLEEP, "#msix-request", NULL, 0);
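	/*
	 * The "#msix-request" property is, as the PSARC case suggests, a
	 * hint to the platform nexus that this device would like more
	 * MSI-X vectors than the default per-device limit (an
	 * interpretation of the interface, not spelled out in this file).
	 */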
	/*
	 * Request at most HXGE_MSIX_REQUEST_10G (8) MSI-X vectors; if the
	 * system has fewer CPUs than that, request one vector per CPU.
	 */
	if (ncpus >= HXGE_MSIX_REQUEST_10G) {
		nmsi = HXGE_MSIX_REQUEST_10G;
	} else {
		nmsi = ncpus;
	}

	HXGE_DEBUG_MSG((hxgep, MOD_CTL,
	    "==>hxge_create_msi_property(10G): exists 0x%x (nmsi %d)",
	    ddi_prop_exists(DDI_DEV_T_NONE, hxgep->dip,
	    DDI_PROP_CANSLEEP, "#msix-request"), nmsi));

	HXGE_DEBUG_MSG((hxgep, MOD_CTL, "<==hxge_create_msi_property"));
	return (nmsi);
}
