1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21 /*
22 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
23 * Use is subject to license terms.
24 * Copyright 2012 Milan Jurik. All rights reserved.
25 * Copyright 2016 OmniTI Computer Consulting, Inc. All rights reserved.
26 */
27
28 /*
29  * SunOS MT STREAMS Hydra 10Gb Ethernet Device Driver.
30 */
31 #include <hxge_impl.h>
32 #include <hxge_pfc.h>
33
34 /*
35 * PSARC/2007/453 MSI-X interrupt limit override
36 * (This PSARC case is limited to MSI-X vectors
37 * and SPARC platforms only).
38 */
39 uint32_t hxge_msi_enable = 2;
40
41 /*
42 * Globals: tunable parameters (/etc/system or adb)
43 *
44 */
45 uint32_t hxge_rbr_size = HXGE_RBR_RBB_DEFAULT;
46 uint32_t hxge_rbr_spare_size = 0;
47 uint32_t hxge_rcr_size = HXGE_RCR_DEFAULT;
48 uint32_t hxge_tx_ring_size = HXGE_TX_RING_DEFAULT;
49 uint32_t hxge_bcopy_thresh = TX_BCOPY_MAX;
50 uint32_t hxge_dvma_thresh = TX_FASTDVMA_MIN;
51 uint32_t hxge_dma_stream_thresh = TX_STREAM_MIN;
52 uint32_t hxge_jumbo_frame_size = MAX_FRAME_SIZE;
53
54 static hxge_os_mutex_t hxgedebuglock;
55 static int hxge_debug_init = 0;
56
57 /*
58 * Debugging flags:
59  * hxge_no_tx_lb :	set to disable transmit load balancing
60 * hxge_tx_lb_policy: 0 - TCP/UDP port (default)
61 * 1 - From the Stack
62 * 2 - Destination IP Address
63 */
64 uint32_t hxge_no_tx_lb = 0;
65 uint32_t hxge_tx_lb_policy = HXGE_TX_LB_TCPUDP;
66
67 /*
68 * Tunables to manage the receive buffer blocks.
69 *
70  * hxge_rx_threshold_hi:	copy all buffers.
71  * hxge_rx_buf_size_type:	receive buffer block size type.
72  * hxge_rx_threshold_lo:	copy only up to the tunable block size type.
73 */
74 #if defined(__sparc)
75 hxge_rxbuf_threshold_t hxge_rx_threshold_hi = HXGE_RX_COPY_6;
76 hxge_rxbuf_threshold_t hxge_rx_threshold_lo = HXGE_RX_COPY_4;
77 #else
78 hxge_rxbuf_threshold_t hxge_rx_threshold_hi = HXGE_RX_COPY_NONE;
79 hxge_rxbuf_threshold_t hxge_rx_threshold_lo = HXGE_RX_COPY_NONE;
80 #endif
81 hxge_rxbuf_type_t hxge_rx_buf_size_type = RCR_PKTBUFSZ_0;
82
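/*
 * Global register-access trace buffer, (re)initialized in hxge_attach()
 * via hpi_rtrace_buf_init().
 */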
83 rtrace_t hpi_rtracebuf;
84
85 /*
86 * Function Prototypes
87 */
88 static int hxge_attach(dev_info_t *, ddi_attach_cmd_t);
89 static int hxge_detach(dev_info_t *, ddi_detach_cmd_t);
90 static void hxge_unattach(p_hxge_t);
91
92 static hxge_status_t hxge_setup_system_dma_pages(p_hxge_t);
93
94 static hxge_status_t hxge_setup_mutexes(p_hxge_t);
95 static void hxge_destroy_mutexes(p_hxge_t);
96
97 static hxge_status_t hxge_map_regs(p_hxge_t hxgep);
98 static void hxge_unmap_regs(p_hxge_t hxgep);
99
100 static hxge_status_t hxge_add_intrs(p_hxge_t hxgep);
101 static void hxge_remove_intrs(p_hxge_t hxgep);
102 static hxge_status_t hxge_add_intrs_adv(p_hxge_t hxgep);
103 static hxge_status_t hxge_add_intrs_adv_type(p_hxge_t, uint32_t);
104 static hxge_status_t hxge_add_intrs_adv_type_fix(p_hxge_t, uint32_t);
105 static void hxge_intrs_enable(p_hxge_t hxgep);
106 static void hxge_intrs_disable(p_hxge_t hxgep);
107 static void hxge_suspend(p_hxge_t);
108 static hxge_status_t hxge_resume(p_hxge_t);
109 static hxge_status_t hxge_setup_dev(p_hxge_t);
110 static void hxge_destroy_dev(p_hxge_t);
111 static hxge_status_t hxge_alloc_mem_pool(p_hxge_t);
112 static void hxge_free_mem_pool(p_hxge_t);
113 static hxge_status_t hxge_alloc_rx_mem_pool(p_hxge_t);
114 static void hxge_free_rx_mem_pool(p_hxge_t);
115 static hxge_status_t hxge_alloc_tx_mem_pool(p_hxge_t);
116 static void hxge_free_tx_mem_pool(p_hxge_t);
117 static hxge_status_t hxge_dma_mem_alloc(p_hxge_t, dma_method_t,
118 struct ddi_dma_attr *, size_t, ddi_device_acc_attr_t *, uint_t,
119 p_hxge_dma_common_t);
120 static void hxge_dma_mem_free(p_hxge_dma_common_t);
121 static hxge_status_t hxge_alloc_rx_buf_dma(p_hxge_t, uint16_t,
122 p_hxge_dma_common_t *, size_t, size_t, uint32_t *);
123 static void hxge_free_rx_buf_dma(p_hxge_t, p_hxge_dma_common_t, uint32_t);
124 static hxge_status_t hxge_alloc_rx_cntl_dma(p_hxge_t, uint16_t,
125 p_hxge_dma_common_t *, struct ddi_dma_attr *, size_t);
126 static void hxge_free_rx_cntl_dma(p_hxge_t, p_hxge_dma_common_t);
127 static hxge_status_t hxge_alloc_tx_buf_dma(p_hxge_t, uint16_t,
128 p_hxge_dma_common_t *, size_t, size_t, uint32_t *);
129 static void hxge_free_tx_buf_dma(p_hxge_t, p_hxge_dma_common_t, uint32_t);
130 static hxge_status_t hxge_alloc_tx_cntl_dma(p_hxge_t, uint16_t,
131 p_hxge_dma_common_t *, size_t);
132 static void hxge_free_tx_cntl_dma(p_hxge_t, p_hxge_dma_common_t);
133 static int hxge_init_common_dev(p_hxge_t);
134 static void hxge_uninit_common_dev(p_hxge_t);
135
136 /*
137 * The next declarations are for the GLDv3 interface.
138 */
139 static int hxge_m_start(void *);
140 static void hxge_m_stop(void *);
141 static int hxge_m_multicst(void *, boolean_t, const uint8_t *);
142 static int hxge_m_promisc(void *, boolean_t);
143 static void hxge_m_ioctl(void *, queue_t *, mblk_t *);
144 static hxge_status_t hxge_mac_register(p_hxge_t hxgep);
145
146 static boolean_t hxge_m_getcapab(void *, mac_capab_t, void *);
147 static boolean_t hxge_param_locked(mac_prop_id_t pr_num);
148 static int hxge_m_setprop(void *barg, const char *pr_name, mac_prop_id_t pr_num,
149 uint_t pr_valsize, const void *pr_val);
150 static int hxge_m_getprop(void *barg, const char *pr_name, mac_prop_id_t pr_num,
151 uint_t pr_valsize, void *pr_val);
152 static void hxge_m_propinfo(void *barg, const char *pr_name,
153 mac_prop_id_t pr_num, mac_prop_info_handle_t mph);
154 static int hxge_set_priv_prop(p_hxge_t hxgep, const char *pr_name,
155 uint_t pr_valsize, const void *pr_val);
156 static int hxge_get_priv_prop(p_hxge_t hxgep, const char *pr_name,
157 uint_t pr_valsize, void *pr_val);
158 static void hxge_link_poll(void *arg);
159 static void hxge_link_update(p_hxge_t hxge, link_state_t state);
160 static void hxge_msix_init(p_hxge_t hxgep);
161
162 char *hxge_priv_props[] = {
163 "_rxdma_intr_time",
164 "_rxdma_intr_pkts",
165 "_class_opt_ipv4_tcp",
166 "_class_opt_ipv4_udp",
167 "_class_opt_ipv4_ah",
168 "_class_opt_ipv4_sctp",
169 "_class_opt_ipv6_tcp",
170 "_class_opt_ipv6_udp",
171 "_class_opt_ipv6_ah",
172 "_class_opt_ipv6_sctp",
173 NULL
174 };
175
176 #define HXGE_MAX_PRIV_PROPS \
177 (sizeof (hxge_priv_props)/sizeof (mac_priv_prop_t))
178
179 #define HXGE_MAGIC 0x4E584745UL
180 #define MAX_DUMP_SZ 256
181
182 #define HXGE_M_CALLBACK_FLAGS \
183 (MC_IOCTL | MC_GETCAPAB | MC_SETPROP | MC_GETPROP | MC_PROPINFO)
184
185 extern hxge_status_t hxge_pfc_set_default_mac_addr(p_hxge_t hxgep);
186
187 static mac_callbacks_t hxge_m_callbacks = {
188 HXGE_M_CALLBACK_FLAGS,
189 hxge_m_stat,
190 hxge_m_start,
191 hxge_m_stop,
192 hxge_m_promisc,
193 hxge_m_multicst,
194 NULL,
195 NULL,
196 NULL,
197 hxge_m_ioctl,
198 hxge_m_getcapab,
199 NULL,
200 NULL,
201 hxge_m_setprop,
202 hxge_m_getprop,
203 hxge_m_propinfo
204 };
205
206 /* PSARC/2007/453 MSI-X interrupt limit override. */
207 #define HXGE_MSIX_REQUEST_10G 8
208 static int hxge_create_msi_property(p_hxge_t);
209
210 /* Enable debug messages as necessary. */
211 uint64_t hxge_debug_level = 0;
212
213 /*
214 * This list contains the instance structures for the Hydra
215 * devices present in the system. The lock exists to guarantee
216 * mutually exclusive access to the list.
217 */
218 void *hxge_list = NULL;
219 void *hxge_hw_list = NULL;
220 hxge_os_mutex_t hxge_common_lock;
221
222 extern uint64_t hpi_debug_level;
223
224 extern hxge_status_t hxge_ldgv_init(p_hxge_t, int *, int *);
225 extern hxge_status_t hxge_ldgv_uninit(p_hxge_t);
226 extern hxge_status_t hxge_intr_ldgv_init(p_hxge_t);
227 extern void hxge_fm_init(p_hxge_t hxgep, ddi_device_acc_attr_t *reg_attr,
228 ddi_device_acc_attr_t *desc_attr, ddi_dma_attr_t *dma_attr);
229 extern void hxge_fm_fini(p_hxge_t hxgep);
230
231 /*
232 * Count used to maintain the number of buffers being used
233 * by Hydra instances and loaned up to the upper layers.
234 */
235 uint32_t hxge_mblks_pending = 0;
236
237 /*
238 * Device register access attributes for PIO.
239 */
240 static ddi_device_acc_attr_t hxge_dev_reg_acc_attr = {
241 DDI_DEVICE_ATTR_V0,
242 DDI_STRUCTURE_LE_ACC,
243 DDI_STRICTORDER_ACC,
244 };
245
246 /*
247 * Device descriptor access attributes for DMA.
248 */
249 static ddi_device_acc_attr_t hxge_dev_desc_dma_acc_attr = {
250 DDI_DEVICE_ATTR_V0,
251 DDI_STRUCTURE_LE_ACC,
252 DDI_STRICTORDER_ACC
253 };
254
255 /*
256 * Device buffer access attributes for DMA.
257 */
258 static ddi_device_acc_attr_t hxge_dev_buf_dma_acc_attr = {
259 DDI_DEVICE_ATTR_V0,
260 DDI_STRUCTURE_BE_ACC,
261 DDI_STRICTORDER_ACC
262 };
263
264 ddi_dma_attr_t hxge_rx_rcr_desc_dma_attr = {
265 DMA_ATTR_V0, /* version number. */
266 0, /* low address */
267 0xffffffffffffffff, /* high address */
268 0xffffffffffffffff, /* address counter max */
269 0x80000, /* alignment */
270 0xfc00fc, /* dlim_burstsizes */
271 0x1, /* minimum transfer size */
272 0xffffffffffffffff, /* maximum transfer size */
273 0xffffffffffffffff, /* maximum segment size */
274 1, /* scatter/gather list length */
275 (unsigned int)1, /* granularity */
276 0 /* attribute flags */
277 };
278
279 ddi_dma_attr_t hxge_tx_desc_dma_attr = {
280 DMA_ATTR_V0, /* version number. */
281 0, /* low address */
282 0xffffffffffffffff, /* high address */
283 0xffffffffffffffff, /* address counter max */
284 0x100000, /* alignment */
285 0xfc00fc, /* dlim_burstsizes */
286 0x1, /* minimum transfer size */
287 0xffffffffffffffff, /* maximum transfer size */
288 0xffffffffffffffff, /* maximum segment size */
289 1, /* scatter/gather list length */
290 (unsigned int)1, /* granularity */
291 0 /* attribute flags */
292 };
293
294 ddi_dma_attr_t hxge_rx_rbr_desc_dma_attr = {
295 DMA_ATTR_V0, /* version number. */
296 0, /* low address */
297 0xffffffffffffffff, /* high address */
298 0xffffffffffffffff, /* address counter max */
299 0x40000, /* alignment */
300 0xfc00fc, /* dlim_burstsizes */
301 0x1, /* minimum transfer size */
302 0xffffffffffffffff, /* maximum transfer size */
303 0xffffffffffffffff, /* maximum segment size */
304 1, /* scatter/gather list length */
305 (unsigned int)1, /* granularity */
306 0 /* attribute flags */
307 };
308
309 ddi_dma_attr_t hxge_rx_mbox_dma_attr = {
310 DMA_ATTR_V0, /* version number. */
311 0, /* low address */
312 0xffffffffffffffff, /* high address */
313 0xffffffffffffffff, /* address counter max */
314 #if defined(_BIG_ENDIAN)
315 0x2000, /* alignment */
316 #else
317 0x1000, /* alignment */
318 #endif
319 0xfc00fc, /* dlim_burstsizes */
320 0x1, /* minimum transfer size */
321 0xffffffffffffffff, /* maximum transfer size */
322 0xffffffffffffffff, /* maximum segment size */
323 5, /* scatter/gather list length */
324 (unsigned int)1, /* granularity */
325 0 /* attribute flags */
326 };
327
328 ddi_dma_attr_t hxge_tx_dma_attr = {
329 DMA_ATTR_V0, /* version number. */
330 0, /* low address */
331 0xffffffffffffffff, /* high address */
332 0xffffffffffffffff, /* address counter max */
333 #if defined(_BIG_ENDIAN)
334 0x2000, /* alignment */
335 #else
336 0x1000, /* alignment */
337 #endif
338 0xfc00fc, /* dlim_burstsizes */
339 0x1, /* minimum transfer size */
340 0xffffffffffffffff, /* maximum transfer size */
341 0xffffffffffffffff, /* maximum segment size */
342 5, /* scatter/gather list length */
343 (unsigned int)1, /* granularity */
344 0 /* attribute flags */
345 };
346
347 ddi_dma_attr_t hxge_rx_dma_attr = {
348 DMA_ATTR_V0, /* version number. */
349 0, /* low address */
350 0xffffffffffffffff, /* high address */
351 0xffffffffffffffff, /* address counter max */
352 0x10000, /* alignment */
353 0xfc00fc, /* dlim_burstsizes */
354 0x1, /* minimum transfer size */
355 0xffffffffffffffff, /* maximum transfer size */
356 0xffffffffffffffff, /* maximum segment size */
357 1, /* scatter/gather list length */
358 (unsigned int)1, /* granularity */
359 DDI_DMA_RELAXED_ORDERING /* attribute flags */
360 };
361
362 ddi_dma_lim_t hxge_dma_limits = {
363 (uint_t)0, /* dlim_addr_lo */
364 (uint_t)0xffffffff, /* dlim_addr_hi */
365 (uint_t)0xffffffff, /* dlim_cntr_max */
366 (uint_t)0xfc00fc, /* dlim_burstsizes for 32 and 64 bit xfers */
367 0x1, /* dlim_minxfer */
368 1024 /* dlim_speed */
369 };
370
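/*
 * Default DMA binding method handed to hxge_dma_mem_alloc() for buffer
 * and descriptor allocations.
 */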
371 dma_method_t hxge_force_dma = DVMA;
372
373 /*
374  * DMA chunk sizes.
375  *
376  * Try to allocate the largest possible chunk size first so that fewer
377  * DMA chunks need to be managed.
378 */
379 size_t alloc_sizes[] = {
380 0x1000, 0x2000, 0x4000, 0x8000,
381 0x10000, 0x20000, 0x40000, 0x80000,
382 0x100000, 0x200000, 0x400000, 0x800000, 0x1000000
383 };
384
385 /*
386  * hxge_attach - device attach entry point (DDI_ATTACH, DDI_RESUME, DDI_PM_RESUME).
387 */
388 static int
389 hxge_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
390 {
391 p_hxge_t hxgep = NULL;
392 int instance;
393 int status = DDI_SUCCESS;
394 int i;
395
396 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_attach"));
397
398 /*
399 	 * Get the device instance since we'll need to set up or retrieve a
400 	 * soft state for this instance.
401 */
402 instance = ddi_get_instance(dip);
403
404 switch (cmd) {
405 case DDI_ATTACH:
406 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing DDI_ATTACH"));
407 break;
408
409 case DDI_RESUME:
410 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing DDI_RESUME"));
411 hxgep = (p_hxge_t)ddi_get_soft_state(hxge_list, instance);
412 if (hxgep == NULL) {
413 status = DDI_FAILURE;
414 break;
415 }
416 if (hxgep->dip != dip) {
417 status = DDI_FAILURE;
418 break;
419 }
420 if (hxgep->suspended == DDI_PM_SUSPEND) {
421 status = ddi_dev_is_needed(hxgep->dip, 0, 1);
422 } else {
423 (void) hxge_resume(hxgep);
424 }
425 goto hxge_attach_exit;
426
427 case DDI_PM_RESUME:
428 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing DDI_PM_RESUME"));
429 hxgep = (p_hxge_t)ddi_get_soft_state(hxge_list, instance);
430 if (hxgep == NULL) {
431 status = DDI_FAILURE;
432 break;
433 }
434 if (hxgep->dip != dip) {
435 status = DDI_FAILURE;
436 break;
437 }
438 (void) hxge_resume(hxgep);
439 goto hxge_attach_exit;
440
441 default:
442 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing unknown"));
443 status = DDI_FAILURE;
444 goto hxge_attach_exit;
445 }
446
447 if (ddi_soft_state_zalloc(hxge_list, instance) == DDI_FAILURE) {
448 status = DDI_FAILURE;
449 HXGE_ERROR_MSG((hxgep, DDI_CTL,
450 "ddi_soft_state_zalloc failed"));
451 goto hxge_attach_exit;
452 }
453
454 hxgep = ddi_get_soft_state(hxge_list, instance);
455 if (hxgep == NULL) {
456 status = HXGE_ERROR;
457 HXGE_ERROR_MSG((hxgep, DDI_CTL,
458 "ddi_get_soft_state failed"));
459 goto hxge_attach_fail2;
460 }
461
462 hxgep->drv_state = 0;
463 hxgep->dip = dip;
464 hxgep->instance = instance;
465 hxgep->p_dip = ddi_get_parent(dip);
466 hxgep->hxge_debug_level = hxge_debug_level;
467 hpi_debug_level = hxge_debug_level;
468
469 /*
470 	 * Initialize the MMAC structure.
471 */
472 (void) hxge_pfc_num_macs_get(hxgep, &hxgep->mmac.total);
473 hxgep->mmac.available = hxgep->mmac.total;
474 for (i = 0; i < hxgep->mmac.total; i++) {
475 hxgep->mmac.addrs[i].set = B_FALSE;
476 hxgep->mmac.addrs[i].primary = B_FALSE;
477 }
478
479 hxge_fm_init(hxgep, &hxge_dev_reg_acc_attr, &hxge_dev_desc_dma_acc_attr,
480 &hxge_rx_dma_attr);
481
482 status = hxge_map_regs(hxgep);
483 if (status != HXGE_OK) {
484 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "hxge_map_regs failed"));
485 goto hxge_attach_fail3;
486 }
487
488 status = hxge_init_common_dev(hxgep);
489 if (status != HXGE_OK) {
490 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
491 "hxge_init_common_dev failed"));
492 goto hxge_attach_fail4;
493 }
494
495 /*
496 * Setup the Ndd parameters for this instance.
497 */
498 hxge_init_param(hxgep);
499
500 /*
501 * Setup Register Tracing Buffer.
502 */
503 hpi_rtrace_buf_init((rtrace_t *)&hpi_rtracebuf);
504
505 /* init stats ptr */
506 hxge_init_statsp(hxgep);
507
508 status = hxge_setup_mutexes(hxgep);
509 if (status != HXGE_OK) {
510 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "set mutex failed"));
511 goto hxge_attach_fail;
512 }
513
514 /* Scrub the MSI-X memory */
515 hxge_msix_init(hxgep);
516
517 status = hxge_get_config_properties(hxgep);
518 if (status != HXGE_OK) {
519 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "get_hw create failed"));
520 goto hxge_attach_fail;
521 }
522
523 /*
524 * Setup the Kstats for the driver.
525 */
526 hxge_setup_kstats(hxgep);
527 hxge_setup_param(hxgep);
528
529 status = hxge_setup_system_dma_pages(hxgep);
530 if (status != HXGE_OK) {
531 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "set dma page failed"));
532 goto hxge_attach_fail;
533 }
534
535 hxge_hw_id_init(hxgep);
536 hxge_hw_init_niu_common(hxgep);
537
538 status = hxge_setup_dev(hxgep);
539 if (status != DDI_SUCCESS) {
540 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "set dev failed"));
541 goto hxge_attach_fail;
542 }
543
544 status = hxge_add_intrs(hxgep);
545 if (status != DDI_SUCCESS) {
546 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "add_intr failed"));
547 goto hxge_attach_fail;
548 }
549
550 /*
551 * Enable interrupts.
552 */
553 hxge_intrs_enable(hxgep);
554
555 if ((status = hxge_mac_register(hxgep)) != HXGE_OK) {
556 HXGE_DEBUG_MSG((hxgep, DDI_CTL,
557 "unable to register to mac layer (%d)", status));
558 goto hxge_attach_fail;
559 }
560 mac_link_update(hxgep->mach, LINK_STATE_UNKNOWN);
561
562 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "registered to mac (instance %d)",
563 instance));
564
565 goto hxge_attach_exit;
566
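	/*
	 * Error unwind: hxge_attach_fail undoes everything via
	 * hxge_unattach(), while the numbered labels below tear down only
	 * what had been set up before the corresponding failure point.
	 */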
567 hxge_attach_fail:
568 hxge_unattach(hxgep);
569 goto hxge_attach_fail1;
570
571 hxge_attach_fail5:
572 /*
573 * Tear down the ndd parameters setup.
574 */
575 hxge_destroy_param(hxgep);
576
577 /*
578 * Tear down the kstat setup.
579 */
580 hxge_destroy_kstats(hxgep);
581
582 hxge_attach_fail4:
583 if (hxgep->hxge_hw_p) {
584 hxge_uninit_common_dev(hxgep);
585 hxgep->hxge_hw_p = NULL;
586 }
587 hxge_attach_fail3:
588 /*
589 * Unmap the register setup.
590 */
591 hxge_unmap_regs(hxgep);
592
593 hxge_fm_fini(hxgep);
594
595 hxge_attach_fail2:
596 ddi_soft_state_free(hxge_list, hxgep->instance);
597
598 hxge_attach_fail1:
599 if (status != HXGE_OK)
600 status = (HXGE_ERROR | HXGE_DDI_FAILED);
601 hxgep = NULL;
602
603 hxge_attach_exit:
604 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_attach status = 0x%08x",
605 status));
606
607 return (status);
608 }
609
610 static int
611 hxge_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
612 {
613 int status = DDI_SUCCESS;
614 int instance;
615 p_hxge_t hxgep = NULL;
616
617 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_detach"));
618 instance = ddi_get_instance(dip);
619 hxgep = ddi_get_soft_state(hxge_list, instance);
620 if (hxgep == NULL) {
621 status = DDI_FAILURE;
622 goto hxge_detach_exit;
623 }
624
625 switch (cmd) {
626 case DDI_DETACH:
627 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing DDI_DETACH"));
628 break;
629
630 case DDI_PM_SUSPEND:
631 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing DDI_PM_SUSPEND"));
632 hxgep->suspended = DDI_PM_SUSPEND;
633 hxge_suspend(hxgep);
634 break;
635
636 case DDI_SUSPEND:
637 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing DDI_SUSPEND"));
638 if (hxgep->suspended != DDI_PM_SUSPEND) {
639 hxgep->suspended = DDI_SUSPEND;
640 hxge_suspend(hxgep);
641 }
642 break;
643
644 default:
645 status = DDI_FAILURE;
646 break;
647 }
648
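	/* Only a full DDI_DETACH unregisters and unattaches; the suspend cases exit here. */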
649 if (cmd != DDI_DETACH)
650 goto hxge_detach_exit;
651
652 /*
653 * Stop the xcvr polling.
654 */
655 hxgep->suspended = cmd;
656
657 if (hxgep->mach && (status = mac_unregister(hxgep->mach)) != 0) {
658 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
659 "<== hxge_detach status = 0x%08X", status));
660 return (DDI_FAILURE);
661 }
662 HXGE_DEBUG_MSG((hxgep, DDI_CTL,
663 "<== hxge_detach (mac_unregister) status = 0x%08X", status));
664
665 hxge_unattach(hxgep);
666 hxgep = NULL;
667
668 hxge_detach_exit:
669 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_detach status = 0x%08X",
670 status));
671
672 return (status);
673 }
674
675 static void
676 hxge_unattach(p_hxge_t hxgep)
677 {
678 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_unattach"));
679
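	/* Nothing to unwind if attach never mapped the device registers. */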
680 if (hxgep == NULL || hxgep->dev_regs == NULL) {
681 return;
682 }
683
684 if (hxgep->hxge_hw_p) {
685 hxge_uninit_common_dev(hxgep);
686 hxgep->hxge_hw_p = NULL;
687 }
688
689 if (hxgep->hxge_timerid) {
690 hxge_stop_timer(hxgep, hxgep->hxge_timerid);
691 hxgep->hxge_timerid = 0;
692 }
693
694 /* Stop interrupts. */
695 hxge_intrs_disable(hxgep);
696
697 /* Stop any further interrupts. */
698 hxge_remove_intrs(hxgep);
699
700 /* Stop the device and free resources. */
701 hxge_destroy_dev(hxgep);
702
703 /* Tear down the ndd parameters setup. */
704 hxge_destroy_param(hxgep);
705
706 /* Tear down the kstat setup. */
707 hxge_destroy_kstats(hxgep);
708
709 /*
710 * Remove the list of ndd parameters which were setup during attach.
711 */
712 if (hxgep->dip) {
713 HXGE_DEBUG_MSG((hxgep, OBP_CTL,
714 " hxge_unattach: remove all properties"));
715 (void) ddi_prop_remove_all(hxgep->dip);
716 }
717
718 /*
719 * Reset RDC, TDC, PFC, and VMAC blocks from PEU to clear any
720 * previous state before unmapping the registers.
721 */
722 HXGE_REG_WR32(hxgep->hpi_handle, BLOCK_RESET, 0x0000001E);
723 HXGE_DELAY(1000);
724
725 /*
726 * Unmap the register setup.
727 */
728 hxge_unmap_regs(hxgep);
729
730 hxge_fm_fini(hxgep);
731
732 /* Destroy all mutexes. */
733 hxge_destroy_mutexes(hxgep);
734
735 /*
736 * Free the soft state data structures allocated with this instance.
737 */
738 ddi_soft_state_free(hxge_list, hxgep->instance);
739
740 HXGE_DEBUG_MSG((NULL, DDI_CTL, "<== hxge_unattach"));
741 }
742
743 static hxge_status_t
744 hxge_map_regs(p_hxge_t hxgep)
745 {
746 int ddi_status = DDI_SUCCESS;
747 p_dev_regs_t dev_regs;
748
749 #ifdef HXGE_DEBUG
750 char *sysname;
751 #endif
752
753 off_t regsize;
754 hxge_status_t status = HXGE_OK;
755 int nregs;
756
757 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_map_regs"));
758
759 if (ddi_dev_nregs(hxgep->dip, &nregs) != DDI_SUCCESS)
760 return (HXGE_ERROR);
761
762 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "hxge_map_regs: nregs: %d", nregs));
763
764 hxgep->dev_regs = NULL;
765 dev_regs = KMEM_ZALLOC(sizeof (dev_regs_t), KM_SLEEP);
766 dev_regs->hxge_regh = NULL;
767 dev_regs->hxge_pciregh = NULL;
768 dev_regs->hxge_msix_regh = NULL;
769
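	/* Register set 0: PCI configuration space. */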
770 	(void) ddi_dev_regsize(hxgep->dip, 0, &regsize);
771 HXGE_DEBUG_MSG((hxgep, DDI_CTL,
772 "hxge_map_regs: pci config size 0x%x", regsize));
773
774 ddi_status = ddi_regs_map_setup(hxgep->dip, 0,
775 (caddr_t *)&(dev_regs->hxge_pciregp), 0, 0,
776 &hxge_dev_reg_acc_attr, &dev_regs->hxge_pciregh);
777 if (ddi_status != DDI_SUCCESS) {
778 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
779 "ddi_map_regs, hxge bus config regs failed"));
780 goto hxge_map_regs_fail0;
781 }
782
783 HXGE_DEBUG_MSG((hxgep, DDI_CTL,
784 "hxge_map_reg: PCI config addr 0x%0llx handle 0x%0llx",
785 dev_regs->hxge_pciregp,
786 dev_regs->hxge_pciregh));
787
788 	(void) ddi_dev_regsize(hxgep->dip, 1, &regsize);
789 HXGE_DEBUG_MSG((hxgep, DDI_CTL,
790 "hxge_map_regs: pio size 0x%x", regsize));
791
792 /* set up the device mapped register */
793 ddi_status = ddi_regs_map_setup(hxgep->dip, 1,
794 (caddr_t *)&(dev_regs->hxge_regp), 0, 0,
795 &hxge_dev_reg_acc_attr, &dev_regs->hxge_regh);
796
797 if (ddi_status != DDI_SUCCESS) {
798 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
799 "ddi_map_regs for Hydra global reg failed"));
800 goto hxge_map_regs_fail1;
801 }
802
803 /* set up the msi/msi-x mapped register */
804 	(void) ddi_dev_regsize(hxgep->dip, 2, &regsize);
805 HXGE_DEBUG_MSG((hxgep, DDI_CTL,
806 "hxge_map_regs: msix size 0x%x", regsize));
807
808 ddi_status = ddi_regs_map_setup(hxgep->dip, 2,
809 (caddr_t *)&(dev_regs->hxge_msix_regp), 0, 0,
810 &hxge_dev_reg_acc_attr, &dev_regs->hxge_msix_regh);
811
812 if (ddi_status != DDI_SUCCESS) {
813 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
814 "ddi_map_regs for msi reg failed"));
815 goto hxge_map_regs_fail2;
816 }
817
818 hxgep->dev_regs = dev_regs;
819
820 HPI_PCI_ACC_HANDLE_SET(hxgep, dev_regs->hxge_pciregh);
821 HPI_PCI_ADD_HANDLE_SET(hxgep, (hpi_reg_ptr_t)dev_regs->hxge_pciregp);
822 HPI_MSI_ACC_HANDLE_SET(hxgep, dev_regs->hxge_msix_regh);
823 HPI_MSI_ADD_HANDLE_SET(hxgep, (hpi_reg_ptr_t)dev_regs->hxge_msix_regp);
824
825 HPI_ACC_HANDLE_SET(hxgep, dev_regs->hxge_regh);
826 HPI_ADD_HANDLE_SET(hxgep, (hpi_reg_ptr_t)dev_regs->hxge_regp);
827
828 HPI_REG_ACC_HANDLE_SET(hxgep, dev_regs->hxge_regh);
829 HPI_REG_ADD_HANDLE_SET(hxgep, (hpi_reg_ptr_t)dev_regs->hxge_regp);
830
831 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "hxge_map_reg: hardware addr 0x%0llx "
832 " handle 0x%0llx", dev_regs->hxge_regp, dev_regs->hxge_regh));
833
834 goto hxge_map_regs_exit;
835
836 hxge_map_regs_fail3:
837 if (dev_regs->hxge_msix_regh) {
838 ddi_regs_map_free(&dev_regs->hxge_msix_regh);
839 }
840
841 hxge_map_regs_fail2:
842 if (dev_regs->hxge_regh) {
843 ddi_regs_map_free(&dev_regs->hxge_regh);
844 }
845
846 hxge_map_regs_fail1:
847 if (dev_regs->hxge_pciregh) {
848 ddi_regs_map_free(&dev_regs->hxge_pciregh);
849 }
850
851 hxge_map_regs_fail0:
852 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "Freeing register set memory"));
853 kmem_free(dev_regs, sizeof (dev_regs_t));
854
855 hxge_map_regs_exit:
856 if (ddi_status != DDI_SUCCESS)
857 status |= (HXGE_ERROR | HXGE_DDI_FAILED);
858 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_map_regs"));
859 return (status);
860 }
861
862 static void
863 hxge_unmap_regs(p_hxge_t hxgep)
864 {
865 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_unmap_regs"));
866 if (hxgep->dev_regs) {
867 if (hxgep->dev_regs->hxge_pciregh) {
868 HXGE_DEBUG_MSG((hxgep, DDI_CTL,
869 "==> hxge_unmap_regs: bus"));
870 ddi_regs_map_free(&hxgep->dev_regs->hxge_pciregh);
871 hxgep->dev_regs->hxge_pciregh = NULL;
872 }
873
874 if (hxgep->dev_regs->hxge_regh) {
875 HXGE_DEBUG_MSG((hxgep, DDI_CTL,
876 "==> hxge_unmap_regs: device registers"));
877 ddi_regs_map_free(&hxgep->dev_regs->hxge_regh);
878 hxgep->dev_regs->hxge_regh = NULL;
879 }
880
881 if (hxgep->dev_regs->hxge_msix_regh) {
882 HXGE_DEBUG_MSG((hxgep, DDI_CTL,
883 "==> hxge_unmap_regs: device interrupts"));
884 ddi_regs_map_free(&hxgep->dev_regs->hxge_msix_regh);
885 hxgep->dev_regs->hxge_msix_regh = NULL;
886 }
887 kmem_free(hxgep->dev_regs, sizeof (dev_regs_t));
888 hxgep->dev_regs = NULL;
889 }
890 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_unmap_regs"));
891 }
892
893 static hxge_status_t
894 hxge_setup_mutexes(p_hxge_t hxgep)
895 {
896 int ddi_status = DDI_SUCCESS;
897 hxge_status_t status = HXGE_OK;
898
899 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_setup_mutexes"));
900
901 /*
902 	 * Get the interrupt cookie so the mutexes can be initialized.
903 */
904 ddi_status = ddi_get_iblock_cookie(hxgep->dip, 0,
905 &hxgep->interrupt_cookie);
906
907 if (ddi_status != DDI_SUCCESS) {
908 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
909 "<== hxge_setup_mutexes: failed 0x%x", ddi_status));
910 goto hxge_setup_mutexes_exit;
911 }
912
913 /*
914 	 * Initialize the mutexes for this device.
915 */
916 MUTEX_INIT(hxgep->genlock, NULL,
917 MUTEX_DRIVER, (void *) hxgep->interrupt_cookie);
918 MUTEX_INIT(&hxgep->vmac_lock, NULL,
919 MUTEX_DRIVER, (void *) hxgep->interrupt_cookie);
920 MUTEX_INIT(&hxgep->ouraddr_lock, NULL,
921 MUTEX_DRIVER, (void *) hxgep->interrupt_cookie);
922 RW_INIT(&hxgep->filter_lock, NULL,
923 RW_DRIVER, (void *) hxgep->interrupt_cookie);
924 MUTEX_INIT(&hxgep->pio_lock, NULL,
925 MUTEX_DRIVER, (void *) hxgep->interrupt_cookie);
926 MUTEX_INIT(&hxgep->timeout.lock, NULL,
927 MUTEX_DRIVER, (void *) hxgep->interrupt_cookie);
928
929 hxge_setup_mutexes_exit:
930 HXGE_DEBUG_MSG((hxgep, DDI_CTL,
931 "<== hxge_setup_mutexes status = %x", status));
932
933 if (ddi_status != DDI_SUCCESS)
934 status |= (HXGE_ERROR | HXGE_DDI_FAILED);
935
936 return (status);
937 }
938
939 static void
940 hxge_destroy_mutexes(p_hxge_t hxgep)
941 {
942 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_destroy_mutexes"));
943 RW_DESTROY(&hxgep->filter_lock);
944 MUTEX_DESTROY(&hxgep->vmac_lock);
945 MUTEX_DESTROY(&hxgep->ouraddr_lock);
946 MUTEX_DESTROY(hxgep->genlock);
947 MUTEX_DESTROY(&hxgep->pio_lock);
948 MUTEX_DESTROY(&hxgep->timeout.lock);
949
950 if (hxge_debug_init == 1) {
951 MUTEX_DESTROY(&hxgedebuglock);
952 hxge_debug_init = 0;
953 }
954
955 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_destroy_mutexes"));
956 }
957
958 hxge_status_t
959 hxge_init(p_hxge_t hxgep)
960 {
961 hxge_status_t status = HXGE_OK;
962
963 HXGE_DEBUG_MSG((hxgep, STR_CTL, "==> hxge_init"));
964
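	/* Nothing to do if the hardware has already been initialized. */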
965 if (hxgep->drv_state & STATE_HW_INITIALIZED) {
966 return (status);
967 }
968
969 /*
970 * Allocate system memory for the receive/transmit buffer blocks and
971 * receive/transmit descriptor rings.
972 */
973 status = hxge_alloc_mem_pool(hxgep);
974 if (status != HXGE_OK) {
975 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "alloc mem failed\n"));
976 goto hxge_init_fail1;
977 }
978
979 /*
980 * Initialize and enable TXDMA channels.
981 */
982 status = hxge_init_txdma_channels(hxgep);
983 if (status != HXGE_OK) {
984 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "init txdma failed\n"));
985 goto hxge_init_fail3;
986 }
987
988 /*
989 * Initialize and enable RXDMA channels.
990 */
991 status = hxge_init_rxdma_channels(hxgep);
992 if (status != HXGE_OK) {
993 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "init rxdma failed\n"));
994 goto hxge_init_fail4;
995 }
996
997 /*
998 * Initialize TCAM
999 */
1000 status = hxge_classify_init(hxgep);
1001 if (status != HXGE_OK) {
1002 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "init classify failed\n"));
1003 goto hxge_init_fail5;
1004 }
1005
1006 /*
1007 * Initialize the VMAC block.
1008 */
1009 status = hxge_vmac_init(hxgep);
1010 if (status != HXGE_OK) {
1011 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "init MAC failed\n"));
1012 goto hxge_init_fail5;
1013 }
1014
1015 	/* Bringup: may be unnecessary once PXE and FCode support is available */
1016 status = hxge_pfc_set_default_mac_addr(hxgep);
1017 if (status != HXGE_OK) {
1018 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
1019 "Default Address Failure\n"));
1020 goto hxge_init_fail5;
1021 }
1022
1023 /*
1024 * Enable hardware interrupts.
1025 */
1026 hxge_intr_hw_enable(hxgep);
1027 hxgep->drv_state |= STATE_HW_INITIALIZED;
1028
1029 goto hxge_init_exit;
1030
1031 hxge_init_fail5:
1032 hxge_uninit_rxdma_channels(hxgep);
1033 hxge_init_fail4:
1034 hxge_uninit_txdma_channels(hxgep);
1035 hxge_init_fail3:
1036 hxge_free_mem_pool(hxgep);
1037 hxge_init_fail1:
1038 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
1039 "<== hxge_init status (failed) = 0x%08x", status));
1040 return (status);
1041
1042 hxge_init_exit:
1043
1044 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_init status = 0x%08x",
1045 status));
1046
1047 return (status);
1048 }
1049
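/*
 * Arm a driver timeout (msec is in milliseconds) unless this instance is
 * suspended; returns the timeout id, or NULL when no timer was started.
 */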
1050 timeout_id_t
1051 hxge_start_timer(p_hxge_t hxgep, fptrv_t func, int msec)
1052 {
1053 if ((hxgep->suspended == 0) || (hxgep->suspended == DDI_RESUME)) {
1054 return (timeout(func, (caddr_t)hxgep,
1055 drv_usectohz(1000 * msec)));
1056 }
1057 return (NULL);
1058 }
1059
1060 /*ARGSUSED*/
1061 void
1062 hxge_stop_timer(p_hxge_t hxgep, timeout_id_t timerid)
1063 {
1064 if (timerid) {
1065 (void) untimeout(timerid);
1066 }
1067 }
1068
1069 void
1070 hxge_uninit(p_hxge_t hxgep)
1071 {
1072 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_uninit"));
1073
1074 if (!(hxgep->drv_state & STATE_HW_INITIALIZED)) {
1075 HXGE_DEBUG_MSG((hxgep, DDI_CTL,
1076 "==> hxge_uninit: not initialized"));
1077 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_uninit"));
1078 return;
1079 }
1080
1081 /* Stop timer */
1082 if (hxgep->hxge_timerid) {
1083 hxge_stop_timer(hxgep, hxgep->hxge_timerid);
1084 hxgep->hxge_timerid = 0;
1085 }
1086
1087 (void) hxge_intr_hw_disable(hxgep);
1088
1089 /* Reset the receive VMAC side. */
1090 (void) hxge_rx_vmac_disable(hxgep);
1091
1092 /* Free classification resources */
1093 (void) hxge_classify_uninit(hxgep);
1094
1095 /* Reset the transmit/receive DMA side. */
1096 (void) hxge_txdma_hw_mode(hxgep, HXGE_DMA_STOP);
1097 (void) hxge_rxdma_hw_mode(hxgep, HXGE_DMA_STOP);
1098
1099 hxge_uninit_txdma_channels(hxgep);
1100 hxge_uninit_rxdma_channels(hxgep);
1101
1102 /* Reset the transmit VMAC side. */
1103 (void) hxge_tx_vmac_disable(hxgep);
1104
1105 hxge_free_mem_pool(hxgep);
1106
1107 hxgep->drv_state &= ~STATE_HW_INITIALIZED;
1108
1109 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_uninit"));
1110 }
1111
1112 /*ARGSUSED*/
1113 /*VARARGS*/
1114 void
1115 hxge_debug_msg(p_hxge_t hxgep, uint64_t level, char *fmt, ...)
1116 {
1117 char msg_buffer[1048];
1118 char prefix_buffer[32];
1119 int instance;
1120 uint64_t debug_level;
1121 int cmn_level = CE_CONT;
1122 va_list ap;
1123
1124 debug_level = (hxgep == NULL) ? hxge_debug_level :
1125 hxgep->hxge_debug_level;
1126
1127 if ((level & debug_level) || (level == HXGE_NOTE) ||
1128 (level == HXGE_ERR_CTL)) {
1129 /* do the msg processing */
1130 if (hxge_debug_init == 0) {
1131 MUTEX_INIT(&hxgedebuglock, NULL, MUTEX_DRIVER, NULL);
1132 hxge_debug_init = 1;
1133 }
1134
1135 MUTEX_ENTER(&hxgedebuglock);
1136
1137 if ((level & HXGE_NOTE)) {
1138 cmn_level = CE_NOTE;
1139 }
1140
1141 if (level & HXGE_ERR_CTL) {
1142 cmn_level = CE_WARN;
1143 }
1144
1145 va_start(ap, fmt);
1146 (void) vsprintf(msg_buffer, fmt, ap);
1147 va_end(ap);
1148
1149 if (hxgep == NULL) {
1150 instance = -1;
1151 (void) sprintf(prefix_buffer, "%s :", "hxge");
1152 } else {
1153 instance = hxgep->instance;
1154 (void) sprintf(prefix_buffer,
1155 "%s%d :", "hxge", instance);
1156 }
1157
1158 MUTEX_EXIT(&hxgedebuglock);
1159 cmn_err(cmn_level, "%s %s\n", prefix_buffer, msg_buffer);
1160 }
1161 }
1162
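/*
 * Format up to MAX_DUMP_SZ bytes of a packet as colon-separated hex into a
 * static buffer (not reentrant); larger packets are dumped head and tail
 * with a run of dots in between.
 */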
1163 char *
1164 hxge_dump_packet(char *addr, int size)
1165 {
1166 uchar_t *ap = (uchar_t *)addr;
1167 int i;
1168 static char etherbuf[1024];
1169 char *cp = etherbuf;
1170 char digits[] = "0123456789abcdef";
1171
1172 if (!size)
1173 size = 60;
1174
1175 if (size > MAX_DUMP_SZ) {
1176 /* Dump the leading bytes */
1177 for (i = 0; i < MAX_DUMP_SZ / 2; i++) {
1178 if (*ap > 0x0f)
1179 *cp++ = digits[*ap >> 4];
1180 *cp++ = digits[*ap++ & 0xf];
1181 *cp++ = ':';
1182 }
1183 for (i = 0; i < 20; i++)
1184 *cp++ = '.';
1185 /* Dump the last MAX_DUMP_SZ/2 bytes */
1186 ap = (uchar_t *)(addr + (size - MAX_DUMP_SZ / 2));
1187 for (i = 0; i < MAX_DUMP_SZ / 2; i++) {
1188 if (*ap > 0x0f)
1189 *cp++ = digits[*ap >> 4];
1190 *cp++ = digits[*ap++ & 0xf];
1191 *cp++ = ':';
1192 }
1193 } else {
1194 for (i = 0; i < size; i++) {
1195 if (*ap > 0x0f)
1196 *cp++ = digits[*ap >> 4];
1197 *cp++ = digits[*ap++ & 0xf];
1198 *cp++ = ':';
1199 }
1200 }
1201 *--cp = 0;
1202 return (etherbuf);
1203 }
1204
1205 static void
1206 hxge_suspend(p_hxge_t hxgep)
1207 {
1208 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_suspend"));
1209
1210 /*
1211 * Stop the link status timer before hxge_intrs_disable() to avoid
1212 	 * accessing the MSIX table simultaneously. Note that the timer
1213 * routine polls for MSIX parity errors.
1214 */
1215 MUTEX_ENTER(&hxgep->timeout.lock);
1216 if (hxgep->timeout.id)
1217 (void) untimeout(hxgep->timeout.id);
1218 MUTEX_EXIT(&hxgep->timeout.lock);
1219
1220 hxge_intrs_disable(hxgep);
1221 hxge_destroy_dev(hxgep);
1222
1223 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_suspend"));
1224 }
1225
1226 static hxge_status_t
1227 hxge_resume(p_hxge_t hxgep)
1228 {
1229 hxge_status_t status = HXGE_OK;
1230
1231 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_resume"));
1232 hxgep->suspended = DDI_RESUME;
1233
1234 (void) hxge_rxdma_hw_mode(hxgep, HXGE_DMA_START);
1235 (void) hxge_txdma_hw_mode(hxgep, HXGE_DMA_START);
1236
1237 (void) hxge_rx_vmac_enable(hxgep);
1238 (void) hxge_tx_vmac_enable(hxgep);
1239
1240 hxge_intrs_enable(hxgep);
1241
1242 hxgep->suspended = 0;
1243
1244 /*
1245 * Resume the link status timer after hxge_intrs_enable to avoid
1246 * accessing MSIX table simultaneously.
1247 */
1248 MUTEX_ENTER(&hxgep->timeout.lock);
1249 hxgep->timeout.id = timeout(hxge_link_poll, (void *)hxgep,
1250 hxgep->timeout.ticks);
1251 MUTEX_EXIT(&hxgep->timeout.lock);
1252
1253 HXGE_DEBUG_MSG((hxgep, DDI_CTL,
1254 "<== hxge_resume status = 0x%x", status));
1255
1256 return (status);
1257 }
1258
1259 static hxge_status_t
1260 hxge_setup_dev(p_hxge_t hxgep)
1261 {
1262 hxge_status_t status = HXGE_OK;
1263
1264 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_setup_dev"));
1265
1266 status = hxge_link_init(hxgep);
1267 if (fm_check_acc_handle(hxgep->dev_regs->hxge_regh) != DDI_FM_OK) {
1268 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
1269 "Bad register acc handle"));
1270 status = HXGE_ERROR;
1271 }
1272
1273 if (status != HXGE_OK) {
1274 HXGE_DEBUG_MSG((hxgep, MAC_CTL,
1275 " hxge_setup_dev status (link init 0x%08x)", status));
1276 goto hxge_setup_dev_exit;
1277 }
1278
1279 hxge_setup_dev_exit:
1280 HXGE_DEBUG_MSG((hxgep, DDI_CTL,
1281 "<== hxge_setup_dev status = 0x%08x", status));
1282
1283 return (status);
1284 }
1285
1286 static void
1287 hxge_destroy_dev(p_hxge_t hxgep)
1288 {
1289 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_destroy_dev"));
1290
1291 (void) hxge_hw_stop(hxgep);
1292
1293 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_destroy_dev"));
1294 }
1295
1296 static hxge_status_t
1297 hxge_setup_system_dma_pages(p_hxge_t hxgep)
1298 {
1299 int ddi_status = DDI_SUCCESS;
1300 uint_t count;
1301 ddi_dma_cookie_t cookie;
1302 uint_t iommu_pagesize;
1303 hxge_status_t status = HXGE_OK;
1304
1305 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_setup_system_dma_pages"));
1306
1307 hxgep->sys_page_sz = ddi_ptob(hxgep->dip, (ulong_t)1);
1308 iommu_pagesize = dvma_pagesize(hxgep->dip);
1309
1310 HXGE_DEBUG_MSG((hxgep, DDI_CTL,
1311 " hxge_setup_system_dma_pages: page %d (ddi_ptob %d) "
1312 " default_block_size %d iommu_pagesize %d",
1313 hxgep->sys_page_sz, ddi_ptob(hxgep->dip, (ulong_t)1),
1314 hxgep->rx_default_block_size, iommu_pagesize));
1315
1316 if (iommu_pagesize != 0) {
1317 if (hxgep->sys_page_sz == iommu_pagesize) {
1318 			/* Hydra supports pages of up to 8K */
1319 if (iommu_pagesize > 0x2000)
1320 hxgep->sys_page_sz = 0x2000;
1321 } else {
1322 if (hxgep->sys_page_sz > iommu_pagesize)
1323 hxgep->sys_page_sz = iommu_pagesize;
1324 }
1325 }
1326
1327 hxgep->sys_page_mask = ~(hxgep->sys_page_sz - 1);
1328
1329 HXGE_DEBUG_MSG((hxgep, DDI_CTL,
1330 "==> hxge_setup_system_dma_pages: page %d (ddi_ptob %d) "
1331 "default_block_size %d page mask %d",
1332 hxgep->sys_page_sz, ddi_ptob(hxgep->dip, (ulong_t)1),
1333 hxgep->rx_default_block_size, hxgep->sys_page_mask));
1334
1335 switch (hxgep->sys_page_sz) {
1336 default:
1337 hxgep->sys_page_sz = 0x1000;
1338 hxgep->sys_page_mask = ~(hxgep->sys_page_sz - 1);
1339 hxgep->rx_default_block_size = 0x1000;
1340 hxgep->rx_bksize_code = RBR_BKSIZE_4K;
1341 break;
1342 case 0x1000:
1343 hxgep->rx_default_block_size = 0x1000;
1344 hxgep->rx_bksize_code = RBR_BKSIZE_4K;
1345 break;
1346 case 0x2000:
1347 hxgep->rx_default_block_size = 0x2000;
1348 hxgep->rx_bksize_code = RBR_BKSIZE_8K;
1349 break;
1350 }
1351
1352 hxge_rx_dma_attr.dma_attr_align = hxgep->sys_page_sz;
1353 hxge_tx_dma_attr.dma_attr_align = hxgep->sys_page_sz;
1354
1355 /*
1356 * Get the system DMA burst size.
1357 */
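	/*
	 * A scratch handle is allocated and bound to an arbitrary kernel
	 * address only so ddi_dma_burstsizes() can report what the platform
	 * supports; the handle is unbound and freed again below.
	 */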
1358 ddi_status = ddi_dma_alloc_handle(hxgep->dip, &hxge_tx_dma_attr,
1359 DDI_DMA_DONTWAIT, 0, &hxgep->dmasparehandle);
1360 if (ddi_status != DDI_SUCCESS) {
1361 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
1362 "ddi_dma_alloc_handle: failed status 0x%x", ddi_status));
1363 goto hxge_get_soft_properties_exit;
1364 }
1365
1366 ddi_status = ddi_dma_addr_bind_handle(hxgep->dmasparehandle, NULL,
1367 (caddr_t)hxgep->dmasparehandle, sizeof (hxgep->dmasparehandle),
1368 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT, 0,
1369 &cookie, &count);
1370 if (ddi_status != DDI_DMA_MAPPED) {
1371 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
1372 "Binding spare handle to find system burstsize failed."));
1373 ddi_status = DDI_FAILURE;
1374 goto hxge_get_soft_properties_fail1;
1375 }
1376
1377 hxgep->sys_burst_sz = ddi_dma_burstsizes(hxgep->dmasparehandle);
1378 (void) ddi_dma_unbind_handle(hxgep->dmasparehandle);
1379
1380 hxge_get_soft_properties_fail1:
1381 ddi_dma_free_handle(&hxgep->dmasparehandle);
1382
1383 hxge_get_soft_properties_exit:
1384
1385 if (ddi_status != DDI_SUCCESS)
1386 status |= (HXGE_ERROR | HXGE_DDI_FAILED);
1387
1388 HXGE_DEBUG_MSG((hxgep, DDI_CTL,
1389 "<== hxge_setup_system_dma_pages status = 0x%08x", status));
1390
1391 return (status);
1392 }
1393
1394 static hxge_status_t
1395 hxge_alloc_mem_pool(p_hxge_t hxgep)
1396 {
1397 hxge_status_t status = HXGE_OK;
1398
1399 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_alloc_mem_pool"));
1400
1401 status = hxge_alloc_rx_mem_pool(hxgep);
1402 if (status != HXGE_OK) {
1403 return (HXGE_ERROR);
1404 }
1405
1406 status = hxge_alloc_tx_mem_pool(hxgep);
1407 if (status != HXGE_OK) {
1408 hxge_free_rx_mem_pool(hxgep);
1409 return (HXGE_ERROR);
1410 }
1411
1412 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_alloc_mem_pool"));
1413 return (HXGE_OK);
1414 }
1415
1416 static void
1417 hxge_free_mem_pool(p_hxge_t hxgep)
1418 {
1419 HXGE_DEBUG_MSG((hxgep, MEM_CTL, "==> hxge_free_mem_pool"));
1420
1421 hxge_free_rx_mem_pool(hxgep);
1422 hxge_free_tx_mem_pool(hxgep);
1423
1424 HXGE_DEBUG_MSG((hxgep, MEM_CTL, "<== hxge_free_mem_pool"));
1425 }
1426
1427 static hxge_status_t
1428 hxge_alloc_rx_mem_pool(p_hxge_t hxgep)
1429 {
1430 int i, j;
1431 uint32_t ndmas, st_rdc;
1432 p_hxge_dma_pt_cfg_t p_all_cfgp;
1433 p_hxge_hw_pt_cfg_t p_cfgp;
1434 p_hxge_dma_pool_t dma_poolp;
1435 p_hxge_dma_common_t *dma_buf_p;
1436 p_hxge_dma_pool_t dma_rbr_cntl_poolp;
1437 p_hxge_dma_common_t *dma_rbr_cntl_p;
1438 p_hxge_dma_pool_t dma_rcr_cntl_poolp;
1439 p_hxge_dma_common_t *dma_rcr_cntl_p;
1440 p_hxge_dma_pool_t dma_mbox_cntl_poolp;
1441 p_hxge_dma_common_t *dma_mbox_cntl_p;
1442 size_t rx_buf_alloc_size;
1443 size_t rx_rbr_cntl_alloc_size;
1444 size_t rx_rcr_cntl_alloc_size;
1445 size_t rx_mbox_cntl_alloc_size;
1446 uint32_t *num_chunks; /* per dma */
1447 hxge_status_t status = HXGE_OK;
1448
1449 uint32_t hxge_port_rbr_size;
1450 uint32_t hxge_port_rbr_spare_size;
1451 uint32_t hxge_port_rcr_size;
1452
1453 HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_alloc_rx_mem_pool"));
1454
1455 p_all_cfgp = (p_hxge_dma_pt_cfg_t)&hxgep->pt_config;
1456 p_cfgp = (p_hxge_hw_pt_cfg_t)&p_all_cfgp->hw_config;
1457 st_rdc = p_cfgp->start_rdc;
1458 ndmas = p_cfgp->max_rdcs;
1459
1460 HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1461 " hxge_alloc_rx_mem_pool st_rdc %d ndmas %d", st_rdc, ndmas));
1462
1463 /*
1464 * Allocate memory for each receive DMA channel.
1465 */
1466 dma_poolp = (p_hxge_dma_pool_t)KMEM_ZALLOC(sizeof (hxge_dma_pool_t),
1467 KM_SLEEP);
1468 dma_buf_p = (p_hxge_dma_common_t *)KMEM_ZALLOC(
1469 sizeof (p_hxge_dma_common_t) * ndmas, KM_SLEEP);
1470
1471 dma_rbr_cntl_poolp = (p_hxge_dma_pool_t)
1472 KMEM_ZALLOC(sizeof (hxge_dma_pool_t), KM_SLEEP);
1473 dma_rbr_cntl_p = (p_hxge_dma_common_t *)KMEM_ZALLOC(
1474 sizeof (p_hxge_dma_common_t) * ndmas, KM_SLEEP);
1475 dma_rcr_cntl_poolp = (p_hxge_dma_pool_t)
1476 KMEM_ZALLOC(sizeof (hxge_dma_pool_t), KM_SLEEP);
1477 dma_rcr_cntl_p = (p_hxge_dma_common_t *)KMEM_ZALLOC(
1478 sizeof (p_hxge_dma_common_t) * ndmas, KM_SLEEP);
1479 dma_mbox_cntl_poolp = (p_hxge_dma_pool_t)
1480 KMEM_ZALLOC(sizeof (hxge_dma_pool_t), KM_SLEEP);
1481 dma_mbox_cntl_p = (p_hxge_dma_common_t *)KMEM_ZALLOC(
1482 sizeof (p_hxge_dma_common_t) * ndmas, KM_SLEEP);
1483
1484 num_chunks = (uint32_t *)KMEM_ZALLOC(sizeof (uint32_t) * ndmas,
1485 KM_SLEEP);
1486
1487 /*
1488 	 * Assume that each DMA channel will be configured with the default
1489 	 * block size. RBR block counts are rounded up to a multiple of the batch count (16).
1490 */
1491 hxge_port_rbr_size = p_all_cfgp->rbr_size;
1492 hxge_port_rcr_size = p_all_cfgp->rcr_size;
1493
1494 if (!hxge_port_rbr_size) {
1495 hxge_port_rbr_size = HXGE_RBR_RBB_DEFAULT;
1496 }
1497
1498 if (hxge_port_rbr_size % HXGE_RXDMA_POST_BATCH) {
1499 hxge_port_rbr_size = (HXGE_RXDMA_POST_BATCH *
1500 (hxge_port_rbr_size / HXGE_RXDMA_POST_BATCH + 1));
1501 }
1502
1503 p_all_cfgp->rbr_size = hxge_port_rbr_size;
1504 hxge_port_rbr_spare_size = hxge_rbr_spare_size;
1505
1506 if (hxge_port_rbr_spare_size % HXGE_RXDMA_POST_BATCH) {
1507 hxge_port_rbr_spare_size = (HXGE_RXDMA_POST_BATCH *
1508 (hxge_port_rbr_spare_size / HXGE_RXDMA_POST_BATCH + 1));
1509 }
1510
1511 rx_buf_alloc_size = (hxgep->rx_default_block_size *
1512 (hxge_port_rbr_size + hxge_port_rbr_spare_size));
1513
1514 /*
1515 * Addresses of receive block ring, receive completion ring and the
1516 * mailbox must be all cache-aligned (64 bytes).
1517 */
1518 rx_rbr_cntl_alloc_size = hxge_port_rbr_size + hxge_port_rbr_spare_size;
1519 rx_rbr_cntl_alloc_size *= sizeof (rx_desc_t);
1520 rx_rcr_cntl_alloc_size = sizeof (rcr_entry_t) * hxge_port_rcr_size;
1521 rx_mbox_cntl_alloc_size = sizeof (rxdma_mailbox_t);
1522
1523 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_alloc_rx_mem_pool: "
1524 "hxge_port_rbr_size = %d hxge_port_rbr_spare_size = %d "
1525 	    "hxge_port_rcr_size = %d rx_rbr_cntl_alloc_size = %d",
1526 	    hxge_port_rbr_size, hxge_port_rbr_spare_size,
1527 	    hxge_port_rcr_size, rx_rbr_cntl_alloc_size));
1528
1529 hxgep->hxge_port_rbr_size = hxge_port_rbr_size;
1530 hxgep->hxge_port_rcr_size = hxge_port_rcr_size;
1531
1532 /*
1533 * Allocate memory for receive buffers and descriptor rings. Replace
1534 * allocation functions with interface functions provided by the
1535 * partition manager when it is available.
1536 */
1537 /*
1538 * Allocate memory for the receive buffer blocks.
1539 */
1540 for (i = 0; i < ndmas; i++) {
1541 HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
1542 " hxge_alloc_rx_mem_pool to alloc mem: "
1543 " dma %d dma_buf_p %llx &dma_buf_p %llx",
1544 i, dma_buf_p[i], &dma_buf_p[i]));
1545
1546 num_chunks[i] = 0;
1547
1548 status = hxge_alloc_rx_buf_dma(hxgep, st_rdc, &dma_buf_p[i],
1549 rx_buf_alloc_size, hxgep->rx_default_block_size,
1550 &num_chunks[i]);
1551 if (status != HXGE_OK) {
1552 break;
1553 }
1554
1555 st_rdc++;
1556 HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
1557 " hxge_alloc_rx_mem_pool DONE alloc mem: "
1558 "dma %d dma_buf_p %llx &dma_buf_p %llx", i,
1559 dma_buf_p[i], &dma_buf_p[i]));
1560 }
1561
1562 if (i < ndmas) {
1563 goto hxge_alloc_rx_mem_fail1;
1564 }
1565
1566 /*
1567 * Allocate memory for descriptor rings and mailbox.
1568 */
1569 st_rdc = p_cfgp->start_rdc;
1570 for (j = 0; j < ndmas; j++) {
1571 if ((status = hxge_alloc_rx_cntl_dma(hxgep, st_rdc,
1572 &dma_rbr_cntl_p[j], &hxge_rx_rbr_desc_dma_attr,
1573 rx_rbr_cntl_alloc_size)) != HXGE_OK) {
1574 break;
1575 }
1576
1577 if ((status = hxge_alloc_rx_cntl_dma(hxgep, st_rdc,
1578 &dma_rcr_cntl_p[j], &hxge_rx_rcr_desc_dma_attr,
1579 rx_rcr_cntl_alloc_size)) != HXGE_OK) {
1580 break;
1581 }
1582
1583 if ((status = hxge_alloc_rx_cntl_dma(hxgep, st_rdc,
1584 &dma_mbox_cntl_p[j], &hxge_rx_mbox_dma_attr,
1585 rx_mbox_cntl_alloc_size)) != HXGE_OK) {
1586 break;
1587 }
1588 st_rdc++;
1589 }
1590
1591 if (j < ndmas) {
1592 goto hxge_alloc_rx_mem_fail2;
1593 }
1594
1595 dma_poolp->ndmas = ndmas;
1596 dma_poolp->num_chunks = num_chunks;
1597 dma_poolp->buf_allocated = B_TRUE;
1598 hxgep->rx_buf_pool_p = dma_poolp;
1599 dma_poolp->dma_buf_pool_p = dma_buf_p;
1600
1601 dma_rbr_cntl_poolp->ndmas = ndmas;
1602 dma_rbr_cntl_poolp->buf_allocated = B_TRUE;
1603 hxgep->rx_rbr_cntl_pool_p = dma_rbr_cntl_poolp;
1604 dma_rbr_cntl_poolp->dma_buf_pool_p = dma_rbr_cntl_p;
1605
1606 dma_rcr_cntl_poolp->ndmas = ndmas;
1607 dma_rcr_cntl_poolp->buf_allocated = B_TRUE;
1608 hxgep->rx_rcr_cntl_pool_p = dma_rcr_cntl_poolp;
1609 dma_rcr_cntl_poolp->dma_buf_pool_p = dma_rcr_cntl_p;
1610
1611 dma_mbox_cntl_poolp->ndmas = ndmas;
1612 dma_mbox_cntl_poolp->buf_allocated = B_TRUE;
1613 hxgep->rx_mbox_cntl_pool_p = dma_mbox_cntl_poolp;
1614 dma_mbox_cntl_poolp->dma_buf_pool_p = dma_mbox_cntl_p;
1615
1616 goto hxge_alloc_rx_mem_pool_exit;
1617
1618 hxge_alloc_rx_mem_fail2:
1619 /* Free control buffers */
1620 HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1621 "==> hxge_alloc_rx_mem_pool: freeing control bufs (%d)", j));
1622 for (; j >= 0; j--) {
1623 hxge_free_rx_cntl_dma(hxgep,
1624 (p_hxge_dma_common_t)dma_rbr_cntl_p[j]);
1625 hxge_free_rx_cntl_dma(hxgep,
1626 (p_hxge_dma_common_t)dma_rcr_cntl_p[j]);
1627 hxge_free_rx_cntl_dma(hxgep,
1628 (p_hxge_dma_common_t)dma_mbox_cntl_p[j]);
1629 HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1630 "==> hxge_alloc_rx_mem_pool: control bufs freed (%d)", j));
1631 }
1632 HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1633 "==> hxge_alloc_rx_mem_pool: control bufs freed (%d)", j));
1634
1635 hxge_alloc_rx_mem_fail1:
1636 /* Free data buffers */
1637 i--;
1638 HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1639 "==> hxge_alloc_rx_mem_pool: freeing data bufs (%d)", i));
1640 for (; i >= 0; i--) {
1641 hxge_free_rx_buf_dma(hxgep, (p_hxge_dma_common_t)dma_buf_p[i],
1642 num_chunks[i]);
1643 }
1644 HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1645 "==> hxge_alloc_rx_mem_pool: data bufs freed (%d)", i));
1646
1647 KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas);
1648 KMEM_FREE(dma_poolp, sizeof (hxge_dma_pool_t));
1649 KMEM_FREE(dma_buf_p, ndmas * sizeof (p_hxge_dma_common_t));
1650 KMEM_FREE(dma_rbr_cntl_poolp, sizeof (hxge_dma_pool_t));
1651 KMEM_FREE(dma_rbr_cntl_p, ndmas * sizeof (p_hxge_dma_common_t));
1652 KMEM_FREE(dma_rcr_cntl_poolp, sizeof (hxge_dma_pool_t));
1653 KMEM_FREE(dma_rcr_cntl_p, ndmas * sizeof (p_hxge_dma_common_t));
1654 KMEM_FREE(dma_mbox_cntl_poolp, sizeof (hxge_dma_pool_t));
1655 KMEM_FREE(dma_mbox_cntl_p, ndmas * sizeof (p_hxge_dma_common_t));
1656
1657 hxge_alloc_rx_mem_pool_exit:
1658 HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1659 "<== hxge_alloc_rx_mem_pool:status 0x%08x", status));
1660
1661 return (status);
1662 }
1663
1664 static void
1665 hxge_free_rx_mem_pool(p_hxge_t hxgep)
1666 {
1667 uint32_t i, ndmas;
1668 p_hxge_dma_pool_t dma_poolp;
1669 p_hxge_dma_common_t *dma_buf_p;
1670 p_hxge_dma_pool_t dma_rbr_cntl_poolp;
1671 p_hxge_dma_common_t *dma_rbr_cntl_p;
1672 p_hxge_dma_pool_t dma_rcr_cntl_poolp;
1673 p_hxge_dma_common_t *dma_rcr_cntl_p;
1674 p_hxge_dma_pool_t dma_mbox_cntl_poolp;
1675 p_hxge_dma_common_t *dma_mbox_cntl_p;
1676 uint32_t *num_chunks;
1677
1678 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_free_rx_mem_pool"));
1679
1680 dma_poolp = hxgep->rx_buf_pool_p;
1681 if (dma_poolp == NULL || (!dma_poolp->buf_allocated)) {
1682 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "<== hxge_free_rx_mem_pool "
1683 "(null rx buf pool or buf not allocated"));
1684 return;
1685 }
1686
1687 dma_rbr_cntl_poolp = hxgep->rx_rbr_cntl_pool_p;
1688 if (dma_rbr_cntl_poolp == NULL ||
1689 (!dma_rbr_cntl_poolp->buf_allocated)) {
1690 HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
1691 "<== hxge_free_rx_mem_pool "
1692 "(null rbr cntl buf pool or rbr cntl buf not allocated"));
1693 return;
1694 }
1695
1696 dma_rcr_cntl_poolp = hxgep->rx_rcr_cntl_pool_p;
1697 if (dma_rcr_cntl_poolp == NULL ||
1698 (!dma_rcr_cntl_poolp->buf_allocated)) {
1699 HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
1700 "<== hxge_free_rx_mem_pool "
1701 "(null rcr cntl buf pool or rcr cntl buf not allocated"));
1702 return;
1703 }
1704
1705 dma_mbox_cntl_poolp = hxgep->rx_mbox_cntl_pool_p;
1706 if (dma_mbox_cntl_poolp == NULL ||
1707 (!dma_mbox_cntl_poolp->buf_allocated)) {
1708 HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
1709 "<== hxge_free_rx_mem_pool "
1710 "(null mbox cntl buf pool or mbox cntl buf not allocated"));
1711 return;
1712 }
1713
1714 dma_buf_p = dma_poolp->dma_buf_pool_p;
1715 num_chunks = dma_poolp->num_chunks;
1716
1717 dma_rbr_cntl_p = dma_rbr_cntl_poolp->dma_buf_pool_p;
1718 dma_rcr_cntl_p = dma_rcr_cntl_poolp->dma_buf_pool_p;
1719 dma_mbox_cntl_p = dma_mbox_cntl_poolp->dma_buf_pool_p;
1720 ndmas = dma_rbr_cntl_poolp->ndmas;
1721
1722 for (i = 0; i < ndmas; i++) {
1723 hxge_free_rx_buf_dma(hxgep, dma_buf_p[i], num_chunks[i]);
1724 }
1725
1726 for (i = 0; i < ndmas; i++) {
1727 hxge_free_rx_cntl_dma(hxgep, dma_rbr_cntl_p[i]);
1728 hxge_free_rx_cntl_dma(hxgep, dma_rcr_cntl_p[i]);
1729 hxge_free_rx_cntl_dma(hxgep, dma_mbox_cntl_p[i]);
1730 }
1731
1732 for (i = 0; i < ndmas; i++) {
1733 KMEM_FREE(dma_buf_p[i],
1734 sizeof (hxge_dma_common_t) * HXGE_DMA_BLOCK);
1735 KMEM_FREE(dma_rbr_cntl_p[i], sizeof (hxge_dma_common_t));
1736 KMEM_FREE(dma_rcr_cntl_p[i], sizeof (hxge_dma_common_t));
1737 KMEM_FREE(dma_mbox_cntl_p[i], sizeof (hxge_dma_common_t));
1738 }
1739
1740 KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas);
1741 KMEM_FREE(dma_rbr_cntl_p, ndmas * sizeof (p_hxge_dma_common_t));
1742 KMEM_FREE(dma_rbr_cntl_poolp, sizeof (hxge_dma_pool_t));
1743 KMEM_FREE(dma_rcr_cntl_p, ndmas * sizeof (p_hxge_dma_common_t));
1744 KMEM_FREE(dma_rcr_cntl_poolp, sizeof (hxge_dma_pool_t));
1745 KMEM_FREE(dma_mbox_cntl_p, ndmas * sizeof (p_hxge_dma_common_t));
1746 KMEM_FREE(dma_mbox_cntl_poolp, sizeof (hxge_dma_pool_t));
1747 KMEM_FREE(dma_buf_p, ndmas * sizeof (p_hxge_dma_common_t));
1748 KMEM_FREE(dma_poolp, sizeof (hxge_dma_pool_t));
1749
1750 hxgep->rx_buf_pool_p = NULL;
1751 hxgep->rx_rbr_cntl_pool_p = NULL;
1752 hxgep->rx_rcr_cntl_pool_p = NULL;
1753 hxgep->rx_mbox_cntl_pool_p = NULL;
1754
1755 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "<== hxge_free_rx_mem_pool"));
1756 }
1757
1758 static hxge_status_t
1759 hxge_alloc_rx_buf_dma(p_hxge_t hxgep, uint16_t dma_channel,
1760 p_hxge_dma_common_t *dmap,
1761 size_t alloc_size, size_t block_size, uint32_t *num_chunks)
1762 {
1763 p_hxge_dma_common_t rx_dmap;
1764 hxge_status_t status = HXGE_OK;
1765 size_t total_alloc_size;
1766 size_t allocated = 0;
1767 int i, size_index, array_size;
1768
1769 HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_alloc_rx_buf_dma"));
1770
1771 rx_dmap = (p_hxge_dma_common_t)
1772 KMEM_ZALLOC(sizeof (hxge_dma_common_t) * HXGE_DMA_BLOCK, KM_SLEEP);
1773
1774 HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
1775 " alloc_rx_buf_dma rdc %d asize %x bsize %x bbuf %llx ",
1776 dma_channel, alloc_size, block_size, dmap));
1777
1778 total_alloc_size = alloc_size;
1779
1780 i = 0;
1781 size_index = 0;
1782 array_size = sizeof (alloc_sizes) / sizeof (size_t);
1783 while ((size_index < array_size) &&
1784 (alloc_sizes[size_index] < alloc_size))
1785 size_index++;
1786 if (size_index >= array_size) {
1787 size_index = array_size - 1;
1788 }
1789
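	/*
	 * Allocate the buffer area in chunks, starting with the smallest
	 * alloc_sizes[] entry that covers the request (or the largest entry
	 * if none does); when an allocation fails, fall back to
	 * progressively smaller chunk sizes until the total is satisfied or
	 * HXGE_DMA_BLOCK chunks have been used.
	 */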
1790 while ((allocated < total_alloc_size) &&
1791 (size_index >= 0) && (i < HXGE_DMA_BLOCK)) {
1792 rx_dmap[i].dma_chunk_index = i;
1793 rx_dmap[i].block_size = block_size;
1794 rx_dmap[i].alength = alloc_sizes[size_index];
1795 rx_dmap[i].orig_alength = rx_dmap[i].alength;
1796 rx_dmap[i].nblocks = alloc_sizes[size_index] / block_size;
1797 rx_dmap[i].dma_channel = dma_channel;
1798 rx_dmap[i].contig_alloc_type = B_FALSE;
1799
1800 HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
1801 "alloc_rx_buf_dma rdc %d chunk %d bufp %llx size %x "
1802 "i %d nblocks %d alength %d",
1803 dma_channel, i, &rx_dmap[i], block_size,
1804 i, rx_dmap[i].nblocks, rx_dmap[i].alength));
1805 status = hxge_dma_mem_alloc(hxgep, hxge_force_dma,
1806 &hxge_rx_dma_attr, rx_dmap[i].alength,
1807 &hxge_dev_buf_dma_acc_attr,
1808 DDI_DMA_READ | DDI_DMA_STREAMING,
1809 (p_hxge_dma_common_t)(&rx_dmap[i]));
1810 if (status != HXGE_OK) {
1811 HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1812 " hxge_alloc_rx_buf_dma: Alloc Failed: "
1813 " for size: %d", alloc_sizes[size_index]));
1814 size_index--;
1815 } else {
1816 HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1817 " alloc_rx_buf_dma allocated rdc %d "
1818 "chunk %d size %x dvma %x bufp %llx ",
1819 dma_channel, i, rx_dmap[i].alength,
1820 rx_dmap[i].ioaddr_pp, &rx_dmap[i]));
1821 i++;
1822 allocated += alloc_sizes[size_index];
1823 }
1824 }
1825
1826 if (allocated < total_alloc_size) {
1827 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
1828 " hxge_alloc_rx_buf_dma failed due to"
1829 " allocated(%d) < required(%d)",
1830 allocated, total_alloc_size));
1831 goto hxge_alloc_rx_mem_fail1;
1832 }
1833
1834 HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1835 " alloc_rx_buf_dma rdc %d allocated %d chunks", dma_channel, i));
1836
1837 *num_chunks = i;
1838 *dmap = rx_dmap;
1839
1840 goto hxge_alloc_rx_mem_exit;
1841
1842 hxge_alloc_rx_mem_fail1:
1843 KMEM_FREE(rx_dmap, sizeof (hxge_dma_common_t) * HXGE_DMA_BLOCK);
1844
1845 hxge_alloc_rx_mem_exit:
1846 HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1847 "<== hxge_alloc_rx_buf_dma status 0x%08x", status));
1848
1849 return (status);
1850 }
1851
1852 /*ARGSUSED*/
1853 static void
1854 hxge_free_rx_buf_dma(p_hxge_t hxgep, p_hxge_dma_common_t dmap,
1855 uint32_t num_chunks)
1856 {
1857 int i;
1858
1859 HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
1860 "==> hxge_free_rx_buf_dma: # of chunks %d", num_chunks));
1861
1862 for (i = 0; i < num_chunks; i++) {
1863 HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
1864 "==> hxge_free_rx_buf_dma: chunk %d dmap 0x%llx", i, dmap));
1865 hxge_dma_mem_free(dmap++);
1866 }
1867
1868 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "<== hxge_free_rx_buf_dma"));
1869 }
1870
1871 /*ARGSUSED*/
1872 static hxge_status_t
1873 hxge_alloc_rx_cntl_dma(p_hxge_t hxgep, uint16_t dma_channel,
1874 p_hxge_dma_common_t *dmap, struct ddi_dma_attr *attr, size_t size)
1875 {
1876 p_hxge_dma_common_t rx_dmap;
1877 hxge_status_t status = HXGE_OK;
1878
1879 HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_alloc_rx_cntl_dma"));
1880
1881 rx_dmap = (p_hxge_dma_common_t)
1882 KMEM_ZALLOC(sizeof (hxge_dma_common_t), KM_SLEEP);
1883
1884 rx_dmap->contig_alloc_type = B_FALSE;
1885
1886 status = hxge_dma_mem_alloc(hxgep, hxge_force_dma,
1887 attr, size, &hxge_dev_desc_dma_acc_attr,
1888 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, rx_dmap);
1889 if (status != HXGE_OK) {
1890 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
1891 " hxge_alloc_rx_cntl_dma: Alloc Failed: "
1892 " for size: %d", size));
1893 goto hxge_alloc_rx_cntl_dma_fail1;
1894 }
1895
1896 *dmap = rx_dmap;
1897
1898 goto hxge_alloc_rx_cntl_dma_exit;
1899
1900 hxge_alloc_rx_cntl_dma_fail1:
1901 KMEM_FREE(rx_dmap, sizeof (hxge_dma_common_t));
1902
1903 hxge_alloc_rx_cntl_dma_exit:
1904 HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1905 "<== hxge_alloc_rx_cntl_dma status 0x%08x", status));
1906
1907 return (status);
1908 }
1909
1910 /*ARGSUSED*/
1911 static void
1912 hxge_free_rx_cntl_dma(p_hxge_t hxgep, p_hxge_dma_common_t dmap)
1913 {
1914 HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_free_rx_cntl_dma"));
1915
1916 hxge_dma_mem_free(dmap);
1917
1918 HXGE_DEBUG_MSG((hxgep, DMA_CTL, "<== hxge_free_rx_cntl_dma"));
1919 }
1920
1921 static hxge_status_t
1922 hxge_alloc_tx_mem_pool(p_hxge_t hxgep)
1923 {
1924 hxge_status_t status = HXGE_OK;
1925 int i, j;
1926 uint32_t ndmas, st_tdc;
1927 p_hxge_dma_pt_cfg_t p_all_cfgp;
1928 p_hxge_hw_pt_cfg_t p_cfgp;
1929 p_hxge_dma_pool_t dma_poolp;
1930 p_hxge_dma_common_t *dma_buf_p;
1931 p_hxge_dma_pool_t dma_cntl_poolp;
1932 p_hxge_dma_common_t *dma_cntl_p;
1933 size_t tx_buf_alloc_size;
1934 size_t tx_cntl_alloc_size;
1935 uint32_t *num_chunks; /* per dma */
1936
1937 HXGE_DEBUG_MSG((hxgep, MEM_CTL, "==> hxge_alloc_tx_mem_pool"));
1938
1939 p_all_cfgp = (p_hxge_dma_pt_cfg_t)&hxgep->pt_config;
1940 p_cfgp = (p_hxge_hw_pt_cfg_t)&p_all_cfgp->hw_config;
1941 st_tdc = p_cfgp->start_tdc;
1942 ndmas = p_cfgp->max_tdcs;
1943
1944 HXGE_DEBUG_MSG((hxgep, MEM_CTL, "==> hxge_alloc_tx_mem_pool: "
1945 "p_cfgp 0x%016llx start_tdc %d ndmas %d hxgep->max_tdcs %d",
1946 p_cfgp, p_cfgp->start_tdc, p_cfgp->max_tdcs, hxgep->max_tdcs));
1947 /*
1948 * Allocate memory for each transmit DMA channel.
1949 */
1950 dma_poolp = (p_hxge_dma_pool_t)KMEM_ZALLOC(sizeof (hxge_dma_pool_t),
1951 KM_SLEEP);
1952 dma_buf_p = (p_hxge_dma_common_t *)KMEM_ZALLOC(
1953 sizeof (p_hxge_dma_common_t) * ndmas, KM_SLEEP);
1954
1955 dma_cntl_poolp = (p_hxge_dma_pool_t)
1956 KMEM_ZALLOC(sizeof (hxge_dma_pool_t), KM_SLEEP);
1957 dma_cntl_p = (p_hxge_dma_common_t *)KMEM_ZALLOC(
1958 sizeof (p_hxge_dma_common_t) * ndmas, KM_SLEEP);
1959
1960 hxgep->hxge_port_tx_ring_size = hxge_tx_ring_size;
1961
1962 /*
1963 * Assume that each DMA channel will be configured with default
1964 	 * transmit buffer size for copying transmit data. (For packet payload
1965 * over this limit, packets will not be copied.)
1966 */
1967 tx_buf_alloc_size = (hxge_bcopy_thresh * hxge_tx_ring_size);
1968
1969 /*
1970 * Addresses of transmit descriptor ring and the mailbox must be all
1971 * cache-aligned (64 bytes).
1972 */
1973 tx_cntl_alloc_size = hxge_tx_ring_size;
1974 tx_cntl_alloc_size *= (sizeof (tx_desc_t));
1975 tx_cntl_alloc_size += sizeof (txdma_mailbox_t);
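	/*
	 * That is, one tx_desc_t per ring entry plus one txdma_mailbox_t.
	 * hxge_alloc_tx_cntl_dma() later allocates and binds this as a
	 * single DMA-consistent region per channel.
	 */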
1976
1977 num_chunks = (uint32_t *)KMEM_ZALLOC(sizeof (uint32_t) * ndmas,
1978 KM_SLEEP);
1979
1980 /*
1981 * Allocate memory for transmit buffers and descriptor rings. Replace
1982 * allocation functions with interface functions provided by the
1983 * partition manager when it is available.
1984 *
1985 * Allocate memory for the transmit buffer pool.
1986 */
1987 for (i = 0; i < ndmas; i++) {
1988 num_chunks[i] = 0;
1989 status = hxge_alloc_tx_buf_dma(hxgep, st_tdc, &dma_buf_p[i],
1990 tx_buf_alloc_size, hxge_bcopy_thresh, &num_chunks[i]);
1991 if (status != HXGE_OK) {
1992 break;
1993 }
1994 st_tdc++;
1995 }
1996
1997 if (i < ndmas) {
1998 goto hxge_alloc_tx_mem_pool_fail1;
1999 }
2000
2001 st_tdc = p_cfgp->start_tdc;
2002
2003 /*
2004 * Allocate memory for descriptor rings and mailbox.
2005 */
2006 for (j = 0; j < ndmas; j++) {
2007 status = hxge_alloc_tx_cntl_dma(hxgep, st_tdc, &dma_cntl_p[j],
2008 tx_cntl_alloc_size);
2009 if (status != HXGE_OK) {
2010 break;
2011 }
2012 st_tdc++;
2013 }
2014
2015 if (j < ndmas) {
2016 goto hxge_alloc_tx_mem_pool_fail2;
2017 }
2018
2019 dma_poolp->ndmas = ndmas;
2020 dma_poolp->num_chunks = num_chunks;
2021 dma_poolp->buf_allocated = B_TRUE;
2022 dma_poolp->dma_buf_pool_p = dma_buf_p;
2023 hxgep->tx_buf_pool_p = dma_poolp;
2024
2025 dma_cntl_poolp->ndmas = ndmas;
2026 dma_cntl_poolp->buf_allocated = B_TRUE;
2027 dma_cntl_poolp->dma_buf_pool_p = dma_cntl_p;
2028 hxgep->tx_cntl_pool_p = dma_cntl_poolp;
2029
2030 HXGE_DEBUG_MSG((hxgep, MEM_CTL,
2031 "==> hxge_alloc_tx_mem_pool: start_tdc %d "
2032 "ndmas %d poolp->ndmas %d", st_tdc, ndmas, dma_poolp->ndmas));
2033
2034 goto hxge_alloc_tx_mem_pool_exit;
2035
2036 hxge_alloc_tx_mem_pool_fail2:
2037 /* Free control buffers */
2038 j--;
2039 for (; j >= 0; j--) {
2040 hxge_free_tx_cntl_dma(hxgep,
2041 (p_hxge_dma_common_t)dma_cntl_p[j]);
2042 }
2043
2044 hxge_alloc_tx_mem_pool_fail1:
2045 /* Free data buffers */
2046 i--;
2047 for (; i >= 0; i--) {
2048 hxge_free_tx_buf_dma(hxgep, (p_hxge_dma_common_t)dma_buf_p[i],
2049 num_chunks[i]);
2050 }
2051
2052 KMEM_FREE(dma_poolp, sizeof (hxge_dma_pool_t));
2053 KMEM_FREE(dma_buf_p, ndmas * sizeof (p_hxge_dma_common_t));
2054 KMEM_FREE(dma_cntl_poolp, sizeof (hxge_dma_pool_t));
2055 KMEM_FREE(dma_cntl_p, ndmas * sizeof (p_hxge_dma_common_t));
2056 KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas);
2057
2058 hxge_alloc_tx_mem_pool_exit:
2059 HXGE_DEBUG_MSG((hxgep, MEM_CTL,
2060 "<== hxge_alloc_tx_mem_pool:status 0x%08x", status));
2061
2062 return (status);
2063 }
2064
2065 static hxge_status_t
2066 hxge_alloc_tx_buf_dma(p_hxge_t hxgep, uint16_t dma_channel,
2067 p_hxge_dma_common_t *dmap, size_t alloc_size,
2068 size_t block_size, uint32_t *num_chunks)
2069 {
2070 p_hxge_dma_common_t tx_dmap;
2071 hxge_status_t status = HXGE_OK;
2072 size_t total_alloc_size;
2073 size_t allocated = 0;
2074 int i, size_index, array_size;
2075
2076 HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_alloc_tx_buf_dma"));
2077
2078 tx_dmap = (p_hxge_dma_common_t)
2079 KMEM_ZALLOC(sizeof (hxge_dma_common_t) * HXGE_DMA_BLOCK, KM_SLEEP);
2080
2081 total_alloc_size = alloc_size;
2082 i = 0;
2083 size_index = 0;
2084 array_size = sizeof (alloc_sizes) / sizeof (size_t);
2085 while ((size_index < array_size) &&
2086 (alloc_sizes[size_index] < alloc_size))
2087 size_index++;
2088 if (size_index >= array_size) {
2089 size_index = array_size - 1;
2090 }
2091
2092 while ((allocated < total_alloc_size) &&
2093 (size_index >= 0) && (i < HXGE_DMA_BLOCK)) {
2094 tx_dmap[i].dma_chunk_index = i;
2095 tx_dmap[i].block_size = block_size;
2096 tx_dmap[i].alength = alloc_sizes[size_index];
2097 tx_dmap[i].orig_alength = tx_dmap[i].alength;
2098 tx_dmap[i].nblocks = alloc_sizes[size_index] / block_size;
2099 tx_dmap[i].dma_channel = dma_channel;
2100 tx_dmap[i].contig_alloc_type = B_FALSE;
2101
2102 status = hxge_dma_mem_alloc(hxgep, hxge_force_dma,
2103 &hxge_tx_dma_attr, tx_dmap[i].alength,
2104 &hxge_dev_buf_dma_acc_attr,
2105 DDI_DMA_WRITE | DDI_DMA_STREAMING,
2106 (p_hxge_dma_common_t)(&tx_dmap[i]));
2107 if (status != HXGE_OK) {
2108 HXGE_DEBUG_MSG((hxgep, DMA_CTL,
2109 " hxge_alloc_tx_buf_dma: Alloc Failed: "
2110 " for size: %d", alloc_sizes[size_index]));
2111 size_index--;
2112 } else {
2113 i++;
2114 allocated += alloc_sizes[size_index];
2115 }
2116 }
2117
2118 if (allocated < total_alloc_size) {
2119 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2120 " hxge_alloc_tx_buf_dma: failed due to"
2121 " allocated(%d) < required(%d)",
2122 allocated, total_alloc_size));
2123 goto hxge_alloc_tx_mem_fail1;
2124 }
2125
2126 *num_chunks = i;
2127 *dmap = tx_dmap;
2128 HXGE_DEBUG_MSG((hxgep, DMA_CTL,
2129 "==> hxge_alloc_tx_buf_dma dmap 0x%016llx num chunks %d",
2130 *dmap, i));
2131 goto hxge_alloc_tx_mem_exit;
2132
2133 hxge_alloc_tx_mem_fail1:
2134 KMEM_FREE(tx_dmap, sizeof (hxge_dma_common_t) * HXGE_DMA_BLOCK);
2135
2136 hxge_alloc_tx_mem_exit:
2137 HXGE_DEBUG_MSG((hxgep, DMA_CTL,
2138 "<== hxge_alloc_tx_buf_dma status 0x%08x", status));
2139
2140 return (status);
2141 }
2142
2143 /*ARGSUSED*/
2144 static void
2145 hxge_free_tx_buf_dma(p_hxge_t hxgep, p_hxge_dma_common_t dmap,
2146 uint32_t num_chunks)
2147 {
2148 int i;
2149
2150 HXGE_DEBUG_MSG((hxgep, MEM_CTL, "==> hxge_free_tx_buf_dma"));
2151
2152 for (i = 0; i < num_chunks; i++) {
2153 hxge_dma_mem_free(dmap++);
2154 }
2155
2156 HXGE_DEBUG_MSG((hxgep, MEM_CTL, "<== hxge_free_tx_buf_dma"));
2157 }
2158
2159 /*ARGSUSED*/
2160 static hxge_status_t
2161 hxge_alloc_tx_cntl_dma(p_hxge_t hxgep, uint16_t dma_channel,
2162 p_hxge_dma_common_t *dmap, size_t size)
2163 {
2164 p_hxge_dma_common_t tx_dmap;
2165 hxge_status_t status = HXGE_OK;
2166
2167 HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_alloc_tx_cntl_dma"));
2168
2169 tx_dmap = (p_hxge_dma_common_t)KMEM_ZALLOC(sizeof (hxge_dma_common_t),
2170 KM_SLEEP);
2171
2172 tx_dmap->contig_alloc_type = B_FALSE;
2173
2174 status = hxge_dma_mem_alloc(hxgep, hxge_force_dma,
2175 &hxge_tx_desc_dma_attr, size, &hxge_dev_desc_dma_acc_attr,
2176 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, tx_dmap);
2177 if (status != HXGE_OK) {
2178 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2179 " hxge_alloc_tx_cntl_dma: Alloc Failed: "
2180 " for size: %d", size));
2181 goto hxge_alloc_tx_cntl_dma_fail1;
2182 }
2183
2184 *dmap = tx_dmap;
2185
2186 goto hxge_alloc_tx_cntl_dma_exit;
2187
2188 hxge_alloc_tx_cntl_dma_fail1:
2189 KMEM_FREE(tx_dmap, sizeof (hxge_dma_common_t));
2190
2191 hxge_alloc_tx_cntl_dma_exit:
2192 HXGE_DEBUG_MSG((hxgep, DMA_CTL,
2193 "<== hxge_alloc_tx_cntl_dma status 0x%08x", status));
2194
2195 return (status);
2196 }
2197
2198 /*ARGSUSED*/
2199 static void
2200 hxge_free_tx_cntl_dma(p_hxge_t hxgep, p_hxge_dma_common_t dmap)
2201 {
2202 HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_free_tx_cntl_dma"));
2203
2204 hxge_dma_mem_free(dmap);
2205
2206 HXGE_DEBUG_MSG((hxgep, DMA_CTL, "<== hxge_free_tx_cntl_dma"));
2207 }
2208
2209 static void
2210 hxge_free_tx_mem_pool(p_hxge_t hxgep)
2211 {
2212 uint32_t i, ndmas;
2213 p_hxge_dma_pool_t dma_poolp;
2214 p_hxge_dma_common_t *dma_buf_p;
2215 p_hxge_dma_pool_t dma_cntl_poolp;
2216 p_hxge_dma_common_t *dma_cntl_p;
2217 uint32_t *num_chunks;
2218
2219 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_free_tx_mem_pool"));
2220
2221 dma_poolp = hxgep->tx_buf_pool_p;
2222 if (dma_poolp == NULL || (!dma_poolp->buf_allocated)) {
2223 HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
2224 "<== hxge_free_tx_mem_pool "
2225 		    "(null tx buf pool or buf not allocated)"));
2226 return;
2227 }
2228
2229 dma_cntl_poolp = hxgep->tx_cntl_pool_p;
2230 if (dma_cntl_poolp == NULL || (!dma_cntl_poolp->buf_allocated)) {
2231 HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
2232 "<== hxge_free_tx_mem_pool "
2233 		    "(null tx cntl buf pool or cntl buf not allocated)"));
2234 return;
2235 }
2236
2237 dma_buf_p = dma_poolp->dma_buf_pool_p;
2238 num_chunks = dma_poolp->num_chunks;
2239
2240 dma_cntl_p = dma_cntl_poolp->dma_buf_pool_p;
2241 ndmas = dma_cntl_poolp->ndmas;
2242
2243 for (i = 0; i < ndmas; i++) {
2244 hxge_free_tx_buf_dma(hxgep, dma_buf_p[i], num_chunks[i]);
2245 }
2246
2247 for (i = 0; i < ndmas; i++) {
2248 hxge_free_tx_cntl_dma(hxgep, dma_cntl_p[i]);
2249 }
2250
2251 for (i = 0; i < ndmas; i++) {
2252 KMEM_FREE(dma_buf_p[i],
2253 sizeof (hxge_dma_common_t) * HXGE_DMA_BLOCK);
2254 KMEM_FREE(dma_cntl_p[i], sizeof (hxge_dma_common_t));
2255 }
2256
2257 KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas);
2258 KMEM_FREE(dma_cntl_p, ndmas * sizeof (p_hxge_dma_common_t));
2259 KMEM_FREE(dma_cntl_poolp, sizeof (hxge_dma_pool_t));
2260 KMEM_FREE(dma_buf_p, ndmas * sizeof (p_hxge_dma_common_t));
2261 KMEM_FREE(dma_poolp, sizeof (hxge_dma_pool_t));
2262
2263 hxgep->tx_buf_pool_p = NULL;
2264 hxgep->tx_cntl_pool_p = NULL;
2265
2266 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "<== hxge_free_tx_mem_pool"));
2267 }
2268
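/*
 * hxge_dma_mem_alloc() -- allocate, map and bind one DMA buffer.
 *
 * This follows the usual DDI sequence: ddi_dma_alloc_handle(),
 * ddi_dma_mem_alloc() and ddi_dma_addr_bind_handle().  The binding must
 * produce exactly one cookie; otherwise the allocation is unwound and an
 * error is returned to the caller.
 */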
2269 /*ARGSUSED*/
2270 static hxge_status_t
2271 hxge_dma_mem_alloc(p_hxge_t hxgep, dma_method_t method,
2272 struct ddi_dma_attr *dma_attrp,
2273 size_t length, ddi_device_acc_attr_t *acc_attr_p, uint_t xfer_flags,
2274 p_hxge_dma_common_t dma_p)
2275 {
2276 caddr_t kaddrp;
2277 int ddi_status = DDI_SUCCESS;
2278
2279 dma_p->dma_handle = NULL;
2280 dma_p->acc_handle = NULL;
2281 dma_p->kaddrp = NULL;
2282
2283 ddi_status = ddi_dma_alloc_handle(hxgep->dip, dma_attrp,
2284 DDI_DMA_DONTWAIT, NULL, &dma_p->dma_handle);
2285 if (ddi_status != DDI_SUCCESS) {
2286 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2287 "hxge_dma_mem_alloc:ddi_dma_alloc_handle failed."));
2288 return (HXGE_ERROR | HXGE_DDI_FAILED);
2289 }
2290
2291 ddi_status = ddi_dma_mem_alloc(dma_p->dma_handle, length, acc_attr_p,
2292 xfer_flags, DDI_DMA_DONTWAIT, 0, &kaddrp, &dma_p->alength,
2293 &dma_p->acc_handle);
2294 if (ddi_status != DDI_SUCCESS) {
2295 /* The caller will decide whether it is fatal */
2296 HXGE_DEBUG_MSG((hxgep, DMA_CTL,
2297 "hxge_dma_mem_alloc:ddi_dma_mem_alloc failed"));
2298 ddi_dma_free_handle(&dma_p->dma_handle);
2299 dma_p->dma_handle = NULL;
2300 return (HXGE_ERROR | HXGE_DDI_FAILED);
2301 }
2302
2303 if (dma_p->alength < length) {
2304 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2305 "hxge_dma_mem_alloc:ddi_dma_mem_alloc < length."));
2306 ddi_dma_mem_free(&dma_p->acc_handle);
2307 ddi_dma_free_handle(&dma_p->dma_handle);
2308 dma_p->acc_handle = NULL;
2309 dma_p->dma_handle = NULL;
2310 return (HXGE_ERROR);
2311 }
2312
2313 ddi_status = ddi_dma_addr_bind_handle(dma_p->dma_handle, NULL,
2314 kaddrp, dma_p->alength, xfer_flags, DDI_DMA_DONTWAIT, 0,
2315 &dma_p->dma_cookie, &dma_p->ncookies);
2316 if (ddi_status != DDI_DMA_MAPPED) {
2317 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2318 		    "hxge_dma_mem_alloc:ddi_dma_addr_bind failed "
2319 		    "(status 0x%x ncookies %d.)", ddi_status, dma_p->ncookies));
2320 if (dma_p->acc_handle) {
2321 ddi_dma_mem_free(&dma_p->acc_handle);
2322 dma_p->acc_handle = NULL;
2323 }
2324 ddi_dma_free_handle(&dma_p->dma_handle);
2325 dma_p->dma_handle = NULL;
2326 return (HXGE_ERROR | HXGE_DDI_FAILED);
2327 }
2328
2329 if (dma_p->ncookies != 1) {
2330 HXGE_DEBUG_MSG((hxgep, DMA_CTL,
2331 "hxge_dma_mem_alloc:ddi_dma_addr_bind > 1 cookie"
2332 		    "(status 0x%x ncookies %d.)", ddi_status, dma_p->ncookies));
2333 if (dma_p->acc_handle) {
2334 ddi_dma_mem_free(&dma_p->acc_handle);
2335 dma_p->acc_handle = NULL;
2336 }
2337 (void) ddi_dma_unbind_handle(dma_p->dma_handle);
2338 ddi_dma_free_handle(&dma_p->dma_handle);
2339 dma_p->dma_handle = NULL;
2340 return (HXGE_ERROR);
2341 }
2342
2343 dma_p->kaddrp = kaddrp;
2344 dma_p->ioaddr_pp = (unsigned char *) dma_p->dma_cookie.dmac_laddress;
2345
2346 HPI_DMA_ACC_HANDLE_SET(dma_p, dma_p->acc_handle);
2347
2348 HXGE_DEBUG_MSG((hxgep, DMA_CTL, "<== hxge_dma_mem_alloc: "
2349 "dma buffer allocated: dma_p $%p "
2350 	    "return dmac_laddress from cookie $%p dmac_size %d "
2351 "dma_p->ioaddr_p $%p "
2352 "dma_p->orig_ioaddr_p $%p "
2353 "orig_vatopa $%p "
2354 "alength %d (0x%x) "
2355 "kaddrp $%p "
2356 "length %d (0x%x)",
2357 dma_p,
2358 dma_p->dma_cookie.dmac_laddress,
2359 dma_p->dma_cookie.dmac_size,
2360 dma_p->ioaddr_pp,
2361 dma_p->orig_ioaddr_pp,
2362 dma_p->orig_vatopa,
2363 dma_p->alength, dma_p->alength,
2364 kaddrp,
2365 length, length));
2366
2367 return (HXGE_OK);
2368 }
2369
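/*
 * hxge_dma_mem_free() -- release a buffer set up by hxge_dma_mem_alloc():
 * unbind and free the DMA handle first, then free the mapped memory.
 */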
2370 static void
2371 hxge_dma_mem_free(p_hxge_dma_common_t dma_p)
2372 {
2373 if (dma_p == NULL)
2374 return;
2375
2376 if (dma_p->dma_handle != NULL) {
2377 if (dma_p->ncookies) {
2378 (void) ddi_dma_unbind_handle(dma_p->dma_handle);
2379 dma_p->ncookies = 0;
2380 }
2381 ddi_dma_free_handle(&dma_p->dma_handle);
2382 dma_p->dma_handle = NULL;
2383 }
2384
2385 if (dma_p->acc_handle != NULL) {
2386 ddi_dma_mem_free(&dma_p->acc_handle);
2387 dma_p->acc_handle = NULL;
2388 HPI_DMA_ACC_HANDLE_SET(dma_p, NULL);
2389 }
2390
2391 dma_p->kaddrp = NULL;
2392 dma_p->alength = 0;
2393 }
2394
2395 /*
2396 * hxge_m_start() -- start transmitting and receiving.
2397 *
2398 * This function is called by the MAC layer when the first
2399  * stream is opened, to prepare the hardware for sending
2400  * and receiving packets.
2401 */
2402 static int
2403 hxge_m_start(void *arg)
2404 {
2405 p_hxge_t hxgep = (p_hxge_t)arg;
2406
2407 HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "==> hxge_m_start"));
2408
2409 MUTEX_ENTER(hxgep->genlock);
2410
2411 if (hxge_init(hxgep) != DDI_SUCCESS) {
2412 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2413 "<== hxge_m_start: initialization failed"));
2414 MUTEX_EXIT(hxgep->genlock);
2415 return (EIO);
2416 }
2417
2418 if (hxgep->hxge_mac_state != HXGE_MAC_STARTED) {
2419 /*
2420 		 * Start a timer to check for system errors and tx hangs.
2421 */
2422 hxgep->hxge_timerid = hxge_start_timer(hxgep,
2423 hxge_check_hw_state, HXGE_CHECK_TIMER);
2424
2425 hxgep->hxge_mac_state = HXGE_MAC_STARTED;
2426
2427 hxgep->timeout.link_status = 0;
2428 hxgep->timeout.report_link_status = B_TRUE;
2429 hxgep->timeout.ticks = drv_usectohz(2 * 1000000);
2430
2431 /* Start the link status timer to check the link status */
2432 MUTEX_ENTER(&hxgep->timeout.lock);
2433 hxgep->timeout.id = timeout(hxge_link_poll, (void *)hxgep,
2434 hxgep->timeout.ticks);
2435 MUTEX_EXIT(&hxgep->timeout.lock);
2436 }
2437
2438 MUTEX_EXIT(hxgep->genlock);
2439
2440 HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "<== hxge_m_start"));
2441
2442 return (0);
2443 }
2444
2445 /*
2446 * hxge_m_stop(): stop transmitting and receiving.
2447 */
2448 static void
2449 hxge_m_stop(void *arg)
2450 {
2451 p_hxge_t hxgep = (p_hxge_t)arg;
2452
2453 HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "==> hxge_m_stop"));
2454
2455 if (hxgep->hxge_timerid) {
2456 hxge_stop_timer(hxgep, hxgep->hxge_timerid);
2457 hxgep->hxge_timerid = 0;
2458 }
2459
2460 /* Stop the link status timer before unregistering */
2461 MUTEX_ENTER(&hxgep->timeout.lock);
2462 if (hxgep->timeout.id) {
2463 (void) untimeout(hxgep->timeout.id);
2464 hxgep->timeout.id = 0;
2465 }
2466 hxge_link_update(hxgep, LINK_STATE_DOWN);
2467 MUTEX_EXIT(&hxgep->timeout.lock);
2468
2469 MUTEX_ENTER(hxgep->genlock);
2470
2471 hxge_uninit(hxgep);
2472
2473 hxgep->hxge_mac_state = HXGE_MAC_STOPPED;
2474
2475 MUTEX_EXIT(hxgep->genlock);
2476
2477 HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "<== hxge_m_stop"));
2478 }
2479
2480 static int
2481 hxge_m_multicst(void *arg, boolean_t add, const uint8_t *mca)
2482 {
2483 p_hxge_t hxgep = (p_hxge_t)arg;
2484 struct ether_addr addrp;
2485
2486 HXGE_DEBUG_MSG((hxgep, MAC_CTL, "==> hxge_m_multicst: add %d", add));
2487
2488 bcopy(mca, (uint8_t *)&addrp, ETHERADDRL);
2489
2490 if (add) {
2491 if (hxge_add_mcast_addr(hxgep, &addrp)) {
2492 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2493 "<== hxge_m_multicst: add multicast failed"));
2494 return (EINVAL);
2495 }
2496 } else {
2497 if (hxge_del_mcast_addr(hxgep, &addrp)) {
2498 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2499 "<== hxge_m_multicst: del multicast failed"));
2500 return (EINVAL);
2501 }
2502 }
2503
2504 HXGE_DEBUG_MSG((hxgep, MAC_CTL, "<== hxge_m_multicst"));
2505
2506 return (0);
2507 }
2508
2509 static int
2510 hxge_m_promisc(void *arg, boolean_t on)
2511 {
2512 p_hxge_t hxgep = (p_hxge_t)arg;
2513
2514 HXGE_DEBUG_MSG((hxgep, MAC_CTL, "==> hxge_m_promisc: on %d", on));
2515
2516 if (hxge_set_promisc(hxgep, on)) {
2517 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2518 "<== hxge_m_promisc: set promisc failed"));
2519 return (EINVAL);
2520 }
2521
2522 HXGE_DEBUG_MSG((hxgep, MAC_CTL, "<== hxge_m_promisc: on %d", on));
2523
2524 return (0);
2525 }
2526
2527 static void
2528 hxge_m_ioctl(void *arg, queue_t *wq, mblk_t *mp)
2529 {
2530 p_hxge_t hxgep = (p_hxge_t)arg;
2531 struct iocblk *iocp = (struct iocblk *)mp->b_rptr;
2532 boolean_t need_privilege;
2533 int err;
2534 int cmd;
2535
2536 HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "==> hxge_m_ioctl"));
2537
2538 iocp = (struct iocblk *)mp->b_rptr;
2539 iocp->ioc_error = 0;
2540 need_privilege = B_TRUE;
2541 cmd = iocp->ioc_cmd;
2542
2543 HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "==> hxge_m_ioctl: cmd 0x%08x", cmd));
2544 switch (cmd) {
2545 default:
2546 miocnak(wq, mp, 0, EINVAL);
2547 HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "<== hxge_m_ioctl: invalid"));
2548 return;
2549
2550 case LB_GET_INFO_SIZE:
2551 case LB_GET_INFO:
2552 case LB_GET_MODE:
2553 need_privilege = B_FALSE;
2554 break;
2555
2556 case LB_SET_MODE:
2557 break;
2558
2559 case ND_GET:
2560 need_privilege = B_FALSE;
2561 break;
2562 case ND_SET:
2563 break;
2564
2565 case HXGE_GET_TX_RING_SZ:
2566 case HXGE_GET_TX_DESC:
2567 case HXGE_TX_SIDE_RESET:
2568 case HXGE_RX_SIDE_RESET:
2569 case HXGE_GLOBAL_RESET:
2570 case HXGE_RESET_MAC:
2571 case HXGE_PUT_TCAM:
2572 case HXGE_GET_TCAM:
2573 case HXGE_RTRACE:
2574
2575 need_privilege = B_FALSE;
2576 break;
2577 }
2578
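	/*
	 * Commands not explicitly exempted above require network
	 * configuration privilege, checked via secpolicy_net_config().
	 */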
2579 if (need_privilege) {
2580 err = secpolicy_net_config(iocp->ioc_cr, B_FALSE);
2581 if (err != 0) {
2582 miocnak(wq, mp, 0, err);
2583 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2584 "<== hxge_m_ioctl: no priv"));
2585 return;
2586 }
2587 }
2588
2589 switch (cmd) {
2590 case ND_GET:
2591 HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "ND_GET command"));
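		/* FALLTHROUGH */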
2592 case ND_SET:
2593 HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "ND_SET command"));
2594 hxge_param_ioctl(hxgep, wq, mp, iocp);
2595 break;
2596
2597 case LB_GET_MODE:
2598 case LB_SET_MODE:
2599 case LB_GET_INFO_SIZE:
2600 case LB_GET_INFO:
2601 hxge_loopback_ioctl(hxgep, wq, mp, iocp);
2602 break;
2603
2604 case HXGE_PUT_TCAM:
2605 case HXGE_GET_TCAM:
2606 case HXGE_GET_TX_RING_SZ:
2607 case HXGE_GET_TX_DESC:
2608 case HXGE_TX_SIDE_RESET:
2609 case HXGE_RX_SIDE_RESET:
2610 case HXGE_GLOBAL_RESET:
2611 case HXGE_RESET_MAC:
2612 HXGE_DEBUG_MSG((hxgep, NEMO_CTL,
2613 "==> hxge_m_ioctl: cmd 0x%x", cmd));
2614 hxge_hw_ioctl(hxgep, wq, mp, iocp);
2615 break;
2616 }
2617
2618 HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "<== hxge_m_ioctl"));
2619 }
2620
2621 /*ARGSUSED*/
2622 static int
2623 hxge_tx_ring_start(mac_ring_driver_t rdriver, uint64_t mr_gen_num)
2624 {
2625 p_hxge_ring_handle_t rhp = (p_hxge_ring_handle_t)rdriver;
2626 p_hxge_t hxgep;
2627 p_tx_ring_t ring;
2628
2629 ASSERT(rhp != NULL);
2630 ASSERT((rhp->index >= 0) && (rhp->index < HXGE_MAX_TDCS));
2631
2632 hxgep = rhp->hxgep;
2633
2634 /*
2635 * Get the ring pointer.
2636 */
2637 ring = hxgep->tx_rings->rings[rhp->index];
2638
2639 /*
2640 * Fill in the handle for the transmit.
2641 */
2642 MUTEX_ENTER(&ring->lock);
2643 rhp->started = B_TRUE;
2644 ring->ring_handle = rhp->ring_handle;
2645 MUTEX_EXIT(&ring->lock);
2646
2647 return (0);
2648 }
2649
2650 static void
2651 hxge_tx_ring_stop(mac_ring_driver_t rdriver)
2652 {
2653 p_hxge_ring_handle_t rhp = (p_hxge_ring_handle_t)rdriver;
2654 p_hxge_t hxgep;
2655 p_tx_ring_t ring;
2656
2657 ASSERT(rhp != NULL);
2658 ASSERT((rhp->index >= 0) && (rhp->index < HXGE_MAX_TDCS));
2659
2660 hxgep = rhp->hxgep;
2661 ring = hxgep->tx_rings->rings[rhp->index];
2662
2663 MUTEX_ENTER(&ring->lock);
2664 ring->ring_handle = (mac_ring_handle_t)NULL;
2665 rhp->started = B_FALSE;
2666 MUTEX_EXIT(&ring->lock);
2667 }
2668
2669 static int
2670 hxge_rx_ring_start(mac_ring_driver_t rdriver, uint64_t mr_gen_num)
2671 {
2672 p_hxge_ring_handle_t rhp = (p_hxge_ring_handle_t)rdriver;
2673 p_hxge_t hxgep;
2674 p_rx_rcr_ring_t ring;
2675 int i;
2676
2677 ASSERT(rhp != NULL);
2678 	ASSERT((rhp->index >= 0) && (rhp->index < HXGE_MAX_RDCS));
2679
2680 hxgep = rhp->hxgep;
2681
2682 /*
2683 * Get pointer to ring.
2684 */
2685 ring = hxgep->rx_rcr_rings->rcr_rings[rhp->index];
2686
2687 MUTEX_ENTER(&ring->lock);
2688
2689 if (rhp->started) {
2690 MUTEX_EXIT(&ring->lock);
2691 return (0);
2692 }
2693
2694 /*
2695 * Set the ldvp and ldgp pointers to enable/disable
2696 * polling.
2697 */
2698 for (i = 0; i < hxgep->ldgvp->maxldvs; i++) {
2699 if ((hxgep->ldgvp->ldvp[i].is_rxdma == 1) &&
2700 (hxgep->ldgvp->ldvp[i].channel == rhp->index)) {
2701 ring->ldvp = &hxgep->ldgvp->ldvp[i];
2702 ring->ldgp = hxgep->ldgvp->ldvp[i].ldgp;
2703 break;
2704 }
2705 }
2706
2707 rhp->started = B_TRUE;
2708 ring->rcr_mac_handle = rhp->ring_handle;
2709 ring->rcr_gen_num = mr_gen_num;
2710 MUTEX_EXIT(&ring->lock);
2711
2712 return (0);
2713 }
2714
2715 static void
2716 hxge_rx_ring_stop(mac_ring_driver_t rdriver)
2717 {
2718 p_hxge_ring_handle_t rhp = (p_hxge_ring_handle_t)rdriver;
2719 p_hxge_t hxgep;
2720 p_rx_rcr_ring_t ring;
2721
2722 ASSERT(rhp != NULL);
2723 	ASSERT((rhp->index >= 0) && (rhp->index < HXGE_MAX_RDCS));
2724
2725 hxgep = rhp->hxgep;
2726 ring = hxgep->rx_rcr_rings->rcr_rings[rhp->index];
2727
2728 MUTEX_ENTER(&ring->lock);
2729 	rhp->started = B_FALSE;
2730 ring->rcr_mac_handle = NULL;
2731 ring->ldvp = NULL;
2732 ring->ldgp = NULL;
2733 MUTEX_EXIT(&ring->lock);
2734 }
2735
2736 static int
2737 hxge_rx_group_start(mac_group_driver_t gdriver)
2738 {
2739 hxge_ring_group_t *group = (hxge_ring_group_t *)gdriver;
2740
2741 ASSERT(group->hxgep != NULL);
2742 ASSERT(group->hxgep->hxge_mac_state == HXGE_MAC_STARTED);
2743
2744 MUTEX_ENTER(group->hxgep->genlock);
2745 group->started = B_TRUE;
2746 MUTEX_EXIT(group->hxgep->genlock);
2747
2748 return (0);
2749 }
2750
2751 static void
2752 hxge_rx_group_stop(mac_group_driver_t gdriver)
2753 {
2754 hxge_ring_group_t *group = (hxge_ring_group_t *)gdriver;
2755
2756 ASSERT(group->hxgep != NULL);
2757 ASSERT(group->hxgep->hxge_mac_state == HXGE_MAC_STARTED);
2758 ASSERT(group->started == B_TRUE);
2759
2760 MUTEX_ENTER(group->hxgep->genlock);
2761 group->started = B_FALSE;
2762 MUTEX_EXIT(group->hxgep->genlock);
2763 }
2764
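/*
 * Alternate MAC address (mmac) helpers used by the RX group add/remove
 * entry points below: hxge_mmac_get_slot() finds a free slot,
 * hxge_mmac_set_addr() programs an address into a slot,
 * hxge_mmac_find_addr() locates the slot holding a given address, and
 * hxge_mmac_unset_addr() clears a slot again.
 */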
2765 static int
2766 hxge_mmac_get_slot(p_hxge_t hxgep, int *slot)
2767 {
2768 int i;
2769
2770 /*
2771 * Find an open slot.
2772 */
2773 for (i = 0; i < hxgep->mmac.total; i++) {
2774 if (!hxgep->mmac.addrs[i].set) {
2775 *slot = i;
2776 return (0);
2777 }
2778 }
2779
2780 return (ENXIO);
2781 }
2782
2783 static int
2784 hxge_mmac_set_addr(p_hxge_t hxgep, int slot, const uint8_t *addr)
2785 {
2786 struct ether_addr eaddr;
2787 hxge_status_t status = HXGE_OK;
2788
2789 bcopy(addr, (uint8_t *)&eaddr, ETHERADDRL);
2790
2791 /*
2792 * Set new interface local address and re-init device.
2793 * This is destructive to any other streams attached
2794 * to this device.
2795 */
2796 RW_ENTER_WRITER(&hxgep->filter_lock);
2797 status = hxge_pfc_set_mac_address(hxgep, slot, &eaddr);
2798 RW_EXIT(&hxgep->filter_lock);
2799 if (status != HXGE_OK)
2800 return (status);
2801
2802 hxgep->mmac.addrs[slot].set = B_TRUE;
2803 bcopy(addr, hxgep->mmac.addrs[slot].addr, ETHERADDRL);
2804 hxgep->mmac.available--;
2805 if (slot == HXGE_MAC_DEFAULT_ADDR_SLOT)
2806 hxgep->mmac.addrs[slot].primary = B_TRUE;
2807
2808 return (0);
2809 }
2810
2811 static int
2812 hxge_mmac_find_addr(p_hxge_t hxgep, const uint8_t *addr, int *slot)
2813 {
2814 int i, result;
2815
2816 for (i = 0; i < hxgep->mmac.total; i++) {
2817 if (hxgep->mmac.addrs[i].set) {
2818 result = memcmp(hxgep->mmac.addrs[i].addr,
2819 addr, ETHERADDRL);
2820 if (result == 0) {
2821 *slot = i;
2822 return (0);
2823 }
2824 }
2825 }
2826
2827 return (EINVAL);
2828 }
2829
2830 static int
2831 hxge_mmac_unset_addr(p_hxge_t hxgep, int slot)
2832 {
2833 hxge_status_t status;
2834 int i;
2835
2836 status = hxge_pfc_clear_mac_address(hxgep, slot);
2837 if (status != HXGE_OK)
2838 return (status);
2839
2840 for (i = 0; i < ETHERADDRL; i++)
2841 hxgep->mmac.addrs[slot].addr[i] = 0;
2842
2843 hxgep->mmac.addrs[slot].set = B_FALSE;
2844 if (slot == HXGE_MAC_DEFAULT_ADDR_SLOT)
2845 hxgep->mmac.addrs[slot].primary = B_FALSE;
2846 hxgep->mmac.available++;
2847
2848 return (0);
2849 }
2850
2851 static int
2852 hxge_rx_group_add_mac(void *arg, const uint8_t *mac_addr)
2853 {
2854 hxge_ring_group_t *group = arg;
2855 p_hxge_t hxgep = group->hxgep;
2856 int slot = 0;
2857
2858 ASSERT(group->type == MAC_RING_TYPE_RX);
2859
2860 MUTEX_ENTER(hxgep->genlock);
2861
2862 /*
2863 * Find a slot for the address.
2864 */
2865 if (hxge_mmac_get_slot(hxgep, &slot) != 0) {
2866 MUTEX_EXIT(hxgep->genlock);
2867 return (ENOSPC);
2868 }
2869
2870 /*
2871 * Program the MAC address.
2872 */
2873 if (hxge_mmac_set_addr(hxgep, slot, mac_addr) != 0) {
2874 MUTEX_EXIT(hxgep->genlock);
2875 return (ENOSPC);
2876 }
2877
2878 MUTEX_EXIT(hxgep->genlock);
2879 return (0);
2880 }
2881
2882 static int
2883 hxge_rx_group_rem_mac(void *arg, const uint8_t *mac_addr)
2884 {
2885 hxge_ring_group_t *group = arg;
2886 p_hxge_t hxgep = group->hxgep;
2887 int rv, slot;
2888
2889 ASSERT(group->type == MAC_RING_TYPE_RX);
2890
2891 MUTEX_ENTER(hxgep->genlock);
2892
2893 if ((rv = hxge_mmac_find_addr(hxgep, mac_addr, &slot)) != 0) {
2894 MUTEX_EXIT(hxgep->genlock);
2895 return (rv);
2896 }
2897
2898 if ((rv = hxge_mmac_unset_addr(hxgep, slot)) != 0) {
2899 MUTEX_EXIT(hxgep->genlock);
2900 return (rv);
2901 }
2902
2903 MUTEX_EXIT(hxgep->genlock);
2904 return (0);
2905 }
2906
2907 static void
2908 hxge_group_get(void *arg, mac_ring_type_t type, int groupid,
2909 mac_group_info_t *infop, mac_group_handle_t gh)
2910 {
2911 p_hxge_t hxgep = arg;
2912 hxge_ring_group_t *group;
2913
2914 ASSERT(type == MAC_RING_TYPE_RX);
2915
2916 switch (type) {
2917 case MAC_RING_TYPE_RX:
2918 group = &hxgep->rx_groups[groupid];
2919 group->hxgep = hxgep;
2920 group->ghandle = gh;
2921 group->index = groupid;
2922 group->type = type;
2923
2924 infop->mgi_driver = (mac_group_driver_t)group;
2925 infop->mgi_start = hxge_rx_group_start;
2926 infop->mgi_stop = hxge_rx_group_stop;
2927 infop->mgi_addmac = hxge_rx_group_add_mac;
2928 infop->mgi_remmac = hxge_rx_group_rem_mac;
2929 infop->mgi_count = HXGE_MAX_RDCS;
2930 break;
2931
2932 case MAC_RING_TYPE_TX:
2933 default:
2934 break;
2935 }
2936 }
2937
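/*
 * Map a DMA channel to the interrupt handle table index of the logical
 * device group servicing it, so the corresponding DDI interrupt handle
 * can be reported to the MAC layer for that ring.
 */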
2938 static int
2939 hxge_ring_get_htable_idx(p_hxge_t hxgep, mac_ring_type_t type, uint32_t channel)
2940 {
2941 int i;
2942
2943 ASSERT(hxgep->ldgvp != NULL);
2944
2945 switch (type) {
2946 case MAC_RING_TYPE_RX:
2947 for (i = 0; i < hxgep->ldgvp->maxldvs; i++) {
2948 if ((hxgep->ldgvp->ldvp[i].is_rxdma) &&
2949 (hxgep->ldgvp->ldvp[i].channel == channel)) {
2950 return ((int)
2951 hxgep->ldgvp->ldvp[i].ldgp->htable_idx);
2952 }
2953 }
2954 break;
2955
2956 case MAC_RING_TYPE_TX:
2957 for (i = 0; i < hxgep->ldgvp->maxldvs; i++) {
2958 if ((hxgep->ldgvp->ldvp[i].is_txdma) &&
2959 (hxgep->ldgvp->ldvp[i].channel == channel)) {
2960 return ((int)
2961 hxgep->ldgvp->ldvp[i].ldgp->htable_idx);
2962 }
2963 }
2964 break;
2965
2966 default:
2967 break;
2968 }
2969
2970 return (-1);
2971 }
2972
2973 /*
2974 * Callback function for the GLDv3 layer to register all rings.
2975 */
2976 /*ARGSUSED*/
2977 static void
2978 hxge_fill_ring(void *arg, mac_ring_type_t type, const int rg_index,
2979 const int index, mac_ring_info_t *infop, mac_ring_handle_t rh)
2980 {
2981 p_hxge_t hxgep = arg;
2982
2983 ASSERT(hxgep != NULL);
2984 ASSERT(infop != NULL);
2985
2986 switch (type) {
2987 case MAC_RING_TYPE_TX: {
2988 p_hxge_ring_handle_t rhp;
2989 mac_intr_t *mintr = &infop->mri_intr;
2990 p_hxge_intr_t intrp;
2991 int htable_idx;
2992
2993 ASSERT((index >= 0) && (index < HXGE_MAX_TDCS));
2994 rhp = &hxgep->tx_ring_handles[index];
2995 rhp->hxgep = hxgep;
2996 rhp->index = index;
2997 rhp->ring_handle = rh;
2998 infop->mri_driver = (mac_ring_driver_t)rhp;
2999 infop->mri_start = hxge_tx_ring_start;
3000 infop->mri_stop = hxge_tx_ring_stop;
3001 infop->mri_tx = hxge_tx_ring_send;
3002 infop->mri_stat = hxge_tx_ring_stat;
3003
3004 intrp = (p_hxge_intr_t)&hxgep->hxge_intr_type;
3005 htable_idx = hxge_ring_get_htable_idx(hxgep, type, index);
3006 if (htable_idx >= 0)
3007 mintr->mi_ddi_handle = intrp->htable[htable_idx];
3008 else
3009 mintr->mi_ddi_handle = NULL;
3010 break;
3011 }
3012
3013 case MAC_RING_TYPE_RX: {
3014 p_hxge_ring_handle_t rhp;
3015 mac_intr_t hxge_mac_intr;
3016 p_hxge_intr_t intrp;
3017 int htable_idx;
3018
3019 ASSERT((index >= 0) && (index < HXGE_MAX_RDCS));
3020 rhp = &hxgep->rx_ring_handles[index];
3021 rhp->hxgep = hxgep;
3022 rhp->index = index;
3023 rhp->ring_handle = rh;
3024
3025 /*
3026 * Entrypoint to enable interrupt (disable poll) and
3027 * disable interrupt (enable poll).
3028 */
3029 hxge_mac_intr.mi_handle = (mac_intr_handle_t)rhp;
3030 hxge_mac_intr.mi_enable = (mac_intr_enable_t)hxge_disable_poll;
3031 hxge_mac_intr.mi_disable = (mac_intr_disable_t)hxge_enable_poll;
3032
3033 intrp = (p_hxge_intr_t)&hxgep->hxge_intr_type;
3034 htable_idx = hxge_ring_get_htable_idx(hxgep, type, index);
3035 if (htable_idx >= 0)
3036 hxge_mac_intr.mi_ddi_handle = intrp->htable[htable_idx];
3037 else
3038 hxge_mac_intr.mi_ddi_handle = NULL;
3039
3040 infop->mri_driver = (mac_ring_driver_t)rhp;
3041 infop->mri_start = hxge_rx_ring_start;
3042 infop->mri_stop = hxge_rx_ring_stop;
3043 infop->mri_intr = hxge_mac_intr;
3044 infop->mri_poll = hxge_rx_poll;
3045 infop->mri_stat = hxge_rx_ring_stat;
3046 break;
3047 }
3048
3049 default:
3050 break;
3051 }
3052 }
3053
3054 /*ARGSUSED*/
3055 boolean_t
3056 hxge_m_getcapab(void *arg, mac_capab_t cap, void *cap_data)
3057 {
3058 p_hxge_t hxgep = arg;
3059
3060 switch (cap) {
3061 case MAC_CAPAB_HCKSUM: {
3062 uint32_t *txflags = cap_data;
3063
3064 *txflags = HCKSUM_INET_PARTIAL;
3065 break;
3066 }
3067
3068 case MAC_CAPAB_RINGS: {
3069 mac_capab_rings_t *cap_rings = cap_data;
3070
3071 MUTEX_ENTER(hxgep->genlock);
3072 if (cap_rings->mr_type == MAC_RING_TYPE_RX) {
3073 cap_rings->mr_group_type = MAC_GROUP_TYPE_STATIC;
3074 cap_rings->mr_rnum = HXGE_MAX_RDCS;
3075 cap_rings->mr_rget = hxge_fill_ring;
3076 cap_rings->mr_gnum = HXGE_MAX_RX_GROUPS;
3077 cap_rings->mr_gget = hxge_group_get;
3078 cap_rings->mr_gaddring = NULL;
3079 cap_rings->mr_gremring = NULL;
3080 } else {
3081 cap_rings->mr_group_type = MAC_GROUP_TYPE_STATIC;
3082 cap_rings->mr_rnum = HXGE_MAX_TDCS;
3083 cap_rings->mr_rget = hxge_fill_ring;
3084 cap_rings->mr_gnum = 0;
3085 cap_rings->mr_gget = NULL;
3086 cap_rings->mr_gaddring = NULL;
3087 cap_rings->mr_gremring = NULL;
3088 }
3089 MUTEX_EXIT(hxgep->genlock);
3090 break;
3091 }
3092
3093 default:
3094 return (B_FALSE);
3095 }
3096 return (B_TRUE);
3097 }
3098
3099 static boolean_t
3100 hxge_param_locked(mac_prop_id_t pr_num)
3101 {
3102 /*
3103 * All adv_* parameters are locked (read-only) while
3104 * the device is in any sort of loopback mode ...
3105 */
3106 switch (pr_num) {
3107 case MAC_PROP_ADV_1000FDX_CAP:
3108 case MAC_PROP_EN_1000FDX_CAP:
3109 case MAC_PROP_ADV_1000HDX_CAP:
3110 case MAC_PROP_EN_1000HDX_CAP:
3111 case MAC_PROP_ADV_100FDX_CAP:
3112 case MAC_PROP_EN_100FDX_CAP:
3113 case MAC_PROP_ADV_100HDX_CAP:
3114 case MAC_PROP_EN_100HDX_CAP:
3115 case MAC_PROP_ADV_10FDX_CAP:
3116 case MAC_PROP_EN_10FDX_CAP:
3117 case MAC_PROP_ADV_10HDX_CAP:
3118 case MAC_PROP_EN_10HDX_CAP:
3119 case MAC_PROP_AUTONEG:
3120 case MAC_PROP_FLOWCTRL:
3121 return (B_TRUE);
3122 }
3123 return (B_FALSE);
3124 }
3125
3126 /*
3127 * callback functions for set/get of properties
3128 */
3129 static int
3130 hxge_m_setprop(void *barg, const char *pr_name, mac_prop_id_t pr_num,
3131 uint_t pr_valsize, const void *pr_val)
3132 {
3133 hxge_t *hxgep = barg;
3134 p_hxge_stats_t statsp;
3135 int err = 0;
3136 uint32_t new_mtu, old_framesize, new_framesize;
3137
3138 HXGE_DEBUG_MSG((hxgep, DLADM_CTL, "==> hxge_m_setprop"));
3139
3140 statsp = hxgep->statsp;
3141 MUTEX_ENTER(hxgep->genlock);
3142 if (statsp->port_stats.lb_mode != hxge_lb_normal &&
3143 hxge_param_locked(pr_num)) {
3144 /*
3145 * All adv_* parameters are locked (read-only)
3146 * while the device is in any sort of loopback mode.
3147 */
3148 HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3149 "==> hxge_m_setprop: loopback mode: read only"));
3150 MUTEX_EXIT(hxgep->genlock);
3151 return (EBUSY);
3152 }
3153
3154 switch (pr_num) {
3155 /*
3156 	 * These properties either do not exist or are read-only.
3157 */
3158 case MAC_PROP_EN_1000FDX_CAP:
3159 case MAC_PROP_EN_100FDX_CAP:
3160 case MAC_PROP_EN_10FDX_CAP:
3161 case MAC_PROP_EN_1000HDX_CAP:
3162 case MAC_PROP_EN_100HDX_CAP:
3163 case MAC_PROP_EN_10HDX_CAP:
3164 case MAC_PROP_ADV_1000FDX_CAP:
3165 case MAC_PROP_ADV_1000HDX_CAP:
3166 case MAC_PROP_ADV_100FDX_CAP:
3167 case MAC_PROP_ADV_100HDX_CAP:
3168 case MAC_PROP_ADV_10FDX_CAP:
3169 case MAC_PROP_ADV_10HDX_CAP:
3170 case MAC_PROP_STATUS:
3171 case MAC_PROP_SPEED:
3172 case MAC_PROP_DUPLEX:
3173 case MAC_PROP_AUTONEG:
3174 /*
3175 * Flow control is handled in the shared domain and
3176 		 * it is read-only here.
3177 */
3178 case MAC_PROP_FLOWCTRL:
3179 err = EINVAL;
3180 HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3181 "==> hxge_m_setprop: read only property %d",
3182 pr_num));
3183 break;
3184
3185 case MAC_PROP_MTU:
3186 bcopy(pr_val, &new_mtu, sizeof (new_mtu));
3187 HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3188 "==> hxge_m_setprop: set MTU: %d", new_mtu));
3189
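		/*
		 * The frame size is the MTU plus the link-layer overhead
		 * (which is what MTU_TO_FRAME_SIZE is presumed to cover);
		 * it must stay within the device limits checked below.
		 */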
3190 new_framesize = new_mtu + MTU_TO_FRAME_SIZE;
3191 if (new_framesize == hxgep->vmac.maxframesize) {
3192 err = 0;
3193 break;
3194 }
3195
3196 if (hxgep->hxge_mac_state == HXGE_MAC_STARTED) {
3197 err = EBUSY;
3198 break;
3199 }
3200
3201 if (new_framesize < MIN_FRAME_SIZE ||
3202 new_framesize > MAX_FRAME_SIZE) {
3203 err = EINVAL;
3204 break;
3205 }
3206
3207 old_framesize = hxgep->vmac.maxframesize;
3208 hxgep->vmac.maxframesize = (uint16_t)new_framesize;
3209
3210 if (hxge_vmac_set_framesize(hxgep)) {
3211 hxgep->vmac.maxframesize =
3212 (uint16_t)old_framesize;
3213 err = EINVAL;
3214 break;
3215 }
3216
3217 err = mac_maxsdu_update(hxgep->mach, new_mtu);
3218 if (err) {
3219 hxgep->vmac.maxframesize =
3220 (uint16_t)old_framesize;
3221 (void) hxge_vmac_set_framesize(hxgep);
3222 }
3223
3224 HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3225 "==> hxge_m_setprop: set MTU: %d maxframe %d",
3226 new_mtu, hxgep->vmac.maxframesize));
3227 break;
3228
3229 case MAC_PROP_PRIVATE:
3230 HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3231 "==> hxge_m_setprop: private property"));
3232 err = hxge_set_priv_prop(hxgep, pr_name, pr_valsize,
3233 pr_val);
3234 break;
3235
3236 default:
3237 err = ENOTSUP;
3238 break;
3239 }
3240
3241 MUTEX_EXIT(hxgep->genlock);
3242
3243 HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3244 "<== hxge_m_setprop (return %d)", err));
3245
3246 return (err);
3247 }
3248
3249 static int
3250 hxge_m_getprop(void *barg, const char *pr_name, mac_prop_id_t pr_num,
3251 uint_t pr_valsize, void *pr_val)
3252 {
3253 hxge_t *hxgep = barg;
3254 p_hxge_stats_t statsp = hxgep->statsp;
3255 int err = 0;
3256 link_flowctrl_t fl;
3257 uint64_t tmp = 0;
3258 link_state_t ls;
3259
3260 HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3261 "==> hxge_m_getprop: pr_num %d", pr_num));
3262
3263 switch (pr_num) {
3264 case MAC_PROP_DUPLEX:
3265 *(uint8_t *)pr_val = statsp->mac_stats.link_duplex;
3266 HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3267 "==> hxge_m_getprop: duplex mode %d",
3268 *(uint8_t *)pr_val));
3269 break;
3270
3271 case MAC_PROP_SPEED:
3272 ASSERT(pr_valsize >= sizeof (uint64_t));
3273 tmp = statsp->mac_stats.link_speed * 1000000ull;
3274 bcopy(&tmp, pr_val, sizeof (tmp));
3275 break;
3276
3277 case MAC_PROP_STATUS:
3278 ASSERT(pr_valsize >= sizeof (link_state_t));
3279 if (!statsp->mac_stats.link_up)
3280 ls = LINK_STATE_DOWN;
3281 else
3282 ls = LINK_STATE_UP;
3283 bcopy(&ls, pr_val, sizeof (ls));
3284 break;
3285
3286 case MAC_PROP_FLOWCTRL:
3287 /*
3288 * Flow control is supported by the shared domain and
3289 * it is currently transmit only
3290 */
3291 		ASSERT(pr_valsize >= sizeof (link_flowctrl_t));
3292 fl = LINK_FLOWCTRL_TX;
3293 bcopy(&fl, pr_val, sizeof (fl));
3294 break;
3295 case MAC_PROP_AUTONEG:
3296 /* 10G link only and it is not negotiable */
3297 *(uint8_t *)pr_val = 0;
3298 break;
3299 case MAC_PROP_ADV_1000FDX_CAP:
3300 case MAC_PROP_ADV_100FDX_CAP:
3301 case MAC_PROP_ADV_10FDX_CAP:
3302 case MAC_PROP_ADV_1000HDX_CAP:
3303 case MAC_PROP_ADV_100HDX_CAP:
3304 case MAC_PROP_ADV_10HDX_CAP:
3305 case MAC_PROP_EN_1000FDX_CAP:
3306 case MAC_PROP_EN_100FDX_CAP:
3307 case MAC_PROP_EN_10FDX_CAP:
3308 case MAC_PROP_EN_1000HDX_CAP:
3309 case MAC_PROP_EN_100HDX_CAP:
3310 case MAC_PROP_EN_10HDX_CAP:
3311 err = ENOTSUP;
3312 break;
3313
3314 case MAC_PROP_PRIVATE:
3315 err = hxge_get_priv_prop(hxgep, pr_name, pr_valsize,
3316 pr_val);
3317 break;
3318
3319 default:
3320 err = ENOTSUP;
3321 break;
3322 }
3323
3324 HXGE_DEBUG_MSG((hxgep, DLADM_CTL, "<== hxge_m_getprop"));
3325
3326 return (err);
3327 }
3328
3329 static void
3330 hxge_m_propinfo(void *arg, const char *pr_name,
3331 mac_prop_id_t pr_num, mac_prop_info_handle_t prh)
3332 {
3333 _NOTE(ARGUNUSED(arg));
3334 switch (pr_num) {
3335 case MAC_PROP_DUPLEX:
3336 case MAC_PROP_SPEED:
3337 case MAC_PROP_STATUS:
3338 case MAC_PROP_AUTONEG:
3339 case MAC_PROP_FLOWCTRL:
3340 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
3341 break;
3342
3343 case MAC_PROP_MTU:
3344 mac_prop_info_set_range_uint32(prh,
3345 MIN_FRAME_SIZE - MTU_TO_FRAME_SIZE,
3346 MAX_FRAME_SIZE - MTU_TO_FRAME_SIZE);
3347 break;
3348
3349 case MAC_PROP_PRIVATE: {
3350 char valstr[MAXNAMELEN];
3351
3352 bzero(valstr, sizeof (valstr));
3353
3354 /* Receive Interrupt Blanking Parameters */
3355 if (strcmp(pr_name, "_rxdma_intr_time") == 0) {
3356 (void) snprintf(valstr, sizeof (valstr), "%d",
3357 RXDMA_RCR_TO_DEFAULT);
3358 } else if (strcmp(pr_name, "_rxdma_intr_pkts") == 0) {
3359 (void) snprintf(valstr, sizeof (valstr), "%d",
3360 RXDMA_RCR_PTHRES_DEFAULT);
3361
3362 /* Classification and Load Distribution Configuration */
3363 } else if (strcmp(pr_name, "_class_opt_ipv4_tcp") == 0 ||
3364 strcmp(pr_name, "_class_opt_ipv4_udp") == 0 ||
3365 strcmp(pr_name, "_class_opt_ipv4_ah") == 0 ||
3366 strcmp(pr_name, "_class_opt_ipv4_sctp") == 0 ||
3367 strcmp(pr_name, "_class_opt_ipv6_tcp") == 0 ||
3368 strcmp(pr_name, "_class_opt_ipv6_udp") == 0 ||
3369 strcmp(pr_name, "_class_opt_ipv6_ah") == 0 ||
3370 strcmp(pr_name, "_class_opt_ipv6_sctp") == 0) {
3371 (void) snprintf(valstr, sizeof (valstr), "%d",
3372 HXGE_CLASS_TCAM_LOOKUP);
3373 }
3374
3375 if (strlen(valstr) > 0)
3376 mac_prop_info_set_default_str(prh, valstr);
3377 break;
3378 }
3379 }
3380 }
3381
3382
3383 /* ARGSUSED */
3384 static int
3385 hxge_set_priv_prop(p_hxge_t hxgep, const char *pr_name, uint_t pr_valsize,
3386 const void *pr_val)
3387 {
3388 p_hxge_param_t param_arr = hxgep->param_arr;
3389 int err = 0;
3390
3391 HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3392 "==> hxge_set_priv_prop: name %s (value %s)", pr_name, pr_val));
3393
3394 if (pr_val == NULL) {
3395 return (EINVAL);
3396 }
3397
3398 /* Blanking */
3399 if (strcmp(pr_name, "_rxdma_intr_time") == 0) {
3400 err = hxge_param_rx_intr_time(hxgep, NULL, NULL,
3401 		    (char *)pr_val, (caddr_t)&param_arr[param_rxdma_intr_time]);
3402 } else if (strcmp(pr_name, "_rxdma_intr_pkts") == 0) {
3403 err = hxge_param_rx_intr_pkts(hxgep, NULL, NULL,
3404 		    (char *)pr_val, (caddr_t)&param_arr[param_rxdma_intr_pkts]);
3405
3406 /* Classification */
3407 } else if (strcmp(pr_name, "_class_opt_ipv4_tcp") == 0) {
3408 err = hxge_param_set_ip_opt(hxgep, NULL, NULL, (char *)pr_val,
3409 		    (caddr_t)&param_arr[param_class_opt_ipv4_tcp]);
3410 } else if (strcmp(pr_name, "_class_opt_ipv4_udp") == 0) {
3411 err = hxge_param_set_ip_opt(hxgep, NULL, NULL, (char *)pr_val,
3412 		    (caddr_t)&param_arr[param_class_opt_ipv4_udp]);
3413 } else if (strcmp(pr_name, "_class_opt_ipv4_ah") == 0) {
3414 err = hxge_param_set_ip_opt(hxgep, NULL, NULL, (char *)pr_val,
3415 		    (caddr_t)&param_arr[param_class_opt_ipv4_ah]);
3416 } else if (strcmp(pr_name, "_class_opt_ipv4_sctp") == 0) {
3417 err = hxge_param_set_ip_opt(hxgep, NULL, NULL, (char *)pr_val,
3418 		    (caddr_t)&param_arr[param_class_opt_ipv4_sctp]);
3419 } else if (strcmp(pr_name, "_class_opt_ipv6_tcp") == 0) {
3420 err = hxge_param_set_ip_opt(hxgep, NULL, NULL, (char *)pr_val,
3421 		    (caddr_t)&param_arr[param_class_opt_ipv6_tcp]);
3422 } else if (strcmp(pr_name, "_class_opt_ipv6_udp") == 0) {
3423 err = hxge_param_set_ip_opt(hxgep, NULL, NULL, (char *)pr_val,
3424 		    (caddr_t)&param_arr[param_class_opt_ipv6_udp]);
3425 } else if (strcmp(pr_name, "_class_opt_ipv6_ah") == 0) {
3426 err = hxge_param_set_ip_opt(hxgep, NULL, NULL, (char *)pr_val,
3427 		    (caddr_t)&param_arr[param_class_opt_ipv6_ah]);
3428 } else if (strcmp(pr_name, "_class_opt_ipv6_sctp") == 0) {
3429 err = hxge_param_set_ip_opt(hxgep, NULL, NULL, (char *)pr_val,
3430 		    (caddr_t)&param_arr[param_class_opt_ipv6_sctp]);
3431 } else {
3432 err = ENOTSUP;
3433 }
3434
3435 HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3436 "<== hxge_set_priv_prop: err %d", err));
3437
3438 return (err);
3439 }
3440
3441 static int
3442 hxge_get_priv_prop(p_hxge_t hxgep, const char *pr_name, uint_t pr_valsize,
3443 void *pr_val)
3444 {
3445 p_hxge_param_t param_arr = hxgep->param_arr;
3446 char valstr[MAXNAMELEN];
3447 int err = 0;
3448 uint_t strsize;
3449 int value = 0;
3450
3451 HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3452 "==> hxge_get_priv_prop: property %s", pr_name));
3453
3454 /* Receive Interrupt Blanking Parameters */
3455 if (strcmp(pr_name, "_rxdma_intr_time") == 0) {
3456 value = hxgep->intr_timeout;
3457 } else if (strcmp(pr_name, "_rxdma_intr_pkts") == 0) {
3458 value = hxgep->intr_threshold;
3459
3460 /* Classification and Load Distribution Configuration */
3461 } else if (strcmp(pr_name, "_class_opt_ipv4_tcp") == 0) {
3462 err = hxge_param_get_ip_opt(hxgep, NULL, NULL,
3463 		    (caddr_t)&param_arr[param_class_opt_ipv4_tcp]);
3464
3465 value = (int)param_arr[param_class_opt_ipv4_tcp].value;
3466 } else if (strcmp(pr_name, "_class_opt_ipv4_udp") == 0) {
3467 err = hxge_param_get_ip_opt(hxgep, NULL, NULL,
3468 		    (caddr_t)&param_arr[param_class_opt_ipv4_udp]);
3469
3470 value = (int)param_arr[param_class_opt_ipv4_udp].value;
3471 } else if (strcmp(pr_name, "_class_opt_ipv4_ah") == 0) {
3472 err = hxge_param_get_ip_opt(hxgep, NULL, NULL,
3473 		    (caddr_t)&param_arr[param_class_opt_ipv4_ah]);
3474
3475 value = (int)param_arr[param_class_opt_ipv4_ah].value;
3476 } else if (strcmp(pr_name, "_class_opt_ipv4_sctp") == 0) {
3477 err = hxge_param_get_ip_opt(hxgep, NULL, NULL,
3478 		    (caddr_t)&param_arr[param_class_opt_ipv4_sctp]);
3479
3480 value = (int)param_arr[param_class_opt_ipv4_sctp].value;
3481 } else if (strcmp(pr_name, "_class_opt_ipv6_tcp") == 0) {
3482 err = hxge_param_get_ip_opt(hxgep, NULL, NULL,
3483 		    (caddr_t)&param_arr[param_class_opt_ipv6_tcp]);
3484
3485 value = (int)param_arr[param_class_opt_ipv6_tcp].value;
3486 } else if (strcmp(pr_name, "_class_opt_ipv6_udp") == 0) {
3487 err = hxge_param_get_ip_opt(hxgep, NULL, NULL,
3488 		    (caddr_t)&param_arr[param_class_opt_ipv6_udp]);
3489
3490 value = (int)param_arr[param_class_opt_ipv6_udp].value;
3491 } else if (strcmp(pr_name, "_class_opt_ipv6_ah") == 0) {
3492 err = hxge_param_get_ip_opt(hxgep, NULL, NULL,
3493 		    (caddr_t)&param_arr[param_class_opt_ipv6_ah]);
3494
3495 value = (int)param_arr[param_class_opt_ipv6_ah].value;
3496 } else if (strcmp(pr_name, "_class_opt_ipv6_sctp") == 0) {
3497 err = hxge_param_get_ip_opt(hxgep, NULL, NULL,
3498 		    (caddr_t)&param_arr[param_class_opt_ipv6_sctp]);
3499
3500 value = (int)param_arr[param_class_opt_ipv6_sctp].value;
3501 } else {
3502 err = ENOTSUP;
3503 }
3504
3505 if (err == 0) {
3506 (void) snprintf(valstr, sizeof (valstr), "0x%x", value);
3507
3508 strsize = (uint_t)strlen(valstr);
3509 if (pr_valsize < strsize) {
3510 err = ENOBUFS;
3511 } else {
3512 (void) strlcpy(pr_val, valstr, pr_valsize);
3513 }
3514 }
3515
3516 HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3517 "<== hxge_get_priv_prop: return %d", err));
3518
3519 return (err);
3520 }
3521 /*
3522 * Module loading and removing entry points.
3523 */
3524 DDI_DEFINE_STREAM_OPS(hxge_dev_ops, nulldev, nulldev, hxge_attach, hxge_detach,
3525 nodev, NULL, D_MP, NULL, NULL);
3526
3527 extern struct mod_ops mod_driverops;
3528
3529 #define HXGE_DESC_VER "HXGE 10Gb Ethernet Driver"
3530
3531 /*
3532 * Module linkage information for the kernel.
3533 */
3534 static struct modldrv hxge_modldrv = {
3535 &mod_driverops,
3536 HXGE_DESC_VER,
3537 &hxge_dev_ops
3538 };
3539
3540 static struct modlinkage modlinkage = {
3541 MODREV_1, (void *) &hxge_modldrv, NULL
3542 };
3543
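/*
 * _init() sets up the GLDv3 ops and the per-instance soft state list and
 * then installs the module; failures are logged and the status is
 * returned to the loader.
 */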
3544 int
3545 _init(void)
3546 {
3547 int status;
3548
3549 HXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _init"));
3550 mac_init_ops(&hxge_dev_ops, "hxge");
3551 status = ddi_soft_state_init(&hxge_list, sizeof (hxge_t), 0);
3552 if (status != 0) {
3553 HXGE_ERROR_MSG((NULL, HXGE_ERR_CTL,
3554 "failed to init device soft state"));
3555 mac_fini_ops(&hxge_dev_ops);
3556 goto _init_exit;
3557 }
3558
3559 status = mod_install(&modlinkage);
3560 if (status != 0) {
3561 ddi_soft_state_fini(&hxge_list);
3562 HXGE_ERROR_MSG((NULL, HXGE_ERR_CTL, "Mod install failed"));
3563 goto _init_exit;
3564 }
3565
3566 MUTEX_INIT(&hxge_common_lock, NULL, MUTEX_DRIVER, NULL);
3567
3568 _init_exit:
3569 HXGE_DEBUG_MSG((NULL, MOD_CTL, "_init status = 0x%X", status));
3570
3571 return (status);
3572 }
3573
3574 int
3575 _fini(void)
3576 {
3577 int status;
3578
3579 HXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _fini"));
3580
3581 HXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _fini: mod_remove"));
3582
3583 if (hxge_mblks_pending)
3584 return (EBUSY);
3585
3586 status = mod_remove(&modlinkage);
3587 if (status != DDI_SUCCESS) {
3588 HXGE_DEBUG_MSG((NULL, MOD_CTL,
3589 "Module removal failed 0x%08x", status));
3590 goto _fini_exit;
3591 }
3592
3593 mac_fini_ops(&hxge_dev_ops);
3594
3595 ddi_soft_state_fini(&hxge_list);
3596
3597 MUTEX_DESTROY(&hxge_common_lock);
3598
3599 _fini_exit:
3600 HXGE_DEBUG_MSG((NULL, MOD_CTL, "_fini status = 0x%08x", status));
3601
3602 return (status);
3603 }
3604
3605 int
3606 _info(struct modinfo *modinfop)
3607 {
3608 int status;
3609
3610 HXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _info"));
3611 status = mod_info(&modlinkage, modinfop);
3612 HXGE_DEBUG_MSG((NULL, MOD_CTL, " _info status = 0x%X", status));
3613
3614 return (status);
3615 }
3616
3617 /*ARGSUSED*/
3618 static hxge_status_t
3619 hxge_add_intrs(p_hxge_t hxgep)
3620 {
3621 int intr_types;
3622 int type = 0;
3623 int ddi_status = DDI_SUCCESS;
3624 hxge_status_t status = HXGE_OK;
3625
3626 HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs"));
3627
3628 hxgep->hxge_intr_type.intr_registered = B_FALSE;
3629 hxgep->hxge_intr_type.intr_enabled = B_FALSE;
3630 hxgep->hxge_intr_type.msi_intx_cnt = 0;
3631 hxgep->hxge_intr_type.intr_added = 0;
3632 hxgep->hxge_intr_type.niu_msi_enable = B_FALSE;
3633 hxgep->hxge_intr_type.intr_type = 0;
3634
3635 if (hxge_msi_enable) {
3636 hxgep->hxge_intr_type.niu_msi_enable = B_TRUE;
3637 }
3638
3639 /* Get the supported interrupt types */
3640 if ((ddi_status = ddi_intr_get_supported_types(hxgep->dip, &intr_types))
3641 != DDI_SUCCESS) {
3642 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "<== hxge_add_intrs: "
3643 "ddi_intr_get_supported_types failed: status 0x%08x",
3644 ddi_status));
3645 return (HXGE_ERROR | HXGE_DDI_FAILED);
3646 }
3647
3648 hxgep->hxge_intr_type.intr_types = intr_types;
3649
3650 HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs: "
3651 "ddi_intr_get_supported_types: 0x%08x", intr_types));
3652
3653 /*
3654 	 * Pick the interrupt type to use (MSI-X, MSI, or INTX), based on
3655 	 * hxge_msi_enable:
3656 	 *	1 - MSI,  2 - MSI-X,  others - FIXED (INTX emulation)
3658 */
3659 switch (hxge_msi_enable) {
3660 default:
3661 type = DDI_INTR_TYPE_FIXED;
3662 HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs: "
3663 "use fixed (intx emulation) type %08x", type));
3664 break;
3665
3666 case 2:
3667 HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs: "
3668 "ddi_intr_get_supported_types: 0x%08x", intr_types));
3669 if (intr_types & DDI_INTR_TYPE_MSIX) {
3670 type = DDI_INTR_TYPE_MSIX;
3671 HXGE_DEBUG_MSG((hxgep, INT_CTL,
3672 "==> hxge_add_intrs: "
3673 "ddi_intr_get_supported_types: MSIX 0x%08x", type));
3674 } else if (intr_types & DDI_INTR_TYPE_MSI) {
3675 type = DDI_INTR_TYPE_MSI;
3676 HXGE_DEBUG_MSG((hxgep, INT_CTL,
3677 "==> hxge_add_intrs: "
3678 "ddi_intr_get_supported_types: MSI 0x%08x", type));
3679 } else if (intr_types & DDI_INTR_TYPE_FIXED) {
3680 type = DDI_INTR_TYPE_FIXED;
3681 HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs: "
3682 			    "ddi_intr_get_supported_types: FIXED 0x%08x", type));
3683 }
3684 break;
3685
3686 case 1:
3687 if (intr_types & DDI_INTR_TYPE_MSI) {
3688 type = DDI_INTR_TYPE_MSI;
3689 HXGE_DEBUG_MSG((hxgep, INT_CTL,
3690 "==> hxge_add_intrs: "
3691 "ddi_intr_get_supported_types: MSI 0x%08x", type));
3692 } else if (intr_types & DDI_INTR_TYPE_MSIX) {
3693 type = DDI_INTR_TYPE_MSIX;
3694 HXGE_DEBUG_MSG((hxgep, INT_CTL,
3695 "==> hxge_add_intrs: "
3696 "ddi_intr_get_supported_types: MSIX 0x%08x", type));
3697 } else if (intr_types & DDI_INTR_TYPE_FIXED) {
3698 type = DDI_INTR_TYPE_FIXED;
3699 HXGE_DEBUG_MSG((hxgep, INT_CTL,
3700 "==> hxge_add_intrs: "
3701 			    "ddi_intr_get_supported_types: FIXED 0x%08x", type));
3702 }
3703 }

	hxgep->hxge_intr_type.intr_type = type;
	if ((type == DDI_INTR_TYPE_MSIX || type == DDI_INTR_TYPE_MSI ||
	    type == DDI_INTR_TYPE_FIXED) &&
	    hxgep->hxge_intr_type.niu_msi_enable) {
		if ((status = hxge_add_intrs_adv(hxgep)) != DDI_SUCCESS) {
			HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
			    " hxge_add_intrs: "
			    " hxge_add_intrs_adv failed: status 0x%08x",
			    status));
			return (status);
		} else {
			HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_add_intrs: "
			    "interrupts registered : type %d", type));
			hxgep->hxge_intr_type.intr_registered = B_TRUE;

			HXGE_DEBUG_MSG((hxgep, DDI_CTL,
			    "\nAdded advanced hxge add_intr_adv "
			    "intr type 0x%x\n", type));

			return (status);
		}
	}

	if (!hxgep->hxge_intr_type.intr_registered) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "==> hxge_add_intrs: failed to register interrupts"));
		return (HXGE_ERROR | HXGE_DDI_FAILED);
	}

	HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_add_intrs"));

	return (status);
}
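
/*
 * Usage sketch (an assumption about standard Solaris/illumos tuning, not
 * something this file documents): because hxge_msi_enable is an ordinary
 * global tunable, an administrator could select MSI instead of the
 * compiled-in default with an /etc/system entry such as
 *
 *	set hxge:hxge_msi_enable = 1
 *
 * followed by a reboot, after which hxge_add_intrs() above would take the
 * "case 1" path.
 */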

/*ARGSUSED*/
static hxge_status_t
hxge_add_intrs_adv(p_hxge_t hxgep)
{
	int		intr_type;
	p_hxge_intr_t	intrp;
	hxge_status_t	status;

	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs_adv"));

	intrp = (p_hxge_intr_t)&hxgep->hxge_intr_type;
	intr_type = intrp->intr_type;

	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs_adv: type 0x%x",
	    intr_type));

	switch (intr_type) {
	case DDI_INTR_TYPE_MSI:		/* 0x2 */
	case DDI_INTR_TYPE_MSIX:	/* 0x4 */
		status = hxge_add_intrs_adv_type(hxgep, intr_type);
		break;

	case DDI_INTR_TYPE_FIXED:	/* 0x1 */
		status = hxge_add_intrs_adv_type_fix(hxgep, intr_type);
		break;

	default:
		status = HXGE_ERROR;
		break;
	}

	HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_add_intrs_adv"));

	return (status);
}

/*ARGSUSED*/
static hxge_status_t
hxge_add_intrs_adv_type(p_hxge_t hxgep, uint32_t int_type)
{
	dev_info_t	*dip = hxgep->dip;
	p_hxge_ldg_t	ldgp;
	p_hxge_intr_t	intrp;
	uint_t		*inthandler;
	void		*arg1, *arg2;
	int		behavior;
	int		nintrs, navail;
	int		nactual, nrequired, nrequest;
	int		inum = 0;
	int		loop = 0;
	int		x, y;
	int		ddi_status = DDI_SUCCESS;
	hxge_status_t	status = HXGE_OK;

	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs_adv_type"));

	intrp = (p_hxge_intr_t)&hxgep->hxge_intr_type;

	ddi_status = ddi_intr_get_nintrs(dip, int_type, &nintrs);
	if ((ddi_status != DDI_SUCCESS) || (nintrs == 0)) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "ddi_intr_get_nintrs() failed, status: 0x%x, "
		    "nintrs: %d", ddi_status, nintrs));
		return (HXGE_ERROR | HXGE_DDI_FAILED);
	}

	ddi_status = ddi_intr_get_navail(dip, int_type, &navail);
	if ((ddi_status != DDI_SUCCESS) || (navail == 0)) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "ddi_intr_get_navail() failed, status: 0x%x, "
		    "navail: %d", ddi_status, navail));
		return (HXGE_ERROR | HXGE_DDI_FAILED);
	}

	HXGE_DEBUG_MSG((hxgep, INT_CTL,
	    "ddi_intr_get_navail() returned: intr type %d nintrs %d, navail %d",
	    int_type, nintrs, navail));

	/* PSARC/2007/453 MSI-X interrupt limit override */
	if (int_type == DDI_INTR_TYPE_MSIX) {
		nrequest = hxge_create_msi_property(hxgep);
		if (nrequest < navail) {
			navail = nrequest;
			HXGE_DEBUG_MSG((hxgep, INT_CTL,
			    "hxge_add_intrs_adv_type: nintrs %d "
			    "navail %d (nrequest %d)",
			    nintrs, navail, nrequest));
		}
	}

	if (int_type == DDI_INTR_TYPE_MSI && !ISP2(navail)) {
		/* MSI must be power of 2 */
		if ((navail & 16) == 16) {
			navail = 16;
		} else if ((navail & 8) == 8) {
			navail = 8;
		} else if ((navail & 4) == 4) {
			navail = 4;
		} else if ((navail & 2) == 2) {
			navail = 2;
		} else {
			navail = 1;
		}
		HXGE_DEBUG_MSG((hxgep, INT_CTL,
		    "ddi_intr_get_navail(): (msi power of 2) nintrs %d, "
		    "navail %d", nintrs, navail));
	}
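
	/*
	 * Note on the rounding above (illustrative; assumes highbit() from
	 * <sys/sysmacros.h>): for the non-power-of-two navail values MSI
	 * can produce here (all below 32), the cascade rounds navail down
	 * to the largest power of two not exceeding it, e.g. navail = 13
	 * becomes 8.  An equivalent one-liner would be:
	 *
	 *	navail = 1 << (highbit(navail) - 1);
	 */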

	HXGE_DEBUG_MSG((hxgep, INT_CTL,
	    "requesting: intr type %d nintrs %d, navail %d",
	    int_type, nintrs, navail));

	behavior = ((int_type == DDI_INTR_TYPE_FIXED) ? DDI_INTR_ALLOC_STRICT :
	    DDI_INTR_ALLOC_NORMAL);
	intrp->intr_size = navail * sizeof (ddi_intr_handle_t);
	intrp->htable = kmem_zalloc(intrp->intr_size, KM_SLEEP);

	ddi_status = ddi_intr_alloc(dip, intrp->htable, int_type, inum,
	    navail, &nactual, behavior);
	if (ddi_status != DDI_SUCCESS || nactual == 0) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    " ddi_intr_alloc() failed: %d", ddi_status));
		kmem_free(intrp->htable, intrp->intr_size);
		return (HXGE_ERROR | HXGE_DDI_FAILED);
	}

	HXGE_DEBUG_MSG((hxgep, INT_CTL,
	    "ddi_intr_alloc() returned: navail %d nactual %d",
	    navail, nactual));

	if ((ddi_status = ddi_intr_get_pri(intrp->htable[0],
	    (uint_t *)&intrp->pri)) != DDI_SUCCESS) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    " ddi_intr_get_pri() failed: %d", ddi_status));
		/* Free already allocated interrupts */
		for (y = 0; y < nactual; y++) {
			(void) ddi_intr_free(intrp->htable[y]);
		}

		kmem_free(intrp->htable, intrp->intr_size);
		return (HXGE_ERROR | HXGE_DDI_FAILED);
	}

	nrequired = 0;
	status = hxge_ldgv_init(hxgep, &nactual, &nrequired);
	if (status != HXGE_OK) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "hxge_add_intrs_adv_type: hxge_ldgv_init "
		    "failed: 0x%x", status));
		/* Free already allocated interrupts */
		for (y = 0; y < nactual; y++) {
			(void) ddi_intr_free(intrp->htable[y]);
		}

		kmem_free(intrp->htable, intrp->intr_size);
		return (status);
	}

	ldgp = hxgep->ldgvp->ldgp;
	HXGE_DEBUG_MSG((hxgep, INT_CTL,
	    "After hxge_ldgv_init(): nreq %d nactual %d", nrequired, nactual));

	if (nactual < nrequired)
		loop = nactual;
	else
		loop = nrequired;

	for (x = 0; x < loop; x++, ldgp++) {
		ldgp->vector = (uint8_t)x;
		arg1 = ldgp->ldvp;
		arg2 = hxgep;
		if (ldgp->nldvs == 1) {
			inthandler = (uint_t *)ldgp->ldvp->ldv_intr_handler;
			HXGE_DEBUG_MSG((hxgep, INT_CTL,
			    "hxge_add_intrs_adv_type: arg1 0x%x arg2 0x%x: "
			    "1-1 int handler (entry %d)\n",
			    arg1, arg2, x));
		} else if (ldgp->nldvs > 1) {
			inthandler = (uint_t *)ldgp->sys_intr_handler;
			HXGE_DEBUG_MSG((hxgep, INT_CTL,
			    "hxge_add_intrs_adv_type: arg1 0x%x arg2 0x%x: "
			    "nldvs %d int handler (entry %d)\n",
			    arg1, arg2, ldgp->nldvs, x));
		}
		HXGE_DEBUG_MSG((hxgep, INT_CTL,
		    "==> hxge_add_intrs_adv_type: ddi_add_intr(inum) #%d "
		    "htable 0x%llx", x, intrp->htable[x]));

		if ((ddi_status = ddi_intr_add_handler(intrp->htable[x],
		    (ddi_intr_handler_t *)inthandler, arg1, arg2)) !=
		    DDI_SUCCESS) {
			HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
			    "==> hxge_add_intrs_adv_type: failed #%d "
			    "status 0x%x", x, ddi_status));
			for (y = 0; y < intrp->intr_added; y++) {
				(void) ddi_intr_remove_handler(
				    intrp->htable[y]);
			}

			/* Free already allocated intr */
			for (y = 0; y < nactual; y++) {
				(void) ddi_intr_free(intrp->htable[y]);
			}
			kmem_free(intrp->htable, intrp->intr_size);

			(void) hxge_ldgv_uninit(hxgep);

			return (HXGE_ERROR | HXGE_DDI_FAILED);
		}

		ldgp->htable_idx = x;
		intrp->intr_added++;
	}
	intrp->msi_intx_cnt = nactual;

	HXGE_DEBUG_MSG((hxgep, INT_CTL,
	    "Requested: %d, Allowed: %d msi_intx_cnt %d intr_added %d",
	    navail, nactual, intrp->msi_intx_cnt, intrp->intr_added));

	(void) ddi_intr_get_cap(intrp->htable[0], &intrp->intr_cap);
	(void) hxge_intr_ldgv_init(hxgep);

	HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_add_intrs_adv_type"));

	return (status);
}
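
/*
 * A condensed sketch of the failure-path teardown used above (the same
 * calls as in hxge_add_intrs_adv_type(), repeated here only to make the
 * reverse order of cleanup explicit):
 *
 *	for (y = 0; y < intrp->intr_added; y++)
 *		(void) ddi_intr_remove_handler(intrp->htable[y]);
 *	for (y = 0; y < nactual; y++)
 *		(void) ddi_intr_free(intrp->htable[y]);
 *	kmem_free(intrp->htable, intrp->intr_size);
 *	(void) hxge_ldgv_uninit(hxgep);
 */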

/*ARGSUSED*/
static hxge_status_t
hxge_add_intrs_adv_type_fix(p_hxge_t hxgep, uint32_t int_type)
{
	dev_info_t	*dip = hxgep->dip;
	p_hxge_ldg_t	ldgp;
	p_hxge_intr_t	intrp;
	uint_t		*inthandler;
	void		*arg1, *arg2;
	int		behavior;
	int		nintrs, navail;
	int		nactual, nrequired;
	int		inum = 0;
	int		x, y;
	int		ddi_status = DDI_SUCCESS;
	hxge_status_t	status = HXGE_OK;

	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs_adv_type_fix"));
	intrp = (p_hxge_intr_t)&hxgep->hxge_intr_type;

	ddi_status = ddi_intr_get_nintrs(dip, int_type, &nintrs);
	if ((ddi_status != DDI_SUCCESS) || (nintrs == 0)) {
		HXGE_DEBUG_MSG((hxgep, INT_CTL,
		    "ddi_intr_get_nintrs() failed, status: 0x%x, "
		    "nintrs: %d", ddi_status, nintrs));
		return (HXGE_ERROR | HXGE_DDI_FAILED);
	}

	ddi_status = ddi_intr_get_navail(dip, int_type, &navail);
	if ((ddi_status != DDI_SUCCESS) || (navail == 0)) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "ddi_intr_get_navail() failed, status: 0x%x, "
		    "navail: %d", ddi_status, navail));
		return (HXGE_ERROR | HXGE_DDI_FAILED);
	}

	HXGE_DEBUG_MSG((hxgep, INT_CTL,
	    "ddi_intr_get_navail() returned: nintrs %d, navail %d",
	    nintrs, navail));

	behavior = ((int_type == DDI_INTR_TYPE_FIXED) ? DDI_INTR_ALLOC_STRICT :
	    DDI_INTR_ALLOC_NORMAL);
	intrp->intr_size = navail * sizeof (ddi_intr_handle_t);
	intrp->htable = kmem_alloc(intrp->intr_size, KM_SLEEP);
	ddi_status = ddi_intr_alloc(dip, intrp->htable, int_type, inum,
	    navail, &nactual, behavior);
	if (ddi_status != DDI_SUCCESS || nactual == 0) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    " ddi_intr_alloc() failed: %d", ddi_status));
		kmem_free(intrp->htable, intrp->intr_size);
		return (HXGE_ERROR | HXGE_DDI_FAILED);
	}

	if ((ddi_status = ddi_intr_get_pri(intrp->htable[0],
	    (uint_t *)&intrp->pri)) != DDI_SUCCESS) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    " ddi_intr_get_pri() failed: %d", ddi_status));
		/* Free already allocated interrupts */
		for (y = 0; y < nactual; y++) {
			(void) ddi_intr_free(intrp->htable[y]);
		}

		kmem_free(intrp->htable, intrp->intr_size);
		return (HXGE_ERROR | HXGE_DDI_FAILED);
	}

	nrequired = 0;
	status = hxge_ldgv_init(hxgep, &nactual, &nrequired);
	if (status != HXGE_OK) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "hxge_add_intrs_adv_type_fix:hxge_ldgv_init "
		    "failed: 0x%x", status));
		/* Free already allocated interrupts */
		for (y = 0; y < nactual; y++) {
			(void) ddi_intr_free(intrp->htable[y]);
		}

		kmem_free(intrp->htable, intrp->intr_size);
		return (status);
	}

	ldgp = hxgep->ldgvp->ldgp;
	for (x = 0; x < nrequired; x++, ldgp++) {
		ldgp->vector = (uint8_t)x;
		arg1 = ldgp->ldvp;
		arg2 = hxgep;
		if (ldgp->nldvs == 1) {
			inthandler = (uint_t *)ldgp->ldvp->ldv_intr_handler;
			HXGE_DEBUG_MSG((hxgep, INT_CTL,
			    "hxge_add_intrs_adv_type_fix: "
			    "1-1 int handler(%d) ldg %d ldv %d "
			    "arg1 $%p arg2 $%p\n",
			    x, ldgp->ldg, ldgp->ldvp->ldv, arg1, arg2));
		} else if (ldgp->nldvs > 1) {
			inthandler = (uint_t *)ldgp->sys_intr_handler;
			HXGE_DEBUG_MSG((hxgep, INT_CTL,
			    "hxge_add_intrs_adv_type_fix: "
			    "shared int handler(%d) nldvs %d ldg %d ldv %d "
			    "arg1 0x%016llx arg2 0x%016llx\n",
			    x, ldgp->nldvs, ldgp->ldg, ldgp->ldvp->ldv,
			    arg1, arg2));
		}

		if ((ddi_status = ddi_intr_add_handler(intrp->htable[x],
		    (ddi_intr_handler_t *)inthandler, arg1, arg2)) !=
		    DDI_SUCCESS) {
			HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
			    "==> hxge_add_intrs_adv_type_fix: failed #%d "
			    "status 0x%x", x, ddi_status));
			for (y = 0; y < intrp->intr_added; y++) {
				(void) ddi_intr_remove_handler(
				    intrp->htable[y]);
			}
			for (y = 0; y < nactual; y++) {
				(void) ddi_intr_free(intrp->htable[y]);
			}
			/* Free already allocated intr */
			kmem_free(intrp->htable, intrp->intr_size);

			(void) hxge_ldgv_uninit(hxgep);

			return (HXGE_ERROR | HXGE_DDI_FAILED);
		}
		intrp->intr_added++;
	}

	intrp->msi_intx_cnt = nactual;

	(void) ddi_intr_get_cap(intrp->htable[0], &intrp->intr_cap);

	status = hxge_intr_ldgv_init(hxgep);

	HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_add_intrs_adv_type_fix"));

	return (status);
}

/*ARGSUSED*/
static void
hxge_remove_intrs(p_hxge_t hxgep)
{
	int		i, inum;
	p_hxge_intr_t	intrp;

	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_remove_intrs"));
	intrp = (p_hxge_intr_t)&hxgep->hxge_intr_type;
	if (!intrp->intr_registered) {
		HXGE_DEBUG_MSG((hxgep, INT_CTL,
		    "<== hxge_remove_intrs: interrupts not registered"));
		return;
	}

	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_remove_intrs:advanced"));

	if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) {
		(void) ddi_intr_block_disable(intrp->htable,
		    intrp->intr_added);
	} else {
		for (i = 0; i < intrp->intr_added; i++) {
			(void) ddi_intr_disable(intrp->htable[i]);
		}
	}

	for (inum = 0; inum < intrp->intr_added; inum++) {
		if (intrp->htable[inum]) {
			(void) ddi_intr_remove_handler(intrp->htable[inum]);
		}
	}

	for (inum = 0; inum < intrp->msi_intx_cnt; inum++) {
		if (intrp->htable[inum]) {
			HXGE_DEBUG_MSG((hxgep, DDI_CTL,
			    "hxge_remove_intrs: ddi_intr_free inum %d "
			    "msi_intx_cnt %d intr_added %d",
			    inum, intrp->msi_intx_cnt, intrp->intr_added));

			(void) ddi_intr_free(intrp->htable[inum]);
		}
	}

	kmem_free(intrp->htable, intrp->intr_size);
	intrp->intr_registered = B_FALSE;
	intrp->intr_enabled = B_FALSE;
	intrp->msi_intx_cnt = 0;
	intrp->intr_added = 0;

	(void) hxge_ldgv_uninit(hxgep);

	HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_remove_intrs"));
}

/*ARGSUSED*/
static void
hxge_intrs_enable(p_hxge_t hxgep)
{
	p_hxge_intr_t	intrp;
	int		i;
	int		status;

	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_intrs_enable"));

	intrp = (p_hxge_intr_t)&hxgep->hxge_intr_type;

	if (!intrp->intr_registered) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "<== hxge_intrs_enable: "
		    "interrupts are not registered"));
		return;
	}

	if (intrp->intr_enabled) {
		HXGE_DEBUG_MSG((hxgep, INT_CTL,
		    "<== hxge_intrs_enable: already enabled"));
		return;
	}

	if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) {
		status = ddi_intr_block_enable(intrp->htable,
		    intrp->intr_added);
		HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_intrs_enable "
		    "block enable - status 0x%x total inums #%d\n",
		    status, intrp->intr_added));
	} else {
		for (i = 0; i < intrp->intr_added; i++) {
			status = ddi_intr_enable(intrp->htable[i]);
			HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_intrs_enable "
			    "ddi_intr_enable:enable - status 0x%x "
			    "total inums %d enable inum #%d\n",
			    status, intrp->intr_added, i));
			if (status == DDI_SUCCESS) {
				intrp->intr_enabled = B_TRUE;
			}
		}
	}

	HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_intrs_enable"));
}

/*ARGSUSED*/
static void
hxge_intrs_disable(p_hxge_t hxgep)
{
	p_hxge_intr_t	intrp;
	int		i;

	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_intrs_disable"));

	intrp = (p_hxge_intr_t)&hxgep->hxge_intr_type;

	if (!intrp->intr_registered) {
		HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_intrs_disable: "
		    "interrupts are not registered"));
		return;
	}

	if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) {
		(void) ddi_intr_block_disable(intrp->htable,
		    intrp->intr_added);
	} else {
		for (i = 0; i < intrp->intr_added; i++) {
			(void) ddi_intr_disable(intrp->htable[i]);
		}
	}

	intrp->intr_enabled = B_FALSE;
	HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_intrs_disable"));
}

static hxge_status_t
hxge_mac_register(p_hxge_t hxgep)
{
	mac_register_t	*macp;
	int		status;

	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_mac_register"));

	if ((macp = mac_alloc(MAC_VERSION)) == NULL)
		return (HXGE_ERROR);

	macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
	macp->m_driver = hxgep;
	macp->m_dip = hxgep->dip;
	macp->m_src_addr = hxgep->ouraddr.ether_addr_octet;
	macp->m_callbacks = &hxge_m_callbacks;
	macp->m_min_sdu = 0;
	macp->m_max_sdu = hxgep->vmac.maxframesize - MTU_TO_FRAME_SIZE;
	macp->m_margin = VLAN_TAGSZ;
	macp->m_priv_props = hxge_priv_props;
	macp->m_v12n = MAC_VIRT_LEVEL1;

	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
	    "hxge_mac_register: ether addr is %x:%x:%x:%x:%x:%x",
	    macp->m_src_addr[0],
	    macp->m_src_addr[1],
	    macp->m_src_addr[2],
	    macp->m_src_addr[3],
	    macp->m_src_addr[4],
	    macp->m_src_addr[5]));

	status = mac_register(macp, &hxgep->mach);
	mac_free(macp);

	if (status != 0) {
		cmn_err(CE_WARN,
		    "hxge_mac_register failed (status %d instance %d)",
		    status, hxgep->instance);
		return (HXGE_ERROR);
	}

	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_mac_register success "
	    "(instance %d)", hxgep->instance));

	return (HXGE_OK);
}

static int
hxge_init_common_dev(p_hxge_t hxgep)
{
	p_hxge_hw_list_t	hw_p;
	dev_info_t		*p_dip;

	HXGE_DEBUG_MSG((hxgep, MOD_CTL, "==> hxge_init_common_dev"));

	p_dip = hxgep->p_dip;
	MUTEX_ENTER(&hxge_common_lock);

	/*
	 * Loop through existing per Hydra hardware list.
	 */
	for (hw_p = hxge_hw_list; hw_p; hw_p = hw_p->next) {
		HXGE_DEBUG_MSG((hxgep, MOD_CTL,
		    "==> hxge_init_common_dev: hw_p $%p parent dip $%p",
		    hw_p, p_dip));
		if (hw_p->parent_devp == p_dip) {
			hxgep->hxge_hw_p = hw_p;
			hw_p->ndevs++;
			hw_p->hxge_p = hxgep;
			HXGE_DEBUG_MSG((hxgep, MOD_CTL,
			    "==> hxge_init_common_dev: "
			    "hw_p $%p parent dip $%p ndevs %d (found)",
			    hw_p, p_dip, hw_p->ndevs));
			break;
		}
	}

	if (hw_p == NULL) {
		HXGE_DEBUG_MSG((hxgep, MOD_CTL,
		    "==> hxge_init_common_dev: parent dip $%p (new)", p_dip));
		hw_p = kmem_zalloc(sizeof (hxge_hw_list_t), KM_SLEEP);
		hw_p->parent_devp = p_dip;
		hw_p->magic = HXGE_MAGIC;
		hxgep->hxge_hw_p = hw_p;
		hw_p->ndevs++;
		hw_p->hxge_p = hxgep;
		hw_p->next = hxge_hw_list;

		MUTEX_INIT(&hw_p->hxge_cfg_lock, NULL, MUTEX_DRIVER, NULL);
		MUTEX_INIT(&hw_p->hxge_tcam_lock, NULL, MUTEX_DRIVER, NULL);
		MUTEX_INIT(&hw_p->hxge_vlan_lock, NULL, MUTEX_DRIVER, NULL);

		hxge_hw_list = hw_p;
	}
	MUTEX_EXIT(&hxge_common_lock);
	HXGE_DEBUG_MSG((hxgep, MOD_CTL,
	    "==> hxge_init_common_dev (hxge_hw_list) $%p", hxge_hw_list));
	HXGE_DEBUG_MSG((hxgep, MOD_CTL, "<== hxge_init_common_dev"));

	return (HXGE_OK);
}

static void
hxge_uninit_common_dev(p_hxge_t hxgep)
{
	p_hxge_hw_list_t	hw_p, h_hw_p;
	dev_info_t		*p_dip;

	HXGE_DEBUG_MSG((hxgep, MOD_CTL, "==> hxge_uninit_common_dev"));
	if (hxgep->hxge_hw_p == NULL) {
		HXGE_DEBUG_MSG((hxgep, MOD_CTL,
		    "<== hxge_uninit_common_dev (no common)"));
		return;
	}

	MUTEX_ENTER(&hxge_common_lock);
	h_hw_p = hxge_hw_list;
	for (hw_p = hxge_hw_list; hw_p; hw_p = hw_p->next) {
		p_dip = hw_p->parent_devp;
		if (hxgep->hxge_hw_p == hw_p && p_dip == hxgep->p_dip &&
		    hxgep->hxge_hw_p->magic == HXGE_MAGIC &&
		    hw_p->magic == HXGE_MAGIC) {
			HXGE_DEBUG_MSG((hxgep, MOD_CTL,
			    "==> hxge_uninit_common_dev: "
			    "hw_p $%p parent dip $%p ndevs %d (found)",
			    hw_p, p_dip, hw_p->ndevs));

			hxgep->hxge_hw_p = NULL;
			if (hw_p->ndevs) {
				hw_p->ndevs--;
			}
			hw_p->hxge_p = NULL;
			if (!hw_p->ndevs) {
				MUTEX_DESTROY(&hw_p->hxge_vlan_lock);
				MUTEX_DESTROY(&hw_p->hxge_tcam_lock);
				MUTEX_DESTROY(&hw_p->hxge_cfg_lock);
				HXGE_DEBUG_MSG((hxgep, MOD_CTL,
				    "==> hxge_uninit_common_dev: "
				    "hw_p $%p parent dip $%p ndevs %d (last)",
				    hw_p, p_dip, hw_p->ndevs));

				if (hw_p == hxge_hw_list) {
					HXGE_DEBUG_MSG((hxgep, MOD_CTL,
					    "==> hxge_uninit_common_dev:"
					    "remove head "
					    "hw_p $%p parent dip $%p "
					    "ndevs %d (head)",
					    hw_p, p_dip, hw_p->ndevs));
					hxge_hw_list = hw_p->next;
				} else {
					HXGE_DEBUG_MSG((hxgep, MOD_CTL,
					    "==> hxge_uninit_common_dev:"
					    "remove middle "
					    "hw_p $%p parent dip $%p "
					    "ndevs %d (middle)",
					    hw_p, p_dip, hw_p->ndevs));
					h_hw_p->next = hw_p->next;
				}

				KMEM_FREE(hw_p, sizeof (hxge_hw_list_t));
			}
			break;
		} else {
			h_hw_p = hw_p;
		}
	}

	MUTEX_EXIT(&hxge_common_lock);
	HXGE_DEBUG_MSG((hxgep, MOD_CTL,
	    "==> hxge_uninit_common_dev (hxge_hw_list) $%p", hxge_hw_list));

	HXGE_DEBUG_MSG((hxgep, MOD_CTL, "<== hxge_uninit_common_dev"));
}

#define	HXGE_MSIX_ENTRIES		32
#define	HXGE_MSIX_WAIT_COUNT		10
#define	HXGE_MSIX_PARITY_CHECK_COUNT	30

static void
hxge_link_poll(void *arg)
{
	p_hxge_t	hxgep = (p_hxge_t)arg;
	hpi_handle_t	handle;
	cip_link_stat_t	link_stat;
	hxge_timeout	*to = &hxgep->timeout;

	handle = HXGE_DEV_HPI_HANDLE(hxgep);
	HXGE_REG_RD32(handle, CIP_LINK_STAT, &link_stat.value);

	if (to->report_link_status ||
	    (to->link_status != link_stat.bits.xpcs0_link_up)) {
		to->link_status = link_stat.bits.xpcs0_link_up;
		to->report_link_status = B_FALSE;

		if (link_stat.bits.xpcs0_link_up) {
			hxge_link_update(hxgep, LINK_STATE_UP);
		} else {
			hxge_link_update(hxgep, LINK_STATE_DOWN);
		}
	}

	/* Restart the link status timer to check the link status */
	MUTEX_ENTER(&to->lock);
	to->id = timeout(hxge_link_poll, arg, to->ticks);
	MUTEX_EXIT(&to->lock);
}
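
/*
 * Descriptive note: hxge_link_poll() re-arms itself with timeout(9F) on
 * every invocation, so it keeps firing every to->ticks until the callout
 * id saved in to->id is cancelled with untimeout(9F) in the driver's
 * teardown path (not shown in this excerpt).
 */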

static void
hxge_link_update(p_hxge_t hxgep, link_state_t state)
{
	p_hxge_stats_t	statsp = (p_hxge_stats_t)hxgep->statsp;

	mac_link_update(hxgep->mach, state);
	if (state == LINK_STATE_UP) {
		statsp->mac_stats.link_speed = 10000;
		statsp->mac_stats.link_duplex = 2;
		statsp->mac_stats.link_up = 1;
	} else {
		statsp->mac_stats.link_speed = 0;
		statsp->mac_stats.link_duplex = 0;
		statsp->mac_stats.link_up = 0;
	}
}

static void
hxge_msix_init(p_hxge_t hxgep)
{
	uint32_t	data0;
	uint32_t	data1;
	uint32_t	data2;
	int		i;
	uint32_t	msix_entry0;
	uint32_t	msix_entry1;
	uint32_t	msix_entry2;
	uint32_t	msix_entry3;

	/* Change to use MSIx bar instead of indirect access */
	for (i = 0; i < HXGE_MSIX_ENTRIES; i++) {
		data0 = 0xffffffff - i;
		data1 = 0xffffffff - i - 1;
		data2 = 0xffffffff - i - 2;

		HXGE_REG_WR32(hxgep->hpi_msi_handle, i * 16, data0);
		HXGE_REG_WR32(hxgep->hpi_msi_handle, i * 16 + 4, data1);
		HXGE_REG_WR32(hxgep->hpi_msi_handle, i * 16 + 8, data2);
		HXGE_REG_WR32(hxgep->hpi_msi_handle, i * 16 + 12, 0);
	}

	/* Initialize ram data out buffer. */
	for (i = 0; i < HXGE_MSIX_ENTRIES; i++) {
		HXGE_REG_RD32(hxgep->hpi_msi_handle, i * 16, &msix_entry0);
		HXGE_REG_RD32(hxgep->hpi_msi_handle, i * 16 + 4, &msix_entry1);
		HXGE_REG_RD32(hxgep->hpi_msi_handle, i * 16 + 8, &msix_entry2);
		HXGE_REG_RD32(hxgep->hpi_msi_handle, i * 16 + 12, &msix_entry3);
	}
}
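
/*
 * Layout note (from the PCI MSI-X specification, added for clarity): each
 * MSI-X table entry is 16 bytes -- Message Address (offset 0), Message
 * Upper Address (+4), Message Data (+8) and Vector Control (+12) -- which
 * is why hxge_msix_init() addresses the table at i * 16 plus those
 * offsets.  The write/read-back pass appears intended to initialize every
 * entry's RAM before the real vectors are programmed.
 */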

/*
 * The following function is to support
 * PSARC/2007/453 MSI-X interrupt limit override.
 */
static int
hxge_create_msi_property(p_hxge_t hxgep)
{
	int	nmsi;
	extern int ncpus;

	HXGE_DEBUG_MSG((hxgep, MOD_CTL, "==>hxge_create_msi_property"));

	(void) ddi_prop_create(DDI_DEV_T_NONE, hxgep->dip,
	    DDI_PROP_CANSLEEP, "#msix-request", NULL, 0);
	/*
	 * Request at most HXGE_MSIX_REQUEST_10G (8) MSI-X vectors.
	 * If the system has fewer CPUs than that, request one MSI-X
	 * vector per CPU instead.
	 */
	if (ncpus >= HXGE_MSIX_REQUEST_10G) {
		nmsi = HXGE_MSIX_REQUEST_10G;
	} else {
		nmsi = ncpus;
	}
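
	/*
	 * Worked example (assuming HXGE_MSIX_REQUEST_10G is 8, as the
	 * comment above states): a 4-CPU system yields nmsi = 4, while a
	 * 32-CPU system is capped at nmsi = 8.  The value only trims the
	 * request; ddi_intr_alloc() may still grant fewer vectors.
	 */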

	HXGE_DEBUG_MSG((hxgep, MOD_CTL,
	    "==>hxge_create_msi_property(10G): exists 0x%x (nmsi %d)",
	    ddi_prop_exists(DDI_DEV_T_NONE, hxgep->dip,
	    DDI_PROP_CANSLEEP, "#msix-request"), nmsi));

	HXGE_DEBUG_MSG((hxgep, MOD_CTL, "<==hxge_create_msi_property"));
	return (nmsi);
}