1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21 /*
22 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
23 * Use is subject to license terms.
24 * Copyright 2012 Milan Jurik. All rights reserved.
25 */
26
27 /*
28  * SunOS MT STREAMS Hydra 10Gb Ethernet Device Driver.
29 */
30 #include <hxge_impl.h>
31 #include <hxge_pfc.h>
32
33 /*
34 * PSARC/2007/453 MSI-X interrupt limit override
35 * (This PSARC case is limited to MSI-X vectors
36 * and SPARC platforms only).
37 */
38 uint32_t hxge_msi_enable = 2;
39
40 /*
41 * Globals: tunable parameters (/etc/system or adb)
42 *
43 */
44 uint32_t hxge_rbr_size = HXGE_RBR_RBB_DEFAULT;
45 uint32_t hxge_rbr_spare_size = 0;
46 uint32_t hxge_rcr_size = HXGE_RCR_DEFAULT;
47 uint32_t hxge_tx_ring_size = HXGE_TX_RING_DEFAULT;
48 uint32_t hxge_bcopy_thresh = TX_BCOPY_MAX;
49 uint32_t hxge_dvma_thresh = TX_FASTDVMA_MIN;
50 uint32_t hxge_dma_stream_thresh = TX_STREAM_MIN;
51 uint32_t hxge_jumbo_frame_size = MAX_FRAME_SIZE;
52
53 static hxge_os_mutex_t hxgedebuglock;
54 static int hxge_debug_init = 0;
55
56 /*
57 * Debugging flags:
58  * hxge_no_tx_lb : when set, disable transmit load balancing
59 * hxge_tx_lb_policy: 0 - TCP/UDP port (default)
60 * 1 - From the Stack
61 * 2 - Destination IP Address
62 */
63 uint32_t hxge_no_tx_lb = 0;
64 uint32_t hxge_tx_lb_policy = HXGE_TX_LB_TCPUDP;
65
66 /*
67 * Tunables to manage the receive buffer blocks.
68 *
69 * hxge_rx_threshold_hi: copy all buffers.
70  * hxge_rx_buf_size_type: receive buffer block size type.
71 * hxge_rx_threshold_lo: copy only up to tunable block size type.
72 */
73 #if defined(__sparc)
74 hxge_rxbuf_threshold_t hxge_rx_threshold_hi = HXGE_RX_COPY_6;
75 hxge_rxbuf_threshold_t hxge_rx_threshold_lo = HXGE_RX_COPY_4;
76 #else
77 hxge_rxbuf_threshold_t hxge_rx_threshold_hi = HXGE_RX_COPY_NONE;
78 hxge_rxbuf_threshold_t hxge_rx_threshold_lo = HXGE_RX_COPY_NONE;
79 #endif
80 hxge_rxbuf_type_t hxge_rx_buf_size_type = RCR_PKTBUFSZ_0;
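/*
 * The receive path consults these thresholds to decide, per packet,
 * whether to bcopy the data into a fresh mblk or loan the DMA buffer
 * up the stack (see the RX DMA code for the exact policy).
 */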
81
82 rtrace_t hpi_rtracebuf;
83
84 /*
85 * Function Prototypes
86 */
87 static int hxge_attach(dev_info_t *, ddi_attach_cmd_t);
88 static int hxge_detach(dev_info_t *, ddi_detach_cmd_t);
89 static void hxge_unattach(p_hxge_t);
90
91 static hxge_status_t hxge_setup_system_dma_pages(p_hxge_t);
92
93 static hxge_status_t hxge_setup_mutexes(p_hxge_t);
94 static void hxge_destroy_mutexes(p_hxge_t);
95
96 static hxge_status_t hxge_map_regs(p_hxge_t hxgep);
97 static void hxge_unmap_regs(p_hxge_t hxgep);
98
99 static hxge_status_t hxge_add_intrs(p_hxge_t hxgep);
100 static void hxge_remove_intrs(p_hxge_t hxgep);
101 static hxge_status_t hxge_add_intrs_adv(p_hxge_t hxgep);
102 static hxge_status_t hxge_add_intrs_adv_type(p_hxge_t, uint32_t);
103 static hxge_status_t hxge_add_intrs_adv_type_fix(p_hxge_t, uint32_t);
104 static void hxge_intrs_enable(p_hxge_t hxgep);
105 static void hxge_intrs_disable(p_hxge_t hxgep);
106 static void hxge_suspend(p_hxge_t);
107 static hxge_status_t hxge_resume(p_hxge_t);
108 static hxge_status_t hxge_setup_dev(p_hxge_t);
109 static void hxge_destroy_dev(p_hxge_t);
110 static hxge_status_t hxge_alloc_mem_pool(p_hxge_t);
111 static void hxge_free_mem_pool(p_hxge_t);
112 static hxge_status_t hxge_alloc_rx_mem_pool(p_hxge_t);
113 static void hxge_free_rx_mem_pool(p_hxge_t);
114 static hxge_status_t hxge_alloc_tx_mem_pool(p_hxge_t);
115 static void hxge_free_tx_mem_pool(p_hxge_t);
116 static hxge_status_t hxge_dma_mem_alloc(p_hxge_t, dma_method_t,
117 struct ddi_dma_attr *, size_t, ddi_device_acc_attr_t *, uint_t,
118 p_hxge_dma_common_t);
119 static void hxge_dma_mem_free(p_hxge_dma_common_t);
120 static hxge_status_t hxge_alloc_rx_buf_dma(p_hxge_t, uint16_t,
121 p_hxge_dma_common_t *, size_t, size_t, uint32_t *);
122 static void hxge_free_rx_buf_dma(p_hxge_t, p_hxge_dma_common_t, uint32_t);
123 static hxge_status_t hxge_alloc_rx_cntl_dma(p_hxge_t, uint16_t,
124 p_hxge_dma_common_t *, struct ddi_dma_attr *, size_t);
125 static void hxge_free_rx_cntl_dma(p_hxge_t, p_hxge_dma_common_t);
126 static hxge_status_t hxge_alloc_tx_buf_dma(p_hxge_t, uint16_t,
127 p_hxge_dma_common_t *, size_t, size_t, uint32_t *);
128 static void hxge_free_tx_buf_dma(p_hxge_t, p_hxge_dma_common_t, uint32_t);
129 static hxge_status_t hxge_alloc_tx_cntl_dma(p_hxge_t, uint16_t,
130 p_hxge_dma_common_t *, size_t);
131 static void hxge_free_tx_cntl_dma(p_hxge_t, p_hxge_dma_common_t);
132 static int hxge_init_common_dev(p_hxge_t);
133 static void hxge_uninit_common_dev(p_hxge_t);
134
135 /*
136 * The next declarations are for the GLDv3 interface.
137 */
138 static int hxge_m_start(void *);
139 static void hxge_m_stop(void *);
140 static int hxge_m_multicst(void *, boolean_t, const uint8_t *);
141 static int hxge_m_promisc(void *, boolean_t);
142 static void hxge_m_ioctl(void *, queue_t *, mblk_t *);
143 static hxge_status_t hxge_mac_register(p_hxge_t hxgep);
144
145 static boolean_t hxge_m_getcapab(void *, mac_capab_t, void *);
146 static boolean_t hxge_param_locked(mac_prop_id_t pr_num);
147 static int hxge_m_setprop(void *barg, const char *pr_name, mac_prop_id_t pr_num,
148 uint_t pr_valsize, const void *pr_val);
149 static int hxge_m_getprop(void *barg, const char *pr_name, mac_prop_id_t pr_num,
150 uint_t pr_valsize, void *pr_val);
151 static void hxge_m_propinfo(void *barg, const char *pr_name,
152 mac_prop_id_t pr_num, mac_prop_info_handle_t mph);
153 static int hxge_set_priv_prop(p_hxge_t hxgep, const char *pr_name,
154 uint_t pr_valsize, const void *pr_val);
155 static int hxge_get_priv_prop(p_hxge_t hxgep, const char *pr_name,
156 uint_t pr_valsize, void *pr_val);
157 static void hxge_link_poll(void *arg);
158 static void hxge_link_update(p_hxge_t hxge, link_state_t state);
159 static void hxge_msix_init(p_hxge_t hxgep);
160
161 char *hxge_priv_props[] = {
162 "_rxdma_intr_time",
163 "_rxdma_intr_pkts",
164 "_class_opt_ipv4_tcp",
165 "_class_opt_ipv4_udp",
166 "_class_opt_ipv4_ah",
167 "_class_opt_ipv4_sctp",
168 "_class_opt_ipv6_tcp",
169 "_class_opt_ipv6_udp",
170 "_class_opt_ipv6_ah",
171 "_class_opt_ipv6_sctp",
172 NULL
173 };
174
175 #define HXGE_MAX_PRIV_PROPS \
176 	(sizeof (hxge_priv_props)/sizeof (hxge_priv_props[0]))
177
178 #define HXGE_MAGIC 0x4E584745UL
179 #define MAX_DUMP_SZ 256
180
181 #define HXGE_M_CALLBACK_FLAGS \
182 (MC_IOCTL | MC_GETCAPAB | MC_SETPROP | MC_GETPROP | MC_PROPINFO)
183
184 extern hxge_status_t hxge_pfc_set_default_mac_addr(p_hxge_t hxgep);
185
186 static mac_callbacks_t hxge_m_callbacks = {
187 HXGE_M_CALLBACK_FLAGS,
188 hxge_m_stat,
189 hxge_m_start,
190 hxge_m_stop,
191 hxge_m_promisc,
192 hxge_m_multicst,
193 NULL,
194 NULL,
195 NULL,
196 hxge_m_ioctl,
197 hxge_m_getcapab,
198 NULL,
199 NULL,
200 hxge_m_setprop,
201 hxge_m_getprop,
202 hxge_m_propinfo
203 };
204
205 /* PSARC/2007/453 MSI-X interrupt limit override. */
206 #define HXGE_MSIX_REQUEST_10G 8
207 static int hxge_create_msi_property(p_hxge_t);
208
209 /* Enable debug messages as necessary. */
210 uint64_t hxge_debug_level = 0;
211
212 /*
213 * This list contains the instance structures for the Hydra
214 * devices present in the system. The lock exists to guarantee
215 * mutually exclusive access to the list.
216 */
217 void *hxge_list = NULL;
218 void *hxge_hw_list = NULL;
219 hxge_os_mutex_t hxge_common_lock;
220
221 extern uint64_t hpi_debug_level;
222
223 extern hxge_status_t hxge_ldgv_init(p_hxge_t, int *, int *);
224 extern hxge_status_t hxge_ldgv_uninit(p_hxge_t);
225 extern hxge_status_t hxge_intr_ldgv_init(p_hxge_t);
226 extern void hxge_fm_init(p_hxge_t hxgep, ddi_device_acc_attr_t *reg_attr,
227 ddi_device_acc_attr_t *desc_attr, ddi_dma_attr_t *dma_attr);
228 extern void hxge_fm_fini(p_hxge_t hxgep);
229
230 /*
231 * Count used to maintain the number of buffers being used
232 * by Hydra instances and loaned up to the upper layers.
233 */
234 uint32_t hxge_mblks_pending = 0;
235
236 /*
237 * Device register access attributes for PIO.
238 */
239 static ddi_device_acc_attr_t hxge_dev_reg_acc_attr = {
240 DDI_DEVICE_ATTR_V0,
241 DDI_STRUCTURE_LE_ACC,
242 DDI_STRICTORDER_ACC,
243 };
244
245 /*
246 * Device descriptor access attributes for DMA.
247 */
248 static ddi_device_acc_attr_t hxge_dev_desc_dma_acc_attr = {
249 DDI_DEVICE_ATTR_V0,
250 DDI_STRUCTURE_LE_ACC,
251 DDI_STRICTORDER_ACC
252 };
253
254 /*
255 * Device buffer access attributes for DMA.
256 */
257 static ddi_device_acc_attr_t hxge_dev_buf_dma_acc_attr = {
258 DDI_DEVICE_ATTR_V0,
259 DDI_STRUCTURE_BE_ACC,
260 DDI_STRICTORDER_ACC
261 };
262
263 ddi_dma_attr_t hxge_rx_rcr_desc_dma_attr = {
264 DMA_ATTR_V0, /* version number. */
265 0, /* low address */
266 0xffffffffffffffff, /* high address */
267 0xffffffffffffffff, /* address counter max */
268 0x80000, /* alignment */
269 0xfc00fc, /* dlim_burstsizes */
270 0x1, /* minimum transfer size */
271 0xffffffffffffffff, /* maximum transfer size */
272 0xffffffffffffffff, /* maximum segment size */
273 1, /* scatter/gather list length */
274 (unsigned int)1, /* granularity */
275 0 /* attribute flags */
276 };
277
278 ddi_dma_attr_t hxge_tx_desc_dma_attr = {
279 DMA_ATTR_V0, /* version number. */
280 0, /* low address */
281 0xffffffffffffffff, /* high address */
282 0xffffffffffffffff, /* address counter max */
283 0x100000, /* alignment */
284 0xfc00fc, /* dlim_burstsizes */
285 0x1, /* minimum transfer size */
286 0xffffffffffffffff, /* maximum transfer size */
287 0xffffffffffffffff, /* maximum segment size */
288 1, /* scatter/gather list length */
289 (unsigned int)1, /* granularity */
290 0 /* attribute flags */
291 };
292
293 ddi_dma_attr_t hxge_rx_rbr_desc_dma_attr = {
294 DMA_ATTR_V0, /* version number. */
295 0, /* low address */
296 0xffffffffffffffff, /* high address */
297 0xffffffffffffffff, /* address counter max */
298 0x40000, /* alignment */
299 0xfc00fc, /* dlim_burstsizes */
300 0x1, /* minimum transfer size */
301 0xffffffffffffffff, /* maximum transfer size */
302 0xffffffffffffffff, /* maximum segment size */
303 1, /* scatter/gather list length */
304 (unsigned int)1, /* granularity */
305 0 /* attribute flags */
306 };
307
308 ddi_dma_attr_t hxge_rx_mbox_dma_attr = {
309 DMA_ATTR_V0, /* version number. */
310 0, /* low address */
311 0xffffffffffffffff, /* high address */
312 0xffffffffffffffff, /* address counter max */
313 #if defined(_BIG_ENDIAN)
314 0x2000, /* alignment */
315 #else
316 0x1000, /* alignment */
317 #endif
318 0xfc00fc, /* dlim_burstsizes */
319 0x1, /* minimum transfer size */
320 0xffffffffffffffff, /* maximum transfer size */
321 0xffffffffffffffff, /* maximum segment size */
322 5, /* scatter/gather list length */
323 (unsigned int)1, /* granularity */
324 0 /* attribute flags */
325 };
326
327 ddi_dma_attr_t hxge_tx_dma_attr = {
328 DMA_ATTR_V0, /* version number. */
329 0, /* low address */
330 0xffffffffffffffff, /* high address */
331 0xffffffffffffffff, /* address counter max */
332 #if defined(_BIG_ENDIAN)
333 0x2000, /* alignment */
334 #else
335 0x1000, /* alignment */
336 #endif
337 0xfc00fc, /* dlim_burstsizes */
338 0x1, /* minimum transfer size */
339 0xffffffffffffffff, /* maximum transfer size */
340 0xffffffffffffffff, /* maximum segment size */
341 5, /* scatter/gather list length */
342 (unsigned int)1, /* granularity */
343 0 /* attribute flags */
344 };
345
346 ddi_dma_attr_t hxge_rx_dma_attr = {
347 DMA_ATTR_V0, /* version number. */
348 0, /* low address */
349 0xffffffffffffffff, /* high address */
350 0xffffffffffffffff, /* address counter max */
351 0x10000, /* alignment */
352 0xfc00fc, /* dlim_burstsizes */
353 0x1, /* minimum transfer size */
354 0xffffffffffffffff, /* maximum transfer size */
355 0xffffffffffffffff, /* maximum segment size */
356 1, /* scatter/gather list length */
357 (unsigned int)1, /* granularity */
358 DDI_DMA_RELAXED_ORDERING /* attribute flags */
359 };
360
361 ddi_dma_lim_t hxge_dma_limits = {
362 (uint_t)0, /* dlim_addr_lo */
363 (uint_t)0xffffffff, /* dlim_addr_hi */
364 (uint_t)0xffffffff, /* dlim_cntr_max */
365 (uint_t)0xfc00fc, /* dlim_burstsizes for 32 and 64 bit xfers */
366 0x1, /* dlim_minxfer */
367 1024 /* dlim_speed */
368 };
369
370 dma_method_t hxge_force_dma = DVMA;
371
372 /*
373 * dma chunk sizes.
374 *
375 * Try to allocate the largest possible size
376  * so that fewer DMA chunks need to be managed.
377 */
378 size_t alloc_sizes[] = {
379 0x1000, 0x2000, 0x4000, 0x8000,
380 0x10000, 0x20000, 0x40000, 0x80000,
381 0x100000, 0x200000, 0x400000, 0x800000, 0x1000000
382 };
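/*
 * hxge_alloc_rx_buf_dma() walks this table starting at the smallest
 * entry that covers the requested size and falls back to smaller
 * entries when a large allocation fails, so a buffer request may be
 * satisfied by several chunks.
 */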
383
384 /*
385  * Device attach entry point: handles DDI_ATTACH, DDI_RESUME and DDI_PM_RESUME.
386 */
387 static int
388 hxge_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
389 {
390 p_hxge_t hxgep = NULL;
391 int instance;
392 int status = DDI_SUCCESS;
393 int i;
394
395 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_attach"));
396
397 /*
398 	 * Get the device instance since we'll need to set up or retrieve a soft
399 * state for this instance.
400 */
401 instance = ddi_get_instance(dip);
402
403 switch (cmd) {
404 case DDI_ATTACH:
405 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing DDI_ATTACH"));
406 break;
407
408 case DDI_RESUME:
409 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing DDI_RESUME"));
410 hxgep = (p_hxge_t)ddi_get_soft_state(hxge_list, instance);
411 if (hxgep == NULL) {
412 status = DDI_FAILURE;
413 break;
414 }
415 if (hxgep->dip != dip) {
416 status = DDI_FAILURE;
417 break;
418 }
419 if (hxgep->suspended == DDI_PM_SUSPEND) {
420 status = ddi_dev_is_needed(hxgep->dip, 0, 1);
421 } else {
422 (void) hxge_resume(hxgep);
423 }
424 goto hxge_attach_exit;
425
426 case DDI_PM_RESUME:
427 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing DDI_PM_RESUME"));
428 hxgep = (p_hxge_t)ddi_get_soft_state(hxge_list, instance);
429 if (hxgep == NULL) {
430 status = DDI_FAILURE;
431 break;
432 }
433 if (hxgep->dip != dip) {
434 status = DDI_FAILURE;
435 break;
436 }
437 (void) hxge_resume(hxgep);
438 goto hxge_attach_exit;
439
440 default:
441 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing unknown"));
442 status = DDI_FAILURE;
443 goto hxge_attach_exit;
444 }
445
446 if (ddi_soft_state_zalloc(hxge_list, instance) == DDI_FAILURE) {
447 status = DDI_FAILURE;
448 HXGE_ERROR_MSG((hxgep, DDI_CTL,
449 "ddi_soft_state_zalloc failed"));
450 goto hxge_attach_exit;
451 }
452
453 hxgep = ddi_get_soft_state(hxge_list, instance);
454 if (hxgep == NULL) {
455 status = HXGE_ERROR;
456 HXGE_ERROR_MSG((hxgep, DDI_CTL,
457 "ddi_get_soft_state failed"));
458 goto hxge_attach_fail2;
459 }
460
461 hxgep->drv_state = 0;
462 hxgep->dip = dip;
463 hxgep->instance = instance;
464 hxgep->p_dip = ddi_get_parent(dip);
465 hxgep->hxge_debug_level = hxge_debug_level;
466 hpi_debug_level = hxge_debug_level;
467
468 /*
469 	 * Initialize MMAC structure.
470 */
471 (void) hxge_pfc_num_macs_get(hxgep, &hxgep->mmac.total);
472 hxgep->mmac.available = hxgep->mmac.total;
473 for (i = 0; i < hxgep->mmac.total; i++) {
474 hxgep->mmac.addrs[i].set = B_FALSE;
475 hxgep->mmac.addrs[i].primary = B_FALSE;
476 }
477
478 hxge_fm_init(hxgep, &hxge_dev_reg_acc_attr, &hxge_dev_desc_dma_acc_attr,
479 &hxge_rx_dma_attr);
480
481 status = hxge_map_regs(hxgep);
482 if (status != HXGE_OK) {
483 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "hxge_map_regs failed"));
484 goto hxge_attach_fail3;
485 }
486
487 status = hxge_init_common_dev(hxgep);
488 if (status != HXGE_OK) {
489 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
490 "hxge_init_common_dev failed"));
491 goto hxge_attach_fail4;
492 }
493
494 /*
495 * Setup the Ndd parameters for this instance.
496 */
497 hxge_init_param(hxgep);
498
499 /*
500 * Setup Register Tracing Buffer.
501 */
502 hpi_rtrace_buf_init((rtrace_t *)&hpi_rtracebuf);
503
504 /* init stats ptr */
505 hxge_init_statsp(hxgep);
506
507 status = hxge_setup_mutexes(hxgep);
508 if (status != HXGE_OK) {
509 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "set mutex failed"));
510 goto hxge_attach_fail;
511 }
512
513 /* Scrub the MSI-X memory */
514 hxge_msix_init(hxgep);
515
516 status = hxge_get_config_properties(hxgep);
517 if (status != HXGE_OK) {
518 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "get_hw create failed"));
519 goto hxge_attach_fail;
520 }
521
522 /*
523 * Setup the Kstats for the driver.
524 */
525 hxge_setup_kstats(hxgep);
526 hxge_setup_param(hxgep);
527
528 status = hxge_setup_system_dma_pages(hxgep);
529 if (status != HXGE_OK) {
530 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "set dma page failed"));
531 goto hxge_attach_fail;
532 }
533
534 hxge_hw_id_init(hxgep);
535 hxge_hw_init_niu_common(hxgep);
536
537 status = hxge_setup_dev(hxgep);
538 if (status != DDI_SUCCESS) {
539 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "set dev failed"));
540 goto hxge_attach_fail;
541 }
542
543 status = hxge_add_intrs(hxgep);
544 if (status != DDI_SUCCESS) {
545 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "add_intr failed"));
546 goto hxge_attach_fail;
547 }
548
549 /*
550 * Enable interrupts.
551 */
552 hxge_intrs_enable(hxgep);
553
554 if ((status = hxge_mac_register(hxgep)) != HXGE_OK) {
555 HXGE_DEBUG_MSG((hxgep, DDI_CTL,
556 "unable to register to mac layer (%d)", status));
557 goto hxge_attach_fail;
558 }
559 mac_link_update(hxgep->mach, LINK_STATE_UNKNOWN);
560
561 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "registered to mac (instance %d)",
562 instance));
563
564 goto hxge_attach_exit;
565
566 hxge_attach_fail:
567 hxge_unattach(hxgep);
568 goto hxge_attach_fail1;
569
570 hxge_attach_fail5:
571 /*
572 * Tear down the ndd parameters setup.
573 */
574 hxge_destroy_param(hxgep);
575
576 /*
577 * Tear down the kstat setup.
578 */
579 hxge_destroy_kstats(hxgep);
580
581 hxge_attach_fail4:
582 if (hxgep->hxge_hw_p) {
583 hxge_uninit_common_dev(hxgep);
584 hxgep->hxge_hw_p = NULL;
585 }
586 hxge_attach_fail3:
587 /*
588 * Unmap the register setup.
589 */
590 hxge_unmap_regs(hxgep);
591
592 hxge_fm_fini(hxgep);
593
594 hxge_attach_fail2:
595 ddi_soft_state_free(hxge_list, hxgep->instance);
596
597 hxge_attach_fail1:
598 if (status != HXGE_OK)
599 status = (HXGE_ERROR | HXGE_DDI_FAILED);
600 hxgep = NULL;
601
602 hxge_attach_exit:
603 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_attach status = 0x%08x",
604 status));
605
606 return (status);
607 }
608
609 static int
610 hxge_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
611 {
612 int status = DDI_SUCCESS;
613 int instance;
614 p_hxge_t hxgep = NULL;
615
616 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_detach"));
617 instance = ddi_get_instance(dip);
618 hxgep = ddi_get_soft_state(hxge_list, instance);
619 if (hxgep == NULL) {
620 status = DDI_FAILURE;
621 goto hxge_detach_exit;
622 }
623
624 switch (cmd) {
625 case DDI_DETACH:
626 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing DDI_DETACH"));
627 break;
628
629 case DDI_PM_SUSPEND:
630 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing DDI_PM_SUSPEND"));
631 hxgep->suspended = DDI_PM_SUSPEND;
632 hxge_suspend(hxgep);
633 break;
634
635 case DDI_SUSPEND:
636 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing DDI_SUSPEND"));
637 if (hxgep->suspended != DDI_PM_SUSPEND) {
638 hxgep->suspended = DDI_SUSPEND;
639 hxge_suspend(hxgep);
640 }
641 break;
642
643 default:
644 status = DDI_FAILURE;
645 break;
646 }
647
648 if (cmd != DDI_DETACH)
649 goto hxge_detach_exit;
650
651 /*
652 * Stop the xcvr polling.
653 */
654 hxgep->suspended = cmd;
655
656 if (hxgep->mach && (status = mac_unregister(hxgep->mach)) != 0) {
657 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
658 "<== hxge_detach status = 0x%08X", status));
659 return (DDI_FAILURE);
660 }
661 HXGE_DEBUG_MSG((hxgep, DDI_CTL,
662 "<== hxge_detach (mac_unregister) status = 0x%08X", status));
663
664 hxge_unattach(hxgep);
665 hxgep = NULL;
666
667 hxge_detach_exit:
668 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_detach status = 0x%08X",
669 status));
670
671 return (status);
672 }
673
674 static void
675 hxge_unattach(p_hxge_t hxgep)
676 {
677 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_unattach"));
678
679 if (hxgep == NULL || hxgep->dev_regs == NULL) {
680 return;
681 }
682
683 if (hxgep->hxge_hw_p) {
684 hxge_uninit_common_dev(hxgep);
685 hxgep->hxge_hw_p = NULL;
686 }
687
688 if (hxgep->hxge_timerid) {
689 hxge_stop_timer(hxgep, hxgep->hxge_timerid);
690 hxgep->hxge_timerid = 0;
691 }
692
693 /* Stop interrupts. */
694 hxge_intrs_disable(hxgep);
695
696 /* Stop any further interrupts. */
697 hxge_remove_intrs(hxgep);
698
699 /* Stop the device and free resources. */
700 hxge_destroy_dev(hxgep);
701
702 /* Tear down the ndd parameters setup. */
703 hxge_destroy_param(hxgep);
704
705 /* Tear down the kstat setup. */
706 hxge_destroy_kstats(hxgep);
707
708 /*
709 * Remove the list of ndd parameters which were setup during attach.
710 */
711 if (hxgep->dip) {
712 HXGE_DEBUG_MSG((hxgep, OBP_CTL,
713 " hxge_unattach: remove all properties"));
714 (void) ddi_prop_remove_all(hxgep->dip);
715 }
716
717 /*
718 * Reset RDC, TDC, PFC, and VMAC blocks from PEU to clear any
719 * previous state before unmapping the registers.
720 */
721 HXGE_REG_WR32(hxgep->hpi_handle, BLOCK_RESET, 0x0000001E);
722 HXGE_DELAY(1000);
723
724 /*
725 * Unmap the register setup.
726 */
727 hxge_unmap_regs(hxgep);
728
729 hxge_fm_fini(hxgep);
730
731 /* Destroy all mutexes. */
732 hxge_destroy_mutexes(hxgep);
733
734 /*
735 * Free the soft state data structures allocated with this instance.
736 */
737 ddi_soft_state_free(hxge_list, hxgep->instance);
738
739 HXGE_DEBUG_MSG((NULL, DDI_CTL, "<== hxge_unattach"));
740 }
741
742 static hxge_status_t
743 hxge_map_regs(p_hxge_t hxgep)
744 {
745 int ddi_status = DDI_SUCCESS;
746 p_dev_regs_t dev_regs;
747
748 #ifdef HXGE_DEBUG
749 char *sysname;
750 #endif
751
752 off_t regsize;
753 hxge_status_t status = HXGE_OK;
754 int nregs;
755
756 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_map_regs"));
757
758 if (ddi_dev_nregs(hxgep->dip, &nregs) != DDI_SUCCESS)
759 return (HXGE_ERROR);
760
761 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "hxge_map_regs: nregs: %d", nregs));
762
763 hxgep->dev_regs = NULL;
764 dev_regs = KMEM_ZALLOC(sizeof (dev_regs_t), KM_SLEEP);
765 dev_regs->hxge_regh = NULL;
766 dev_regs->hxge_pciregh = NULL;
767 dev_regs->hxge_msix_regh = NULL;
768
769 	(void) ddi_dev_regsize(hxgep->dip, 0, &regsize);
770 HXGE_DEBUG_MSG((hxgep, DDI_CTL,
771 "hxge_map_regs: pci config size 0x%x", regsize));
772
773 ddi_status = ddi_regs_map_setup(hxgep->dip, 0,
774 (caddr_t *)&(dev_regs->hxge_pciregp), 0, 0,
775 &hxge_dev_reg_acc_attr, &dev_regs->hxge_pciregh);
776 if (ddi_status != DDI_SUCCESS) {
777 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
778 "ddi_map_regs, hxge bus config regs failed"));
779 goto hxge_map_regs_fail0;
780 }
781
782 HXGE_DEBUG_MSG((hxgep, DDI_CTL,
783 "hxge_map_reg: PCI config addr 0x%0llx handle 0x%0llx",
784 dev_regs->hxge_pciregp,
785 dev_regs->hxge_pciregh));
786
787 	(void) ddi_dev_regsize(hxgep->dip, 1, &regsize);
788 HXGE_DEBUG_MSG((hxgep, DDI_CTL,
789 "hxge_map_regs: pio size 0x%x", regsize));
790
791 /* set up the device mapped register */
792 ddi_status = ddi_regs_map_setup(hxgep->dip, 1,
793 (caddr_t *)&(dev_regs->hxge_regp), 0, 0,
794 &hxge_dev_reg_acc_attr, &dev_regs->hxge_regh);
795
796 if (ddi_status != DDI_SUCCESS) {
797 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
798 "ddi_map_regs for Hydra global reg failed"));
799 goto hxge_map_regs_fail1;
800 }
801
802 /* set up the msi/msi-x mapped register */
803 	(void) ddi_dev_regsize(hxgep->dip, 2, &regsize);
804 HXGE_DEBUG_MSG((hxgep, DDI_CTL,
805 "hxge_map_regs: msix size 0x%x", regsize));
806
807 ddi_status = ddi_regs_map_setup(hxgep->dip, 2,
808 (caddr_t *)&(dev_regs->hxge_msix_regp), 0, 0,
809 &hxge_dev_reg_acc_attr, &dev_regs->hxge_msix_regh);
810
811 if (ddi_status != DDI_SUCCESS) {
812 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
813 "ddi_map_regs for msi reg failed"));
814 goto hxge_map_regs_fail2;
815 }
816
817 hxgep->dev_regs = dev_regs;
818
819 HPI_PCI_ACC_HANDLE_SET(hxgep, dev_regs->hxge_pciregh);
820 HPI_PCI_ADD_HANDLE_SET(hxgep, (hpi_reg_ptr_t)dev_regs->hxge_pciregp);
821 HPI_MSI_ACC_HANDLE_SET(hxgep, dev_regs->hxge_msix_regh);
822 HPI_MSI_ADD_HANDLE_SET(hxgep, (hpi_reg_ptr_t)dev_regs->hxge_msix_regp);
823
824 HPI_ACC_HANDLE_SET(hxgep, dev_regs->hxge_regh);
825 HPI_ADD_HANDLE_SET(hxgep, (hpi_reg_ptr_t)dev_regs->hxge_regp);
826
827 HPI_REG_ACC_HANDLE_SET(hxgep, dev_regs->hxge_regh);
828 HPI_REG_ADD_HANDLE_SET(hxgep, (hpi_reg_ptr_t)dev_regs->hxge_regp);
829
830 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "hxge_map_reg: hardware addr 0x%0llx "
831 " handle 0x%0llx", dev_regs->hxge_regp, dev_regs->hxge_regh));
832
833 goto hxge_map_regs_exit;
834
835 hxge_map_regs_fail3:
836 if (dev_regs->hxge_msix_regh) {
837 ddi_regs_map_free(&dev_regs->hxge_msix_regh);
838 }
839
840 hxge_map_regs_fail2:
841 if (dev_regs->hxge_regh) {
842 ddi_regs_map_free(&dev_regs->hxge_regh);
843 }
844
845 hxge_map_regs_fail1:
846 if (dev_regs->hxge_pciregh) {
847 ddi_regs_map_free(&dev_regs->hxge_pciregh);
848 }
849
850 hxge_map_regs_fail0:
851 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "Freeing register set memory"));
852 kmem_free(dev_regs, sizeof (dev_regs_t));
853
854 hxge_map_regs_exit:
855 if (ddi_status != DDI_SUCCESS)
856 status |= (HXGE_ERROR | HXGE_DDI_FAILED);
857 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_map_regs"));
858 return (status);
859 }
860
861 static void
862 hxge_unmap_regs(p_hxge_t hxgep)
863 {
864 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_unmap_regs"));
865 if (hxgep->dev_regs) {
866 if (hxgep->dev_regs->hxge_pciregh) {
867 HXGE_DEBUG_MSG((hxgep, DDI_CTL,
868 "==> hxge_unmap_regs: bus"));
869 ddi_regs_map_free(&hxgep->dev_regs->hxge_pciregh);
870 hxgep->dev_regs->hxge_pciregh = NULL;
871 }
872
873 if (hxgep->dev_regs->hxge_regh) {
874 HXGE_DEBUG_MSG((hxgep, DDI_CTL,
875 "==> hxge_unmap_regs: device registers"));
876 ddi_regs_map_free(&hxgep->dev_regs->hxge_regh);
877 hxgep->dev_regs->hxge_regh = NULL;
878 }
879
880 if (hxgep->dev_regs->hxge_msix_regh) {
881 HXGE_DEBUG_MSG((hxgep, DDI_CTL,
882 "==> hxge_unmap_regs: device interrupts"));
883 ddi_regs_map_free(&hxgep->dev_regs->hxge_msix_regh);
884 hxgep->dev_regs->hxge_msix_regh = NULL;
885 }
886 kmem_free(hxgep->dev_regs, sizeof (dev_regs_t));
887 hxgep->dev_regs = NULL;
888 }
889 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_unmap_regs"));
890 }
891
892 static hxge_status_t
893 hxge_setup_mutexes(p_hxge_t hxgep)
894 {
895 int ddi_status = DDI_SUCCESS;
896 hxge_status_t status = HXGE_OK;
897
898 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_setup_mutexes"));
899
900 /*
901 	 * Get the interrupt cookie so the mutexes can be initialized.
902 */
903 ddi_status = ddi_get_iblock_cookie(hxgep->dip, 0,
904 &hxgep->interrupt_cookie);
905
906 if (ddi_status != DDI_SUCCESS) {
907 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
908 "<== hxge_setup_mutexes: failed 0x%x", ddi_status));
909 goto hxge_setup_mutexes_exit;
910 }
911
912 /*
913 	 * Initialize mutexes for this device.
914 */
915 MUTEX_INIT(hxgep->genlock, NULL,
916 MUTEX_DRIVER, (void *) hxgep->interrupt_cookie);
917 MUTEX_INIT(&hxgep->vmac_lock, NULL,
918 MUTEX_DRIVER, (void *) hxgep->interrupt_cookie);
919 MUTEX_INIT(&hxgep->ouraddr_lock, NULL,
920 MUTEX_DRIVER, (void *) hxgep->interrupt_cookie);
921 RW_INIT(&hxgep->filter_lock, NULL,
922 RW_DRIVER, (void *) hxgep->interrupt_cookie);
923 MUTEX_INIT(&hxgep->pio_lock, NULL,
924 MUTEX_DRIVER, (void *) hxgep->interrupt_cookie);
925 MUTEX_INIT(&hxgep->timeout.lock, NULL,
926 MUTEX_DRIVER, (void *) hxgep->interrupt_cookie);
927
928 hxge_setup_mutexes_exit:
929 HXGE_DEBUG_MSG((hxgep, DDI_CTL,
930 "<== hxge_setup_mutexes status = %x", status));
931
932 if (ddi_status != DDI_SUCCESS)
933 status |= (HXGE_ERROR | HXGE_DDI_FAILED);
934
935 return (status);
936 }
937
938 static void
939 hxge_destroy_mutexes(p_hxge_t hxgep)
940 {
941 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_destroy_mutexes"));
942 RW_DESTROY(&hxgep->filter_lock);
943 MUTEX_DESTROY(&hxgep->vmac_lock);
944 MUTEX_DESTROY(&hxgep->ouraddr_lock);
945 MUTEX_DESTROY(hxgep->genlock);
946 MUTEX_DESTROY(&hxgep->pio_lock);
947 MUTEX_DESTROY(&hxgep->timeout.lock);
948
949 if (hxge_debug_init == 1) {
950 MUTEX_DESTROY(&hxgedebuglock);
951 hxge_debug_init = 0;
952 }
953
954 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_destroy_mutexes"));
955 }
956
957 hxge_status_t
958 hxge_init(p_hxge_t hxgep)
959 {
960 hxge_status_t status = HXGE_OK;
961
962 HXGE_DEBUG_MSG((hxgep, STR_CTL, "==> hxge_init"));
963
964 if (hxgep->drv_state & STATE_HW_INITIALIZED) {
965 return (status);
966 }
967
968 /*
969 * Allocate system memory for the receive/transmit buffer blocks and
970 * receive/transmit descriptor rings.
971 */
972 status = hxge_alloc_mem_pool(hxgep);
973 if (status != HXGE_OK) {
974 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "alloc mem failed\n"));
975 goto hxge_init_fail1;
976 }
977
978 /*
979 * Initialize and enable TXDMA channels.
980 */
981 status = hxge_init_txdma_channels(hxgep);
982 if (status != HXGE_OK) {
983 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "init txdma failed\n"));
984 goto hxge_init_fail3;
985 }
986
987 /*
988 * Initialize and enable RXDMA channels.
989 */
990 status = hxge_init_rxdma_channels(hxgep);
991 if (status != HXGE_OK) {
992 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "init rxdma failed\n"));
993 goto hxge_init_fail4;
994 }
995
996 /*
997 * Initialize TCAM
998 */
999 status = hxge_classify_init(hxgep);
1000 if (status != HXGE_OK) {
1001 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "init classify failed\n"));
1002 goto hxge_init_fail5;
1003 }
1004
1005 /*
1006 * Initialize the VMAC block.
1007 */
1008 status = hxge_vmac_init(hxgep);
1009 if (status != HXGE_OK) {
1010 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "init MAC failed\n"));
1011 goto hxge_init_fail5;
1012 }
1013
1014 	/* Bringup - this may be unnecessary when PXE and FCODE are available */
1015 status = hxge_pfc_set_default_mac_addr(hxgep);
1016 if (status != HXGE_OK) {
1017 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
1018 "Default Address Failure\n"));
1019 goto hxge_init_fail5;
1020 }
1021
1022 /*
1023 * Enable hardware interrupts.
1024 */
1025 hxge_intr_hw_enable(hxgep);
1026 hxgep->drv_state |= STATE_HW_INITIALIZED;
1027
1028 goto hxge_init_exit;
1029
1030 hxge_init_fail5:
1031 hxge_uninit_rxdma_channels(hxgep);
1032 hxge_init_fail4:
1033 hxge_uninit_txdma_channels(hxgep);
1034 hxge_init_fail3:
1035 hxge_free_mem_pool(hxgep);
1036 hxge_init_fail1:
1037 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
1038 "<== hxge_init status (failed) = 0x%08x", status));
1039 return (status);
1040
1041 hxge_init_exit:
1042
1043 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_init status = 0x%08x",
1044 status));
1045
1046 return (status);
1047 }
1048
1049 timeout_id_t
1050 hxge_start_timer(p_hxge_t hxgep, fptrv_t func, int msec)
1051 {
1052 if ((hxgep->suspended == 0) || (hxgep->suspended == DDI_RESUME)) {
1053 return (timeout(func, (caddr_t)hxgep,
1054 drv_usectohz(1000 * msec)));
1055 }
1056 return (NULL);
1057 }
1058
1059 /*ARGSUSED*/
1060 void
1061 hxge_stop_timer(p_hxge_t hxgep, timeout_id_t timerid)
1062 {
1063 if (timerid) {
1064 (void) untimeout(timerid);
1065 }
1066 }
1067
1068 void
1069 hxge_uninit(p_hxge_t hxgep)
1070 {
1071 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_uninit"));
1072
1073 if (!(hxgep->drv_state & STATE_HW_INITIALIZED)) {
1074 HXGE_DEBUG_MSG((hxgep, DDI_CTL,
1075 "==> hxge_uninit: not initialized"));
1076 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_uninit"));
1077 return;
1078 }
1079
1080 /* Stop timer */
1081 if (hxgep->hxge_timerid) {
1082 hxge_stop_timer(hxgep, hxgep->hxge_timerid);
1083 hxgep->hxge_timerid = 0;
1084 }
1085
1086 (void) hxge_intr_hw_disable(hxgep);
1087
1088 /* Reset the receive VMAC side. */
1089 (void) hxge_rx_vmac_disable(hxgep);
1090
1091 /* Free classification resources */
1092 (void) hxge_classify_uninit(hxgep);
1093
1094 /* Reset the transmit/receive DMA side. */
1095 (void) hxge_txdma_hw_mode(hxgep, HXGE_DMA_STOP);
1096 (void) hxge_rxdma_hw_mode(hxgep, HXGE_DMA_STOP);
1097
1098 hxge_uninit_txdma_channels(hxgep);
1099 hxge_uninit_rxdma_channels(hxgep);
1100
1101 /* Reset the transmit VMAC side. */
1102 (void) hxge_tx_vmac_disable(hxgep);
1103
1104 hxge_free_mem_pool(hxgep);
1105
1106 hxgep->drv_state &= ~STATE_HW_INITIALIZED;
1107
1108 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_uninit"));
1109 }
1110
1111 /*ARGSUSED*/
1112 /*VARARGS*/
1113 void
1114 hxge_debug_msg(p_hxge_t hxgep, uint64_t level, char *fmt, ...)
1115 {
1116 char msg_buffer[1048];
1117 char prefix_buffer[32];
1118 int instance;
1119 uint64_t debug_level;
1120 int cmn_level = CE_CONT;
1121 va_list ap;
1122
1123 debug_level = (hxgep == NULL) ? hxge_debug_level :
1124 hxgep->hxge_debug_level;
1125
1126 if ((level & debug_level) || (level == HXGE_NOTE) ||
1127 (level == HXGE_ERR_CTL)) {
1128 /* do the msg processing */
1129 if (hxge_debug_init == 0) {
1130 MUTEX_INIT(&hxgedebuglock, NULL, MUTEX_DRIVER, NULL);
1131 hxge_debug_init = 1;
1132 }
1133
1134 MUTEX_ENTER(&hxgedebuglock);
1135
1136 if ((level & HXGE_NOTE)) {
1137 cmn_level = CE_NOTE;
1138 }
1139
1140 if (level & HXGE_ERR_CTL) {
1141 cmn_level = CE_WARN;
1142 }
1143
1144 va_start(ap, fmt);
1145 (void) vsprintf(msg_buffer, fmt, ap);
1146 va_end(ap);
1147
1148 if (hxgep == NULL) {
1149 instance = -1;
1150 (void) sprintf(prefix_buffer, "%s :", "hxge");
1151 } else {
1152 instance = hxgep->instance;
1153 (void) sprintf(prefix_buffer,
1154 "%s%d :", "hxge", instance);
1155 }
1156
1157 MUTEX_EXIT(&hxgedebuglock);
1158 cmn_err(cmn_level, "%s %s\n", prefix_buffer, msg_buffer);
1159 }
1160 }
1161
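/*
 * Format a packet buffer as colon-separated hex for debug logging; for
 * buffers larger than MAX_DUMP_SZ only the leading and trailing bytes
 * are shown. The result lives in a static buffer, so the routine is
 * not reentrant.
 */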
1162 char *
1163 hxge_dump_packet(char *addr, int size)
1164 {
1165 uchar_t *ap = (uchar_t *)addr;
1166 int i;
1167 static char etherbuf[1024];
1168 char *cp = etherbuf;
1169 char digits[] = "0123456789abcdef";
1170
1171 if (!size)
1172 size = 60;
1173
1174 if (size > MAX_DUMP_SZ) {
1175 /* Dump the leading bytes */
1176 for (i = 0; i < MAX_DUMP_SZ / 2; i++) {
1177 if (*ap > 0x0f)
1178 *cp++ = digits[*ap >> 4];
1179 *cp++ = digits[*ap++ & 0xf];
1180 *cp++ = ':';
1181 }
1182 for (i = 0; i < 20; i++)
1183 *cp++ = '.';
1184 /* Dump the last MAX_DUMP_SZ/2 bytes */
1185 ap = (uchar_t *)(addr + (size - MAX_DUMP_SZ / 2));
1186 for (i = 0; i < MAX_DUMP_SZ / 2; i++) {
1187 if (*ap > 0x0f)
1188 *cp++ = digits[*ap >> 4];
1189 *cp++ = digits[*ap++ & 0xf];
1190 *cp++ = ':';
1191 }
1192 } else {
1193 for (i = 0; i < size; i++) {
1194 if (*ap > 0x0f)
1195 *cp++ = digits[*ap >> 4];
1196 *cp++ = digits[*ap++ & 0xf];
1197 *cp++ = ':';
1198 }
1199 }
1200 *--cp = 0;
1201 return (etherbuf);
1202 }
1203
1204 static void
1205 hxge_suspend(p_hxge_t hxgep)
1206 {
1207 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_suspend"));
1208
1209 /*
1210 * Stop the link status timer before hxge_intrs_disable() to avoid
1211 	 * accessing the MSI-X table simultaneously. Note that the timer
1212 * routine polls for MSIX parity errors.
1213 */
1214 MUTEX_ENTER(&hxgep->timeout.lock);
1215 if (hxgep->timeout.id)
1216 (void) untimeout(hxgep->timeout.id);
1217 MUTEX_EXIT(&hxgep->timeout.lock);
1218
1219 hxge_intrs_disable(hxgep);
1220 hxge_destroy_dev(hxgep);
1221
1222 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_suspend"));
1223 }
1224
1225 static hxge_status_t
1226 hxge_resume(p_hxge_t hxgep)
1227 {
1228 hxge_status_t status = HXGE_OK;
1229
1230 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_resume"));
1231 hxgep->suspended = DDI_RESUME;
1232
1233 (void) hxge_rxdma_hw_mode(hxgep, HXGE_DMA_START);
1234 (void) hxge_txdma_hw_mode(hxgep, HXGE_DMA_START);
1235
1236 (void) hxge_rx_vmac_enable(hxgep);
1237 (void) hxge_tx_vmac_enable(hxgep);
1238
1239 hxge_intrs_enable(hxgep);
1240
1241 hxgep->suspended = 0;
1242
1243 /*
1244 * Resume the link status timer after hxge_intrs_enable to avoid
1245 	 * accessing the MSI-X table simultaneously.
1246 */
1247 MUTEX_ENTER(&hxgep->timeout.lock);
1248 hxgep->timeout.id = timeout(hxge_link_poll, (void *)hxgep,
1249 hxgep->timeout.ticks);
1250 MUTEX_EXIT(&hxgep->timeout.lock);
1251
1252 HXGE_DEBUG_MSG((hxgep, DDI_CTL,
1253 "<== hxge_resume status = 0x%x", status));
1254
1255 return (status);
1256 }
1257
1258 static hxge_status_t
1259 hxge_setup_dev(p_hxge_t hxgep)
1260 {
1261 hxge_status_t status = HXGE_OK;
1262
1263 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_setup_dev"));
1264
1265 status = hxge_link_init(hxgep);
1266 if (fm_check_acc_handle(hxgep->dev_regs->hxge_regh) != DDI_FM_OK) {
1267 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
1268 "Bad register acc handle"));
1269 status = HXGE_ERROR;
1270 }
1271
1272 if (status != HXGE_OK) {
1273 HXGE_DEBUG_MSG((hxgep, MAC_CTL,
1274 " hxge_setup_dev status (link init 0x%08x)", status));
1275 goto hxge_setup_dev_exit;
1276 }
1277
1278 hxge_setup_dev_exit:
1279 HXGE_DEBUG_MSG((hxgep, DDI_CTL,
1280 "<== hxge_setup_dev status = 0x%08x", status));
1281
1282 return (status);
1283 }
1284
1285 static void
1286 hxge_destroy_dev(p_hxge_t hxgep)
1287 {
1288 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_destroy_dev"));
1289
1290 (void) hxge_hw_stop(hxgep);
1291
1292 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_destroy_dev"));
1293 }
1294
1295 static hxge_status_t
1296 hxge_setup_system_dma_pages(p_hxge_t hxgep)
1297 {
1298 int ddi_status = DDI_SUCCESS;
1299 uint_t count;
1300 ddi_dma_cookie_t cookie;
1301 uint_t iommu_pagesize;
1302 hxge_status_t status = HXGE_OK;
1303
1304 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_setup_system_dma_pages"));
1305
1306 hxgep->sys_page_sz = ddi_ptob(hxgep->dip, (ulong_t)1);
1307 iommu_pagesize = dvma_pagesize(hxgep->dip);
1308
1309 HXGE_DEBUG_MSG((hxgep, DDI_CTL,
1310 " hxge_setup_system_dma_pages: page %d (ddi_ptob %d) "
1311 " default_block_size %d iommu_pagesize %d",
1312 hxgep->sys_page_sz, ddi_ptob(hxgep->dip, (ulong_t)1),
1313 hxgep->rx_default_block_size, iommu_pagesize));
1314
1315 if (iommu_pagesize != 0) {
1316 if (hxgep->sys_page_sz == iommu_pagesize) {
1317 			/* Hydra supports up to 8K pages */
1318 if (iommu_pagesize > 0x2000)
1319 hxgep->sys_page_sz = 0x2000;
1320 } else {
1321 if (hxgep->sys_page_sz > iommu_pagesize)
1322 hxgep->sys_page_sz = iommu_pagesize;
1323 }
1324 }
1325
1326 hxgep->sys_page_mask = ~(hxgep->sys_page_sz - 1);
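	/*
	 * For example, a sys_page_sz of 0x1000 (4 KB) produces a mask with
	 * the low 12 bits clear, which isolates the page-aligned portion of
	 * an address.
	 */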
1327
1328 HXGE_DEBUG_MSG((hxgep, DDI_CTL,
1329 "==> hxge_setup_system_dma_pages: page %d (ddi_ptob %d) "
1330 "default_block_size %d page mask %d",
1331 hxgep->sys_page_sz, ddi_ptob(hxgep->dip, (ulong_t)1),
1332 hxgep->rx_default_block_size, hxgep->sys_page_mask));
1333
1334 switch (hxgep->sys_page_sz) {
1335 default:
1336 hxgep->sys_page_sz = 0x1000;
1337 hxgep->sys_page_mask = ~(hxgep->sys_page_sz - 1);
1338 hxgep->rx_default_block_size = 0x1000;
1339 hxgep->rx_bksize_code = RBR_BKSIZE_4K;
1340 break;
1341 case 0x1000:
1342 hxgep->rx_default_block_size = 0x1000;
1343 hxgep->rx_bksize_code = RBR_BKSIZE_4K;
1344 break;
1345 case 0x2000:
1346 hxgep->rx_default_block_size = 0x2000;
1347 hxgep->rx_bksize_code = RBR_BKSIZE_8K;
1348 break;
1349 }
1350
1351 hxge_rx_dma_attr.dma_attr_align = hxgep->sys_page_sz;
1352 hxge_tx_dma_attr.dma_attr_align = hxgep->sys_page_sz;
1353
1354 /*
1355 * Get the system DMA burst size.
1356 */
1357 ddi_status = ddi_dma_alloc_handle(hxgep->dip, &hxge_tx_dma_attr,
1358 DDI_DMA_DONTWAIT, 0, &hxgep->dmasparehandle);
1359 if (ddi_status != DDI_SUCCESS) {
1360 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
1361 "ddi_dma_alloc_handle: failed status 0x%x", ddi_status));
1362 goto hxge_get_soft_properties_exit;
1363 }
1364
1365 ddi_status = ddi_dma_addr_bind_handle(hxgep->dmasparehandle, NULL,
1366 (caddr_t)hxgep->dmasparehandle, sizeof (hxgep->dmasparehandle),
1367 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT, 0,
1368 &cookie, &count);
1369 if (ddi_status != DDI_DMA_MAPPED) {
1370 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
1371 "Binding spare handle to find system burstsize failed."));
1372 ddi_status = DDI_FAILURE;
1373 goto hxge_get_soft_properties_fail1;
1374 }
1375
1376 hxgep->sys_burst_sz = ddi_dma_burstsizes(hxgep->dmasparehandle);
1377 (void) ddi_dma_unbind_handle(hxgep->dmasparehandle);
1378
1379 hxge_get_soft_properties_fail1:
1380 ddi_dma_free_handle(&hxgep->dmasparehandle);
1381
1382 hxge_get_soft_properties_exit:
1383
1384 if (ddi_status != DDI_SUCCESS)
1385 status |= (HXGE_ERROR | HXGE_DDI_FAILED);
1386
1387 HXGE_DEBUG_MSG((hxgep, DDI_CTL,
1388 "<== hxge_setup_system_dma_pages status = 0x%08x", status));
1389
1390 return (status);
1391 }
1392
1393 static hxge_status_t
1394 hxge_alloc_mem_pool(p_hxge_t hxgep)
1395 {
1396 hxge_status_t status = HXGE_OK;
1397
1398 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_alloc_mem_pool"));
1399
1400 status = hxge_alloc_rx_mem_pool(hxgep);
1401 if (status != HXGE_OK) {
1402 return (HXGE_ERROR);
1403 }
1404
1405 status = hxge_alloc_tx_mem_pool(hxgep);
1406 if (status != HXGE_OK) {
1407 hxge_free_rx_mem_pool(hxgep);
1408 return (HXGE_ERROR);
1409 }
1410
1411 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_alloc_mem_pool"));
1412 return (HXGE_OK);
1413 }
1414
1415 static void
1416 hxge_free_mem_pool(p_hxge_t hxgep)
1417 {
1418 HXGE_DEBUG_MSG((hxgep, MEM_CTL, "==> hxge_free_mem_pool"));
1419
1420 hxge_free_rx_mem_pool(hxgep);
1421 hxge_free_tx_mem_pool(hxgep);
1422
1423 HXGE_DEBUG_MSG((hxgep, MEM_CTL, "<== hxge_free_mem_pool"));
1424 }
1425
1426 static hxge_status_t
1427 hxge_alloc_rx_mem_pool(p_hxge_t hxgep)
1428 {
1429 int i, j;
1430 uint32_t ndmas, st_rdc;
1431 p_hxge_dma_pt_cfg_t p_all_cfgp;
1432 p_hxge_hw_pt_cfg_t p_cfgp;
1433 p_hxge_dma_pool_t dma_poolp;
1434 p_hxge_dma_common_t *dma_buf_p;
1435 p_hxge_dma_pool_t dma_rbr_cntl_poolp;
1436 p_hxge_dma_common_t *dma_rbr_cntl_p;
1437 p_hxge_dma_pool_t dma_rcr_cntl_poolp;
1438 p_hxge_dma_common_t *dma_rcr_cntl_p;
1439 p_hxge_dma_pool_t dma_mbox_cntl_poolp;
1440 p_hxge_dma_common_t *dma_mbox_cntl_p;
1441 size_t rx_buf_alloc_size;
1442 size_t rx_rbr_cntl_alloc_size;
1443 size_t rx_rcr_cntl_alloc_size;
1444 size_t rx_mbox_cntl_alloc_size;
1445 uint32_t *num_chunks; /* per dma */
1446 hxge_status_t status = HXGE_OK;
1447
1448 uint32_t hxge_port_rbr_size;
1449 uint32_t hxge_port_rbr_spare_size;
1450 uint32_t hxge_port_rcr_size;
1451
1452 HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_alloc_rx_mem_pool"));
1453
1454 p_all_cfgp = (p_hxge_dma_pt_cfg_t)&hxgep->pt_config;
1455 p_cfgp = (p_hxge_hw_pt_cfg_t)&p_all_cfgp->hw_config;
1456 st_rdc = p_cfgp->start_rdc;
1457 ndmas = p_cfgp->max_rdcs;
1458
1459 HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1460 " hxge_alloc_rx_mem_pool st_rdc %d ndmas %d", st_rdc, ndmas));
1461
1462 /*
1463 * Allocate memory for each receive DMA channel.
1464 */
1465 dma_poolp = (p_hxge_dma_pool_t)KMEM_ZALLOC(sizeof (hxge_dma_pool_t),
1466 KM_SLEEP);
1467 dma_buf_p = (p_hxge_dma_common_t *)KMEM_ZALLOC(
1468 sizeof (p_hxge_dma_common_t) * ndmas, KM_SLEEP);
1469
1470 dma_rbr_cntl_poolp = (p_hxge_dma_pool_t)
1471 KMEM_ZALLOC(sizeof (hxge_dma_pool_t), KM_SLEEP);
1472 dma_rbr_cntl_p = (p_hxge_dma_common_t *)KMEM_ZALLOC(
1473 sizeof (p_hxge_dma_common_t) * ndmas, KM_SLEEP);
1474 dma_rcr_cntl_poolp = (p_hxge_dma_pool_t)
1475 KMEM_ZALLOC(sizeof (hxge_dma_pool_t), KM_SLEEP);
1476 dma_rcr_cntl_p = (p_hxge_dma_common_t *)KMEM_ZALLOC(
1477 sizeof (p_hxge_dma_common_t) * ndmas, KM_SLEEP);
1478 dma_mbox_cntl_poolp = (p_hxge_dma_pool_t)
1479 KMEM_ZALLOC(sizeof (hxge_dma_pool_t), KM_SLEEP);
1480 dma_mbox_cntl_p = (p_hxge_dma_common_t *)KMEM_ZALLOC(
1481 sizeof (p_hxge_dma_common_t) * ndmas, KM_SLEEP);
1482
1483 num_chunks = (uint32_t *)KMEM_ZALLOC(sizeof (uint32_t) * ndmas,
1484 KM_SLEEP);
1485
1486 /*
1487 * Assume that each DMA channel will be configured with default block
1488 	 * size. RBR block counts must be a multiple of the batch count (16).
1489 */
1490 hxge_port_rbr_size = p_all_cfgp->rbr_size;
1491 hxge_port_rcr_size = p_all_cfgp->rcr_size;
1492
1493 if (!hxge_port_rbr_size) {
1494 hxge_port_rbr_size = HXGE_RBR_RBB_DEFAULT;
1495 }
1496
1497 if (hxge_port_rbr_size % HXGE_RXDMA_POST_BATCH) {
1498 hxge_port_rbr_size = (HXGE_RXDMA_POST_BATCH *
1499 (hxge_port_rbr_size / HXGE_RXDMA_POST_BATCH + 1));
1500 }
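	/*
	 * For example, with a post batch of 16, a configured rbr_size of
	 * 1000 is rounded up here to the next multiple of 16, i.e. 1008.
	 */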
1501
1502 p_all_cfgp->rbr_size = hxge_port_rbr_size;
1503 hxge_port_rbr_spare_size = hxge_rbr_spare_size;
1504
1505 if (hxge_port_rbr_spare_size % HXGE_RXDMA_POST_BATCH) {
1506 hxge_port_rbr_spare_size = (HXGE_RXDMA_POST_BATCH *
1507 (hxge_port_rbr_spare_size / HXGE_RXDMA_POST_BATCH + 1));
1508 }
1509
1510 rx_buf_alloc_size = (hxgep->rx_default_block_size *
1511 (hxge_port_rbr_size + hxge_port_rbr_spare_size));
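	/*
	 * For example, a 0x1000 (4 KB) default block size and 1008 RBR
	 * entries with no spares yield a per-channel buffer allocation of
	 * 1008 * 4096 = 4,128,768 bytes.
	 */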
1512
1513 /*
1514 * Addresses of receive block ring, receive completion ring and the
1515 	 * mailbox must all be cache-aligned (64 bytes).
1516 */
1517 rx_rbr_cntl_alloc_size = hxge_port_rbr_size + hxge_port_rbr_spare_size;
1518 rx_rbr_cntl_alloc_size *= sizeof (rx_desc_t);
1519 rx_rcr_cntl_alloc_size = sizeof (rcr_entry_t) * hxge_port_rcr_size;
1520 rx_mbox_cntl_alloc_size = sizeof (rxdma_mailbox_t);
1521
1522 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_alloc_rx_mem_pool: "
1523 "hxge_port_rbr_size = %d hxge_port_rbr_spare_size = %d "
1524 	    "hxge_port_rcr_size = %d rx_rbr_cntl_alloc_size = %d",
1525 	    hxge_port_rbr_size, hxge_port_rbr_spare_size,
1526 	    hxge_port_rcr_size, rx_rbr_cntl_alloc_size));
1527
1528 hxgep->hxge_port_rbr_size = hxge_port_rbr_size;
1529 hxgep->hxge_port_rcr_size = hxge_port_rcr_size;
1530
1531 /*
1532 * Allocate memory for receive buffers and descriptor rings. Replace
1533 * allocation functions with interface functions provided by the
1534 * partition manager when it is available.
1535 */
1536 /*
1537 * Allocate memory for the receive buffer blocks.
1538 */
1539 for (i = 0; i < ndmas; i++) {
1540 HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
1541 " hxge_alloc_rx_mem_pool to alloc mem: "
1542 " dma %d dma_buf_p %llx &dma_buf_p %llx",
1543 i, dma_buf_p[i], &dma_buf_p[i]));
1544
1545 num_chunks[i] = 0;
1546
1547 status = hxge_alloc_rx_buf_dma(hxgep, st_rdc, &dma_buf_p[i],
1548 rx_buf_alloc_size, hxgep->rx_default_block_size,
1549 &num_chunks[i]);
1550 if (status != HXGE_OK) {
1551 break;
1552 }
1553
1554 st_rdc++;
1555 HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
1556 " hxge_alloc_rx_mem_pool DONE alloc mem: "
1557 "dma %d dma_buf_p %llx &dma_buf_p %llx", i,
1558 dma_buf_p[i], &dma_buf_p[i]));
1559 }
1560
1561 if (i < ndmas) {
1562 goto hxge_alloc_rx_mem_fail1;
1563 }
1564
1565 /*
1566 * Allocate memory for descriptor rings and mailbox.
1567 */
1568 st_rdc = p_cfgp->start_rdc;
1569 for (j = 0; j < ndmas; j++) {
1570 if ((status = hxge_alloc_rx_cntl_dma(hxgep, st_rdc,
1571 &dma_rbr_cntl_p[j], &hxge_rx_rbr_desc_dma_attr,
1572 rx_rbr_cntl_alloc_size)) != HXGE_OK) {
1573 break;
1574 }
1575
1576 if ((status = hxge_alloc_rx_cntl_dma(hxgep, st_rdc,
1577 &dma_rcr_cntl_p[j], &hxge_rx_rcr_desc_dma_attr,
1578 rx_rcr_cntl_alloc_size)) != HXGE_OK) {
1579 break;
1580 }
1581
1582 if ((status = hxge_alloc_rx_cntl_dma(hxgep, st_rdc,
1583 &dma_mbox_cntl_p[j], &hxge_rx_mbox_dma_attr,
1584 rx_mbox_cntl_alloc_size)) != HXGE_OK) {
1585 break;
1586 }
1587 st_rdc++;
1588 }
1589
1590 if (j < ndmas) {
1591 goto hxge_alloc_rx_mem_fail2;
1592 }
1593
1594 dma_poolp->ndmas = ndmas;
1595 dma_poolp->num_chunks = num_chunks;
1596 dma_poolp->buf_allocated = B_TRUE;
1597 hxgep->rx_buf_pool_p = dma_poolp;
1598 dma_poolp->dma_buf_pool_p = dma_buf_p;
1599
1600 dma_rbr_cntl_poolp->ndmas = ndmas;
1601 dma_rbr_cntl_poolp->buf_allocated = B_TRUE;
1602 hxgep->rx_rbr_cntl_pool_p = dma_rbr_cntl_poolp;
1603 dma_rbr_cntl_poolp->dma_buf_pool_p = dma_rbr_cntl_p;
1604
1605 dma_rcr_cntl_poolp->ndmas = ndmas;
1606 dma_rcr_cntl_poolp->buf_allocated = B_TRUE;
1607 hxgep->rx_rcr_cntl_pool_p = dma_rcr_cntl_poolp;
1608 dma_rcr_cntl_poolp->dma_buf_pool_p = dma_rcr_cntl_p;
1609
1610 dma_mbox_cntl_poolp->ndmas = ndmas;
1611 dma_mbox_cntl_poolp->buf_allocated = B_TRUE;
1612 hxgep->rx_mbox_cntl_pool_p = dma_mbox_cntl_poolp;
1613 dma_mbox_cntl_poolp->dma_buf_pool_p = dma_mbox_cntl_p;
1614
1615 goto hxge_alloc_rx_mem_pool_exit;
1616
1617 hxge_alloc_rx_mem_fail2:
1618 /* Free control buffers */
1619 HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1620 "==> hxge_alloc_rx_mem_pool: freeing control bufs (%d)", j));
1621 for (; j >= 0; j--) {
1622 hxge_free_rx_cntl_dma(hxgep,
1623 (p_hxge_dma_common_t)dma_rbr_cntl_p[j]);
1624 hxge_free_rx_cntl_dma(hxgep,
1625 (p_hxge_dma_common_t)dma_rcr_cntl_p[j]);
1626 hxge_free_rx_cntl_dma(hxgep,
1627 (p_hxge_dma_common_t)dma_mbox_cntl_p[j]);
1628 HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1629 "==> hxge_alloc_rx_mem_pool: control bufs freed (%d)", j));
1630 }
1631 HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1632 "==> hxge_alloc_rx_mem_pool: control bufs freed (%d)", j));
1633
1634 hxge_alloc_rx_mem_fail1:
1635 /* Free data buffers */
1636 i--;
1637 HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1638 "==> hxge_alloc_rx_mem_pool: freeing data bufs (%d)", i));
1639 for (; i >= 0; i--) {
1640 hxge_free_rx_buf_dma(hxgep, (p_hxge_dma_common_t)dma_buf_p[i],
1641 num_chunks[i]);
1642 }
1643 HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1644 "==> hxge_alloc_rx_mem_pool: data bufs freed (%d)", i));
1645
1646 KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas);
1647 KMEM_FREE(dma_poolp, sizeof (hxge_dma_pool_t));
1648 KMEM_FREE(dma_buf_p, ndmas * sizeof (p_hxge_dma_common_t));
1649 KMEM_FREE(dma_rbr_cntl_poolp, sizeof (hxge_dma_pool_t));
1650 KMEM_FREE(dma_rbr_cntl_p, ndmas * sizeof (p_hxge_dma_common_t));
1651 KMEM_FREE(dma_rcr_cntl_poolp, sizeof (hxge_dma_pool_t));
1652 KMEM_FREE(dma_rcr_cntl_p, ndmas * sizeof (p_hxge_dma_common_t));
1653 KMEM_FREE(dma_mbox_cntl_poolp, sizeof (hxge_dma_pool_t));
1654 KMEM_FREE(dma_mbox_cntl_p, ndmas * sizeof (p_hxge_dma_common_t));
1655
1656 hxge_alloc_rx_mem_pool_exit:
1657 HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1658 "<== hxge_alloc_rx_mem_pool:status 0x%08x", status));
1659
1660 return (status);
1661 }
1662
1663 static void
1664 hxge_free_rx_mem_pool(p_hxge_t hxgep)
1665 {
1666 uint32_t i, ndmas;
1667 p_hxge_dma_pool_t dma_poolp;
1668 p_hxge_dma_common_t *dma_buf_p;
1669 p_hxge_dma_pool_t dma_rbr_cntl_poolp;
1670 p_hxge_dma_common_t *dma_rbr_cntl_p;
1671 p_hxge_dma_pool_t dma_rcr_cntl_poolp;
1672 p_hxge_dma_common_t *dma_rcr_cntl_p;
1673 p_hxge_dma_pool_t dma_mbox_cntl_poolp;
1674 p_hxge_dma_common_t *dma_mbox_cntl_p;
1675 uint32_t *num_chunks;
1676
1677 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_free_rx_mem_pool"));
1678
1679 dma_poolp = hxgep->rx_buf_pool_p;
1680 if (dma_poolp == NULL || (!dma_poolp->buf_allocated)) {
1681 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "<== hxge_free_rx_mem_pool "
1682 		    "(null rx buf pool or buf not allocated)"));
1683 return;
1684 }
1685
1686 dma_rbr_cntl_poolp = hxgep->rx_rbr_cntl_pool_p;
1687 if (dma_rbr_cntl_poolp == NULL ||
1688 (!dma_rbr_cntl_poolp->buf_allocated)) {
1689 HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
1690 "<== hxge_free_rx_mem_pool "
1691 		    "(null rbr cntl buf pool or rbr cntl buf not allocated)"));
1692 return;
1693 }
1694
1695 dma_rcr_cntl_poolp = hxgep->rx_rcr_cntl_pool_p;
1696 if (dma_rcr_cntl_poolp == NULL ||
1697 (!dma_rcr_cntl_poolp->buf_allocated)) {
1698 HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
1699 "<== hxge_free_rx_mem_pool "
1700 		    "(null rcr cntl buf pool or rcr cntl buf not allocated)"));
1701 return;
1702 }
1703
1704 dma_mbox_cntl_poolp = hxgep->rx_mbox_cntl_pool_p;
1705 if (dma_mbox_cntl_poolp == NULL ||
1706 (!dma_mbox_cntl_poolp->buf_allocated)) {
1707 HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
1708 "<== hxge_free_rx_mem_pool "
1709 		    "(null mbox cntl buf pool or mbox cntl buf not allocated)"));
1710 return;
1711 }
1712
1713 dma_buf_p = dma_poolp->dma_buf_pool_p;
1714 num_chunks = dma_poolp->num_chunks;
1715
1716 dma_rbr_cntl_p = dma_rbr_cntl_poolp->dma_buf_pool_p;
1717 dma_rcr_cntl_p = dma_rcr_cntl_poolp->dma_buf_pool_p;
1718 dma_mbox_cntl_p = dma_mbox_cntl_poolp->dma_buf_pool_p;
1719 ndmas = dma_rbr_cntl_poolp->ndmas;
1720
1721 for (i = 0; i < ndmas; i++) {
1722 hxge_free_rx_buf_dma(hxgep, dma_buf_p[i], num_chunks[i]);
1723 }
1724
1725 for (i = 0; i < ndmas; i++) {
1726 hxge_free_rx_cntl_dma(hxgep, dma_rbr_cntl_p[i]);
1727 hxge_free_rx_cntl_dma(hxgep, dma_rcr_cntl_p[i]);
1728 hxge_free_rx_cntl_dma(hxgep, dma_mbox_cntl_p[i]);
1729 }
1730
1731 for (i = 0; i < ndmas; i++) {
1732 KMEM_FREE(dma_buf_p[i],
1733 sizeof (hxge_dma_common_t) * HXGE_DMA_BLOCK);
1734 KMEM_FREE(dma_rbr_cntl_p[i], sizeof (hxge_dma_common_t));
1735 KMEM_FREE(dma_rcr_cntl_p[i], sizeof (hxge_dma_common_t));
1736 KMEM_FREE(dma_mbox_cntl_p[i], sizeof (hxge_dma_common_t));
1737 }
1738
1739 KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas);
1740 KMEM_FREE(dma_rbr_cntl_p, ndmas * sizeof (p_hxge_dma_common_t));
1741 KMEM_FREE(dma_rbr_cntl_poolp, sizeof (hxge_dma_pool_t));
1742 KMEM_FREE(dma_rcr_cntl_p, ndmas * sizeof (p_hxge_dma_common_t));
1743 KMEM_FREE(dma_rcr_cntl_poolp, sizeof (hxge_dma_pool_t));
1744 KMEM_FREE(dma_mbox_cntl_p, ndmas * sizeof (p_hxge_dma_common_t));
1745 KMEM_FREE(dma_mbox_cntl_poolp, sizeof (hxge_dma_pool_t));
1746 KMEM_FREE(dma_buf_p, ndmas * sizeof (p_hxge_dma_common_t));
1747 KMEM_FREE(dma_poolp, sizeof (hxge_dma_pool_t));
1748
1749 hxgep->rx_buf_pool_p = NULL;
1750 hxgep->rx_rbr_cntl_pool_p = NULL;
1751 hxgep->rx_rcr_cntl_pool_p = NULL;
1752 hxgep->rx_mbox_cntl_pool_p = NULL;
1753
1754 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "<== hxge_free_rx_mem_pool"));
1755 }
1756
1757 static hxge_status_t
1758 hxge_alloc_rx_buf_dma(p_hxge_t hxgep, uint16_t dma_channel,
1759 p_hxge_dma_common_t *dmap,
1760 size_t alloc_size, size_t block_size, uint32_t *num_chunks)
1761 {
1762 p_hxge_dma_common_t rx_dmap;
1763 hxge_status_t status = HXGE_OK;
1764 size_t total_alloc_size;
1765 size_t allocated = 0;
1766 int i, size_index, array_size;
1767
1768 HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_alloc_rx_buf_dma"));
1769
1770 rx_dmap = (p_hxge_dma_common_t)
1771 KMEM_ZALLOC(sizeof (hxge_dma_common_t) * HXGE_DMA_BLOCK, KM_SLEEP);
1772
1773 HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
1774 " alloc_rx_buf_dma rdc %d asize %x bsize %x bbuf %llx ",
1775 dma_channel, alloc_size, block_size, dmap));
1776
1777 total_alloc_size = alloc_size;
1778
1779 i = 0;
1780 size_index = 0;
1781 array_size = sizeof (alloc_sizes) / sizeof (size_t);
1782 while ((size_index < array_size) &&
1783 (alloc_sizes[size_index] < alloc_size))
1784 size_index++;
1785 if (size_index >= array_size) {
1786 size_index = array_size - 1;
1787 }
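	/*
	 * size_index now refers to the smallest alloc_sizes[] entry that
	 * covers alloc_size, or to the largest entry when none is big
	 * enough; the loop below steps down to smaller entries whenever an
	 * allocation of the current size fails.
	 */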
1788
1789 while ((allocated < total_alloc_size) &&
1790 (size_index >= 0) && (i < HXGE_DMA_BLOCK)) {
1791 rx_dmap[i].dma_chunk_index = i;
1792 rx_dmap[i].block_size = block_size;
1793 rx_dmap[i].alength = alloc_sizes[size_index];
1794 rx_dmap[i].orig_alength = rx_dmap[i].alength;
1795 rx_dmap[i].nblocks = alloc_sizes[size_index] / block_size;
1796 rx_dmap[i].dma_channel = dma_channel;
1797 rx_dmap[i].contig_alloc_type = B_FALSE;
1798
1799 HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
1800 "alloc_rx_buf_dma rdc %d chunk %d bufp %llx size %x "
1801 "i %d nblocks %d alength %d",
1802 dma_channel, i, &rx_dmap[i], block_size,
1803 i, rx_dmap[i].nblocks, rx_dmap[i].alength));
1804 status = hxge_dma_mem_alloc(hxgep, hxge_force_dma,
1805 &hxge_rx_dma_attr, rx_dmap[i].alength,
1806 &hxge_dev_buf_dma_acc_attr,
1807 DDI_DMA_READ | DDI_DMA_STREAMING,
1808 (p_hxge_dma_common_t)(&rx_dmap[i]));
1809 if (status != HXGE_OK) {
1810 HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1811 " hxge_alloc_rx_buf_dma: Alloc Failed: "
1812 " for size: %d", alloc_sizes[size_index]));
1813 size_index--;
1814 } else {
1815 HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1816 " alloc_rx_buf_dma allocated rdc %d "
1817 "chunk %d size %x dvma %x bufp %llx ",
1818 dma_channel, i, rx_dmap[i].alength,
1819 rx_dmap[i].ioaddr_pp, &rx_dmap[i]));
1820 i++;
1821 allocated += alloc_sizes[size_index];
1822 }
1823 }
1824
1825 if (allocated < total_alloc_size) {
1826 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
1827 " hxge_alloc_rx_buf_dma failed due to"
1828 " allocated(%d) < required(%d)",
1829 allocated, total_alloc_size));
1830 goto hxge_alloc_rx_mem_fail1;
1831 }
1832
1833 HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1834 " alloc_rx_buf_dma rdc %d allocated %d chunks", dma_channel, i));
1835
1836 *num_chunks = i;
1837 *dmap = rx_dmap;
1838
1839 goto hxge_alloc_rx_mem_exit;
1840
1841 hxge_alloc_rx_mem_fail1:
1842 KMEM_FREE(rx_dmap, sizeof (hxge_dma_common_t) * HXGE_DMA_BLOCK);
1843
1844 hxge_alloc_rx_mem_exit:
1845 HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1846 "<== hxge_alloc_rx_buf_dma status 0x%08x", status));
1847
1848 return (status);
1849 }
1850
1851 /*ARGSUSED*/
1852 static void
1853 hxge_free_rx_buf_dma(p_hxge_t hxgep, p_hxge_dma_common_t dmap,
1854 uint32_t num_chunks)
1855 {
1856 int i;
1857
1858 HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
1859 "==> hxge_free_rx_buf_dma: # of chunks %d", num_chunks));
1860
1861 for (i = 0; i < num_chunks; i++) {
1862 HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
1863 "==> hxge_free_rx_buf_dma: chunk %d dmap 0x%llx", i, dmap));
1864 hxge_dma_mem_free(dmap++);
1865 }
1866
1867 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "<== hxge_free_rx_buf_dma"));
1868 }
1869
1870 /*ARGSUSED*/
1871 static hxge_status_t
1872 hxge_alloc_rx_cntl_dma(p_hxge_t hxgep, uint16_t dma_channel,
1873 p_hxge_dma_common_t *dmap, struct ddi_dma_attr *attr, size_t size)
1874 {
1875 p_hxge_dma_common_t rx_dmap;
1876 hxge_status_t status = HXGE_OK;
1877
1878 HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_alloc_rx_cntl_dma"));
1879
1880 rx_dmap = (p_hxge_dma_common_t)
1881 KMEM_ZALLOC(sizeof (hxge_dma_common_t), KM_SLEEP);
1882
1883 rx_dmap->contig_alloc_type = B_FALSE;
1884
1885 status = hxge_dma_mem_alloc(hxgep, hxge_force_dma,
1886 attr, size, &hxge_dev_desc_dma_acc_attr,
1887 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, rx_dmap);
1888 if (status != HXGE_OK) {
1889 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
1890 " hxge_alloc_rx_cntl_dma: Alloc Failed: "
1891 " for size: %d", size));
1892 goto hxge_alloc_rx_cntl_dma_fail1;
1893 }
1894
1895 *dmap = rx_dmap;
1896
1897 goto hxge_alloc_rx_cntl_dma_exit;
1898
1899 hxge_alloc_rx_cntl_dma_fail1:
1900 KMEM_FREE(rx_dmap, sizeof (hxge_dma_common_t));
1901
1902 hxge_alloc_rx_cntl_dma_exit:
1903 HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1904 "<== hxge_alloc_rx_cntl_dma status 0x%08x", status));
1905
1906 return (status);
1907 }
1908
1909 /*ARGSUSED*/
1910 static void
1911 hxge_free_rx_cntl_dma(p_hxge_t hxgep, p_hxge_dma_common_t dmap)
1912 {
1913 HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_free_rx_cntl_dma"));
1914
1915 hxge_dma_mem_free(dmap);
1916
1917 HXGE_DEBUG_MSG((hxgep, DMA_CTL, "<== hxge_free_rx_cntl_dma"));
1918 }
1919
1920 static hxge_status_t
1921 hxge_alloc_tx_mem_pool(p_hxge_t hxgep)
1922 {
1923 hxge_status_t status = HXGE_OK;
1924 int i, j;
1925 uint32_t ndmas, st_tdc;
1926 p_hxge_dma_pt_cfg_t p_all_cfgp;
1927 p_hxge_hw_pt_cfg_t p_cfgp;
1928 p_hxge_dma_pool_t dma_poolp;
1929 p_hxge_dma_common_t *dma_buf_p;
1930 p_hxge_dma_pool_t dma_cntl_poolp;
1931 p_hxge_dma_common_t *dma_cntl_p;
1932 size_t tx_buf_alloc_size;
1933 size_t tx_cntl_alloc_size;
1934 uint32_t *num_chunks; /* per dma */
1935
1936 HXGE_DEBUG_MSG((hxgep, MEM_CTL, "==> hxge_alloc_tx_mem_pool"));
1937
1938 p_all_cfgp = (p_hxge_dma_pt_cfg_t)&hxgep->pt_config;
1939 p_cfgp = (p_hxge_hw_pt_cfg_t)&p_all_cfgp->hw_config;
1940 st_tdc = p_cfgp->start_tdc;
1941 ndmas = p_cfgp->max_tdcs;
1942
1943 HXGE_DEBUG_MSG((hxgep, MEM_CTL, "==> hxge_alloc_tx_mem_pool: "
1944 "p_cfgp 0x%016llx start_tdc %d ndmas %d hxgep->max_tdcs %d",
1945 p_cfgp, p_cfgp->start_tdc, p_cfgp->max_tdcs, hxgep->max_tdcs));
1946 /*
1947 * Allocate memory for each transmit DMA channel.
1948 */
1949 dma_poolp = (p_hxge_dma_pool_t)KMEM_ZALLOC(sizeof (hxge_dma_pool_t),
1950 KM_SLEEP);
1951 dma_buf_p = (p_hxge_dma_common_t *)KMEM_ZALLOC(
1952 sizeof (p_hxge_dma_common_t) * ndmas, KM_SLEEP);
1953
1954 dma_cntl_poolp = (p_hxge_dma_pool_t)
1955 KMEM_ZALLOC(sizeof (hxge_dma_pool_t), KM_SLEEP);
1956 dma_cntl_p = (p_hxge_dma_common_t *)KMEM_ZALLOC(
1957 sizeof (p_hxge_dma_common_t) * ndmas, KM_SLEEP);
1958
1959 hxgep->hxge_port_tx_ring_size = hxge_tx_ring_size;
1960
1961 /*
1962 * Assume that each DMA channel will be configured with default
1963 * transmit buffer size for copying transmit data. (For packet payload
1964 * over this limit, packets will not be copied.)
1965 */
1966 tx_buf_alloc_size = (hxge_bcopy_thresh * hxge_tx_ring_size);
1967
1968 /*
1969 * Addresses of transmit descriptor ring and the mailbox must be all
1970 * cache-aligned (64 bytes).
1971 */
1972 tx_cntl_alloc_size = hxge_tx_ring_size;
1973 tx_cntl_alloc_size *= (sizeof (tx_desc_t));
1974 tx_cntl_alloc_size += sizeof (txdma_mailbox_t);
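/*
 * Illustration only (the sizes are assumptions, not taken from this file):
 * with an 8-byte tx_desc_t and a 1024-entry ring, the control area would be
 * 8 KB of descriptors followed by one txdma_mailbox_t, allocated as a single
 * cache-aligned region per channel.
 */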
1975
1976 num_chunks = (uint32_t *)KMEM_ZALLOC(sizeof (uint32_t) * ndmas,
1977 KM_SLEEP);
1978
1979 /*
1980 * Allocate memory for transmit buffers and descriptor rings. Replace
1981 * allocation functions with interface functions provided by the
1982 * partition manager when it is available.
1983 *
1984 * Allocate memory for the transmit buffer pool.
1985 */
1986 for (i = 0; i < ndmas; i++) {
1987 num_chunks[i] = 0;
1988 status = hxge_alloc_tx_buf_dma(hxgep, st_tdc, &dma_buf_p[i],
1989 tx_buf_alloc_size, hxge_bcopy_thresh, &num_chunks[i]);
1990 if (status != HXGE_OK) {
1991 break;
1992 }
1993 st_tdc++;
1994 }
1995
1996 if (i < ndmas) {
1997 goto hxge_alloc_tx_mem_pool_fail1;
1998 }
1999
2000 st_tdc = p_cfgp->start_tdc;
2001
2002 /*
2003 * Allocate memory for descriptor rings and mailbox.
2004 */
2005 for (j = 0; j < ndmas; j++) {
2006 status = hxge_alloc_tx_cntl_dma(hxgep, st_tdc, &dma_cntl_p[j],
2007 tx_cntl_alloc_size);
2008 if (status != HXGE_OK) {
2009 break;
2010 }
2011 st_tdc++;
2012 }
2013
2014 if (j < ndmas) {
2015 goto hxge_alloc_tx_mem_pool_fail2;
2016 }
2017
2018 dma_poolp->ndmas = ndmas;
2019 dma_poolp->num_chunks = num_chunks;
2020 dma_poolp->buf_allocated = B_TRUE;
2021 dma_poolp->dma_buf_pool_p = dma_buf_p;
2022 hxgep->tx_buf_pool_p = dma_poolp;
2023
2024 dma_cntl_poolp->ndmas = ndmas;
2025 dma_cntl_poolp->buf_allocated = B_TRUE;
2026 dma_cntl_poolp->dma_buf_pool_p = dma_cntl_p;
2027 hxgep->tx_cntl_pool_p = dma_cntl_poolp;
2028
2029 HXGE_DEBUG_MSG((hxgep, MEM_CTL,
2030 "==> hxge_alloc_tx_mem_pool: start_tdc %d "
2031 "ndmas %d poolp->ndmas %d", st_tdc, ndmas, dma_poolp->ndmas));
2032
2033 goto hxge_alloc_tx_mem_pool_exit;
2034
2035 hxge_alloc_tx_mem_pool_fail2:
2036 /* Free control buffers */
2037 j--;
2038 for (; j >= 0; j--) {
2039 hxge_free_tx_cntl_dma(hxgep,
2040 (p_hxge_dma_common_t)dma_cntl_p[j]);
2041 }
2042
2043 hxge_alloc_tx_mem_pool_fail1:
2044 /* Free data buffers */
2045 i--;
2046 for (; i >= 0; i--) {
2047 hxge_free_tx_buf_dma(hxgep, (p_hxge_dma_common_t)dma_buf_p[i],
2048 num_chunks[i]);
2049 }
2050
2051 KMEM_FREE(dma_poolp, sizeof (hxge_dma_pool_t));
2052 KMEM_FREE(dma_buf_p, ndmas * sizeof (p_hxge_dma_common_t));
2053 KMEM_FREE(dma_cntl_poolp, sizeof (hxge_dma_pool_t));
2054 KMEM_FREE(dma_cntl_p, ndmas * sizeof (p_hxge_dma_common_t));
2055 KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas);
2056
2057 hxge_alloc_tx_mem_pool_exit:
2058 HXGE_DEBUG_MSG((hxgep, MEM_CTL,
2059 "<== hxge_alloc_tx_mem_pool:status 0x%08x", status));
2060
2061 return (status);
2062 }
2063
2064 static hxge_status_t
2065 hxge_alloc_tx_buf_dma(p_hxge_t hxgep, uint16_t dma_channel,
2066 p_hxge_dma_common_t *dmap, size_t alloc_size,
2067 size_t block_size, uint32_t *num_chunks)
2068 {
2069 p_hxge_dma_common_t tx_dmap;
2070 hxge_status_t status = HXGE_OK;
2071 size_t total_alloc_size;
2072 size_t allocated = 0;
2073 int i, size_index, array_size;
2074
2075 HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_alloc_tx_buf_dma"));
2076
2077 tx_dmap = (p_hxge_dma_common_t)
2078 KMEM_ZALLOC(sizeof (hxge_dma_common_t) * HXGE_DMA_BLOCK, KM_SLEEP);
2079
2080 total_alloc_size = alloc_size;
2081 i = 0;
2082 size_index = 0;
2083 array_size = sizeof (alloc_sizes) / sizeof (size_t);
2084 while ((size_index < array_size) &&
2085 (alloc_sizes[size_index] < alloc_size))
2086 size_index++;
2087 if (size_index >= array_size) {
2088 size_index = array_size - 1;
2089 }
2090
2091 while ((allocated < total_alloc_size) &&
2092 (size_index >= 0) && (i < HXGE_DMA_BLOCK)) {
2093 tx_dmap[i].dma_chunk_index = i;
2094 tx_dmap[i].block_size = block_size;
2095 tx_dmap[i].alength = alloc_sizes[size_index];
2096 tx_dmap[i].orig_alength = tx_dmap[i].alength;
2097 tx_dmap[i].nblocks = alloc_sizes[size_index] / block_size;
2098 tx_dmap[i].dma_channel = dma_channel;
2099 tx_dmap[i].contig_alloc_type = B_FALSE;
2100
2101 status = hxge_dma_mem_alloc(hxgep, hxge_force_dma,
2102 &hxge_tx_dma_attr, tx_dmap[i].alength,
2103 &hxge_dev_buf_dma_acc_attr,
2104 DDI_DMA_WRITE | DDI_DMA_STREAMING,
2105 (p_hxge_dma_common_t)(&tx_dmap[i]));
2106 if (status != HXGE_OK) {
2107 HXGE_DEBUG_MSG((hxgep, DMA_CTL,
2108 " hxge_alloc_tx_buf_dma: Alloc Failed: "
2109 " for size: %d", alloc_sizes[size_index]));
2110 size_index--;
2111 } else {
2112 i++;
2113 allocated += alloc_sizes[size_index];
2114 }
2115 }
2116
2117 if (allocated < total_alloc_size) {
2118 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2119 " hxge_alloc_tx_buf_dma: failed due to"
2120 " allocated(%d) < required(%d)",
2121 allocated, total_alloc_size));
2122 goto hxge_alloc_tx_mem_fail1;
2123 }
2124
2125 *num_chunks = i;
2126 *dmap = tx_dmap;
2127 HXGE_DEBUG_MSG((hxgep, DMA_CTL,
2128 "==> hxge_alloc_tx_buf_dma dmap 0x%016llx num chunks %d",
2129 *dmap, i));
2130 goto hxge_alloc_tx_mem_exit;
2131
2132 hxge_alloc_tx_mem_fail1:
2133 KMEM_FREE(tx_dmap, sizeof (hxge_dma_common_t) * HXGE_DMA_BLOCK);
2134
2135 hxge_alloc_tx_mem_exit:
2136 HXGE_DEBUG_MSG((hxgep, DMA_CTL,
2137 "<== hxge_alloc_tx_buf_dma status 0x%08x", status));
2138
2139 return (status);
2140 }
2141
2142 /*ARGSUSED*/
2143 static void
2144 hxge_free_tx_buf_dma(p_hxge_t hxgep, p_hxge_dma_common_t dmap,
2145 uint32_t num_chunks)
2146 {
2147 int i;
2148
2149 HXGE_DEBUG_MSG((hxgep, MEM_CTL, "==> hxge_free_tx_buf_dma"));
2150
2151 for (i = 0; i < num_chunks; i++) {
2152 hxge_dma_mem_free(dmap++);
2153 }
2154
2155 HXGE_DEBUG_MSG((hxgep, MEM_CTL, "<== hxge_free_tx_buf_dma"));
2156 }
2157
2158 /*ARGSUSED*/
2159 static hxge_status_t
2160 hxge_alloc_tx_cntl_dma(p_hxge_t hxgep, uint16_t dma_channel,
2161 p_hxge_dma_common_t *dmap, size_t size)
2162 {
2163 p_hxge_dma_common_t tx_dmap;
2164 hxge_status_t status = HXGE_OK;
2165
2166 HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_alloc_tx_cntl_dma"));
2167
2168 tx_dmap = (p_hxge_dma_common_t)KMEM_ZALLOC(sizeof (hxge_dma_common_t),
2169 KM_SLEEP);
2170
2171 tx_dmap->contig_alloc_type = B_FALSE;
2172
2173 status = hxge_dma_mem_alloc(hxgep, hxge_force_dma,
2174 &hxge_tx_desc_dma_attr, size, &hxge_dev_desc_dma_acc_attr,
2175 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, tx_dmap);
2176 if (status != HXGE_OK) {
2177 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2178 " hxge_alloc_tx_cntl_dma: Alloc Failed: "
2179 " for size: %d", size));
2180 goto hxge_alloc_tx_cntl_dma_fail1;
2181 }
2182
2183 *dmap = tx_dmap;
2184
2185 goto hxge_alloc_tx_cntl_dma_exit;
2186
2187 hxge_alloc_tx_cntl_dma_fail1:
2188 KMEM_FREE(tx_dmap, sizeof (hxge_dma_common_t));
2189
2190 hxge_alloc_tx_cntl_dma_exit:
2191 HXGE_DEBUG_MSG((hxgep, DMA_CTL,
2192 "<== hxge_alloc_tx_cntl_dma status 0x%08x", status));
2193
2194 return (status);
2195 }
2196
2197 /*ARGSUSED*/
2198 static void
2199 hxge_free_tx_cntl_dma(p_hxge_t hxgep, p_hxge_dma_common_t dmap)
2200 {
2201 HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_free_tx_cntl_dma"));
2202
2203 hxge_dma_mem_free(dmap);
2204
2205 HXGE_DEBUG_MSG((hxgep, DMA_CTL, "<== hxge_free_tx_cntl_dma"));
2206 }
2207
2208 static void
2209 hxge_free_tx_mem_pool(p_hxge_t hxgep)
2210 {
2211 uint32_t i, ndmas;
2212 p_hxge_dma_pool_t dma_poolp;
2213 p_hxge_dma_common_t *dma_buf_p;
2214 p_hxge_dma_pool_t dma_cntl_poolp;
2215 p_hxge_dma_common_t *dma_cntl_p;
2216 uint32_t *num_chunks;
2217
2218 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_free_tx_mem_pool"));
2219
2220 dma_poolp = hxgep->tx_buf_pool_p;
2221 if (dma_poolp == NULL || (!dma_poolp->buf_allocated)) {
2222 HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
2223 "<== hxge_free_tx_mem_pool "
2224 "(null rx buf pool or buf not allocated"));
2225 return;
2226 }
2227
2228 dma_cntl_poolp = hxgep->tx_cntl_pool_p;
2229 if (dma_cntl_poolp == NULL || (!dma_cntl_poolp->buf_allocated)) {
2230 HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
2231 "<== hxge_free_tx_mem_pool "
2232 "(null tx cntl buf pool or cntl buf not allocated"));
2233 return;
2234 }
2235
2236 dma_buf_p = dma_poolp->dma_buf_pool_p;
2237 num_chunks = dma_poolp->num_chunks;
2238
2239 dma_cntl_p = dma_cntl_poolp->dma_buf_pool_p;
2240 ndmas = dma_cntl_poolp->ndmas;
2241
2242 for (i = 0; i < ndmas; i++) {
2243 hxge_free_tx_buf_dma(hxgep, dma_buf_p[i], num_chunks[i]);
2244 }
2245
2246 for (i = 0; i < ndmas; i++) {
2247 hxge_free_tx_cntl_dma(hxgep, dma_cntl_p[i]);
2248 }
2249
2250 for (i = 0; i < ndmas; i++) {
2251 KMEM_FREE(dma_buf_p[i],
2252 sizeof (hxge_dma_common_t) * HXGE_DMA_BLOCK);
2253 KMEM_FREE(dma_cntl_p[i], sizeof (hxge_dma_common_t));
2254 }
2255
2256 KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas);
2257 KMEM_FREE(dma_cntl_p, ndmas * sizeof (p_hxge_dma_common_t));
2258 KMEM_FREE(dma_cntl_poolp, sizeof (hxge_dma_pool_t));
2259 KMEM_FREE(dma_buf_p, ndmas * sizeof (p_hxge_dma_common_t));
2260 KMEM_FREE(dma_poolp, sizeof (hxge_dma_pool_t));
2261
2262 hxgep->tx_buf_pool_p = NULL;
2263 hxgep->tx_cntl_pool_p = NULL;
2264
2265 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "<== hxge_free_tx_mem_pool"));
2266 }
2267
2268 /*ARGSUSED*/
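/*
 * hxge_dma_mem_alloc() -- allocate and bind one DMA-able memory region.
 *
 * The sequence below is the standard DDI one: allocate a DMA handle,
 * allocate kernel memory for DMA against that handle, then bind the memory
 * to obtain the device-visible address.  The binding must resolve to
 * exactly one cookie; any other outcome is unwound and reported as an
 * error to the caller.
 */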
2269 static hxge_status_t
2270 hxge_dma_mem_alloc(p_hxge_t hxgep, dma_method_t method,
2271 struct ddi_dma_attr *dma_attrp,
2272 size_t length, ddi_device_acc_attr_t *acc_attr_p, uint_t xfer_flags,
2273 p_hxge_dma_common_t dma_p)
2274 {
2275 caddr_t kaddrp;
2276 int ddi_status = DDI_SUCCESS;
2277
2278 dma_p->dma_handle = NULL;
2279 dma_p->acc_handle = NULL;
2280 dma_p->kaddrp = NULL;
2281
2282 ddi_status = ddi_dma_alloc_handle(hxgep->dip, dma_attrp,
2283 DDI_DMA_DONTWAIT, NULL, &dma_p->dma_handle);
2284 if (ddi_status != DDI_SUCCESS) {
2285 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2286 "hxge_dma_mem_alloc:ddi_dma_alloc_handle failed."));
2287 return (HXGE_ERROR | HXGE_DDI_FAILED);
2288 }
2289
2290 ddi_status = ddi_dma_mem_alloc(dma_p->dma_handle, length, acc_attr_p,
2291 xfer_flags, DDI_DMA_DONTWAIT, 0, &kaddrp, &dma_p->alength,
2292 &dma_p->acc_handle);
2293 if (ddi_status != DDI_SUCCESS) {
2294 /* The caller will decide whether it is fatal */
2295 HXGE_DEBUG_MSG((hxgep, DMA_CTL,
2296 "hxge_dma_mem_alloc:ddi_dma_mem_alloc failed"));
2297 ddi_dma_free_handle(&dma_p->dma_handle);
2298 dma_p->dma_handle = NULL;
2299 return (HXGE_ERROR | HXGE_DDI_FAILED);
2300 }
2301
2302 if (dma_p->alength < length) {
2303 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2304 "hxge_dma_mem_alloc:ddi_dma_mem_alloc < length."));
2305 ddi_dma_mem_free(&dma_p->acc_handle);
2306 ddi_dma_free_handle(&dma_p->dma_handle);
2307 dma_p->acc_handle = NULL;
2308 dma_p->dma_handle = NULL;
2309 return (HXGE_ERROR);
2310 }
2311
2312 ddi_status = ddi_dma_addr_bind_handle(dma_p->dma_handle, NULL,
2313 kaddrp, dma_p->alength, xfer_flags, DDI_DMA_DONTWAIT, 0,
2314 &dma_p->dma_cookie, &dma_p->ncookies);
2315 if (ddi_status != DDI_DMA_MAPPED) {
2316 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2317 "hxge_dma_mem_alloc:di_dma_addr_bind failed "
2318 "(staus 0x%x ncookies %d.)", ddi_status, dma_p->ncookies));
2319 if (dma_p->acc_handle) {
2320 ddi_dma_mem_free(&dma_p->acc_handle);
2321 dma_p->acc_handle = NULL;
2322 }
2323 ddi_dma_free_handle(&dma_p->dma_handle);
2324 dma_p->dma_handle = NULL;
2325 return (HXGE_ERROR | HXGE_DDI_FAILED);
2326 }
2327
2328 if (dma_p->ncookies != 1) {
2329 HXGE_DEBUG_MSG((hxgep, DMA_CTL,
2330 "hxge_dma_mem_alloc:ddi_dma_addr_bind > 1 cookie"
2331 "(staus 0x%x ncookies %d.)", ddi_status, dma_p->ncookies));
2332 if (dma_p->acc_handle) {
2333 ddi_dma_mem_free(&dma_p->acc_handle);
2334 dma_p->acc_handle = NULL;
2335 }
2336 (void) ddi_dma_unbind_handle(dma_p->dma_handle);
2337 ddi_dma_free_handle(&dma_p->dma_handle);
2338 dma_p->dma_handle = NULL;
2339 return (HXGE_ERROR);
2340 }
2341
2342 dma_p->kaddrp = kaddrp;
2343 #if defined(__i386)
2344 dma_p->ioaddr_pp =
2345 (unsigned char *)(uint32_t)dma_p->dma_cookie.dmac_laddress;
2346 #else
2347 dma_p->ioaddr_pp = (unsigned char *) dma_p->dma_cookie.dmac_laddress;
2348 #endif
2349
2350 HPI_DMA_ACC_HANDLE_SET(dma_p, dma_p->acc_handle);
2351
2352 HXGE_DEBUG_MSG((hxgep, DMA_CTL, "<== hxge_dma_mem_alloc: "
2353 "dma buffer allocated: dma_p $%p "
2354 "return dmac_ladress from cookie $%p dmac_size %d "
2355 "dma_p->ioaddr_p $%p "
2356 "dma_p->orig_ioaddr_p $%p "
2357 "orig_vatopa $%p "
2358 "alength %d (0x%x) "
2359 "kaddrp $%p "
2360 "length %d (0x%x)",
2361 dma_p,
2362 dma_p->dma_cookie.dmac_laddress,
2363 dma_p->dma_cookie.dmac_size,
2364 dma_p->ioaddr_pp,
2365 dma_p->orig_ioaddr_pp,
2366 dma_p->orig_vatopa,
2367 dma_p->alength, dma_p->alength,
2368 kaddrp,
2369 length, length));
2370
2371 return (HXGE_OK);
2372 }
2373
2374 static void
2375 hxge_dma_mem_free(p_hxge_dma_common_t dma_p)
2376 {
2377 if (dma_p == NULL)
2378 return;
2379
2380 if (dma_p->dma_handle != NULL) {
2381 if (dma_p->ncookies) {
2382 (void) ddi_dma_unbind_handle(dma_p->dma_handle);
2383 dma_p->ncookies = 0;
2384 }
2385 ddi_dma_free_handle(&dma_p->dma_handle);
2386 dma_p->dma_handle = NULL;
2387 }
2388
2389 if (dma_p->acc_handle != NULL) {
2390 ddi_dma_mem_free(&dma_p->acc_handle);
2391 dma_p->acc_handle = NULL;
2392 HPI_DMA_ACC_HANDLE_SET(dma_p, NULL);
2393 }
2394
2395 dma_p->kaddrp = NULL;
2396 dma_p->alength = 0;
2397 }
2398
2399 /*
2400 * hxge_m_start() -- start transmitting and receiving.
2401 *
2402 * This function is called by the MAC layer when the first
2403 * stream is opened, to prepare the hardware for sending
2404 * and receiving packets.
2405 */
2406 static int
2407 hxge_m_start(void *arg)
2408 {
2409 p_hxge_t hxgep = (p_hxge_t)arg;
2410
2411 HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "==> hxge_m_start"));
2412
2413 MUTEX_ENTER(hxgep->genlock);
2414
2415 if (hxge_init(hxgep) != DDI_SUCCESS) {
2416 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2417 "<== hxge_m_start: initialization failed"));
2418 MUTEX_EXIT(hxgep->genlock);
2419 return (EIO);
2420 }
2421
2422 if (hxgep->hxge_mac_state != HXGE_MAC_STARTED) {
2423 /*
2424 * Start a timer to check for system errors and tx hangs
2425 */
2426 hxgep->hxge_timerid = hxge_start_timer(hxgep,
2427 hxge_check_hw_state, HXGE_CHECK_TIMER);
2428
2429 hxgep->hxge_mac_state = HXGE_MAC_STARTED;
2430
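/* Poll the link every 2 seconds (2,000,000 microseconds converted to ticks) */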
2431 hxgep->timeout.link_status = 0;
2432 hxgep->timeout.report_link_status = B_TRUE;
2433 hxgep->timeout.ticks = drv_usectohz(2 * 1000000);
2434
2435 /* Start the link status timer to check the link status */
2436 MUTEX_ENTER(&hxgep->timeout.lock);
2437 hxgep->timeout.id = timeout(hxge_link_poll, (void *)hxgep,
2438 hxgep->timeout.ticks);
2439 MUTEX_EXIT(&hxgep->timeout.lock);
2440 }
2441
2442 MUTEX_EXIT(hxgep->genlock);
2443
2444 HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "<== hxge_m_start"));
2445
2446 return (0);
2447 }
2448
2449 /*
2450 * hxge_m_stop(): stop transmitting and receiving.
2451 */
2452 static void
2453 hxge_m_stop(void *arg)
2454 {
2455 p_hxge_t hxgep = (p_hxge_t)arg;
2456
2457 HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "==> hxge_m_stop"));
2458
2459 if (hxgep->hxge_timerid) {
2460 hxge_stop_timer(hxgep, hxgep->hxge_timerid);
2461 hxgep->hxge_timerid = 0;
2462 }
2463
2464 /* Stop the link status timer before unregistering */
2465 MUTEX_ENTER(&hxgep->timeout.lock);
2466 if (hxgep->timeout.id) {
2467 (void) untimeout(hxgep->timeout.id);
2468 hxgep->timeout.id = 0;
2469 }
2470 hxge_link_update(hxgep, LINK_STATE_DOWN);
2471 MUTEX_EXIT(&hxgep->timeout.lock);
2472
2473 MUTEX_ENTER(hxgep->genlock);
2474
2475 hxge_uninit(hxgep);
2476
2477 hxgep->hxge_mac_state = HXGE_MAC_STOPPED;
2478
2479 MUTEX_EXIT(hxgep->genlock);
2480
2481 HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "<== hxge_m_stop"));
2482 }
2483
2484 static int
2485 hxge_m_multicst(void *arg, boolean_t add, const uint8_t *mca)
2486 {
2487 p_hxge_t hxgep = (p_hxge_t)arg;
2488 struct ether_addr addrp;
2489
2490 HXGE_DEBUG_MSG((hxgep, MAC_CTL, "==> hxge_m_multicst: add %d", add));
2491
2492 bcopy(mca, (uint8_t *)&addrp, ETHERADDRL);
2493
2494 if (add) {
2495 if (hxge_add_mcast_addr(hxgep, &addrp)) {
2496 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2497 "<== hxge_m_multicst: add multicast failed"));
2498 return (EINVAL);
2499 }
2500 } else {
2501 if (hxge_del_mcast_addr(hxgep, &addrp)) {
2502 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2503 "<== hxge_m_multicst: del multicast failed"));
2504 return (EINVAL);
2505 }
2506 }
2507
2508 HXGE_DEBUG_MSG((hxgep, MAC_CTL, "<== hxge_m_multicst"));
2509
2510 return (0);
2511 }
2512
2513 static int
2514 hxge_m_promisc(void *arg, boolean_t on)
2515 {
2516 p_hxge_t hxgep = (p_hxge_t)arg;
2517
2518 HXGE_DEBUG_MSG((hxgep, MAC_CTL, "==> hxge_m_promisc: on %d", on));
2519
2520 if (hxge_set_promisc(hxgep, on)) {
2521 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2522 "<== hxge_m_promisc: set promisc failed"));
2523 return (EINVAL);
2524 }
2525
2526 HXGE_DEBUG_MSG((hxgep, MAC_CTL, "<== hxge_m_promisc: on %d", on));
2527
2528 return (0);
2529 }
2530
2531 static void
2532 hxge_m_ioctl(void *arg, queue_t *wq, mblk_t *mp)
2533 {
2534 p_hxge_t hxgep = (p_hxge_t)arg;
2535 struct iocblk *iocp = (struct iocblk *)mp->b_rptr;
2536 boolean_t need_privilege;
2537 int err;
2538 int cmd;
2539
2540 HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "==> hxge_m_ioctl"));
2541
2542 iocp = (struct iocblk *)mp->b_rptr;
2543 iocp->ioc_error = 0;
2544 need_privilege = B_TRUE;
2545 cmd = iocp->ioc_cmd;
2546
2547 HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "==> hxge_m_ioctl: cmd 0x%08x", cmd));
2548 switch (cmd) {
2549 default:
2550 miocnak(wq, mp, 0, EINVAL);
2551 HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "<== hxge_m_ioctl: invalid"));
2552 return;
2553
2554 case LB_GET_INFO_SIZE:
2555 case LB_GET_INFO:
2556 case LB_GET_MODE:
2557 need_privilege = B_FALSE;
2558 break;
2559
2560 case LB_SET_MODE:
2561 break;
2562
2563 case ND_GET:
2564 need_privilege = B_FALSE;
2565 break;
2566 case ND_SET:
2567 break;
2568
2569 case HXGE_GET_TX_RING_SZ:
2570 case HXGE_GET_TX_DESC:
2571 case HXGE_TX_SIDE_RESET:
2572 case HXGE_RX_SIDE_RESET:
2573 case HXGE_GLOBAL_RESET:
2574 case HXGE_RESET_MAC:
2575 case HXGE_PUT_TCAM:
2576 case HXGE_GET_TCAM:
2577 case HXGE_RTRACE:
2578
2579 need_privilege = B_FALSE;
2580 break;
2581 }
2582
2583 if (need_privilege) {
2584 err = secpolicy_net_config(iocp->ioc_cr, B_FALSE);
2585 if (err != 0) {
2586 miocnak(wq, mp, 0, err);
2587 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2588 "<== hxge_m_ioctl: no priv"));
2589 return;
2590 }
2591 }
2592
2593 switch (cmd) {
2594 case ND_GET:
2595 HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "ND_GET command"));
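/* FALLTHROUGH */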
2596 case ND_SET:
2597 HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "ND_SET command"));
2598 hxge_param_ioctl(hxgep, wq, mp, iocp);
2599 break;
2600
2601 case LB_GET_MODE:
2602 case LB_SET_MODE:
2603 case LB_GET_INFO_SIZE:
2604 case LB_GET_INFO:
2605 hxge_loopback_ioctl(hxgep, wq, mp, iocp);
2606 break;
2607
2608 case HXGE_PUT_TCAM:
2609 case HXGE_GET_TCAM:
2610 case HXGE_GET_TX_RING_SZ:
2611 case HXGE_GET_TX_DESC:
2612 case HXGE_TX_SIDE_RESET:
2613 case HXGE_RX_SIDE_RESET:
2614 case HXGE_GLOBAL_RESET:
2615 case HXGE_RESET_MAC:
2616 HXGE_DEBUG_MSG((hxgep, NEMO_CTL,
2617 "==> hxge_m_ioctl: cmd 0x%x", cmd));
2618 hxge_hw_ioctl(hxgep, wq, mp, iocp);
2619 break;
2620 }
2621
2622 HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "<== hxge_m_ioctl"));
2623 }
2624
2625 /*ARGSUSED*/
2626 static int
2627 hxge_tx_ring_start(mac_ring_driver_t rdriver, uint64_t mr_gen_num)
2628 {
2629 p_hxge_ring_handle_t rhp = (p_hxge_ring_handle_t)rdriver;
2630 p_hxge_t hxgep;
2631 p_tx_ring_t ring;
2632
2633 ASSERT(rhp != NULL);
2634 ASSERT((rhp->index >= 0) && (rhp->index < HXGE_MAX_TDCS));
2635
2636 hxgep = rhp->hxgep;
2637
2638 /*
2639 * Get the ring pointer.
2640 */
2641 ring = hxgep->tx_rings->rings[rhp->index];
2642
2643 /*
2644 * Fill in the handle for the transmit.
2645 */
2646 MUTEX_ENTER(&ring->lock);
2647 rhp->started = B_TRUE;
2648 ring->ring_handle = rhp->ring_handle;
2649 MUTEX_EXIT(&ring->lock);
2650
2651 return (0);
2652 }
2653
2654 static void
2655 hxge_tx_ring_stop(mac_ring_driver_t rdriver)
2656 {
2657 p_hxge_ring_handle_t rhp = (p_hxge_ring_handle_t)rdriver;
2658 p_hxge_t hxgep;
2659 p_tx_ring_t ring;
2660
2661 ASSERT(rhp != NULL);
2662 ASSERT((rhp->index >= 0) && (rhp->index < HXGE_MAX_TDCS));
2663
2664 hxgep = rhp->hxgep;
2665 ring = hxgep->tx_rings->rings[rhp->index];
2666
2667 MUTEX_ENTER(&ring->lock);
2668 ring->ring_handle = (mac_ring_handle_t)NULL;
2669 rhp->started = B_FALSE;
2670 MUTEX_EXIT(&ring->lock);
2671 }
2672
2673 static int
2674 hxge_rx_ring_start(mac_ring_driver_t rdriver, uint64_t mr_gen_num)
2675 {
2676 p_hxge_ring_handle_t rhp = (p_hxge_ring_handle_t)rdriver;
2677 p_hxge_t hxgep;
2678 p_rx_rcr_ring_t ring;
2679 int i;
2680
2681 ASSERT(rhp != NULL);
2682 ASSERT((rhp->index >= 0) && (rhp->index < HXGE_MAX_RDCS));
2683
2684 hxgep = rhp->hxgep;
2685
2686 /*
2687 * Get pointer to ring.
2688 */
2689 ring = hxgep->rx_rcr_rings->rcr_rings[rhp->index];
2690
2691 MUTEX_ENTER(&ring->lock);
2692
2693 if (rhp->started) {
2694 MUTEX_EXIT(&ring->lock);
2695 return (0);
2696 }
2697
2698 /*
2699 * Set the ldvp and ldgp pointers to enable/disable
2700 * polling.
2701 */
2702 for (i = 0; i < hxgep->ldgvp->maxldvs; i++) {
2703 if ((hxgep->ldgvp->ldvp[i].is_rxdma == 1) &&
2704 (hxgep->ldgvp->ldvp[i].channel == rhp->index)) {
2705 ring->ldvp = &hxgep->ldgvp->ldvp[i];
2706 ring->ldgp = hxgep->ldgvp->ldvp[i].ldgp;
2707 break;
2708 }
2709 }
2710
2711 rhp->started = B_TRUE;
2712 ring->rcr_mac_handle = rhp->ring_handle;
2713 ring->rcr_gen_num = mr_gen_num;
2714 MUTEX_EXIT(&ring->lock);
2715
2716 return (0);
2717 }
2718
2719 static void
2720 hxge_rx_ring_stop(mac_ring_driver_t rdriver)
2721 {
2722 p_hxge_ring_handle_t rhp = (p_hxge_ring_handle_t)rdriver;
2723 p_hxge_t hxgep;
2724 p_rx_rcr_ring_t ring;
2725
2726 ASSERT(rhp != NULL);
2727 ASSERT((rhp->index >= 0) && (rhp->index < HXGE_MAX_RDCS));
2728
2729 hxgep = rhp->hxgep;
2730 ring = hxgep->rx_rcr_rings->rcr_rings[rhp->index];
2731
2732 MUTEX_ENTER(&ring->lock);
2733 rhp->started = B_FALSE;
2734 ring->rcr_mac_handle = NULL;
2735 ring->ldvp = NULL;
2736 ring->ldgp = NULL;
2737 MUTEX_EXIT(&ring->lock);
2738 }
2739
2740 static int
2741 hxge_rx_group_start(mac_group_driver_t gdriver)
2742 {
2743 hxge_ring_group_t *group = (hxge_ring_group_t *)gdriver;
2744
2745 ASSERT(group->hxgep != NULL);
2746 ASSERT(group->hxgep->hxge_mac_state == HXGE_MAC_STARTED);
2747
2748 MUTEX_ENTER(group->hxgep->genlock);
2749 group->started = B_TRUE;
2750 MUTEX_EXIT(group->hxgep->genlock);
2751
2752 return (0);
2753 }
2754
2755 static void
2756 hxge_rx_group_stop(mac_group_driver_t gdriver)
2757 {
2758 hxge_ring_group_t *group = (hxge_ring_group_t *)gdriver;
2759
2760 ASSERT(group->hxgep != NULL);
2761 ASSERT(group->hxgep->hxge_mac_state == HXGE_MAC_STARTED);
2762 ASSERT(group->started == B_TRUE);
2763
2764 MUTEX_ENTER(group->hxgep->genlock);
2765 group->started = B_FALSE;
2766 MUTEX_EXIT(group->hxgep->genlock);
2767 }
2768
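/*
 * Alternate (multiple) unicast MAC address support.  The hardware PFC
 * provides a small table of address slots; mmac.addrs[] shadows that table
 * in software, with mmac.available counting the slots still free.  The
 * helpers below find, program, look up and clear individual slots.
 */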
2769 static int
2770 hxge_mmac_get_slot(p_hxge_t hxgep, int *slot)
2771 {
2772 int i;
2773
2774 /*
2775 * Find an open slot.
2776 */
2777 for (i = 0; i < hxgep->mmac.total; i++) {
2778 if (!hxgep->mmac.addrs[i].set) {
2779 *slot = i;
2780 return (0);
2781 }
2782 }
2783
2784 return (ENXIO);
2785 }
2786
2787 static int
2788 hxge_mmac_set_addr(p_hxge_t hxgep, int slot, const uint8_t *addr)
2789 {
2790 struct ether_addr eaddr;
2791 hxge_status_t status = HXGE_OK;
2792
2793 bcopy(addr, (uint8_t *)&eaddr, ETHERADDRL);
2794
2795 /*
2796 * Set new interface local address and re-init device.
2797 * This is destructive to any other streams attached
2798 * to this device.
2799 */
2800 RW_ENTER_WRITER(&hxgep->filter_lock);
2801 status = hxge_pfc_set_mac_address(hxgep, slot, &eaddr);
2802 RW_EXIT(&hxgep->filter_lock);
2803 if (status != HXGE_OK)
2804 return (status);
2805
2806 hxgep->mmac.addrs[slot].set = B_TRUE;
2807 bcopy(addr, hxgep->mmac.addrs[slot].addr, ETHERADDRL);
2808 hxgep->mmac.available--;
2809 if (slot == HXGE_MAC_DEFAULT_ADDR_SLOT)
2810 hxgep->mmac.addrs[slot].primary = B_TRUE;
2811
2812 return (0);
2813 }
2814
2815 static int
2816 hxge_mmac_find_addr(p_hxge_t hxgep, const uint8_t *addr, int *slot)
2817 {
2818 int i, result;
2819
2820 for (i = 0; i < hxgep->mmac.total; i++) {
2821 if (hxgep->mmac.addrs[i].set) {
2822 result = memcmp(hxgep->mmac.addrs[i].addr,
2823 addr, ETHERADDRL);
2824 if (result == 0) {
2825 *slot = i;
2826 return (0);
2827 }
2828 }
2829 }
2830
2831 return (EINVAL);
2832 }
2833
2834 static int
2835 hxge_mmac_unset_addr(p_hxge_t hxgep, int slot)
2836 {
2837 hxge_status_t status;
2838 int i;
2839
2840 status = hxge_pfc_clear_mac_address(hxgep, slot);
2841 if (status != HXGE_OK)
2842 return (status);
2843
2844 for (i = 0; i < ETHERADDRL; i++)
2845 hxgep->mmac.addrs[slot].addr[i] = 0;
2846
2847 hxgep->mmac.addrs[slot].set = B_FALSE;
2848 if (slot == HXGE_MAC_DEFAULT_ADDR_SLOT)
2849 hxgep->mmac.addrs[slot].primary = B_FALSE;
2850 hxgep->mmac.available++;
2851
2852 return (0);
2853 }
2854
2855 static int
2856 hxge_rx_group_add_mac(void *arg, const uint8_t *mac_addr)
2857 {
2858 hxge_ring_group_t *group = arg;
2859 p_hxge_t hxgep = group->hxgep;
2860 int slot = 0;
2861
2862 ASSERT(group->type == MAC_RING_TYPE_RX);
2863
2864 MUTEX_ENTER(hxgep->genlock);
2865
2866 /*
2867 * Find a slot for the address.
2868 */
2869 if (hxge_mmac_get_slot(hxgep, &slot) != 0) {
2870 MUTEX_EXIT(hxgep->genlock);
2871 return (ENOSPC);
2872 }
2873
2874 /*
2875 * Program the MAC address.
2876 */
2877 if (hxge_mmac_set_addr(hxgep, slot, mac_addr) != 0) {
2878 MUTEX_EXIT(hxgep->genlock);
2879 return (ENOSPC);
2880 }
2881
2882 MUTEX_EXIT(hxgep->genlock);
2883 return (0);
2884 }
2885
2886 static int
2887 hxge_rx_group_rem_mac(void *arg, const uint8_t *mac_addr)
2888 {
2889 hxge_ring_group_t *group = arg;
2890 p_hxge_t hxgep = group->hxgep;
2891 int rv, slot;
2892
2893 ASSERT(group->type == MAC_RING_TYPE_RX);
2894
2895 MUTEX_ENTER(hxgep->genlock);
2896
2897 if ((rv = hxge_mmac_find_addr(hxgep, mac_addr, &slot)) != 0) {
2898 MUTEX_EXIT(hxgep->genlock);
2899 return (rv);
2900 }
2901
2902 if ((rv = hxge_mmac_unset_addr(hxgep, slot)) != 0) {
2903 MUTEX_EXIT(hxgep->genlock);
2904 return (rv);
2905 }
2906
2907 MUTEX_EXIT(hxgep->genlock);
2908 return (0);
2909 }
2910
2911 static void
2912 hxge_group_get(void *arg, mac_ring_type_t type, int groupid,
2913 mac_group_info_t *infop, mac_group_handle_t gh)
2914 {
2915 p_hxge_t hxgep = arg;
2916 hxge_ring_group_t *group;
2917
2918 ASSERT(type == MAC_RING_TYPE_RX);
2919
2920 switch (type) {
2921 case MAC_RING_TYPE_RX:
2922 group = &hxgep->rx_groups[groupid];
2923 group->hxgep = hxgep;
2924 group->ghandle = gh;
2925 group->index = groupid;
2926 group->type = type;
2927
2928 infop->mgi_driver = (mac_group_driver_t)group;
2929 infop->mgi_start = hxge_rx_group_start;
2930 infop->mgi_stop = hxge_rx_group_stop;
2931 infop->mgi_addmac = hxge_rx_group_add_mac;
2932 infop->mgi_remmac = hxge_rx_group_rem_mac;
2933 infop->mgi_count = HXGE_MAX_RDCS;
2934 break;
2935
2936 case MAC_RING_TYPE_TX:
2937 default:
2938 break;
2939 }
2940 }
2941
2942 static int
2943 hxge_ring_get_htable_idx(p_hxge_t hxgep, mac_ring_type_t type, uint32_t channel)
2944 {
2945 int i;
2946
2947 ASSERT(hxgep->ldgvp != NULL);
2948
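/*
 * Scan the logical device table for the logical device that services the
 * given RX or TX DMA channel and return the interrupt handle (htable)
 * index of its group; return -1 if no match is found.
 */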
2949 switch (type) {
2950 case MAC_RING_TYPE_RX:
2951 for (i = 0; i < hxgep->ldgvp->maxldvs; i++) {
2952 if ((hxgep->ldgvp->ldvp[i].is_rxdma) &&
2953 (hxgep->ldgvp->ldvp[i].channel == channel)) {
2954 return ((int)
2955 hxgep->ldgvp->ldvp[i].ldgp->htable_idx);
2956 }
2957 }
2958 break;
2959
2960 case MAC_RING_TYPE_TX:
2961 for (i = 0; i < hxgep->ldgvp->maxldvs; i++) {
2962 if ((hxgep->ldgvp->ldvp[i].is_txdma) &&
2963 (hxgep->ldgvp->ldvp[i].channel == channel)) {
2964 return ((int)
2965 hxgep->ldgvp->ldvp[i].ldgp->htable_idx);
2966 }
2967 }
2968 break;
2969
2970 default:
2971 break;
2972 }
2973
2974 return (-1);
2975 }
2976
2977 /*
2978 * Callback function for the GLDv3 layer to register all rings.
2979 */
2980 /*ARGSUSED*/
2981 static void
2982 hxge_fill_ring(void *arg, mac_ring_type_t type, const int rg_index,
2983 const int index, mac_ring_info_t *infop, mac_ring_handle_t rh)
2984 {
2985 p_hxge_t hxgep = arg;
2986
2987 ASSERT(hxgep != NULL);
2988 ASSERT(infop != NULL);
2989
2990 switch (type) {
2991 case MAC_RING_TYPE_TX: {
2992 p_hxge_ring_handle_t rhp;
2993 mac_intr_t *mintr = &infop->mri_intr;
2994 p_hxge_intr_t intrp;
2995 int htable_idx;
2996
2997 ASSERT((index >= 0) && (index < HXGE_MAX_TDCS));
2998 rhp = &hxgep->tx_ring_handles[index];
2999 rhp->hxgep = hxgep;
3000 rhp->index = index;
3001 rhp->ring_handle = rh;
3002 infop->mri_driver = (mac_ring_driver_t)rhp;
3003 infop->mri_start = hxge_tx_ring_start;
3004 infop->mri_stop = hxge_tx_ring_stop;
3005 infop->mri_tx = hxge_tx_ring_send;
3006 infop->mri_stat = hxge_tx_ring_stat;
3007
3008 intrp = (p_hxge_intr_t)&hxgep->hxge_intr_type;
3009 htable_idx = hxge_ring_get_htable_idx(hxgep, type, index);
3010 if (htable_idx >= 0)
3011 mintr->mi_ddi_handle = intrp->htable[htable_idx];
3012 else
3013 mintr->mi_ddi_handle = NULL;
3014 break;
3015 }
3016
3017 case MAC_RING_TYPE_RX: {
3018 p_hxge_ring_handle_t rhp;
3019 mac_intr_t hxge_mac_intr;
3020 p_hxge_intr_t intrp;
3021 int htable_idx;
3022
3023 ASSERT((index >= 0) && (index < HXGE_MAX_RDCS));
3024 rhp = &hxgep->rx_ring_handles[index];
3025 rhp->hxgep = hxgep;
3026 rhp->index = index;
3027 rhp->ring_handle = rh;
3028
3029 /*
3030 * Entrypoint to enable interrupt (disable poll) and
3031 * disable interrupt (enable poll).
3032 */
3033 hxge_mac_intr.mi_handle = (mac_intr_handle_t)rhp;
3034 hxge_mac_intr.mi_enable = (mac_intr_enable_t)hxge_disable_poll;
3035 hxge_mac_intr.mi_disable = (mac_intr_disable_t)hxge_enable_poll;
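/*
 * Note the inversion: the MAC layer calls mi_disable when it switches the
 * ring into polling mode (interrupt off) and mi_enable when polling stops
 * (interrupt re-armed), hence the mapping to hxge_enable_poll and
 * hxge_disable_poll above.
 */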
3036
3037 intrp = (p_hxge_intr_t)&hxgep->hxge_intr_type;
3038 htable_idx = hxge_ring_get_htable_idx(hxgep, type, index);
3039 if (htable_idx >= 0)
3040 hxge_mac_intr.mi_ddi_handle = intrp->htable[htable_idx];
3041 else
3042 hxge_mac_intr.mi_ddi_handle = NULL;
3043
3044 infop->mri_driver = (mac_ring_driver_t)rhp;
3045 infop->mri_start = hxge_rx_ring_start;
3046 infop->mri_stop = hxge_rx_ring_stop;
3047 infop->mri_intr = hxge_mac_intr;
3048 infop->mri_poll = hxge_rx_poll;
3049 infop->mri_stat = hxge_rx_ring_stat;
3050 break;
3051 }
3052
3053 default:
3054 break;
3055 }
3056 }
3057
3058 /*ARGSUSED*/
3059 boolean_t
3060 hxge_m_getcapab(void *arg, mac_capab_t cap, void *cap_data)
3061 {
3062 p_hxge_t hxgep = arg;
3063
3064 switch (cap) {
3065 case MAC_CAPAB_HCKSUM: {
3066 uint32_t *txflags = cap_data;
3067
3068 *txflags = HCKSUM_INET_PARTIAL;
3069 break;
3070 }
3071
3072 case MAC_CAPAB_RINGS: {
3073 mac_capab_rings_t *cap_rings = cap_data;
3074
3075 MUTEX_ENTER(hxgep->genlock);
3076 if (cap_rings->mr_type == MAC_RING_TYPE_RX) {
3077 cap_rings->mr_group_type = MAC_GROUP_TYPE_STATIC;
3078 cap_rings->mr_rnum = HXGE_MAX_RDCS;
3079 cap_rings->mr_rget = hxge_fill_ring;
3080 cap_rings->mr_gnum = HXGE_MAX_RX_GROUPS;
3081 cap_rings->mr_gget = hxge_group_get;
3082 cap_rings->mr_gaddring = NULL;
3083 cap_rings->mr_gremring = NULL;
3084 } else {
3085 cap_rings->mr_group_type = MAC_GROUP_TYPE_STATIC;
3086 cap_rings->mr_rnum = HXGE_MAX_TDCS;
3087 cap_rings->mr_rget = hxge_fill_ring;
3088 cap_rings->mr_gnum = 0;
3089 cap_rings->mr_gget = NULL;
3090 cap_rings->mr_gaddring = NULL;
3091 cap_rings->mr_gremring = NULL;
3092 }
3093 MUTEX_EXIT(hxgep->genlock);
3094 break;
3095 }
3096
3097 default:
3098 return (B_FALSE);
3099 }
3100 return (B_TRUE);
3101 }
3102
3103 static boolean_t
3104 hxge_param_locked(mac_prop_id_t pr_num)
3105 {
3106 /*
3107 * All adv_* parameters are locked (read-only) while
3108 * the device is in any sort of loopback mode ...
3109 */
3110 switch (pr_num) {
3111 case MAC_PROP_ADV_1000FDX_CAP:
3112 case MAC_PROP_EN_1000FDX_CAP:
3113 case MAC_PROP_ADV_1000HDX_CAP:
3114 case MAC_PROP_EN_1000HDX_CAP:
3115 case MAC_PROP_ADV_100FDX_CAP:
3116 case MAC_PROP_EN_100FDX_CAP:
3117 case MAC_PROP_ADV_100HDX_CAP:
3118 case MAC_PROP_EN_100HDX_CAP:
3119 case MAC_PROP_ADV_10FDX_CAP:
3120 case MAC_PROP_EN_10FDX_CAP:
3121 case MAC_PROP_ADV_10HDX_CAP:
3122 case MAC_PROP_EN_10HDX_CAP:
3123 case MAC_PROP_AUTONEG:
3124 case MAC_PROP_FLOWCTRL:
3125 return (B_TRUE);
3126 }
3127 return (B_FALSE);
3128 }
3129
3130 /*
3131 * callback functions for set/get of properties
3132 */
3133 static int
3134 hxge_m_setprop(void *barg, const char *pr_name, mac_prop_id_t pr_num,
3135 uint_t pr_valsize, const void *pr_val)
3136 {
3137 hxge_t *hxgep = barg;
3138 p_hxge_stats_t statsp;
3139 int err = 0;
3140 uint32_t new_mtu, old_framesize, new_framesize;
3141
3142 HXGE_DEBUG_MSG((hxgep, DLADM_CTL, "==> hxge_m_setprop"));
3143
3144 statsp = hxgep->statsp;
3145 MUTEX_ENTER(hxgep->genlock);
3146 if (statsp->port_stats.lb_mode != hxge_lb_normal &&
3147 hxge_param_locked(pr_num)) {
3148 /*
3149 * All adv_* parameters are locked (read-only)
3150 * while the device is in any sort of loopback mode.
3151 */
3152 HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3153 "==> hxge_m_setprop: loopback mode: read only"));
3154 MUTEX_EXIT(hxgep->genlock);
3155 return (EBUSY);
3156 }
3157
3158 switch (pr_num) {
3159 /*
3160 * These properties either do not exist or are read-only.
3161 */
3162 case MAC_PROP_EN_1000FDX_CAP:
3163 case MAC_PROP_EN_100FDX_CAP:
3164 case MAC_PROP_EN_10FDX_CAP:
3165 case MAC_PROP_EN_1000HDX_CAP:
3166 case MAC_PROP_EN_100HDX_CAP:
3167 case MAC_PROP_EN_10HDX_CAP:
3168 case MAC_PROP_ADV_1000FDX_CAP:
3169 case MAC_PROP_ADV_1000HDX_CAP:
3170 case MAC_PROP_ADV_100FDX_CAP:
3171 case MAC_PROP_ADV_100HDX_CAP:
3172 case MAC_PROP_ADV_10FDX_CAP:
3173 case MAC_PROP_ADV_10HDX_CAP:
3174 case MAC_PROP_STATUS:
3175 case MAC_PROP_SPEED:
3176 case MAC_PROP_DUPLEX:
3177 case MAC_PROP_AUTONEG:
3178 /*
3179 * Flow control is handled in the shared domain and
3180 * it is read-only here.
3181 */
3182 case MAC_PROP_FLOWCTRL:
3183 err = EINVAL;
3184 HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3185 "==> hxge_m_setprop: read only property %d",
3186 pr_num));
3187 break;
3188
3189 case MAC_PROP_MTU:
3190 bcopy(pr_val, &new_mtu, sizeof (new_mtu));
3191 HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3192 "==> hxge_m_setprop: set MTU: %d", new_mtu));
3193
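/*
 * Convert the requested MTU to a maximum frame size by adding the
 * link-layer overhead.  (Illustrative assumption: if MTU_TO_FRAME_SIZE
 * covers the Ethernet header, VLAN tag and FCS -- 22 bytes -- a 1500-byte
 * MTU maps to a 1522-byte maximum frame.)
 */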
3194 new_framesize = new_mtu + MTU_TO_FRAME_SIZE;
3195 if (new_framesize == hxgep->vmac.maxframesize) {
3196 err = 0;
3197 break;
3198 }
3199
3200 if (hxgep->hxge_mac_state == HXGE_MAC_STARTED) {
3201 err = EBUSY;
3202 break;
3203 }
3204
3205 if (new_framesize < MIN_FRAME_SIZE ||
3206 new_framesize > MAX_FRAME_SIZE) {
3207 err = EINVAL;
3208 break;
3209 }
3210
3211 old_framesize = hxgep->vmac.maxframesize;
3212 hxgep->vmac.maxframesize = (uint16_t)new_framesize;
3213
3214 if (hxge_vmac_set_framesize(hxgep)) {
3215 hxgep->vmac.maxframesize =
3216 (uint16_t)old_framesize;
3217 err = EINVAL;
3218 break;
3219 }
3220
3221 err = mac_maxsdu_update(hxgep->mach, new_mtu);
3222 if (err) {
3223 hxgep->vmac.maxframesize =
3224 (uint16_t)old_framesize;
3225 (void) hxge_vmac_set_framesize(hxgep);
3226 }
3227
3228 HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3229 "==> hxge_m_setprop: set MTU: %d maxframe %d",
3230 new_mtu, hxgep->vmac.maxframesize));
3231 break;
3232
3233 case MAC_PROP_PRIVATE:
3234 HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3235 "==> hxge_m_setprop: private property"));
3236 err = hxge_set_priv_prop(hxgep, pr_name, pr_valsize,
3237 pr_val);
3238 break;
3239
3240 default:
3241 err = ENOTSUP;
3242 break;
3243 }
3244
3245 MUTEX_EXIT(hxgep->genlock);
3246
3247 HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3248 "<== hxge_m_setprop (return %d)", err));
3249
3250 return (err);
3251 }
3252
3253 static int
3254 hxge_m_getprop(void *barg, const char *pr_name, mac_prop_id_t pr_num,
3255 uint_t pr_valsize, void *pr_val)
3256 {
3257 hxge_t *hxgep = barg;
3258 p_hxge_stats_t statsp = hxgep->statsp;
3259 int err = 0;
3260 link_flowctrl_t fl;
3261 uint64_t tmp = 0;
3262 link_state_t ls;
3263
3264 HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3265 "==> hxge_m_getprop: pr_num %d", pr_num));
3266
3267 switch (pr_num) {
3268 case MAC_PROP_DUPLEX:
3269 *(uint8_t *)pr_val = statsp->mac_stats.link_duplex;
3270 HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3271 "==> hxge_m_getprop: duplex mode %d",
3272 *(uint8_t *)pr_val));
3273 break;
3274
3275 case MAC_PROP_SPEED:
3276 ASSERT(pr_valsize >= sizeof (uint64_t));
3277 tmp = statsp->mac_stats.link_speed * 1000000ull;
3278 bcopy(&tmp, pr_val, sizeof (tmp));
3279 break;
3280
3281 case MAC_PROP_STATUS:
3282 ASSERT(pr_valsize >= sizeof (link_state_t));
3283 if (!statsp->mac_stats.link_up)
3284 ls = LINK_STATE_DOWN;
3285 else
3286 ls = LINK_STATE_UP;
3287 bcopy(&ls, pr_val, sizeof (ls));
3288 break;
3289
3290 case MAC_PROP_FLOWCTRL:
3291 /*
3292 * Flow control is supported by the shared domain and
3293 * it is currently transmit only
3294 */
3295 ASSERT(pr_valsize >= sizeof (link_flowctrl_t));
3296 fl = LINK_FLOWCTRL_TX;
3297 bcopy(&fl, pr_val, sizeof (fl));
3298 break;
3299 case MAC_PROP_AUTONEG:
3300 /* 10G link only and it is not negotiable */
3301 *(uint8_t *)pr_val = 0;
3302 break;
3303 case MAC_PROP_ADV_1000FDX_CAP:
3304 case MAC_PROP_ADV_100FDX_CAP:
3305 case MAC_PROP_ADV_10FDX_CAP:
3306 case MAC_PROP_ADV_1000HDX_CAP:
3307 case MAC_PROP_ADV_100HDX_CAP:
3308 case MAC_PROP_ADV_10HDX_CAP:
3309 case MAC_PROP_EN_1000FDX_CAP:
3310 case MAC_PROP_EN_100FDX_CAP:
3311 case MAC_PROP_EN_10FDX_CAP:
3312 case MAC_PROP_EN_1000HDX_CAP:
3313 case MAC_PROP_EN_100HDX_CAP:
3314 case MAC_PROP_EN_10HDX_CAP:
3315 err = ENOTSUP;
3316 break;
3317
3318 case MAC_PROP_PRIVATE:
3319 err = hxge_get_priv_prop(hxgep, pr_name, pr_valsize,
3320 pr_val);
3321 break;
3322
3323 default:
3324 err = EINVAL;
3325 break;
3326 }
3327
3328 HXGE_DEBUG_MSG((hxgep, DLADM_CTL, "<== hxge_m_getprop"));
3329
3330 return (err);
3331 }
3332
3333 static void
3334 hxge_m_propinfo(void *arg, const char *pr_name,
3335 mac_prop_id_t pr_num, mac_prop_info_handle_t prh)
3336 {
3337 _NOTE(ARGUNUSED(arg));
3338 switch (pr_num) {
3339 case MAC_PROP_DUPLEX:
3340 case MAC_PROP_SPEED:
3341 case MAC_PROP_STATUS:
3342 case MAC_PROP_AUTONEG:
3343 case MAC_PROP_FLOWCTRL:
3344 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
3345 break;
3346
3347 case MAC_PROP_MTU:
3348 mac_prop_info_set_range_uint32(prh,
3349 MIN_FRAME_SIZE - MTU_TO_FRAME_SIZE,
3350 MAX_FRAME_SIZE - MTU_TO_FRAME_SIZE);
3351 break;
3352
3353 case MAC_PROP_PRIVATE: {
3354 char valstr[MAXNAMELEN];
3355
3356 bzero(valstr, sizeof (valstr));
3357
3358 /* Receive Interrupt Blanking Parameters */
3359 if (strcmp(pr_name, "_rxdma_intr_time") == 0) {
3360 (void) snprintf(valstr, sizeof (valstr), "%d",
3361 RXDMA_RCR_TO_DEFAULT);
3362 } else if (strcmp(pr_name, "_rxdma_intr_pkts") == 0) {
3363 (void) snprintf(valstr, sizeof (valstr), "%d",
3364 RXDMA_RCR_PTHRES_DEFAULT);
3365
3366 /* Classification and Load Distribution Configuration */
3367 } else if (strcmp(pr_name, "_class_opt_ipv4_tcp") == 0 ||
3368 strcmp(pr_name, "_class_opt_ipv4_udp") == 0 ||
3369 strcmp(pr_name, "_class_opt_ipv4_ah") == 0 ||
3370 strcmp(pr_name, "_class_opt_ipv4_sctp") == 0 ||
3371 strcmp(pr_name, "_class_opt_ipv6_tcp") == 0 ||
3372 strcmp(pr_name, "_class_opt_ipv6_udp") == 0 ||
3373 strcmp(pr_name, "_class_opt_ipv6_ah") == 0 ||
3374 strcmp(pr_name, "_class_opt_ipv6_sctp") == 0) {
3375 (void) snprintf(valstr, sizeof (valstr), "%d",
3376 HXGE_CLASS_TCAM_LOOKUP);
3377 }
3378
3379 if (strlen(valstr) > 0)
3380 mac_prop_info_set_default_str(prh, valstr);
3381 break;
3382 }
3383 }
3384 }
3385
3386
3387 /* ARGSUSED */
3388 static int
3389 hxge_set_priv_prop(p_hxge_t hxgep, const char *pr_name, uint_t pr_valsize,
3390 const void *pr_val)
3391 {
3392 p_hxge_param_t param_arr = hxgep->param_arr;
3393 int err = 0;
3394
3395 HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3396 "==> hxge_set_priv_prop: name %s (value %s)", pr_name, pr_val));
3397
3398 if (pr_val == NULL) {
3399 return (EINVAL);
3400 }
3401
3402 /* Blanking */
3403 if (strcmp(pr_name, "_rxdma_intr_time") == 0) {
3404 err = hxge_param_rx_intr_time(hxgep, NULL, NULL,
3405 (char *)pr_val, (caddr_t)&param_arr[param_rxdma_intr_time]);
3406 } else if (strcmp(pr_name, "_rxdma_intr_pkts") == 0) {
3407 err = hxge_param_rx_intr_pkts(hxgep, NULL, NULL,
3408 (char *)pr_val, (caddr_t)&param_arr[param_rxdma_intr_pkts]);
3409
3410 /* Classification */
3411 } else if (strcmp(pr_name, "_class_opt_ipv4_tcp") == 0) {
3412 err = hxge_param_set_ip_opt(hxgep, NULL, NULL, (char *)pr_val,
3413 (caddr_t)&param_arr[param_class_opt_ipv4_tcp]);
3414 } else if (strcmp(pr_name, "_class_opt_ipv4_udp") == 0) {
3415 err = hxge_param_set_ip_opt(hxgep, NULL, NULL, (char *)pr_val,
3416 (caddr_t)&param_arr[param_class_opt_ipv4_udp]);
3417 } else if (strcmp(pr_name, "_class_opt_ipv4_ah") == 0) {
3418 err = hxge_param_set_ip_opt(hxgep, NULL, NULL, (char *)pr_val,
3419 (caddr_t)&param_arr[param_class_opt_ipv4_ah]);
3420 } else if (strcmp(pr_name, "_class_opt_ipv4_sctp") == 0) {
3421 err = hxge_param_set_ip_opt(hxgep, NULL, NULL, (char *)pr_val,
3422 (caddr_t)&param_arr[param_class_opt_ipv4_sctp]);
3423 } else if (strcmp(pr_name, "_class_opt_ipv6_tcp") == 0) {
3424 err = hxge_param_set_ip_opt(hxgep, NULL, NULL, (char *)pr_val,
3425 (caddr_t)&param_arr[param_class_opt_ipv6_tcp]);
3426 } else if (strcmp(pr_name, "_class_opt_ipv6_udp") == 0) {
3427 err = hxge_param_set_ip_opt(hxgep, NULL, NULL, (char *)pr_val,
3428 (caddr_t)&param_arr[param_class_opt_ipv6_udp]);
3429 } else if (strcmp(pr_name, "_class_opt_ipv6_ah") == 0) {
3430 err = hxge_param_set_ip_opt(hxgep, NULL, NULL, (char *)pr_val,
3431 (caddr_t)&param_arr[param_class_opt_ipv6_ah]);
3432 } else if (strcmp(pr_name, "_class_opt_ipv6_sctp") == 0) {
3433 err = hxge_param_set_ip_opt(hxgep, NULL, NULL, (char *)pr_val,
3434 (caddr_t)&param_arr[param_class_opt_ipv6_sctp]);
3435 } else {
3436 err = EINVAL;
3437 }
3438
3439 HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3440 "<== hxge_set_priv_prop: err %d", err));
3441
3442 return (err);
3443 }
3444
3445 static int
3446 hxge_get_priv_prop(p_hxge_t hxgep, const char *pr_name, uint_t pr_valsize,
3447 void *pr_val)
3448 {
3449 p_hxge_param_t param_arr = hxgep->param_arr;
3450 char valstr[MAXNAMELEN];
3451 int err = 0;
3452 uint_t strsize;
3453 int value = 0;
3454
3455 HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3456 "==> hxge_get_priv_prop: property %s", pr_name));
3457
3458 /* Receive Interrupt Blanking Parameters */
3459 if (strcmp(pr_name, "_rxdma_intr_time") == 0) {
3460 value = hxgep->intr_timeout;
3461 } else if (strcmp(pr_name, "_rxdma_intr_pkts") == 0) {
3462 value = hxgep->intr_threshold;
3463
3464 /* Classification and Load Distribution Configuration */
3465 } else if (strcmp(pr_name, "_class_opt_ipv4_tcp") == 0) {
3466 err = hxge_param_get_ip_opt(hxgep, NULL, NULL,
3467 (caddr_t)&param_arr[param_class_opt_ipv4_tcp]);
3468
3469 value = (int)param_arr[param_class_opt_ipv4_tcp].value;
3470 } else if (strcmp(pr_name, "_class_opt_ipv4_udp") == 0) {
3471 err = hxge_param_get_ip_opt(hxgep, NULL, NULL,
3472 (caddr_t)&param_arr[param_class_opt_ipv4_udp]);
3473
3474 value = (int)param_arr[param_class_opt_ipv4_udp].value;
3475 } else if (strcmp(pr_name, "_class_opt_ipv4_ah") == 0) {
3476 err = hxge_param_get_ip_opt(hxgep, NULL, NULL,
3477 (caddr_t)&param_arr[param_class_opt_ipv4_ah]);
3478
3479 value = (int)param_arr[param_class_opt_ipv4_ah].value;
3480 } else if (strcmp(pr_name, "_class_opt_ipv4_sctp") == 0) {
3481 err = hxge_param_get_ip_opt(hxgep, NULL, NULL,
3482 (caddr_t)&param_arr[param_class_opt_ipv4_sctp]);
3483
3484 value = (int)param_arr[param_class_opt_ipv4_sctp].value;
3485 } else if (strcmp(pr_name, "_class_opt_ipv6_tcp") == 0) {
3486 err = hxge_param_get_ip_opt(hxgep, NULL, NULL,
3487 (caddr_t)&param_arr[param_class_opt_ipv6_tcp]);
3488
3489 value = (int)param_arr[param_class_opt_ipv6_tcp].value;
3490 } else if (strcmp(pr_name, "_class_opt_ipv6_udp") == 0) {
3491 err = hxge_param_get_ip_opt(hxgep, NULL, NULL,
3492 (caddr_t)&param_arr[param_class_opt_ipv6_udp]);
3493
3494 value = (int)param_arr[param_class_opt_ipv6_udp].value;
3495 } else if (strcmp(pr_name, "_class_opt_ipv6_ah") == 0) {
3496 err = hxge_param_get_ip_opt(hxgep, NULL, NULL,
3497 (caddr_t)&param_arr[param_class_opt_ipv6_ah]);
3498
3499 value = (int)param_arr[param_class_opt_ipv6_ah].value;
3500 } else if (strcmp(pr_name, "_class_opt_ipv6_sctp") == 0) {
3501 err = hxge_param_get_ip_opt(hxgep, NULL, NULL,
3502 (caddr_t)&param_arr[param_class_opt_ipv6_sctp]);
3503
3504 value = (int)param_arr[param_class_opt_ipv6_sctp].value;
3505 } else {
3506 err = EINVAL;
3507 }
3508
3509 if (err == 0) {
3510 (void) snprintf(valstr, sizeof (valstr), "0x%x", value);
3511
3512 strsize = (uint_t)strlen(valstr);
3513 if (pr_valsize < strsize) {
3514 err = ENOBUFS;
3515 } else {
3516 (void) strlcpy(pr_val, valstr, pr_valsize);
3517 }
3518 }
3519
3520 HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3521 "<== hxge_get_priv_prop: return %d", err));
3522
3523 return (err);
3524 }
3525 /*
3526 * Module loading and removing entry points.
3527 */
3528 DDI_DEFINE_STREAM_OPS(hxge_dev_ops, nulldev, nulldev, hxge_attach, hxge_detach,
3529 nodev, NULL, D_MP, NULL, NULL);
3530
3531 extern struct mod_ops mod_driverops;
3532
3533 #define HXGE_DESC_VER "HXGE 10Gb Ethernet Driver"
3534
3535 /*
3536 * Module linkage information for the kernel.
3537 */
3538 static struct modldrv hxge_modldrv = {
3539 &mod_driverops,
3540 HXGE_DESC_VER,
3541 &hxge_dev_ops
3542 };
3543
3544 static struct modlinkage modlinkage = {
3545 MODREV_1, (void *) &hxge_modldrv, NULL
3546 };
3547
3548 int
3549 _init(void)
3550 {
3551 int status;
3552
3553 HXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _init"));
3554 mac_init_ops(&hxge_dev_ops, "hxge");
3555 status = ddi_soft_state_init(&hxge_list, sizeof (hxge_t), 0);
3556 if (status != 0) {
3557 HXGE_ERROR_MSG((NULL, HXGE_ERR_CTL,
3558 "failed to init device soft state"));
3559 mac_fini_ops(&hxge_dev_ops);
3560 goto _init_exit;
3561 }
3562
3563 status = mod_install(&modlinkage);
3564 if (status != 0) {
3565 ddi_soft_state_fini(&hxge_list);
3566 HXGE_ERROR_MSG((NULL, HXGE_ERR_CTL, "Mod install failed"));
3567 goto _init_exit;
3568 }
3569
3570 MUTEX_INIT(&hxge_common_lock, NULL, MUTEX_DRIVER, NULL);
3571
3572 _init_exit:
3573 HXGE_DEBUG_MSG((NULL, MOD_CTL, "_init status = 0x%X", status));
3574
3575 return (status);
3576 }
3577
3578 int
3579 _fini(void)
3580 {
3581 int status;
3582
3583 HXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _fini"));
3584
3585 HXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _fini: mod_remove"));
3586
3587 if (hxge_mblks_pending)
3588 return (EBUSY);
3589
3590 status = mod_remove(&modlinkage);
3591 if (status != DDI_SUCCESS) {
3592 HXGE_DEBUG_MSG((NULL, MOD_CTL,
3593 "Module removal failed 0x%08x", status));
3594 goto _fini_exit;
3595 }
3596
3597 mac_fini_ops(&hxge_dev_ops);
3598
3599 ddi_soft_state_fini(&hxge_list);
3600
3601 MUTEX_DESTROY(&hxge_common_lock);
3602
3603 _fini_exit:
3604 HXGE_DEBUG_MSG((NULL, MOD_CTL, "_fini status = 0x%08x", status));
3605
3606 return (status);
3607 }
3608
3609 int
3610 _info(struct modinfo *modinfop)
3611 {
3612 int status;
3613
3614 HXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _info"));
3615 status = mod_info(&modlinkage, modinfop);
3616 HXGE_DEBUG_MSG((NULL, MOD_CTL, " _info status = 0x%X", status));
3617
3618 return (status);
3619 }
3620
3621 /*ARGSUSED*/
3622 static hxge_status_t
3623 hxge_add_intrs(p_hxge_t hxgep)
3624 {
3625 int intr_types;
3626 int type = 0;
3627 int ddi_status = DDI_SUCCESS;
3628 hxge_status_t status = HXGE_OK;
3629
3630 HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs"));
3631
3632 hxgep->hxge_intr_type.intr_registered = B_FALSE;
3633 hxgep->hxge_intr_type.intr_enabled = B_FALSE;
3634 hxgep->hxge_intr_type.msi_intx_cnt = 0;
3635 hxgep->hxge_intr_type.intr_added = 0;
3636 hxgep->hxge_intr_type.niu_msi_enable = B_FALSE;
3637 hxgep->hxge_intr_type.intr_type = 0;
3638
3639 if (hxge_msi_enable) {
3640 hxgep->hxge_intr_type.niu_msi_enable = B_TRUE;
3641 }
3642
3643 /* Get the supported interrupt types */
3644 if ((ddi_status = ddi_intr_get_supported_types(hxgep->dip, &intr_types))
3645 != DDI_SUCCESS) {
3646 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "<== hxge_add_intrs: "
3647 "ddi_intr_get_supported_types failed: status 0x%08x",
3648 ddi_status));
3649 return (HXGE_ERROR | HXGE_DDI_FAILED);
3650 }
3651
3652 hxgep->hxge_intr_type.intr_types = intr_types;
3653
3654 HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs: "
3655 "ddi_intr_get_supported_types: 0x%08x", intr_types));
3656
3657 /*
3658 * Pick the interrupt type to use, based on hxge_msi_enable:
3659 * 1 - MSI
3660 * 2 - MSI-X
3661 * others - FIXED (INTx emulation)
3662 */
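/*
 * The preference can be changed at boot time; for example, the /etc/system
 * line "set hxge:hxge_msi_enable = 1" selects MSI.
 */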
3663 switch (hxge_msi_enable) {
3664 default:
3665 type = DDI_INTR_TYPE_FIXED;
3666 HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs: "
3667 "use fixed (intx emulation) type %08x", type));
3668 break;
3669
3670 case 2:
3671 HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs: "
3672 "ddi_intr_get_supported_types: 0x%08x", intr_types));
3673 if (intr_types & DDI_INTR_TYPE_MSIX) {
3674 type = DDI_INTR_TYPE_MSIX;
3675 HXGE_DEBUG_MSG((hxgep, INT_CTL,
3676 "==> hxge_add_intrs: "
3677 "ddi_intr_get_supported_types: MSIX 0x%08x", type));
3678 } else if (intr_types & DDI_INTR_TYPE_MSI) {
3679 type = DDI_INTR_TYPE_MSI;
3680 HXGE_DEBUG_MSG((hxgep, INT_CTL,
3681 "==> hxge_add_intrs: "
3682 "ddi_intr_get_supported_types: MSI 0x%08x", type));
3683 } else if (intr_types & DDI_INTR_TYPE_FIXED) {
3684 type = DDI_INTR_TYPE_FIXED;
3685 HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs: "
3686 "ddi_intr_get_supported_types: MSXED0x%08x", type));
3687 }
3688 break;
3689
3690 case 1:
3691 if (intr_types & DDI_INTR_TYPE_MSI) {
3692 type = DDI_INTR_TYPE_MSI;
3693 HXGE_DEBUG_MSG((hxgep, INT_CTL,
3694 "==> hxge_add_intrs: "
3695 "ddi_intr_get_supported_types: MSI 0x%08x", type));
3696 } else if (intr_types & DDI_INTR_TYPE_MSIX) {
3697 type = DDI_INTR_TYPE_MSIX;
3698 HXGE_DEBUG_MSG((hxgep, INT_CTL,
3699 "==> hxge_add_intrs: "
3700 "ddi_intr_get_supported_types: MSIX 0x%08x", type));
3701 } else if (intr_types & DDI_INTR_TYPE_FIXED) {
3702 type = DDI_INTR_TYPE_FIXED;
3703 HXGE_DEBUG_MSG((hxgep, INT_CTL,
3704 "==> hxge_add_intrs: "
3705 "ddi_intr_get_supported_types: MSXED0x%08x", type));
3706 }
3707 }

	hxgep->hxge_intr_type.intr_type = type;
	if ((type == DDI_INTR_TYPE_MSIX || type == DDI_INTR_TYPE_MSI ||
	    type == DDI_INTR_TYPE_FIXED) &&
	    hxgep->hxge_intr_type.niu_msi_enable) {
		if ((status = hxge_add_intrs_adv(hxgep)) != DDI_SUCCESS) {
			HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
			    " hxge_add_intrs: "
			    " hxge_add_intrs_adv failed: status 0x%08x",
			    status));
			return (status);
		} else {
			HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_add_intrs: "
			    "interrupts registered : type %d", type));
			hxgep->hxge_intr_type.intr_registered = B_TRUE;

			HXGE_DEBUG_MSG((hxgep, DDI_CTL,
			    "\nAdded advanced hxge add_intr_adv "
			    "intr type 0x%x\n", type));

			return (status);
		}
	}

	if (!hxgep->hxge_intr_type.intr_registered) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "==> hxge_add_intrs: failed to register interrupts"));
		return (HXGE_ERROR | HXGE_DDI_FAILED);
	}

	HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_add_intrs"));

	return (status);
}

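/*
 * hxge_add_intrs_adv
 *
 *	Dispatch interrupt registration to the handler appropriate for the
 *	previously selected interrupt type: MSI/MSI-X or fixed (INTx).
 */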
/*ARGSUSED*/
static hxge_status_t
hxge_add_intrs_adv(p_hxge_t hxgep)
{
	int intr_type;
	p_hxge_intr_t intrp;
	hxge_status_t status;

	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs_adv"));

	intrp = (p_hxge_intr_t)&hxgep->hxge_intr_type;
	intr_type = intrp->intr_type;

	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs_adv: type 0x%x",
	    intr_type));

	switch (intr_type) {
	case DDI_INTR_TYPE_MSI:		/* 0x2 */
	case DDI_INTR_TYPE_MSIX:	/* 0x4 */
		status = hxge_add_intrs_adv_type(hxgep, intr_type);
		break;

	case DDI_INTR_TYPE_FIXED:	/* 0x1 */
		status = hxge_add_intrs_adv_type_fix(hxgep, intr_type);
		break;

	default:
		status = HXGE_ERROR;
		break;
	}

	HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_add_intrs_adv"));

	return (status);
}

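/*
 * hxge_add_intrs_adv_type
 *
 *	Allocate and register MSI or MSI-X interrupts: determine how many
 *	vectors are available (honoring the PSARC/2007/453 MSI-X limit and
 *	the MSI power-of-2 restriction), allocate them, initialize the
 *	logical device groups, and add one handler per group.
 */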
/*ARGSUSED*/
static hxge_status_t
hxge_add_intrs_adv_type(p_hxge_t hxgep, uint32_t int_type)
{
	dev_info_t *dip = hxgep->dip;
	p_hxge_ldg_t ldgp;
	p_hxge_intr_t intrp;
	uint_t *inthandler;
	void *arg1, *arg2;
	int behavior;
	int nintrs, navail;
	int nactual, nrequired, nrequest;
	int inum = 0;
	int loop = 0;
	int x, y;
	int ddi_status = DDI_SUCCESS;
	hxge_status_t status = HXGE_OK;

	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs_adv_type"));

	intrp = (p_hxge_intr_t)&hxgep->hxge_intr_type;

	ddi_status = ddi_intr_get_nintrs(dip, int_type, &nintrs);
	if ((ddi_status != DDI_SUCCESS) || (nintrs == 0)) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "ddi_intr_get_nintrs() failed, status: 0x%x, "
		    "nintrs: %d", ddi_status, nintrs));
		return (HXGE_ERROR | HXGE_DDI_FAILED);
	}

	ddi_status = ddi_intr_get_navail(dip, int_type, &navail);
	if ((ddi_status != DDI_SUCCESS) || (navail == 0)) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "ddi_intr_get_navail() failed, status: 0x%x, "
		    "navail: %d", ddi_status, navail));
		return (HXGE_ERROR | HXGE_DDI_FAILED);
	}

	HXGE_DEBUG_MSG((hxgep, INT_CTL,
	    "ddi_intr_get_navail() returned: intr type %d nintrs %d, navail %d",
	    int_type, nintrs, navail));

	/* PSARC/2007/453 MSI-X interrupt limit override */
	if (int_type == DDI_INTR_TYPE_MSIX) {
		nrequest = hxge_create_msi_property(hxgep);
		if (nrequest < navail) {
			navail = nrequest;
			HXGE_DEBUG_MSG((hxgep, INT_CTL,
			    "hxge_add_intrs_adv_type: nintrs %d "
			    "navail %d (nrequest %d)",
			    nintrs, navail, nrequest));
		}
	}

	if (int_type == DDI_INTR_TYPE_MSI && !ISP2(navail)) {
		/* MSI must be power of 2 */
		if ((navail & 16) == 16) {
			navail = 16;
		} else if ((navail & 8) == 8) {
			navail = 8;
		} else if ((navail & 4) == 4) {
			navail = 4;
		} else if ((navail & 2) == 2) {
			navail = 2;
		} else {
			navail = 1;
		}
		HXGE_DEBUG_MSG((hxgep, INT_CTL,
		    "ddi_intr_get_navail(): (msi power of 2) nintrs %d, "
		    "navail %d", nintrs, navail));
	}

	HXGE_DEBUG_MSG((hxgep, INT_CTL,
	    "requesting: intr type %d nintrs %d, navail %d",
	    int_type, nintrs, navail));

	behavior = ((int_type == DDI_INTR_TYPE_FIXED) ? DDI_INTR_ALLOC_STRICT :
	    DDI_INTR_ALLOC_NORMAL);
	intrp->intr_size = navail * sizeof (ddi_intr_handle_t);
	intrp->htable = kmem_zalloc(intrp->intr_size, KM_SLEEP);

	ddi_status = ddi_intr_alloc(dip, intrp->htable, int_type, inum,
	    navail, &nactual, behavior);
	if (ddi_status != DDI_SUCCESS || nactual == 0) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    " ddi_intr_alloc() failed: %d", ddi_status));
		kmem_free(intrp->htable, intrp->intr_size);
		return (HXGE_ERROR | HXGE_DDI_FAILED);
	}

	HXGE_DEBUG_MSG((hxgep, INT_CTL,
	    "ddi_intr_alloc() returned: navail %d nactual %d",
	    navail, nactual));

	if ((ddi_status = ddi_intr_get_pri(intrp->htable[0],
	    (uint_t *)&intrp->pri)) != DDI_SUCCESS) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    " ddi_intr_get_pri() failed: %d", ddi_status));
		/* Free already allocated interrupts */
		for (y = 0; y < nactual; y++) {
			(void) ddi_intr_free(intrp->htable[y]);
		}

		kmem_free(intrp->htable, intrp->intr_size);
		return (HXGE_ERROR | HXGE_DDI_FAILED);
	}

	nrequired = 0;
	status = hxge_ldgv_init(hxgep, &nactual, &nrequired);
	if (status != HXGE_OK) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "hxge_add_intrs_adv_type: hxge_ldgv_init "
		    "failed: 0x%x", status));
		/* Free already allocated interrupts */
		for (y = 0; y < nactual; y++) {
			(void) ddi_intr_free(intrp->htable[y]);
		}

		kmem_free(intrp->htable, intrp->intr_size);
		return (status);
	}

	ldgp = hxgep->ldgvp->ldgp;
	HXGE_DEBUG_MSG((hxgep, INT_CTL,
	    "After hxge_ldgv_init(): nreq %d nactual %d", nrequired, nactual));

	if (nactual < nrequired)
		loop = nactual;
	else
		loop = nrequired;

	for (x = 0; x < loop; x++, ldgp++) {
		ldgp->vector = (uint8_t)x;
		arg1 = ldgp->ldvp;
		arg2 = hxgep;
		if (ldgp->nldvs == 1) {
			inthandler = (uint_t *)ldgp->ldvp->ldv_intr_handler;
			HXGE_DEBUG_MSG((hxgep, INT_CTL,
			    "hxge_add_intrs_adv_type: arg1 0x%x arg2 0x%x: "
			    "1-1 int handler (entry %d)\n",
			    arg1, arg2, x));
		} else if (ldgp->nldvs > 1) {
			inthandler = (uint_t *)ldgp->sys_intr_handler;
			HXGE_DEBUG_MSG((hxgep, INT_CTL,
			    "hxge_add_intrs_adv_type: arg1 0x%x arg2 0x%x: "
			    "nldevs %d int handler (entry %d)\n",
			    arg1, arg2, ldgp->nldvs, x));
		}
		HXGE_DEBUG_MSG((hxgep, INT_CTL,
		    "==> hxge_add_intrs_adv_type: ddi_add_intr(inum) #%d "
		    "htable 0x%llx", x, intrp->htable[x]));

		if ((ddi_status = ddi_intr_add_handler(intrp->htable[x],
		    (ddi_intr_handler_t *)inthandler, arg1, arg2)) !=
		    DDI_SUCCESS) {
			HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
			    "==> hxge_add_intrs_adv_type: failed #%d "
			    "status 0x%x", x, ddi_status));
			for (y = 0; y < intrp->intr_added; y++) {
				(void) ddi_intr_remove_handler(
				    intrp->htable[y]);
			}

			/* Free already allocated intr */
			for (y = 0; y < nactual; y++) {
				(void) ddi_intr_free(intrp->htable[y]);
			}
			kmem_free(intrp->htable, intrp->intr_size);

			(void) hxge_ldgv_uninit(hxgep);

			return (HXGE_ERROR | HXGE_DDI_FAILED);
		}

		ldgp->htable_idx = x;
		intrp->intr_added++;
	}
	intrp->msi_intx_cnt = nactual;

	HXGE_DEBUG_MSG((hxgep, INT_CTL,
	    "Requested: %d, Allowed: %d msi_intx_cnt %d intr_added %d",
	    navail, nactual, intrp->msi_intx_cnt, intrp->intr_added));

	(void) ddi_intr_get_cap(intrp->htable[0], &intrp->intr_cap);
	(void) hxge_intr_ldgv_init(hxgep);

	HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_add_intrs_adv_type"));

	return (status);
}

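/*
 * hxge_add_intrs_adv_type_fix
 *
 *	Allocate and register fixed (legacy INTx) interrupts: determine the
 *	number of available interrupts, allocate them, initialize the
 *	logical device groups, and add one handler per group.
 */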
/*ARGSUSED*/
static hxge_status_t
hxge_add_intrs_adv_type_fix(p_hxge_t hxgep, uint32_t int_type)
{
	dev_info_t *dip = hxgep->dip;
	p_hxge_ldg_t ldgp;
	p_hxge_intr_t intrp;
	uint_t *inthandler;
	void *arg1, *arg2;
	int behavior;
	int nintrs, navail;
	int nactual, nrequired;
	int inum = 0;
	int x, y;
	int ddi_status = DDI_SUCCESS;
	hxge_status_t status = HXGE_OK;

	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs_adv_type_fix"));
	intrp = (p_hxge_intr_t)&hxgep->hxge_intr_type;

	ddi_status = ddi_intr_get_nintrs(dip, int_type, &nintrs);
	if ((ddi_status != DDI_SUCCESS) || (nintrs == 0)) {
		HXGE_DEBUG_MSG((hxgep, INT_CTL,
		    "ddi_intr_get_nintrs() failed, status: 0x%x, "
		    "nintrs: %d", ddi_status, nintrs));
		return (HXGE_ERROR | HXGE_DDI_FAILED);
	}

	ddi_status = ddi_intr_get_navail(dip, int_type, &navail);
	if ((ddi_status != DDI_SUCCESS) || (navail == 0)) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "ddi_intr_get_navail() failed, status: 0x%x, "
		    "navail: %d", ddi_status, navail));
		return (HXGE_ERROR | HXGE_DDI_FAILED);
	}

	HXGE_DEBUG_MSG((hxgep, INT_CTL,
	    "ddi_intr_get_navail() returned: nintrs %d, navail %d",
	    nintrs, navail));

	behavior = ((int_type == DDI_INTR_TYPE_FIXED) ? DDI_INTR_ALLOC_STRICT :
	    DDI_INTR_ALLOC_NORMAL);
	intrp->intr_size = navail * sizeof (ddi_intr_handle_t);
	intrp->htable = kmem_alloc(intrp->intr_size, KM_SLEEP);
	ddi_status = ddi_intr_alloc(dip, intrp->htable, int_type, inum,
	    navail, &nactual, behavior);
	if (ddi_status != DDI_SUCCESS || nactual == 0) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    " ddi_intr_alloc() failed: %d", ddi_status));
		kmem_free(intrp->htable, intrp->intr_size);
		return (HXGE_ERROR | HXGE_DDI_FAILED);
	}

	if ((ddi_status = ddi_intr_get_pri(intrp->htable[0],
	    (uint_t *)&intrp->pri)) != DDI_SUCCESS) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    " ddi_intr_get_pri() failed: %d", ddi_status));
		/* Free already allocated interrupts */
		for (y = 0; y < nactual; y++) {
			(void) ddi_intr_free(intrp->htable[y]);
		}

		kmem_free(intrp->htable, intrp->intr_size);
		return (HXGE_ERROR | HXGE_DDI_FAILED);
	}

	nrequired = 0;
	status = hxge_ldgv_init(hxgep, &nactual, &nrequired);
	if (status != HXGE_OK) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "hxge_add_intrs_adv_type_fix:hxge_ldgv_init "
		    "failed: 0x%x", status));
		/* Free already allocated interrupts */
		for (y = 0; y < nactual; y++) {
			(void) ddi_intr_free(intrp->htable[y]);
		}

		kmem_free(intrp->htable, intrp->intr_size);
		return (status);
	}

	ldgp = hxgep->ldgvp->ldgp;
	for (x = 0; x < nrequired; x++, ldgp++) {
		ldgp->vector = (uint8_t)x;
		arg1 = ldgp->ldvp;
		arg2 = hxgep;
		if (ldgp->nldvs == 1) {
			inthandler = (uint_t *)ldgp->ldvp->ldv_intr_handler;
			HXGE_DEBUG_MSG((hxgep, INT_CTL,
			    "hxge_add_intrs_adv_type_fix: "
			    "1-1 int handler(%d) ldg %d ldv %d "
			    "arg1 $%p arg2 $%p\n",
			    x, ldgp->ldg, ldgp->ldvp->ldv, arg1, arg2));
		} else if (ldgp->nldvs > 1) {
			inthandler = (uint_t *)ldgp->sys_intr_handler;
			HXGE_DEBUG_MSG((hxgep, INT_CTL,
			    "hxge_add_intrs_adv_type_fix: "
			    "shared int handler(%d) nldvs %d ldg %d ldv %d "
			    "arg1 0x%016llx arg2 0x%016llx\n",
			    x, ldgp->nldvs, ldgp->ldg, ldgp->ldvp->ldv,
			    arg1, arg2));
		}

		if ((ddi_status = ddi_intr_add_handler(intrp->htable[x],
		    (ddi_intr_handler_t *)inthandler, arg1, arg2)) !=
		    DDI_SUCCESS) {
			HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
			    "==> hxge_add_intrs_adv_type_fix: failed #%d "
			    "status 0x%x", x, ddi_status));
			for (y = 0; y < intrp->intr_added; y++) {
				(void) ddi_intr_remove_handler(
				    intrp->htable[y]);
			}
			for (y = 0; y < nactual; y++) {
				(void) ddi_intr_free(intrp->htable[y]);
			}
			/* Free already allocated intr */
			kmem_free(intrp->htable, intrp->intr_size);

			(void) hxge_ldgv_uninit(hxgep);

			return (HXGE_ERROR | HXGE_DDI_FAILED);
		}
		intrp->intr_added++;
	}

	intrp->msi_intx_cnt = nactual;

	(void) ddi_intr_get_cap(intrp->htable[0], &intrp->intr_cap);

	status = hxge_intr_ldgv_init(hxgep);

	HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_add_intrs_adv_type_fix"));

	return (status);
}

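/*
 * hxge_remove_intrs
 *
 *	Disable, remove, and free all previously registered interrupts,
 *	then tear down the logical device group state.
 */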
/*ARGSUSED*/
static void
hxge_remove_intrs(p_hxge_t hxgep)
{
	int i, inum;
	p_hxge_intr_t intrp;

	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_remove_intrs"));
	intrp = (p_hxge_intr_t)&hxgep->hxge_intr_type;
	if (!intrp->intr_registered) {
		HXGE_DEBUG_MSG((hxgep, INT_CTL,
		    "<== hxge_remove_intrs: interrupts not registered"));
		return;
	}

	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_remove_intrs:advanced"));

	if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) {
		(void) ddi_intr_block_disable(intrp->htable,
		    intrp->intr_added);
	} else {
		for (i = 0; i < intrp->intr_added; i++) {
			(void) ddi_intr_disable(intrp->htable[i]);
		}
	}

	for (inum = 0; inum < intrp->intr_added; inum++) {
		if (intrp->htable[inum]) {
			(void) ddi_intr_remove_handler(intrp->htable[inum]);
		}
	}

	for (inum = 0; inum < intrp->msi_intx_cnt; inum++) {
		if (intrp->htable[inum]) {
			HXGE_DEBUG_MSG((hxgep, DDI_CTL,
			    "hxge_remove_intrs: ddi_intr_free inum %d "
			    "msi_intx_cnt %d intr_added %d",
			    inum, intrp->msi_intx_cnt, intrp->intr_added));

			(void) ddi_intr_free(intrp->htable[inum]);
		}
	}

	kmem_free(intrp->htable, intrp->intr_size);
	intrp->intr_registered = B_FALSE;
	intrp->intr_enabled = B_FALSE;
	intrp->msi_intx_cnt = 0;
	intrp->intr_added = 0;

	(void) hxge_ldgv_uninit(hxgep);

	HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_remove_intrs"));
}

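/*
 * hxge_intrs_enable
 *
 *	Enable all registered interrupts, using block enable when the
 *	interrupt capability supports it and per-vector enable otherwise.
 */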
/*ARGSUSED*/
static void
hxge_intrs_enable(p_hxge_t hxgep)
{
	p_hxge_intr_t intrp;
	int i;
	int status;

	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_intrs_enable"));

	intrp = (p_hxge_intr_t)&hxgep->hxge_intr_type;

	if (!intrp->intr_registered) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "<== hxge_intrs_enable: "
		    "interrupts are not registered"));
		return;
	}

	if (intrp->intr_enabled) {
		HXGE_DEBUG_MSG((hxgep, INT_CTL,
		    "<== hxge_intrs_enable: already enabled"));
		return;
	}

	if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) {
		status = ddi_intr_block_enable(intrp->htable,
		    intrp->intr_added);
		HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_intrs_enable "
		    "block enable - status 0x%x total inums #%d\n",
		    status, intrp->intr_added));
	} else {
		for (i = 0; i < intrp->intr_added; i++) {
			status = ddi_intr_enable(intrp->htable[i]);
			HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_intrs_enable "
			    "ddi_intr_enable:enable - status 0x%x "
			    "total inums %d enable inum #%d\n",
			    status, intrp->intr_added, i));
			if (status == DDI_SUCCESS) {
				intrp->intr_enabled = B_TRUE;
			}
		}
	}

	HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_intrs_enable"));
}

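/*
 * hxge_intrs_disable
 *
 *	Disable all registered interrupts, using block disable when the
 *	interrupt capability supports it and per-vector disable otherwise.
 */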
/*ARGSUSED*/
static void
hxge_intrs_disable(p_hxge_t hxgep)
{
	p_hxge_intr_t intrp;
	int i;

	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_intrs_disable"));

	intrp = (p_hxge_intr_t)&hxgep->hxge_intr_type;

	if (!intrp->intr_registered) {
		HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_intrs_disable: "
		    "interrupts are not registered"));
		return;
	}

	if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) {
		(void) ddi_intr_block_disable(intrp->htable,
		    intrp->intr_added);
	} else {
		for (i = 0; i < intrp->intr_added; i++) {
			(void) ddi_intr_disable(intrp->htable[i]);
		}
	}

	intrp->intr_enabled = B_FALSE;
	HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_intrs_disable"));
}

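/*
 * hxge_mac_register
 *
 *	Register the instance with the GLDv3 MAC layer: allocate a
 *	mac_register_t, fill in the driver entry points and properties,
 *	and call mac_register().
 */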
static hxge_status_t
hxge_mac_register(p_hxge_t hxgep)
{
	mac_register_t *macp;
	int status;

	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_mac_register"));

	if ((macp = mac_alloc(MAC_VERSION)) == NULL)
		return (HXGE_ERROR);

	macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
	macp->m_driver = hxgep;
	macp->m_dip = hxgep->dip;
	macp->m_src_addr = hxgep->ouraddr.ether_addr_octet;
	macp->m_callbacks = &hxge_m_callbacks;
	macp->m_min_sdu = 0;
	macp->m_max_sdu = hxgep->vmac.maxframesize - MTU_TO_FRAME_SIZE;
	macp->m_margin = VLAN_TAGSZ;
	macp->m_priv_props = hxge_priv_props;
	macp->m_v12n = MAC_VIRT_LEVEL1;

	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
	    "hxge_mac_register: ether addr is %x:%x:%x:%x:%x:%x",
	    macp->m_src_addr[0],
	    macp->m_src_addr[1],
	    macp->m_src_addr[2],
	    macp->m_src_addr[3],
	    macp->m_src_addr[4],
	    macp->m_src_addr[5]));

	status = mac_register(macp, &hxgep->mach);
	mac_free(macp);

	if (status != 0) {
		cmn_err(CE_WARN,
		    "hxge_mac_register failed (status %d instance %d)",
		    status, hxgep->instance);
		return (HXGE_ERROR);
	}

	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_mac_register success "
	    "(instance %d)", hxgep->instance));

	return (HXGE_OK);
}

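/*
 * hxge_init_common_dev
 *
 *	Attach this instance to the per-Hydra shared hardware list keyed by
 *	the parent devinfo node, creating and initializing a new list entry
 *	(with its configuration, TCAM, and VLAN locks) when none exists.
 */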
static int
hxge_init_common_dev(p_hxge_t hxgep)
{
	p_hxge_hw_list_t hw_p;
	dev_info_t *p_dip;

	HXGE_DEBUG_MSG((hxgep, MOD_CTL, "==> hxge_init_common_dev"));

	p_dip = hxgep->p_dip;
	MUTEX_ENTER(&hxge_common_lock);

	/*
	 * Loop through existing per Hydra hardware list.
	 */
	for (hw_p = hxge_hw_list; hw_p; hw_p = hw_p->next) {
		HXGE_DEBUG_MSG((hxgep, MOD_CTL,
		    "==> hxge_init_common_dev: hw_p $%p parent dip $%p",
		    hw_p, p_dip));
		if (hw_p->parent_devp == p_dip) {
			hxgep->hxge_hw_p = hw_p;
			hw_p->ndevs++;
			hw_p->hxge_p = hxgep;
			HXGE_DEBUG_MSG((hxgep, MOD_CTL,
			    "==> hxge_init_common_device: "
			    "hw_p $%p parent dip $%p ndevs %d (found)",
			    hw_p, p_dip, hw_p->ndevs));
			break;
		}
	}

	if (hw_p == NULL) {
		HXGE_DEBUG_MSG((hxgep, MOD_CTL,
		    "==> hxge_init_common_dev: parent dip $%p (new)", p_dip));
		hw_p = kmem_zalloc(sizeof (hxge_hw_list_t), KM_SLEEP);
		hw_p->parent_devp = p_dip;
		hw_p->magic = HXGE_MAGIC;
		hxgep->hxge_hw_p = hw_p;
		hw_p->ndevs++;
		hw_p->hxge_p = hxgep;
		hw_p->next = hxge_hw_list;

		MUTEX_INIT(&hw_p->hxge_cfg_lock, NULL, MUTEX_DRIVER, NULL);
		MUTEX_INIT(&hw_p->hxge_tcam_lock, NULL, MUTEX_DRIVER, NULL);
		MUTEX_INIT(&hw_p->hxge_vlan_lock, NULL, MUTEX_DRIVER, NULL);

		hxge_hw_list = hw_p;
	}
	MUTEX_EXIT(&hxge_common_lock);
	HXGE_DEBUG_MSG((hxgep, MOD_CTL,
	    "==> hxge_init_common_dev (hxge_hw_list) $%p", hxge_hw_list));
	HXGE_DEBUG_MSG((hxgep, MOD_CTL, "<== hxge_init_common_dev"));

	return (HXGE_OK);
}

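/*
 * hxge_uninit_common_dev
 *
 *	Detach this instance from the per-Hydra shared hardware list and,
 *	when the last instance goes away, destroy the entry's locks and
 *	free it.
 */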
static void
hxge_uninit_common_dev(p_hxge_t hxgep)
{
	p_hxge_hw_list_t hw_p, h_hw_p;
	dev_info_t *p_dip;

	HXGE_DEBUG_MSG((hxgep, MOD_CTL, "==> hxge_uninit_common_dev"));
	if (hxgep->hxge_hw_p == NULL) {
		HXGE_DEBUG_MSG((hxgep, MOD_CTL,
		    "<== hxge_uninit_common_dev (no common)"));
		return;
	}

	MUTEX_ENTER(&hxge_common_lock);
	h_hw_p = hxge_hw_list;
	for (hw_p = hxge_hw_list; hw_p; hw_p = hw_p->next) {
		p_dip = hw_p->parent_devp;
		if (hxgep->hxge_hw_p == hw_p && p_dip == hxgep->p_dip &&
		    hxgep->hxge_hw_p->magic == HXGE_MAGIC &&
		    hw_p->magic == HXGE_MAGIC) {
			HXGE_DEBUG_MSG((hxgep, MOD_CTL,
			    "==> hxge_uninit_common_dev: "
			    "hw_p $%p parent dip $%p ndevs %d (found)",
			    hw_p, p_dip, hw_p->ndevs));

			hxgep->hxge_hw_p = NULL;
			if (hw_p->ndevs) {
				hw_p->ndevs--;
			}
			hw_p->hxge_p = NULL;
			if (!hw_p->ndevs) {
				MUTEX_DESTROY(&hw_p->hxge_vlan_lock);
				MUTEX_DESTROY(&hw_p->hxge_tcam_lock);
				MUTEX_DESTROY(&hw_p->hxge_cfg_lock);
				HXGE_DEBUG_MSG((hxgep, MOD_CTL,
				    "==> hxge_uninit_common_dev: "
				    "hw_p $%p parent dip $%p ndevs %d (last)",
				    hw_p, p_dip, hw_p->ndevs));

				if (hw_p == hxge_hw_list) {
					HXGE_DEBUG_MSG((hxgep, MOD_CTL,
					    "==> hxge_uninit_common_dev:"
					    "remove head "
					    "hw_p $%p parent dip $%p "
					    "ndevs %d (head)",
					    hw_p, p_dip, hw_p->ndevs));
					hxge_hw_list = hw_p->next;
				} else {
					HXGE_DEBUG_MSG((hxgep, MOD_CTL,
					    "==> hxge_uninit_common_dev:"
					    "remove middle "
					    "hw_p $%p parent dip $%p "
					    "ndevs %d (middle)",
					    hw_p, p_dip, hw_p->ndevs));
					h_hw_p->next = hw_p->next;
				}

				KMEM_FREE(hw_p, sizeof (hxge_hw_list_t));
			}
			break;
		} else {
			h_hw_p = hw_p;
		}
	}

	MUTEX_EXIT(&hxge_common_lock);
	HXGE_DEBUG_MSG((hxgep, MOD_CTL,
	    "==> hxge_uninit_common_dev (hxge_hw_list) $%p", hxge_hw_list));

	HXGE_DEBUG_MSG((hxgep, MOD_CTL, "<== hxge_uninit_common_dev"));
}

#define	HXGE_MSIX_ENTRIES		32
#define	HXGE_MSIX_WAIT_COUNT		10
#define	HXGE_MSIX_PARITY_CHECK_COUNT	30

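/*
 * hxge_link_poll
 *
 *	Periodic timeout handler: read the CIP link status register,
 *	report any change in XPCS0 link state to the MAC layer, and
 *	reschedule itself.
 */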
static void
hxge_link_poll(void *arg)
{
	p_hxge_t hxgep = (p_hxge_t)arg;
	hpi_handle_t handle;
	cip_link_stat_t link_stat;
	hxge_timeout *to = &hxgep->timeout;

	handle = HXGE_DEV_HPI_HANDLE(hxgep);
	HXGE_REG_RD32(handle, CIP_LINK_STAT, &link_stat.value);

	if (to->report_link_status ||
	    (to->link_status != link_stat.bits.xpcs0_link_up)) {
		to->link_status = link_stat.bits.xpcs0_link_up;
		to->report_link_status = B_FALSE;

		if (link_stat.bits.xpcs0_link_up) {
			hxge_link_update(hxgep, LINK_STATE_UP);
		} else {
			hxge_link_update(hxgep, LINK_STATE_DOWN);
		}
	}

	/* Restart the link status timer to check the link status */
	MUTEX_ENTER(&to->lock);
	to->id = timeout(hxge_link_poll, arg, to->ticks);
	MUTEX_EXIT(&to->lock);
}

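/*
 * hxge_link_update
 *
 *	Report the new link state to the MAC layer and update the link
 *	speed, duplex, and link-up statistics accordingly.
 */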
static void
hxge_link_update(p_hxge_t hxgep, link_state_t state)
{
	p_hxge_stats_t statsp = (p_hxge_stats_t)hxgep->statsp;

	mac_link_update(hxgep->mach, state);
	if (state == LINK_STATE_UP) {
		statsp->mac_stats.link_speed = 10000;
		statsp->mac_stats.link_duplex = 2;
		statsp->mac_stats.link_up = 1;
	} else {
		statsp->mac_stats.link_speed = 0;
		statsp->mac_stats.link_duplex = 0;
		statsp->mac_stats.link_up = 0;
	}
}

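/*
 * hxge_msix_init
 *
 *	Prime the MSI-X table through the MSI-X BAR by writing a known
 *	pattern to each table entry and then reading the entries back.
 */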
static void
hxge_msix_init(p_hxge_t hxgep)
{
	uint32_t data0;
	uint32_t data1;
	uint32_t data2;
	int i;
	uint32_t msix_entry0;
	uint32_t msix_entry1;
	uint32_t msix_entry2;
	uint32_t msix_entry3;

	/* Change to use MSIx bar instead of indirect access */
	for (i = 0; i < HXGE_MSIX_ENTRIES; i++) {
		data0 = 0xffffffff - i;
		data1 = 0xffffffff - i - 1;
		data2 = 0xffffffff - i - 2;

		HXGE_REG_WR32(hxgep->hpi_msi_handle, i * 16, data0);
		HXGE_REG_WR32(hxgep->hpi_msi_handle, i * 16 + 4, data1);
		HXGE_REG_WR32(hxgep->hpi_msi_handle, i * 16 + 8, data2);
		HXGE_REG_WR32(hxgep->hpi_msi_handle, i * 16 + 12, 0);
	}

	/* Initialize ram data out buffer. */
	for (i = 0; i < HXGE_MSIX_ENTRIES; i++) {
		HXGE_REG_RD32(hxgep->hpi_msi_handle, i * 16, &msix_entry0);
		HXGE_REG_RD32(hxgep->hpi_msi_handle, i * 16 + 4, &msix_entry1);
		HXGE_REG_RD32(hxgep->hpi_msi_handle, i * 16 + 8, &msix_entry2);
		HXGE_REG_RD32(hxgep->hpi_msi_handle, i * 16 + 12, &msix_entry3);
	}
}

/*
 * The following function is to support
 * PSARC/2007/453 MSI-X interrupt limit override.
 */
static int
hxge_create_msi_property(p_hxge_t hxgep)
{
	int nmsi;
	extern int ncpus;

	HXGE_DEBUG_MSG((hxgep, MOD_CTL, "==>hxge_create_msi_property"));

	(void) ddi_prop_create(DDI_DEV_T_NONE, hxgep->dip,
	    DDI_PROP_CANSLEEP, "#msix-request", NULL, 0);
	/*
	 * The maximum number of MSI-X vectors requested is 8
	 * (HXGE_MSIX_REQUEST_10G).  If there are fewer than 8 CPUs,
	 * request one MSI-X vector per CPU instead.
	 */
	if (ncpus >= HXGE_MSIX_REQUEST_10G) {
		nmsi = HXGE_MSIX_REQUEST_10G;
	} else {
		nmsi = ncpus;
	}

	HXGE_DEBUG_MSG((hxgep, MOD_CTL,
	    "==>hxge_create_msi_property(10G): exists 0x%x (nmsi %d)",
	    ddi_prop_exists(DDI_DEV_T_NONE, hxgep->dip,
	    DDI_PROP_CANSLEEP, "#msix-request"), nmsi));

	HXGE_DEBUG_MSG((hxgep, MOD_CTL, "<==hxge_create_msi_property"));
	return (nmsi);
}
