1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21 /*
22 * Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved.
23 * Copyright 2016 OmniTI Computer Consulting, Inc. All rights reserved.
24 */
25
26 /*
27  * SunOS MT STREAMS NIU/Neptune 10Gb Ethernet Device Driver.
28 */
29 #include <sys/nxge/nxge_impl.h>
30 #include <sys/nxge/nxge_hio.h>
31 #include <sys/nxge/nxge_rxdma.h>
32 #include <sys/pcie.h>
33
34 uint32_t nxge_use_partition = 0; /* debug partition flag */
35 uint32_t nxge_dma_obp_props_only = 1; /* use obp published props */
36 uint32_t nxge_use_rdc_intr = 1; /* debug to assign rdc intr */
37 /*
38 * PSARC/2007/453 MSI-X interrupt limit override
39 */
40 uint32_t nxge_msi_enable = 2;
41
42 /*
43 * Software workaround for a Neptune (PCI-E)
44  * hardware interrupt bug: the hardware
45  * may generate spurious interrupts after the
46  * device interrupt handler has been removed. If this flag
47 * is enabled, the driver will reset the
48 * hardware when devices are being detached.
49 */
50 uint32_t nxge_peu_reset_enable = 0;
51
52 /*
53 * Software workaround for the hardware
54 * checksum bugs that affect packet transmission
55 * and receive:
56 *
57 * Usage of nxge_cksum_offload:
58 *
59 * (1) nxge_cksum_offload = 0 (default):
60  *	- transmit packets:
61 * TCP: uses the hardware checksum feature.
62 * UDP: driver will compute the software checksum
63 * based on the partial checksum computed
64 * by the IP layer.
65  *	- receive packets:
66  *		TCP: marks packet checksum flags based on hardware result.
67 * UDP: will not mark checksum flags.
68 *
69 * (2) nxge_cksum_offload = 1:
70 * - transmit packets:
71 * TCP/UDP: uses the hardware checksum feature.
72  *	- receive packets:
73 * TCP/UDP: marks packet checksum flags based on hardware result.
74 *
75 * (3) nxge_cksum_offload = 2:
76 * - The driver will not register its checksum capability.
77 * Checksum for both TCP and UDP will be computed
78 * by the stack.
79 * - The software LSO is not allowed in this case.
80 *
81 * (4) nxge_cksum_offload > 2:
82  *	  - Will be treated as if it were set to 2
83  *	    (the stack will compute the checksum).
84 *
85 * (5) If the hardware bug is fixed, this workaround
86 * needs to be updated accordingly to reflect
87 * the new hardware revision.
88 */
89 uint32_t nxge_cksum_offload = 0;
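
/*
 * Example for illustration (an editor's sketch, not driver code): the
 * globals in this file can be tuned at boot time from /etc/system; for
 * instance, to have the stack compute all checksums:
 *
 *	set nxge:nxge_cksum_offload = 2
 */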
90
91 /*
92 * Globals: tunable parameters (/etc/system or adb)
93 *
94 */
95 uint32_t nxge_rbr_size = NXGE_RBR_RBB_DEFAULT;
96 uint32_t nxge_rbr_spare_size = 0;
97 uint32_t nxge_rcr_size = NXGE_RCR_DEFAULT;
98 uint16_t nxge_rdc_buf_offset = SW_OFFSET_NO_OFFSET;
99 uint32_t nxge_tx_ring_size = NXGE_TX_RING_DEFAULT;
100 boolean_t nxge_no_msg = B_TRUE; /* control message display */
101 uint32_t nxge_no_link_notify = 0; /* control DL_NOTIFY */
102 uint32_t nxge_bcopy_thresh = TX_BCOPY_MAX;
103 uint32_t nxge_dvma_thresh = TX_FASTDVMA_MIN;
104 uint32_t nxge_dma_stream_thresh = TX_STREAM_MIN;
105 uint32_t nxge_jumbo_mtu = TX_JUMBO_MTU;
106 nxge_tx_mode_t nxge_tx_scheme = NXGE_USE_SERIAL;
107
108 /* MAX LSO size */
109 #define NXGE_LSO_MAXLEN 65535
110 uint32_t nxge_lso_max = NXGE_LSO_MAXLEN;
111
112
113 /*
114  * Tunable to reduce the amount of time spent in the
115 * ISR doing Rx Processing.
116 */
117 uint32_t nxge_max_rx_pkts = 1024;
118
119 /*
120 * Tunables to manage the receive buffer blocks.
121 *
122 * nxge_rx_threshold_hi: copy all buffers.
123  * nxge_rx_buf_size_type: receive buffer block size type.
124 * nxge_rx_threshold_lo: copy only up to tunable block size type.
125 */
126 nxge_rxbuf_threshold_t nxge_rx_threshold_hi = NXGE_RX_COPY_6;
127 nxge_rxbuf_type_t nxge_rx_buf_size_type = RCR_PKTBUFSZ_0;
128 nxge_rxbuf_threshold_t nxge_rx_threshold_lo = NXGE_RX_COPY_3;
129
130 uint32_t nxge_use_kmem_alloc = 1;
131
132 rtrace_t npi_rtracebuf;
133
134 /*
135 * The hardware sometimes fails to allow enough time for the link partner
136 * to send an acknowledgement for packets that the hardware sent to it. The
137  * hardware resends the packets earlier than it should in those instances.
138  * This behavior caused some switches to acknowledge the wrong packets
139  * and triggered fatal errors.
140 * This software workaround is to set the replay timer to a value
141 * suggested by the hardware team.
142 *
143 * PCI config space replay timer register:
144  * The following replay timeout value is 0xc
145  * for bits 18:14.
146 */
147 #define PCI_REPLAY_TIMEOUT_CFG_OFFSET 0xb8
148 #define PCI_REPLAY_TIMEOUT_SHIFT 14
149
150 uint32_t nxge_set_replay_timer = 1;
151 uint32_t nxge_replay_timeout = 0xc;
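
/*
 * For illustration only, a minimal sketch of what this workaround
 * amounts to (the actual update is done by nxge_set_pci_replay_timeout(),
 * later in this file): a read-modify-write of the 5-bit field at bits
 * 18:14 of the config register at offset 0xb8, roughly:
 *
 *	data = pci_config_get32(handle, PCI_REPLAY_TIMEOUT_CFG_OFFSET);
 *	data &= ~(0x1f << PCI_REPLAY_TIMEOUT_SHIFT);
 *	data |= (nxge_replay_timeout << PCI_REPLAY_TIMEOUT_SHIFT);
 *	pci_config_put32(handle, PCI_REPLAY_TIMEOUT_CFG_OFFSET, data);
 */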
152
153 /*
154  * The transmit serialization sometimes sleeps
155  * longer than it should before calling the driver
156  * transmit function.
157  * The performance group suggested that a time-wait tunable
158  * be used to set the maximum wait time when needed;
159  * the default is set to 1 tick.
160 */
161 uint32_t nxge_tx_serial_maxsleep = 1;
162
163 #if defined(sun4v)
164 /*
165 * Hypervisor N2/NIU services information.
166 */
167 /*
168 * The following is the default API supported:
169 * major 1 and minor 1.
170 *
171  * Please update NIU_MAJOR_HI,
172  * NIU_MINOR_HI, and the minor number supported
173  * when newer Hypervisor API interfaces
174 * are added. Also, please update nxge_hsvc_register()
175 * if needed.
176 */
177 static hsvc_info_t niu_hsvc = {
178 HSVC_REV_1, NULL, HSVC_GROUP_NIU, NIU_MAJOR_VER,
179 NIU_MINOR_VER, "nxge"
180 };
181
182 static int nxge_hsvc_register(p_nxge_t);
183 #endif
184
185 /*
186 * Function Prototypes
187 */
188 static int nxge_attach(dev_info_t *, ddi_attach_cmd_t);
189 static int nxge_detach(dev_info_t *, ddi_detach_cmd_t);
190 static void nxge_unattach(p_nxge_t);
191 static int nxge_quiesce(dev_info_t *);
192
193 #if NXGE_PROPERTY
194 static void nxge_remove_hard_properties(p_nxge_t);
195 #endif
196
197 /*
198 * These two functions are required by nxge_hio.c
199 */
200 extern int nxge_m_mmac_remove(void *arg, int slot);
201 extern void nxge_grp_cleanup(p_nxge_t nxge);
202
203 static nxge_status_t nxge_setup_system_dma_pages(p_nxge_t);
204
205 static nxge_status_t nxge_setup_mutexes(p_nxge_t);
206 static void nxge_destroy_mutexes(p_nxge_t);
207
208 static nxge_status_t nxge_map_regs(p_nxge_t nxgep);
209 static void nxge_unmap_regs(p_nxge_t nxgep);
210 #ifdef NXGE_DEBUG
211 static void nxge_test_map_regs(p_nxge_t nxgep);
212 #endif
213
214 static nxge_status_t nxge_add_intrs(p_nxge_t nxgep);
215 static void nxge_remove_intrs(p_nxge_t nxgep);
216
217 static nxge_status_t nxge_add_intrs_adv(p_nxge_t nxgep);
218 static nxge_status_t nxge_add_intrs_adv_type(p_nxge_t, uint32_t);
219 static nxge_status_t nxge_add_intrs_adv_type_fix(p_nxge_t, uint32_t);
220 static void nxge_intrs_enable(p_nxge_t nxgep);
221 static void nxge_intrs_disable(p_nxge_t nxgep);
222
223 static void nxge_suspend(p_nxge_t);
224 static nxge_status_t nxge_resume(p_nxge_t);
225
226 static nxge_status_t nxge_setup_dev(p_nxge_t);
227 static void nxge_destroy_dev(p_nxge_t);
228
229 static nxge_status_t nxge_alloc_mem_pool(p_nxge_t);
230 static void nxge_free_mem_pool(p_nxge_t);
231
232 nxge_status_t nxge_alloc_rx_mem_pool(p_nxge_t);
233 static void nxge_free_rx_mem_pool(p_nxge_t);
234
235 nxge_status_t nxge_alloc_tx_mem_pool(p_nxge_t);
236 static void nxge_free_tx_mem_pool(p_nxge_t);
237
238 static nxge_status_t nxge_dma_mem_alloc(p_nxge_t, dma_method_t,
239 struct ddi_dma_attr *,
240 size_t, ddi_device_acc_attr_t *, uint_t,
241 p_nxge_dma_common_t);
242
243 static void nxge_dma_mem_free(p_nxge_dma_common_t);
244 static void nxge_dma_free_rx_data_buf(p_nxge_dma_common_t);
245
246 static nxge_status_t nxge_alloc_rx_buf_dma(p_nxge_t, uint16_t,
247 p_nxge_dma_common_t *, size_t, size_t, uint32_t *);
248 static void nxge_free_rx_buf_dma(p_nxge_t, p_nxge_dma_common_t, uint32_t);
249
250 static nxge_status_t nxge_alloc_rx_cntl_dma(p_nxge_t, uint16_t,
251 p_nxge_dma_common_t *, size_t);
252 static void nxge_free_rx_cntl_dma(p_nxge_t, p_nxge_dma_common_t);
253
254 extern nxge_status_t nxge_alloc_tx_buf_dma(p_nxge_t, uint16_t,
255 p_nxge_dma_common_t *, size_t, size_t, uint32_t *);
256 static void nxge_free_tx_buf_dma(p_nxge_t, p_nxge_dma_common_t, uint32_t);
257
258 extern nxge_status_t nxge_alloc_tx_cntl_dma(p_nxge_t, uint16_t,
259 p_nxge_dma_common_t *,
260 size_t);
261 static void nxge_free_tx_cntl_dma(p_nxge_t, p_nxge_dma_common_t);
262
263 static int nxge_init_common_dev(p_nxge_t);
264 static void nxge_uninit_common_dev(p_nxge_t);
265 extern int nxge_param_set_mac(p_nxge_t, queue_t *, mblk_t *,
266 char *, caddr_t);
267 #if defined(sun4v)
268 extern nxge_status_t nxge_hio_rdc_enable(p_nxge_t nxgep);
269 extern nxge_status_t nxge_hio_rdc_intr_arm(p_nxge_t nxge, boolean_t arm);
270 #endif
271
272 /*
273 * The next declarations are for the GLDv3 interface.
274 */
275 static int nxge_m_start(void *);
276 static void nxge_m_stop(void *);
277 static int nxge_m_multicst(void *, boolean_t, const uint8_t *);
278 static int nxge_m_promisc(void *, boolean_t);
279 static void nxge_m_ioctl(void *, queue_t *, mblk_t *);
280 nxge_status_t nxge_mac_register(p_nxge_t);
281 static int nxge_altmac_set(p_nxge_t nxgep, uint8_t *mac_addr,
282 int slot, int rdctbl, boolean_t usetbl);
283 void nxge_mmac_kstat_update(p_nxge_t nxgep, int slot,
284 boolean_t factory);
285
286 static void nxge_m_getfactaddr(void *, uint_t, uint8_t *);
287 static boolean_t nxge_m_getcapab(void *, mac_capab_t, void *);
288 static int nxge_m_setprop(void *, const char *, mac_prop_id_t,
289 uint_t, const void *);
290 static int nxge_m_getprop(void *, const char *, mac_prop_id_t,
291 uint_t, void *);
292 static void nxge_m_propinfo(void *, const char *, mac_prop_id_t,
293 mac_prop_info_handle_t);
294 static void nxge_priv_propinfo(const char *, mac_prop_info_handle_t);
295 static int nxge_set_priv_prop(nxge_t *, const char *, uint_t,
296 const void *);
297 static int nxge_get_priv_prop(nxge_t *, const char *, uint_t, void *);
298 static void nxge_fill_ring(void *, mac_ring_type_t, const int, const int,
299 mac_ring_info_t *, mac_ring_handle_t);
300 static void nxge_group_add_ring(mac_group_driver_t, mac_ring_driver_t,
301 mac_ring_type_t);
302 static void nxge_group_rem_ring(mac_group_driver_t, mac_ring_driver_t,
303 mac_ring_type_t);
304
305 static void nxge_niu_peu_reset(p_nxge_t nxgep);
306 static void nxge_set_pci_replay_timeout(nxge_t *);
307
308 char *nxge_priv_props[] = {
309 "_adv_10gfdx_cap",
310 "_adv_pause_cap",
311 "_function_number",
312 "_fw_version",
313 "_port_mode",
314 "_hot_swap_phy",
315 "_rxdma_intr_time",
316 "_rxdma_intr_pkts",
317 "_class_opt_ipv4_tcp",
318 "_class_opt_ipv4_udp",
319 "_class_opt_ipv4_ah",
320 "_class_opt_ipv4_sctp",
321 "_class_opt_ipv6_tcp",
322 "_class_opt_ipv6_udp",
323 "_class_opt_ipv6_ah",
324 "_class_opt_ipv6_sctp",
325 "_soft_lso_enable",
326 NULL
327 };
328
329 #define NXGE_NEPTUNE_MAGIC 0x4E584745UL
330 #define MAX_DUMP_SZ 256
331
332 #define NXGE_M_CALLBACK_FLAGS \
333 (MC_IOCTL | MC_GETCAPAB | MC_SETPROP | MC_GETPROP | MC_PROPINFO)
334
335 mac_callbacks_t nxge_m_callbacks = {
336 NXGE_M_CALLBACK_FLAGS,
337 nxge_m_stat,
338 nxge_m_start,
339 nxge_m_stop,
340 nxge_m_promisc,
341 nxge_m_multicst,
342 NULL,
343 NULL,
344 NULL,
345 nxge_m_ioctl,
346 nxge_m_getcapab,
347 NULL,
348 NULL,
349 nxge_m_setprop,
350 nxge_m_getprop,
351 nxge_m_propinfo
352 };
353
354 void
355 nxge_err_inject(p_nxge_t, queue_t *, mblk_t *);
356
357 /* PSARC/2007/453 MSI-X interrupt limit override. */
358 #define NXGE_MSIX_REQUEST_10G 8
359 #define NXGE_MSIX_REQUEST_1G 2
360 static int nxge_create_msi_property(p_nxge_t);
361 /*
362  * For applications that care about
363  * latency, PAE and the customers
364  * requested that the driver provide tunables that
365  * allow the user to request a higher number of
366  * interrupts to spread the interrupts among
367  * multiple channels. The DDI framework limits
368  * the maximum number of MSI-X resources to allocate
369  * to 8 (ddi_msix_alloc_limit). If more than 8
370  * is set, ddi_msix_alloc_limit must be set accordingly.
371  * The default number of MSI interrupts is set to
372  * 8 for a 10G link and 2 for a 1G link.
373 */
374 #define NXGE_MSIX_MAX_ALLOWED 32
375 uint32_t nxge_msix_10g_intrs = NXGE_MSIX_REQUEST_10G;
376 uint32_t nxge_msix_1g_intrs = NXGE_MSIX_REQUEST_1G;
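
/*
 * Example for illustration only: to request 16 MSI-X vectors on a
 * 10G port, both the DDI limit and the driver tunable would need to
 * be raised in /etc/system, since the framework caps allocations at
 * ddi_msix_alloc_limit:
 *
 *	set ddi_msix_alloc_limit = 16
 *	set nxge:nxge_msix_10g_intrs = 16
 */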
377
378 /*
379 * These global variables control the message
380 * output.
381 */
382 out_dbgmsg_t nxge_dbgmsg_out = DBG_CONSOLE | STR_LOG;
383 uint64_t nxge_debug_level;
384
385 /*
386 * This list contains the instance structures for the Neptune
387 * devices present in the system. The lock exists to guarantee
388 * mutually exclusive access to the list.
389 */
390 void *nxge_list = NULL;
391 void *nxge_hw_list = NULL;
392 nxge_os_mutex_t nxge_common_lock;
393 nxge_os_mutex_t nxgedebuglock;
394
395 extern uint64_t npi_debug_level;
396
397 extern nxge_status_t nxge_ldgv_init(p_nxge_t, int *, int *);
398 extern nxge_status_t nxge_ldgv_init_n2(p_nxge_t, int *, int *);
399 extern nxge_status_t nxge_ldgv_uninit(p_nxge_t);
400 extern nxge_status_t nxge_intr_ldgv_init(p_nxge_t);
401 extern void nxge_fm_init(p_nxge_t,
402 ddi_device_acc_attr_t *,
403 ddi_dma_attr_t *);
404 extern void nxge_fm_fini(p_nxge_t);
405 extern npi_status_t npi_mac_altaddr_disable(npi_handle_t, uint8_t, uint8_t);
406
407 /*
408 * Count used to maintain the number of buffers being used
409 * by Neptune instances and loaned up to the upper layers.
410 */
411 uint32_t nxge_mblks_pending = 0;
412
413 /*
414 * Device register access attributes for PIO.
415 */
416 static ddi_device_acc_attr_t nxge_dev_reg_acc_attr = {
417 DDI_DEVICE_ATTR_V1,
418 DDI_STRUCTURE_LE_ACC,
419 DDI_STRICTORDER_ACC,
420 DDI_DEFAULT_ACC
421 };
422
423 /*
424 * Device descriptor access attributes for DMA.
425 */
426 static ddi_device_acc_attr_t nxge_dev_desc_dma_acc_attr = {
427 DDI_DEVICE_ATTR_V0,
428 DDI_STRUCTURE_LE_ACC,
429 DDI_STRICTORDER_ACC
430 };
431
432 /*
433 * Device buffer access attributes for DMA.
434 */
435 static ddi_device_acc_attr_t nxge_dev_buf_dma_acc_attr = {
436 DDI_DEVICE_ATTR_V0,
437 DDI_STRUCTURE_BE_ACC,
438 DDI_STRICTORDER_ACC
439 };
440
441 ddi_dma_attr_t nxge_desc_dma_attr = {
442 DMA_ATTR_V0, /* version number. */
443 0, /* low address */
444 0xffffffffffffffff, /* high address */
445 0xffffffffffffffff, /* address counter max */
446 #ifndef NIU_PA_WORKAROUND
447 0x100000, /* alignment */
448 #else
449 0x2000,
450 #endif
451 0xfc00fc, /* dlim_burstsizes */
452 0x1, /* minimum transfer size */
453 0xffffffffffffffff, /* maximum transfer size */
454 0xffffffffffffffff, /* maximum segment size */
455 1, /* scatter/gather list length */
456 (unsigned int) 1, /* granularity */
457 0 /* attribute flags */
458 };
459
460 ddi_dma_attr_t nxge_tx_dma_attr = {
461 DMA_ATTR_V0, /* version number. */
462 0, /* low address */
463 0xffffffffffffffff, /* high address */
464 0xffffffffffffffff, /* address counter max */
465 #if defined(_BIG_ENDIAN)
466 0x2000, /* alignment */
467 #else
468 0x1000, /* alignment */
469 #endif
470 0xfc00fc, /* dlim_burstsizes */
471 0x1, /* minimum transfer size */
472 0xffffffffffffffff, /* maximum transfer size */
473 0xffffffffffffffff, /* maximum segment size */
474 5, /* scatter/gather list length */
475 (unsigned int) 1, /* granularity */
476 0 /* attribute flags */
477 };
478
479 ddi_dma_attr_t nxge_rx_dma_attr = {
480 DMA_ATTR_V0, /* version number. */
481 0, /* low address */
482 0xffffffffffffffff, /* high address */
483 0xffffffffffffffff, /* address counter max */
484 0x2000, /* alignment */
485 0xfc00fc, /* dlim_burstsizes */
486 0x1, /* minimum transfer size */
487 0xffffffffffffffff, /* maximum transfer size */
488 0xffffffffffffffff, /* maximum segment size */
489 1, /* scatter/gather list length */
490 (unsigned int) 1, /* granularity */
491 DDI_DMA_RELAXED_ORDERING /* attribute flags */
492 };
493
494 ddi_dma_lim_t nxge_dma_limits = {
495 (uint_t)0, /* dlim_addr_lo */
496 (uint_t)0xffffffff, /* dlim_addr_hi */
497 (uint_t)0xffffffff, /* dlim_cntr_max */
498 (uint_t)0xfc00fc, /* dlim_burstsizes for 32 and 64 bit xfers */
499 0x1, /* dlim_minxfer */
500 1024 /* dlim_speed */
501 };
502
503 dma_method_t nxge_force_dma = DVMA;
504
505 /*
506  * DMA chunk sizes.
507  *
508  * Try to allocate the largest possible size
509  * so that fewer DMA chunks need to be managed.
510 */
511 #ifdef NIU_PA_WORKAROUND
512 size_t alloc_sizes [] = {0x2000};
513 #else
514 size_t alloc_sizes [] = {0x1000, 0x2000, 0x4000, 0x8000,
515 0x10000, 0x20000, 0x40000, 0x80000,
516 0x100000, 0x200000, 0x400000, 0x800000,
517 0x1000000, 0x2000000, 0x4000000};
518 #endif
519
520 /*
521 * Translate "dev_t" to a pointer to the associated "dev_info_t".
522 */
523
524 extern void nxge_get_environs(nxge_t *);
525
526 static int
527 nxge_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
528 {
529 p_nxge_t nxgep = NULL;
530 int instance;
531 int status = DDI_SUCCESS;
532 uint8_t portn;
533 nxge_mmac_t *mmac_info;
534
535 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_attach"));
536
537 /*
538 * Get the device instance since we'll need to setup
539 * or retrieve a soft state for this instance.
540 */
541 instance = ddi_get_instance(dip);
542
543 switch (cmd) {
544 case DDI_ATTACH:
545 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_ATTACH"));
546 break;
547
548 case DDI_RESUME:
549 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_RESUME"));
550 nxgep = (p_nxge_t)ddi_get_soft_state(nxge_list, instance);
551 if (nxgep == NULL) {
552 status = DDI_FAILURE;
553 break;
554 }
555 if (nxgep->dip != dip) {
556 status = DDI_FAILURE;
557 break;
558 }
559 if (nxgep->suspended == DDI_PM_SUSPEND) {
560 status = ddi_dev_is_needed(nxgep->dip, 0, 1);
561 } else {
562 status = nxge_resume(nxgep);
563 }
564 goto nxge_attach_exit;
565
566 case DDI_PM_RESUME:
567 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_PM_RESUME"));
568 nxgep = (p_nxge_t)ddi_get_soft_state(nxge_list, instance);
569 if (nxgep == NULL) {
570 status = DDI_FAILURE;
571 break;
572 }
573 if (nxgep->dip != dip) {
574 status = DDI_FAILURE;
575 break;
576 }
577 status = nxge_resume(nxgep);
578 goto nxge_attach_exit;
579
580 default:
581 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing unknown"));
582 status = DDI_FAILURE;
583 goto nxge_attach_exit;
584 }
585
586
587 if (ddi_soft_state_zalloc(nxge_list, instance) == DDI_FAILURE) {
588 status = DDI_FAILURE;
589 goto nxge_attach_exit;
590 }
591
592 nxgep = ddi_get_soft_state(nxge_list, instance);
593 if (nxgep == NULL) {
594 status = NXGE_ERROR;
595 goto nxge_attach_fail2;
596 }
597
598 nxgep->nxge_magic = NXGE_MAGIC;
599
600 nxgep->drv_state = 0;
601 nxgep->dip = dip;
602 nxgep->instance = instance;
603 nxgep->p_dip = ddi_get_parent(dip);
604 nxgep->nxge_debug_level = nxge_debug_level;
605 npi_debug_level = nxge_debug_level;
606
607 /* Are we a guest running in a Hybrid I/O environment? */
608 nxge_get_environs(nxgep);
609
610 status = nxge_map_regs(nxgep);
611
612 if (status != NXGE_OK) {
613 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "nxge_map_regs failed"));
614 goto nxge_attach_fail3;
615 }
616
617 nxge_fm_init(nxgep, &nxge_dev_reg_acc_attr, &nxge_rx_dma_attr);
618
619 /* Create & initialize the per-Neptune data structure */
620 /* (even if we're a guest). */
621 status = nxge_init_common_dev(nxgep);
622 if (status != NXGE_OK) {
623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
624 "nxge_init_common_dev failed"));
625 goto nxge_attach_fail4;
626 }
627
628 /*
629 * Software workaround: set the replay timer.
630 */
631 if (nxgep->niu_type != N2_NIU) {
632 nxge_set_pci_replay_timeout(nxgep);
633 }
634
635 #if defined(sun4v)
636 /* This is required by nxge_hio_init(), which follows. */
637 if ((status = nxge_hsvc_register(nxgep)) != DDI_SUCCESS)
638 goto nxge_attach_fail4;
639 #endif
640
641 if ((status = nxge_hio_init(nxgep)) != NXGE_OK) {
642 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
643 "nxge_hio_init failed"));
644 goto nxge_attach_fail4;
645 }
646
647 if (nxgep->niu_type == NEPTUNE_2_10GF) {
648 if (nxgep->function_num > 1) {
649 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "Unsupported"
650 " function %d. Only functions 0 and 1 are "
651 "supported for this card.", nxgep->function_num));
652 status = NXGE_ERROR;
653 goto nxge_attach_fail4;
654 }
655 }
656
657 if (isLDOMguest(nxgep)) {
658 /*
659 * Use the function number here.
660 */
661 nxgep->mac.portnum = nxgep->function_num;
662 nxgep->mac.porttype = PORT_TYPE_LOGICAL;
663
664 /* XXX We'll set the MAC address counts to 1 for now. */
665 mmac_info = &nxgep->nxge_mmac_info;
666 mmac_info->num_mmac = 1;
667 mmac_info->naddrfree = 1;
668 } else {
669 portn = NXGE_GET_PORT_NUM(nxgep->function_num);
670 nxgep->mac.portnum = portn;
671 if ((portn == 0) || (portn == 1))
672 nxgep->mac.porttype = PORT_TYPE_XMAC;
673 else
674 nxgep->mac.porttype = PORT_TYPE_BMAC;
675 /*
676		 * Neptune has 4 ports: the first 2 ports use XMAC (10G MAC)
677		 * internally, and the remaining 2 ports use BMAC (1G "Big" MAC).
678		 * The two types of MACs have different characteristics.
679 */
680 mmac_info = &nxgep->nxge_mmac_info;
681 if (nxgep->function_num < 2) {
682 mmac_info->num_mmac = XMAC_MAX_ALT_ADDR_ENTRY;
683 mmac_info->naddrfree = XMAC_MAX_ALT_ADDR_ENTRY;
684 } else {
685 mmac_info->num_mmac = BMAC_MAX_ALT_ADDR_ENTRY;
686 mmac_info->naddrfree = BMAC_MAX_ALT_ADDR_ENTRY;
687 }
688 }
689 /*
690	 * Setup the ndd parameters for this instance.
691 */
692 nxge_init_param(nxgep);
693
694 /*
695 * Setup Register Tracing Buffer.
696 */
697 npi_rtrace_buf_init((rtrace_t *)&npi_rtracebuf);
698
699 /* init stats ptr */
700 nxge_init_statsp(nxgep);
701
702 /*
703 * Copy the vpd info from eeprom to a local data
704 * structure, and then check its validity.
705 */
706 if (!isLDOMguest(nxgep)) {
707 int *regp;
708 uint_t reglen;
709 int rv;
710
711 nxge_vpd_info_get(nxgep);
712
713 /* Find the NIU config handle. */
714 rv = ddi_prop_lookup_int_array(DDI_DEV_T_ANY,
715 ddi_get_parent(nxgep->dip), DDI_PROP_DONTPASS,
716	    "reg", &regp, &reglen);
717
718 if (rv != DDI_PROP_SUCCESS) {
719 goto nxge_attach_fail5;
720 }
721 /*
722		 * The address_hi, that is the first int in the "reg"
723		 * property, contains the config handle; bits 28-31,
724		 * which are OBP-specific info, must be masked off.
725 */
726 nxgep->niu_cfg_hdl = (*regp) & 0xFFFFFFF;
727 ddi_prop_free(regp);
728 }
729
730 /*
731 * Set the defaults for the MTU size.
732 */
733 nxge_hw_id_init(nxgep);
734
735 if (isLDOMguest(nxgep)) {
736 uchar_t *prop_val;
737 uint_t prop_len;
738 uint32_t max_frame_size;
739
740 extern void nxge_get_logical_props(p_nxge_t);
741
742 nxgep->statsp->mac_stats.xcvr_inuse = LOGICAL_XCVR;
743 nxgep->mac.portmode = PORT_LOGICAL;
744 (void) ddi_prop_update_string(DDI_DEV_T_NONE, nxgep->dip,
745 "phy-type", "virtual transceiver");
746
747 nxgep->nports = 1;
748 nxgep->board_ver = 0; /* XXX What? */
749
750 /*
751 * local-mac-address property gives us info on which
752 * specific MAC address the Hybrid resource is associated
753 * with.
754 */
755 if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, nxgep->dip, 0,
756 "local-mac-address", &prop_val,
757 &prop_len) != DDI_PROP_SUCCESS) {
758 goto nxge_attach_fail5;
759 }
760 if (prop_len != ETHERADDRL) {
761 ddi_prop_free(prop_val);
762 goto nxge_attach_fail5;
763 }
764 ether_copy(prop_val, nxgep->hio_mac_addr);
765 ddi_prop_free(prop_val);
766 nxge_get_logical_props(nxgep);
767
768 /*
769 * Enable Jumbo property based on the "max-frame-size"
770 * property value.
771 */
772 max_frame_size = ddi_prop_get_int(DDI_DEV_T_ANY,
773 nxgep->dip, DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
774 "max-frame-size", NXGE_MTU_DEFAULT_MAX);
775 if ((max_frame_size > NXGE_MTU_DEFAULT_MAX) &&
776 (max_frame_size <= TX_JUMBO_MTU)) {
777 nxgep->mac.is_jumbo = B_TRUE;
778 nxgep->mac.maxframesize = (uint16_t)max_frame_size;
779 nxgep->mac.default_mtu = nxgep->mac.maxframesize -
780 NXGE_EHEADER_VLAN_CRC;
781 }
782 } else {
783 status = nxge_xcvr_find(nxgep);
784
785 if (status != NXGE_OK) {
786 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "nxge_attach: "
787 " Couldn't determine card type"
788 " .... exit "));
789 goto nxge_attach_fail5;
790 }
791
792 status = nxge_get_config_properties(nxgep);
793
794 if (status != NXGE_OK) {
795 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
796 "get_hw create failed"));
797 goto nxge_attach_fail;
798 }
799 }
800
801 /*
802 * Setup the Kstats for the driver.
803 */
804 nxge_setup_kstats(nxgep);
805
806 if (!isLDOMguest(nxgep))
807 nxge_setup_param(nxgep);
808
809 status = nxge_setup_system_dma_pages(nxgep);
810 if (status != NXGE_OK) {
811 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "set dma page failed"));
812 goto nxge_attach_fail;
813 }
814
815
816 if (!isLDOMguest(nxgep))
817 nxge_hw_init_niu_common(nxgep);
818
819 status = nxge_setup_mutexes(nxgep);
820 if (status != NXGE_OK) {
821 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "set mutex failed"));
822 goto nxge_attach_fail;
823 }
824
825 #if defined(sun4v)
826 if (isLDOMguest(nxgep)) {
827 /* Find our VR & channel sets. */
828 status = nxge_hio_vr_add(nxgep);
829 if (status != DDI_SUCCESS) {
830 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
831 "nxge_hio_vr_add failed"));
832 (void) hsvc_unregister(&nxgep->niu_hsvc);
833 nxgep->niu_hsvc_available = B_FALSE;
834 goto nxge_attach_fail;
835 }
836 goto nxge_attach_exit;
837 }
838 #endif
839
840 status = nxge_setup_dev(nxgep);
841 if (status != DDI_SUCCESS) {
842 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "set dev failed"));
843 goto nxge_attach_fail;
844 }
845
846 status = nxge_add_intrs(nxgep);
847 if (status != DDI_SUCCESS) {
848 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "add_intr failed"));
849 goto nxge_attach_fail;
850 }
851
852 /* If a guest, register with vio_net instead. */
853 if ((status = nxge_mac_register(nxgep)) != NXGE_OK) {
854 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
855 "unable to register to mac layer (%d)", status));
856 goto nxge_attach_fail;
857 }
858
859 mac_link_update(nxgep->mach, LINK_STATE_UNKNOWN);
860
861 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
862 "registered to mac (instance %d)", instance));
863
864 /* nxge_link_monitor calls xcvr.check_link recursively */
865 (void) nxge_link_monitor(nxgep, LINK_MONITOR_START);
866
867 goto nxge_attach_exit;
868
869 nxge_attach_fail:
870 nxge_unattach(nxgep);
871 goto nxge_attach_fail1;
872
873 nxge_attach_fail5:
874 /*
875 * Tear down the ndd parameters setup.
876 */
877 nxge_destroy_param(nxgep);
878
879 /*
880 * Tear down the kstat setup.
881 */
882 nxge_destroy_kstats(nxgep);
883
884 nxge_attach_fail4:
885 if (nxgep->nxge_hw_p) {
886 nxge_uninit_common_dev(nxgep);
887 nxgep->nxge_hw_p = NULL;
888 }
889
890 nxge_attach_fail3:
891 /*
892 * Unmap the register setup.
893 */
894 nxge_unmap_regs(nxgep);
895
896 nxge_fm_fini(nxgep);
897
898 nxge_attach_fail2:
899 ddi_soft_state_free(nxge_list, nxgep->instance);
900
901 nxge_attach_fail1:
902 if (status != NXGE_OK)
903 status = (NXGE_ERROR | NXGE_DDI_FAILED);
904 nxgep = NULL;
905
906 nxge_attach_exit:
907 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_attach status = 0x%08x",
908 status));
909
910 return (status);
911 }
912
913 static int
914 nxge_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
915 {
916 int status = DDI_SUCCESS;
917 int instance;
918 p_nxge_t nxgep = NULL;
919
920 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_detach"));
921 instance = ddi_get_instance(dip);
922 nxgep = ddi_get_soft_state(nxge_list, instance);
923 if (nxgep == NULL) {
924 status = DDI_FAILURE;
925 goto nxge_detach_exit;
926 }
927
928 switch (cmd) {
929 case DDI_DETACH:
930 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_DETACH"));
931 break;
932
933 case DDI_PM_SUSPEND:
934 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_PM_SUSPEND"));
935 nxgep->suspended = DDI_PM_SUSPEND;
936 nxge_suspend(nxgep);
937 break;
938
939 case DDI_SUSPEND:
940 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_SUSPEND"));
941 if (nxgep->suspended != DDI_PM_SUSPEND) {
942 nxgep->suspended = DDI_SUSPEND;
943 nxge_suspend(nxgep);
944 }
945 break;
946
947 default:
948 status = DDI_FAILURE;
949 }
950
951 if (cmd != DDI_DETACH)
952 goto nxge_detach_exit;
953
954 /*
955 * Stop the xcvr polling.
956 */
957 nxgep->suspended = cmd;
958
959 (void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);
960
961 if (nxgep->mach && (status = mac_unregister(nxgep->mach)) != 0) {
962 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
963 "<== nxge_detach status = 0x%08X", status));
964 return (DDI_FAILURE);
965 }
966
967 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
968 "<== nxge_detach (mac_unregister) status = 0x%08X", status));
969
970 nxge_unattach(nxgep);
971 nxgep = NULL;
972
973 nxge_detach_exit:
974 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_detach status = 0x%08X",
975 status));
976
977 return (status);
978 }
979
980 static void
981 nxge_unattach(p_nxge_t nxgep)
982 {
983 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_unattach"));
984
985 if (nxgep == NULL || nxgep->dev_regs == NULL) {
986 return;
987 }
988
989 nxgep->nxge_magic = 0;
990
991 if (nxgep->nxge_timerid) {
992 nxge_stop_timer(nxgep, nxgep->nxge_timerid);
993 nxgep->nxge_timerid = 0;
994 }
995
996 /*
997	 * If this flag is set, it affects Neptune (PCI-E)
998	 * devices only.
999 */
1000 if ((nxgep->niu_type != N2_NIU) && nxge_peu_reset_enable) {
1001 nxge_niu_peu_reset(nxgep);
1002 }
1003
1004 #if defined(sun4v)
1005 if (isLDOMguest(nxgep)) {
1006 (void) nxge_hio_vr_release(nxgep);
1007 }
1008 #endif
1009
1010 if (nxgep->nxge_hw_p) {
1011 nxge_uninit_common_dev(nxgep);
1012 nxgep->nxge_hw_p = NULL;
1013 }
1014
1015 #if defined(sun4v)
1016 if (nxgep->niu_type == N2_NIU && nxgep->niu_hsvc_available == B_TRUE) {
1017 (void) hsvc_unregister(&nxgep->niu_hsvc);
1018 nxgep->niu_hsvc_available = B_FALSE;
1019 }
1020 #endif
1021 /*
1022 * Stop any further interrupts.
1023 */
1024 nxge_remove_intrs(nxgep);
1025
1026 /*
1027 * Stop the device and free resources.
1028 */
1029 if (!isLDOMguest(nxgep)) {
1030 nxge_destroy_dev(nxgep);
1031 }
1032
1033 /*
1034 * Tear down the ndd parameters setup.
1035 */
1036 nxge_destroy_param(nxgep);
1037
1038 /*
1039 * Tear down the kstat setup.
1040 */
1041 nxge_destroy_kstats(nxgep);
1042
1043 /*
1044 * Free any memory allocated for PHY properties
1045 */
1046 if (nxgep->phy_prop.cnt > 0) {
1047 KMEM_FREE(nxgep->phy_prop.arr,
1048 sizeof (nxge_phy_mdio_val_t) * nxgep->phy_prop.cnt);
1049 nxgep->phy_prop.cnt = 0;
1050 }
1051
1052 /*
1053 * Destroy all mutexes.
1054 */
1055 nxge_destroy_mutexes(nxgep);
1056
1057 /*
1058 * Remove the list of ndd parameters which
1059 * were setup during attach.
1060 */
1061 if (nxgep->dip) {
1062 NXGE_DEBUG_MSG((nxgep, OBP_CTL,
1063 " nxge_unattach: remove all properties"));
1064
1065 (void) ddi_prop_remove_all(nxgep->dip);
1066 }
1067
1068 #if NXGE_PROPERTY
1069 nxge_remove_hard_properties(nxgep);
1070 #endif
1071
1072 /*
1073 * Unmap the register setup.
1074 */
1075 nxge_unmap_regs(nxgep);
1076
1077 nxge_fm_fini(nxgep);
1078
1079 ddi_soft_state_free(nxge_list, nxgep->instance);
1080
1081 NXGE_DEBUG_MSG((NULL, DDI_CTL, "<== nxge_unattach"));
1082 }
1083
1084 #if defined(sun4v)
1085 int
1086 nxge_hsvc_register(nxge_t *nxgep)
1087 {
1088 nxge_status_t status;
1089 int i, j;
1090
1091 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_hsvc_register"));
1092 if (nxgep->niu_type != N2_NIU) {
1093 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_hsvc_register"));
1094 return (DDI_SUCCESS);
1095 }
1096
1097 /*
1098 * Currently, the NIU Hypervisor API supports two major versions:
1099 * version 1 and 2.
1100 * If Hypervisor introduces a higher major or minor version,
1101 * please update NIU_MAJOR_HI and NIU_MINOR_HI accordingly.
1102 */
1103 nxgep->niu_hsvc_available = B_FALSE;
1104 bcopy(&niu_hsvc, &nxgep->niu_hsvc,
1105 sizeof (hsvc_info_t));
1106
1107 for (i = NIU_MAJOR_HI; i > 0; i--) {
1108 nxgep->niu_hsvc.hsvc_major = i;
1109 for (j = NIU_MINOR_HI; j >= 0; j--) {
1110 nxgep->niu_hsvc.hsvc_minor = j;
1111 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1112 "nxge_hsvc_register: %s: negotiating "
1113 "hypervisor services revision %d "
1114 "group: 0x%lx major: 0x%lx "
1115 "minor: 0x%lx",
1116 nxgep->niu_hsvc.hsvc_modname,
1117 nxgep->niu_hsvc.hsvc_rev,
1118 nxgep->niu_hsvc.hsvc_group,
1119 nxgep->niu_hsvc.hsvc_major,
1120			    nxgep->niu_hsvc.hsvc_major,
1121			    nxgep->niu_hsvc.hsvc_minor));
1122
1123 if ((status = hsvc_register(&nxgep->niu_hsvc,
1124 &nxgep->niu_min_ver)) == 0) {
1125 /* Use the supported minor */
1126 nxgep->niu_hsvc.hsvc_minor = nxgep->niu_min_ver;
1127 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1128 "nxge_hsvc_register: %s: negotiated "
1129 "hypervisor services revision %d "
1130 "group: 0x%lx major: 0x%lx "
1131 "minor: 0x%lx (niu_min_ver 0x%lx)",
1132 nxgep->niu_hsvc.hsvc_modname,
1133 nxgep->niu_hsvc.hsvc_rev,
1134 nxgep->niu_hsvc.hsvc_group,
1135 nxgep->niu_hsvc.hsvc_major,
1136 nxgep->niu_hsvc.hsvc_minor,
1137 nxgep->niu_min_ver));
1138
1139 nxgep->niu_hsvc_available = B_TRUE;
1140 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1141 "<== nxge_hsvc_register: "
1142 "NIU Hypervisor service enabled"));
1143 return (DDI_SUCCESS);
1144 }
1145
1146 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1147			    "nxge_hsvc_register: %s: negotiation failed - "
1148 "try lower major number "
1149 "hypervisor services revision %d "
1150 "group: 0x%lx major: 0x%lx minor: 0x%lx "
1151 "errno: %d",
1152 nxgep->niu_hsvc.hsvc_modname,
1153 nxgep->niu_hsvc.hsvc_rev,
1154 nxgep->niu_hsvc.hsvc_group,
1155 nxgep->niu_hsvc.hsvc_major,
1156 nxgep->niu_hsvc.hsvc_minor, status));
1157 }
1158 }
1159
1160 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1161 "nxge_hsvc_register: %s: cannot negotiate "
1162 "hypervisor services revision %d group: 0x%lx "
1163 "major: 0x%lx minor: 0x%lx errno: %d",
1164 niu_hsvc.hsvc_modname, niu_hsvc.hsvc_rev,
1165 niu_hsvc.hsvc_group, niu_hsvc.hsvc_major,
1166 niu_hsvc.hsvc_minor, status));
1167
1168 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1169 "<== nxge_hsvc_register: Register to NIU Hypervisor failed"));
1170
1171 return (DDI_FAILURE);
1172 }
1173 #endif
1174
1175 static char n2_siu_name[] = "niu";
1176
1177 static nxge_status_t
1178 nxge_map_regs(p_nxge_t nxgep)
1179 {
1180 int ddi_status = DDI_SUCCESS;
1181 p_dev_regs_t dev_regs;
1182 char buf[MAXPATHLEN + 1];
1183 char *devname;
1184 #ifdef NXGE_DEBUG
1185 char *sysname;
1186 #endif
1187 off_t regsize;
1188 nxge_status_t status = NXGE_OK;
1189 #if !defined(_BIG_ENDIAN)
1190 off_t pci_offset;
1191 uint16_t pcie_devctl;
1192 #endif
1193
1194 if (isLDOMguest(nxgep)) {
1195 return (nxge_guest_regs_map(nxgep));
1196 }
1197
1198 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_map_regs"));
1199 nxgep->dev_regs = NULL;
1200 dev_regs = KMEM_ZALLOC(sizeof (dev_regs_t), KM_SLEEP);
1201 dev_regs->nxge_regh = NULL;
1202 dev_regs->nxge_pciregh = NULL;
1203 dev_regs->nxge_msix_regh = NULL;
1204 dev_regs->nxge_vir_regh = NULL;
1205 dev_regs->nxge_vir2_regh = NULL;
1206 nxgep->niu_type = NIU_TYPE_NONE;
1207
1208 devname = ddi_pathname(nxgep->dip, buf);
1209 ASSERT(strlen(devname) > 0);
1210 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1211 "nxge_map_regs: pathname devname %s", devname));
1212
1213 /*
1214 * The driver is running on a N2-NIU system if devname is something
1215 * like "/niu@80/network@0"
1216 */
1217 if (strstr(devname, n2_siu_name)) {
1218 /* N2/NIU */
1219 nxgep->niu_type = N2_NIU;
1220 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1221 "nxge_map_regs: N2/NIU devname %s", devname));
1222 /*
1223 * Get function number:
1224 * - N2/NIU: "/niu@80/network@0" and "/niu@80/network@1"
1225 */
1226 nxgep->function_num =
1227 (devname[strlen(devname) -1] == '1' ? 1 : 0);
1228 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1229 "nxge_map_regs: N2/NIU function number %d",
1230 nxgep->function_num));
1231 } else {
1232 int *prop_val;
1233 uint_t prop_len;
1234 uint8_t func_num;
1235
1236 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, nxgep->dip,
1237 0, "reg",
1238 &prop_val, &prop_len) != DDI_PROP_SUCCESS) {
1239 NXGE_DEBUG_MSG((nxgep, VPD_CTL,
1240 "Reg property not found"));
1241 ddi_status = DDI_FAILURE;
1242 goto nxge_map_regs_fail0;
1243
1244 } else {
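			/*
			 * In the first "reg" cell (address_hi) of a PCI
			 * node, the function number is encoded in bits
			 * 10:8 (per the IEEE 1275 PCI binding), hence
			 * the shift and mask below.
			 */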
1245 func_num = (prop_val[0] >> 8) & 0x7;
1246 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1247 "Reg property found: fun # %d",
1248 func_num));
1249 nxgep->function_num = func_num;
1250 if (isLDOMguest(nxgep)) {
1251				nxgep->function_num /= 2;
				/* Done with "reg"; free it before returning. */
				ddi_prop_free(prop_val);
1252				kmem_free(dev_regs, sizeof (dev_regs_t));
1253				return (NXGE_OK);
1254 }
1255 ddi_prop_free(prop_val);
1256 }
1257 }
1258
1259 switch (nxgep->niu_type) {
1260 default:
1261		(void) ddi_dev_regsize(nxgep->dip, 0, &regsize);
1262 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1263 "nxge_map_regs: pci config size 0x%x", regsize));
1264
1265 ddi_status = ddi_regs_map_setup(nxgep->dip, 0,
1266 (caddr_t *)&(dev_regs->nxge_pciregp), 0, 0,
1267 &nxge_dev_reg_acc_attr, &dev_regs->nxge_pciregh);
1268 if (ddi_status != DDI_SUCCESS) {
1269 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1270 "ddi_map_regs, nxge bus config regs failed"));
1271 goto nxge_map_regs_fail0;
1272 }
1273 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1274 "nxge_map_reg: PCI config addr 0x%0llx "
1275 " handle 0x%0llx", dev_regs->nxge_pciregp,
1276 dev_regs->nxge_pciregh));
1277 /*
1278		 * IMPORTANT:
1279		 * Workaround for a bit-swapping bug in HW
1280		 * which ends up with no-snoop = yes,
1281		 * resulting in DMA not being synched properly.
1282 */
1283 #if !defined(_BIG_ENDIAN)
1284 /* workarounds for x86 systems */
1285 pci_offset = 0x80 + PCIE_DEVCTL;
1286 pcie_devctl = pci_config_get16(dev_regs->nxge_pciregh,
1287 pci_offset);
1288 pcie_devctl &= ~PCIE_DEVCTL_ENABLE_NO_SNOOP;
1289 pcie_devctl |= PCIE_DEVCTL_RO_EN;
1290 pci_config_put16(dev_regs->nxge_pciregh, pci_offset,
1291 pcie_devctl);
1292 #endif
1293
1294		(void) ddi_dev_regsize(nxgep->dip, 1, &regsize);
1295 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1296 "nxge_map_regs: pio size 0x%x", regsize));
1297 /* set up the device mapped register */
1298 ddi_status = ddi_regs_map_setup(nxgep->dip, 1,
1299 (caddr_t *)&(dev_regs->nxge_regp), 0, 0,
1300 &nxge_dev_reg_acc_attr, &dev_regs->nxge_regh);
1301 if (ddi_status != DDI_SUCCESS) {
1302 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1303 "ddi_map_regs for Neptune global reg failed"));
1304 goto nxge_map_regs_fail1;
1305 }
1306
1307 /* set up the msi/msi-x mapped register */
1308		(void) ddi_dev_regsize(nxgep->dip, 2, &regsize);
1309 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1310 "nxge_map_regs: msix size 0x%x", regsize));
1311 ddi_status = ddi_regs_map_setup(nxgep->dip, 2,
1312 (caddr_t *)&(dev_regs->nxge_msix_regp), 0, 0,
1313 &nxge_dev_reg_acc_attr, &dev_regs->nxge_msix_regh);
1314 if (ddi_status != DDI_SUCCESS) {
1315 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1316 "ddi_map_regs for msi reg failed"));
1317 goto nxge_map_regs_fail2;
1318 }
1319
1320 /* set up the vio region mapped register */
1321		(void) ddi_dev_regsize(nxgep->dip, 3, &regsize);
1322 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1323 "nxge_map_regs: vio size 0x%x", regsize));
1324 ddi_status = ddi_regs_map_setup(nxgep->dip, 3,
1325 (caddr_t *)&(dev_regs->nxge_vir_regp), 0, 0,
1326 &nxge_dev_reg_acc_attr, &dev_regs->nxge_vir_regh);
1327
1328 if (ddi_status != DDI_SUCCESS) {
1329 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1330 "ddi_map_regs for nxge vio reg failed"));
1331 goto nxge_map_regs_fail3;
1332 }
1333 nxgep->dev_regs = dev_regs;
1334
1335 NPI_PCI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_pciregh);
1336 NPI_PCI_ADD_HANDLE_SET(nxgep,
1337 (npi_reg_ptr_t)dev_regs->nxge_pciregp);
1338 NPI_MSI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_msix_regh);
1339 NPI_MSI_ADD_HANDLE_SET(nxgep,
1340 (npi_reg_ptr_t)dev_regs->nxge_msix_regp);
1341
1342 NPI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh);
1343 NPI_ADD_HANDLE_SET(nxgep, (npi_reg_ptr_t)dev_regs->nxge_regp);
1344
1345 NPI_REG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh);
1346 NPI_REG_ADD_HANDLE_SET(nxgep,
1347 (npi_reg_ptr_t)dev_regs->nxge_regp);
1348
1349 NPI_VREG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_vir_regh);
1350 NPI_VREG_ADD_HANDLE_SET(nxgep,
1351 (npi_reg_ptr_t)dev_regs->nxge_vir_regp);
1352
1353 break;
1354
1355 case N2_NIU:
1356 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "ddi_map_regs, NIU"));
1357 /*
1358 * Set up the device mapped register (FWARC 2006/556)
1359 * (changed back to 1: reg starts at 1!)
1360 */
1361		(void) ddi_dev_regsize(nxgep->dip, 1, &regsize);
1362 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1363 "nxge_map_regs: dev size 0x%x", regsize));
1364 ddi_status = ddi_regs_map_setup(nxgep->dip, 1,
1365 (caddr_t *)&(dev_regs->nxge_regp), 0, 0,
1366 &nxge_dev_reg_acc_attr, &dev_regs->nxge_regh);
1367
1368 if (ddi_status != DDI_SUCCESS) {
1369 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1370 "ddi_map_regs for N2/NIU, global reg failed "));
1371 goto nxge_map_regs_fail1;
1372 }
1373
1374 /* set up the first vio region mapped register */
1375		(void) ddi_dev_regsize(nxgep->dip, 2, &regsize);
1376 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1377 "nxge_map_regs: vio (1) size 0x%x", regsize));
1378 ddi_status = ddi_regs_map_setup(nxgep->dip, 2,
1379 (caddr_t *)&(dev_regs->nxge_vir_regp), 0, 0,
1380 &nxge_dev_reg_acc_attr, &dev_regs->nxge_vir_regh);
1381
1382 if (ddi_status != DDI_SUCCESS) {
1383 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1384 "ddi_map_regs for nxge vio reg failed"));
1385 goto nxge_map_regs_fail2;
1386 }
1387 /* set up the second vio region mapped register */
1388		(void) ddi_dev_regsize(nxgep->dip, 3, &regsize);
1389 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1390 "nxge_map_regs: vio (3) size 0x%x", regsize));
1391 ddi_status = ddi_regs_map_setup(nxgep->dip, 3,
1392 (caddr_t *)&(dev_regs->nxge_vir2_regp), 0, 0,
1393 &nxge_dev_reg_acc_attr, &dev_regs->nxge_vir2_regh);
1394
1395 if (ddi_status != DDI_SUCCESS) {
1396 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1397 "ddi_map_regs for nxge vio2 reg failed"));
1398 goto nxge_map_regs_fail3;
1399 }
1400 nxgep->dev_regs = dev_regs;
1401
1402 NPI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh);
1403 NPI_ADD_HANDLE_SET(nxgep, (npi_reg_ptr_t)dev_regs->nxge_regp);
1404
1405 NPI_REG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh);
1406 NPI_REG_ADD_HANDLE_SET(nxgep,
1407 (npi_reg_ptr_t)dev_regs->nxge_regp);
1408
1409 NPI_VREG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_vir_regh);
1410 NPI_VREG_ADD_HANDLE_SET(nxgep,
1411 (npi_reg_ptr_t)dev_regs->nxge_vir_regp);
1412
1413 NPI_V2REG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_vir2_regh);
1414 NPI_V2REG_ADD_HANDLE_SET(nxgep,
1415 (npi_reg_ptr_t)dev_regs->nxge_vir2_regp);
1416
1417 break;
1418 }
1419
1420 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "nxge_map_reg: hardware addr 0x%0llx "
1421 " handle 0x%0llx", dev_regs->nxge_regp, dev_regs->nxge_regh));
1422
1423 goto nxge_map_regs_exit;
1424 nxge_map_regs_fail3:
1425 if (dev_regs->nxge_msix_regh) {
1426 ddi_regs_map_free(&dev_regs->nxge_msix_regh);
1427 }
1428 if (dev_regs->nxge_vir_regh) {
1429		ddi_regs_map_free(&dev_regs->nxge_vir_regh);
1430 }
1431 nxge_map_regs_fail2:
1432 if (dev_regs->nxge_regh) {
1433 ddi_regs_map_free(&dev_regs->nxge_regh);
1434 }
1435 nxge_map_regs_fail1:
1436 if (dev_regs->nxge_pciregh) {
1437 ddi_regs_map_free(&dev_regs->nxge_pciregh);
1438 }
1439 nxge_map_regs_fail0:
1440 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "Freeing register set memory"));
1441 kmem_free(dev_regs, sizeof (dev_regs_t));
1442
1443 nxge_map_regs_exit:
1444 if (ddi_status != DDI_SUCCESS)
1445 status |= (NXGE_ERROR | NXGE_DDI_FAILED);
1446 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_map_regs"));
1447 return (status);
1448 }
1449
1450 static void
1451 nxge_unmap_regs(p_nxge_t nxgep)
1452 {
1453 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_unmap_regs"));
1454
1455 if (isLDOMguest(nxgep)) {
1456 nxge_guest_regs_map_free(nxgep);
1457 return;
1458 }
1459
1460 if (nxgep->dev_regs) {
1461 if (nxgep->dev_regs->nxge_pciregh) {
1462 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1463 "==> nxge_unmap_regs: bus"));
1464 ddi_regs_map_free(&nxgep->dev_regs->nxge_pciregh);
1465 nxgep->dev_regs->nxge_pciregh = NULL;
1466 }
1467 if (nxgep->dev_regs->nxge_regh) {
1468 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1469 "==> nxge_unmap_regs: device registers"));
1470 ddi_regs_map_free(&nxgep->dev_regs->nxge_regh);
1471 nxgep->dev_regs->nxge_regh = NULL;
1472 }
1473 if (nxgep->dev_regs->nxge_msix_regh) {
1474 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1475 "==> nxge_unmap_regs: device interrupts"));
1476 ddi_regs_map_free(&nxgep->dev_regs->nxge_msix_regh);
1477 nxgep->dev_regs->nxge_msix_regh = NULL;
1478 }
1479 if (nxgep->dev_regs->nxge_vir_regh) {
1480 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1481 "==> nxge_unmap_regs: vio region"));
1482 ddi_regs_map_free(&nxgep->dev_regs->nxge_vir_regh);
1483 nxgep->dev_regs->nxge_vir_regh = NULL;
1484 }
1485 if (nxgep->dev_regs->nxge_vir2_regh) {
1486 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1487 "==> nxge_unmap_regs: vio2 region"));
1488 ddi_regs_map_free(&nxgep->dev_regs->nxge_vir2_regh);
1489 nxgep->dev_regs->nxge_vir2_regh = NULL;
1490 }
1491
1492 kmem_free(nxgep->dev_regs, sizeof (dev_regs_t));
1493 nxgep->dev_regs = NULL;
1494 }
1495
1496 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_unmap_regs"));
1497 }
1498
1499 static nxge_status_t
1500 nxge_setup_mutexes(p_nxge_t nxgep)
1501 {
1502 int ddi_status = DDI_SUCCESS;
1503 nxge_status_t status = NXGE_OK;
1504 nxge_classify_t *classify_ptr;
1505 int partition;
1506
1507 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_setup_mutexes"));
1508
1509 /*
1510 * Get the interrupt cookie so the mutexes can be
1511	 * initialized.
1512 */
1513 if (isLDOMguest(nxgep)) {
1514 nxgep->interrupt_cookie = 0;
1515 } else {
1516 ddi_status = ddi_get_iblock_cookie(nxgep->dip, 0,
1517 &nxgep->interrupt_cookie);
1518
1519 if (ddi_status != DDI_SUCCESS) {
1520 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1521 "<== nxge_setup_mutexes: failed 0x%x",
1522 ddi_status));
1523 goto nxge_setup_mutexes_exit;
1524 }
1525 }
1526
1527 cv_init(&nxgep->poll_cv, NULL, CV_DRIVER, NULL);
1528 MUTEX_INIT(&nxgep->poll_lock, NULL,
1529 MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
1530
1531 /*
1532 * Initialize mutexes for this device.
1533 */
1534 MUTEX_INIT(nxgep->genlock, NULL,
1535 MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
1536 MUTEX_INIT(&nxgep->ouraddr_lock, NULL,
1537 MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
1538 MUTEX_INIT(&nxgep->mif_lock, NULL,
1539 MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
1540 MUTEX_INIT(&nxgep->group_lock, NULL,
1541 MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
1542 RW_INIT(&nxgep->filter_lock, NULL,
1543 RW_DRIVER, (void *)nxgep->interrupt_cookie);
1544
1545 classify_ptr = &nxgep->classifier;
1546 /*
1547	 * FFLP mutexes are never used in interrupt context,
1548	 * as an FFLP operation can take a very long time to
1549	 * complete and hence is not suitable to invoke from interrupt
1550	 * handlers.
1551 */
1552 MUTEX_INIT(&classify_ptr->tcam_lock, NULL,
1553 NXGE_MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
1554 if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep)) {
1555 MUTEX_INIT(&classify_ptr->fcram_lock, NULL,
1556 NXGE_MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
1557 for (partition = 0; partition < MAX_PARTITION; partition++) {
1558 MUTEX_INIT(&classify_ptr->hash_lock[partition], NULL,
1559 NXGE_MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
1560 }
1561 }
1562
1563 nxge_setup_mutexes_exit:
1564 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1565 "<== nxge_setup_mutexes status = %x", status));
1566
1567 if (ddi_status != DDI_SUCCESS)
1568 status |= (NXGE_ERROR | NXGE_DDI_FAILED);
1569
1570 return (status);
1571 }
1572
1573 static void
1574 nxge_destroy_mutexes(p_nxge_t nxgep)
1575 {
1576 int partition;
1577 nxge_classify_t *classify_ptr;
1578
1579 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_destroy_mutexes"));
1580 RW_DESTROY(&nxgep->filter_lock);
1581 MUTEX_DESTROY(&nxgep->group_lock);
1582 MUTEX_DESTROY(&nxgep->mif_lock);
1583 MUTEX_DESTROY(&nxgep->ouraddr_lock);
1584 MUTEX_DESTROY(nxgep->genlock);
1585
1586 classify_ptr = &nxgep->classifier;
1587 MUTEX_DESTROY(&classify_ptr->tcam_lock);
1588
1589 /* Destroy all polling resources. */
1590 MUTEX_DESTROY(&nxgep->poll_lock);
1591 cv_destroy(&nxgep->poll_cv);
1592
1593 /* free data structures, based on HW type */
1594 if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep)) {
1595 MUTEX_DESTROY(&classify_ptr->fcram_lock);
1596 for (partition = 0; partition < MAX_PARTITION; partition++) {
1597 MUTEX_DESTROY(&classify_ptr->hash_lock[partition]);
1598 }
1599 }
1600
1601 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_destroy_mutexes"));
1602 }
1603
1604 nxge_status_t
1605 nxge_init(p_nxge_t nxgep)
1606 {
1607 nxge_status_t status = NXGE_OK;
1608
1609 NXGE_DEBUG_MSG((nxgep, STR_CTL, "==> nxge_init"));
1610
1611 if (nxgep->drv_state & STATE_HW_INITIALIZED) {
1612 return (status);
1613 }
1614
1615 /*
1616 * Allocate system memory for the receive/transmit buffer blocks
1617 * and receive/transmit descriptor rings.
1618 */
1619 status = nxge_alloc_mem_pool(nxgep);
1620 if (status != NXGE_OK) {
1621 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "alloc mem failed\n"));
1622 goto nxge_init_fail1;
1623 }
1624
1625 if (!isLDOMguest(nxgep)) {
1626 /*
1627 * Initialize and enable the TXC registers.
1628 * (Globally enable the Tx controller,
1629 * enable the port, configure the dma channel bitmap,
1630 * configure the max burst size).
1631 */
1632 status = nxge_txc_init(nxgep);
1633 if (status != NXGE_OK) {
1634 NXGE_ERROR_MSG((nxgep,
1635 NXGE_ERR_CTL, "init txc failed\n"));
1636 goto nxge_init_fail2;
1637 }
1638 }
1639
1640 /*
1641 * Initialize and enable TXDMA channels.
1642 */
1643 status = nxge_init_txdma_channels(nxgep);
1644 if (status != NXGE_OK) {
1645 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init txdma failed\n"));
1646 goto nxge_init_fail3;
1647 }
1648
1649 /*
1650 * Initialize and enable RXDMA channels.
1651 */
1652 status = nxge_init_rxdma_channels(nxgep);
1653 if (status != NXGE_OK) {
1654 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init rxdma failed\n"));
1655 goto nxge_init_fail4;
1656 }
1657
1658 /*
1659 * The guest domain is now done.
1660 */
1661 if (isLDOMguest(nxgep)) {
1662 nxgep->drv_state |= STATE_HW_INITIALIZED;
1663 goto nxge_init_exit;
1664 }
1665
1666 /*
1667 * Initialize TCAM and FCRAM (Neptune).
1668 */
1669 status = nxge_classify_init(nxgep);
1670 if (status != NXGE_OK) {
1671 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init classify failed\n"));
1672 goto nxge_init_fail5;
1673 }
1674
1675 /*
1676 * Initialize ZCP
1677 */
1678 status = nxge_zcp_init(nxgep);
1679 if (status != NXGE_OK) {
1680 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init ZCP failed\n"));
1681 goto nxge_init_fail5;
1682 }
1683
1684 /*
1685 * Initialize IPP.
1686 */
1687 status = nxge_ipp_init(nxgep);
1688 if (status != NXGE_OK) {
1689 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init IPP failed\n"));
1690 goto nxge_init_fail5;
1691 }
1692
1693 /*
1694 * Initialize the MAC block.
1695 */
1696 status = nxge_mac_init(nxgep);
1697 if (status != NXGE_OK) {
1698 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init MAC failed\n"));
1699 goto nxge_init_fail5;
1700 }
1701
1702 /*
1703	 * Enable the interrupts for DDI.
1704 */
1705 nxge_intrs_enable(nxgep);
1706
1707 nxgep->drv_state |= STATE_HW_INITIALIZED;
1708
1709 goto nxge_init_exit;
1710
1711 nxge_init_fail5:
1712 nxge_uninit_rxdma_channels(nxgep);
1713 nxge_init_fail4:
1714 nxge_uninit_txdma_channels(nxgep);
1715 nxge_init_fail3:
1716 if (!isLDOMguest(nxgep)) {
1717 (void) nxge_txc_uninit(nxgep);
1718 }
1719 nxge_init_fail2:
1720 nxge_free_mem_pool(nxgep);
1721 nxge_init_fail1:
1722 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1723 "<== nxge_init status (failed) = 0x%08x", status));
1724 return (status);
1725
1726 nxge_init_exit:
1727 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_init status = 0x%08x",
1728 status));
1729 return (status);
1730 }
1731
1732
1733 timeout_id_t
1734 nxge_start_timer(p_nxge_t nxgep, fptrv_t func, int msec)
1735 {
1736 if ((nxgep->suspended == 0) || (nxgep->suspended == DDI_RESUME)) {
1737 return (timeout(func, (caddr_t)nxgep,
1738 drv_usectohz(1000 * msec)));
1739 }
1740 return (NULL);
1741 }
1742
1743 /*ARGSUSED*/
1744 void
1745 nxge_stop_timer(p_nxge_t nxgep, timeout_id_t timerid)
1746 {
1747 if (timerid) {
1748 (void) untimeout(timerid);
1749 }
1750 }
1751
1752 void
1753 nxge_uninit(p_nxge_t nxgep)
1754 {
1755 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_uninit"));
1756
1757 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
1758 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1759 "==> nxge_uninit: not initialized"));
1760 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1761 "<== nxge_uninit"));
1762 return;
1763 }
1764
1765 if (!isLDOMguest(nxgep)) {
1766 /*
1767 * Reset the receive MAC side.
1768 */
1769 (void) nxge_rx_mac_disable(nxgep);
1770
1771 /*
1772 * Drain the IPP.
1773 */
1774 (void) nxge_ipp_drain(nxgep);
1775 }
1776
1777 /* stop timer */
1778 if (nxgep->nxge_timerid) {
1779 nxge_stop_timer(nxgep, nxgep->nxge_timerid);
1780 nxgep->nxge_timerid = 0;
1781 }
1782
1783 (void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);
1784 (void) nxge_intr_hw_disable(nxgep);
1785
1786
1787 /* Disable and soft reset the IPP */
1788 if (!isLDOMguest(nxgep))
1789 (void) nxge_ipp_disable(nxgep);
1790
1791 /* Free classification resources */
1792 (void) nxge_classify_uninit(nxgep);
1793
1794 /*
1795 * Reset the transmit/receive DMA side.
1796 */
1797 (void) nxge_txdma_hw_mode(nxgep, NXGE_DMA_STOP);
1798 (void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_STOP);
1799
1800 nxge_uninit_txdma_channels(nxgep);
1801 nxge_uninit_rxdma_channels(nxgep);
1802
1803 /*
1804 * Reset the transmit MAC side.
1805 */
1806 (void) nxge_tx_mac_disable(nxgep);
1807
1808 nxge_free_mem_pool(nxgep);
1809
1810 /*
1811	 * Start the link monitor if the reset flag is not set.
1812	 * If the reset flag is set, the link monitor
1813	 * will not be started, in order to stop further bus
1814	 * activity coming from this interface.
1815	 * The driver will start the monitor function again
1816	 * if the interface is initialized later.
1817 */
1818 if (!nxge_peu_reset_enable) {
1819 (void) nxge_link_monitor(nxgep, LINK_MONITOR_START);
1820 }
1821
1822 nxgep->drv_state &= ~STATE_HW_INITIALIZED;
1823
1824 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_uninit: "
1825 "nxge_mblks_pending %d", nxge_mblks_pending));
1826 }
1827
1828 void
1829 nxge_get64(p_nxge_t nxgep, p_mblk_t mp)
1830 {
1831 uint64_t reg;
1832 uint64_t regdata;
1833 int i, retry;
1834
1835	bcopy((char *)mp->b_rptr, (char *)&reg, sizeof (uint64_t));
1836 regdata = 0;
1837 retry = 1;
1838
1839 for (i = 0; i < retry; i++) {
1840		NXGE_REG_RD64(nxgep->npi_handle, reg, &regdata);
1841 }
1842	bcopy((char *)&regdata, (char *)mp->b_rptr, sizeof (uint64_t));
1843 }
1844
1845 void
1846 nxge_put64(p_nxge_t nxgep, p_mblk_t mp)
1847 {
1848 uint64_t reg;
1849 uint64_t buf[2];
1850
1851 bcopy((char *)mp->b_rptr, (char *)&buf[0], 2 * sizeof (uint64_t));
1852 reg = buf[0];
1853
1854 NXGE_NPI_PIO_WRITE64(nxgep->npi_handle, reg, buf[1]);
1855 }
1856
1857 /*ARGSUSED*/
1858 /*VARARGS*/
1859 void
1860 nxge_debug_msg(p_nxge_t nxgep, uint64_t level, char *fmt, ...)
1861 {
1862 char msg_buffer[1048];
1863 char prefix_buffer[32];
1864 int instance;
1865 uint64_t debug_level;
1866 int cmn_level = CE_CONT;
1867 va_list ap;
1868
1869	if (nxgep && nxgep->nxge_debug_level != nxge_debug_level) {
1870		/* In case a developer has changed nxge_debug_level. */
1871		nxgep->nxge_debug_level = nxge_debug_level;
1872	}
1874
1875 debug_level = (nxgep == NULL) ? nxge_debug_level :
1876 nxgep->nxge_debug_level;
1877
1878 if ((level & debug_level) ||
1879 (level == NXGE_NOTE) ||
1880 (level == NXGE_ERR_CTL)) {
1881 /* do the msg processing */
1882 MUTEX_ENTER(&nxgedebuglock);
1883
1884 if ((level & NXGE_NOTE)) {
1885 cmn_level = CE_NOTE;
1886 }
1887
1888 if (level & NXGE_ERR_CTL) {
1889 cmn_level = CE_WARN;
1890 }
1891
1892 va_start(ap, fmt);
1893 (void) vsprintf(msg_buffer, fmt, ap);
1894 va_end(ap);
1895 if (nxgep == NULL) {
1896 instance = -1;
1897 (void) sprintf(prefix_buffer, "%s :", "nxge");
1898 } else {
1899 instance = nxgep->instance;
1900 (void) sprintf(prefix_buffer,
1901 "%s%d :", "nxge", instance);
1902 }
1903
1904 MUTEX_EXIT(&nxgedebuglock);
1905 cmn_err(cmn_level, "!%s %s\n",
1906 prefix_buffer, msg_buffer);
1907
1908 }
1909 }
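
/*
 * Usage sketch for nxge_debug_msg() (the call below is hypothetical,
 * not part of the driver):
 *
 *	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> my_func: port %d", port));
 *
 * This prints only when the DDI_CTL bit is set in nxge_debug_level
 * (tunable via /etc/system); NXGE_NOTE and NXGE_ERR_CTL messages are
 * always printed, as CE_NOTE and CE_WARN respectively.
 */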
1910
1911 char *
1912 nxge_dump_packet(char *addr, int size)
1913 {
1914 uchar_t *ap = (uchar_t *)addr;
1915 int i;
1916 static char etherbuf[1024];
1917 char *cp = etherbuf;
1918 char digits[] = "0123456789abcdef";
1919
1920 if (!size)
1921 size = 60;
1922
1923 if (size > MAX_DUMP_SZ) {
1924 /* Dump the leading bytes */
1925 for (i = 0; i < MAX_DUMP_SZ/2; i++) {
1926 if (*ap > 0x0f)
1927 *cp++ = digits[*ap >> 4];
1928 *cp++ = digits[*ap++ & 0xf];
1929 *cp++ = ':';
1930 }
1931 for (i = 0; i < 20; i++)
1932 *cp++ = '.';
1933 /* Dump the last MAX_DUMP_SZ/2 bytes */
1934 ap = (uchar_t *)(addr + (size - MAX_DUMP_SZ/2));
1935 for (i = 0; i < MAX_DUMP_SZ/2; i++) {
1936 if (*ap > 0x0f)
1937 *cp++ = digits[*ap >> 4];
1938 *cp++ = digits[*ap++ & 0xf];
1939 *cp++ = ':';
1940 }
1941 } else {
1942 for (i = 0; i < size; i++) {
1943 if (*ap > 0x0f)
1944 *cp++ = digits[*ap >> 4];
1945 *cp++ = digits[*ap++ & 0xf];
1946 *cp++ = ':';
1947 }
1948 }
1949 *--cp = 0;
1950 return (etherbuf);
1951 }
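
/*
 * Example output (illustrative): the bytes 0x00 0x1b 0xa9 are rendered
 * as "0:1b:a9"; high nibbles of zero are suppressed.  Buffers longer
 * than MAX_DUMP_SZ are shown as the first and last MAX_DUMP_SZ/2 bytes
 * separated by a run of dots.  The result lives in a static buffer, so
 * it is not safe across concurrent or repeated calls.
 */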
1952
1953 #ifdef NXGE_DEBUG
1954 static void
1955 nxge_test_map_regs(p_nxge_t nxgep)
1956 {
1957 ddi_acc_handle_t cfg_handle;
1958 p_pci_cfg_t cfg_ptr;
1959 ddi_acc_handle_t dev_handle;
1960 char *dev_ptr;
1961 ddi_acc_handle_t pci_config_handle;
1962 uint32_t regval;
1963 int i;
1964
1965 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_test_map_regs"));
1966
1967 dev_handle = nxgep->dev_regs->nxge_regh;
1968 dev_ptr = (char *)nxgep->dev_regs->nxge_regp;
1969
1970 if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep)) {
1971 cfg_handle = nxgep->dev_regs->nxge_pciregh;
1972 cfg_ptr = (void *)nxgep->dev_regs->nxge_pciregp;
1973
1974 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1975 "Neptune PCI regp cfg_ptr 0x%llx", (char *)cfg_ptr));
1976 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1977 "Neptune PCI cfg_ptr vendor id ptr 0x%llx",
1978 &cfg_ptr->vendorid));
1979 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1980 "\tvendorid 0x%x devid 0x%x",
1981 NXGE_PIO_READ16(cfg_handle, &cfg_ptr->vendorid, 0),
1982 NXGE_PIO_READ16(cfg_handle, &cfg_ptr->devid, 0)));
1983 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1984 		    "PCI BAR: base 0x%x base14 0x%x base18 0x%x "
1985 "bar1c 0x%x",
1986 NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base, 0),
1987 NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base14, 0),
1988 NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base18, 0),
1989 NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base1c, 0)));
1990 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1991 "\nNeptune PCI BAR: base20 0x%x base24 0x%x "
1992 		    "base28 0x%x bar2c 0x%x\n",
1993 NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base20, 0),
1994 NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base24, 0),
1995 NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base28, 0),
1996 NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base2c, 0)));
1997 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1998 "\nNeptune PCI BAR: base30 0x%x\n",
1999 NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base30, 0)));
2000
2001 cfg_handle = nxgep->dev_regs->nxge_pciregh;
2002 cfg_ptr = (void *)nxgep->dev_regs->nxge_pciregp;
2003 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
2004 "first 0x%llx second 0x%llx third 0x%llx "
2005 "last 0x%llx ",
2006 NXGE_PIO_READ64(dev_handle,
2007 (uint64_t *)(dev_ptr + 0), 0),
2008 NXGE_PIO_READ64(dev_handle,
2009 (uint64_t *)(dev_ptr + 8), 0),
2010 NXGE_PIO_READ64(dev_handle,
2011 (uint64_t *)(dev_ptr + 16), 0),
2012 		    NXGE_PIO_READ64(dev_handle,
2013 (uint64_t *)(dev_ptr + 24), 0)));
2014 }
2015 }
2016
2017 #endif
2018
2019 static void
2020 nxge_suspend(p_nxge_t nxgep)
2021 {
2022 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_suspend"));
2023
2024 nxge_intrs_disable(nxgep);
2025 nxge_destroy_dev(nxgep);
2026
2027 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_suspend"));
2028 }
2029
2030 static nxge_status_t
2031 nxge_resume(p_nxge_t nxgep)
2032 {
2033 nxge_status_t status = NXGE_OK;
2034
2035 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_resume"));
2036
2037 nxgep->suspended = DDI_RESUME;
2038 (void) nxge_link_monitor(nxgep, LINK_MONITOR_START);
2039 (void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_START);
2040 (void) nxge_txdma_hw_mode(nxgep, NXGE_DMA_START);
2041 (void) nxge_rx_mac_enable(nxgep);
2042 (void) nxge_tx_mac_enable(nxgep);
2043 nxge_intrs_enable(nxgep);
2044 nxgep->suspended = 0;
2045
2046 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
2047 "<== nxge_resume status = 0x%x", status));
2048 return (status);
2049 }
2050
2051 static nxge_status_t
2052 nxge_setup_dev(p_nxge_t nxgep)
2053 {
2054 nxge_status_t status = NXGE_OK;
2055
2056 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_setup_dev port %d",
2057 nxgep->mac.portnum));
2058
2059 status = nxge_link_init(nxgep);
2060
2061 if (fm_check_acc_handle(nxgep->dev_regs->nxge_regh) != DDI_FM_OK) {
2062 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2063 "port%d Bad register acc handle", nxgep->mac.portnum));
2064 status = NXGE_ERROR;
2065 }
2066
2067 if (status != NXGE_OK) {
2068 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2069 " nxge_setup_dev status "
2070 "(xcvr init 0x%08x)", status));
2071 goto nxge_setup_dev_exit;
2072 }
2073
2074 nxge_setup_dev_exit:
2075 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
2076 "<== nxge_setup_dev port %d status = 0x%08x",
2077 nxgep->mac.portnum, status));
2078
2079 return (status);
2080 }
2081
2082 static void
2083 nxge_destroy_dev(p_nxge_t nxgep)
2084 {
2085 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_destroy_dev"));
2086
2087 (void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);
2088
2089 (void) nxge_hw_stop(nxgep);
2090
2091 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_destroy_dev"));
2092 }
2093
2094 static nxge_status_t
2095 nxge_setup_system_dma_pages(p_nxge_t nxgep)
2096 {
2097 int ddi_status = DDI_SUCCESS;
2098 uint_t count;
2099 ddi_dma_cookie_t cookie;
2100 uint_t iommu_pagesize;
2101 nxge_status_t status = NXGE_OK;
2102
2103 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_setup_system_dma_pages"));
2104 nxgep->sys_page_sz = ddi_ptob(nxgep->dip, (ulong_t)1);
2105 if (nxgep->niu_type != N2_NIU) {
2106 iommu_pagesize = dvma_pagesize(nxgep->dip);
2107 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
2108 " nxge_setup_system_dma_pages: page %d (ddi_ptob %d) "
2109 " default_block_size %d iommu_pagesize %d",
2110 nxgep->sys_page_sz,
2111 ddi_ptob(nxgep->dip, (ulong_t)1),
2112 nxgep->rx_default_block_size,
2113 iommu_pagesize));
2114
2115 if (iommu_pagesize != 0) {
2116 if (nxgep->sys_page_sz == iommu_pagesize) {
2117 if (iommu_pagesize > 0x4000)
2118 nxgep->sys_page_sz = 0x4000;
2119 } else {
2120 if (nxgep->sys_page_sz > iommu_pagesize)
2121 nxgep->sys_page_sz = iommu_pagesize;
2122 }
2123 }
2124 }
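	/*
	 * Illustration of the clamping above (hypothetical values): with
	 * an 8K system page and a 4K IOMMU page, sys_page_sz drops to 4K;
	 * if both are 64K, sys_page_sz is capped at 16K (0x4000).
	 */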
2125 nxgep->sys_page_mask = ~(nxgep->sys_page_sz - 1);
2126 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
2127 "==> nxge_setup_system_dma_pages: page %d (ddi_ptob %d) "
2128 "default_block_size %d page mask %d",
2129 nxgep->sys_page_sz,
2130 ddi_ptob(nxgep->dip, (ulong_t)1),
2131 nxgep->rx_default_block_size,
2132 nxgep->sys_page_mask));
2133
2134
2135 switch (nxgep->sys_page_sz) {
2136 default:
2137 nxgep->sys_page_sz = 0x1000;
2138 nxgep->sys_page_mask = ~(nxgep->sys_page_sz - 1);
2139 nxgep->rx_default_block_size = 0x1000;
2140 nxgep->rx_bksize_code = RBR_BKSIZE_4K;
2141 break;
2142 case 0x1000:
2143 nxgep->rx_default_block_size = 0x1000;
2144 nxgep->rx_bksize_code = RBR_BKSIZE_4K;
2145 break;
2146 case 0x2000:
2147 nxgep->rx_default_block_size = 0x2000;
2148 nxgep->rx_bksize_code = RBR_BKSIZE_8K;
2149 break;
2150 case 0x4000:
2151 nxgep->rx_default_block_size = 0x4000;
2152 nxgep->rx_bksize_code = RBR_BKSIZE_16K;
2153 break;
2154 case 0x8000:
2155 nxgep->rx_default_block_size = 0x8000;
2156 nxgep->rx_bksize_code = RBR_BKSIZE_32K;
2157 break;
2158 }
2159
2160 #ifndef USE_RX_BIG_BUF
2161 nxge_rx_dma_attr.dma_attr_align = nxgep->sys_page_sz;
2162 #else
2163 nxgep->rx_default_block_size = 0x2000;
2164 nxgep->rx_bksize_code = RBR_BKSIZE_8K;
2165 #endif
2166 /*
2167 * Get the system DMA burst size.
2168 */
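	/*
	 * The spare handle below exists only to query the burst sizes
	 * the DDI framework grants this device: it is bound to an
	 * arbitrary kernel address (the handle itself) and torn down
	 * again as soon as ddi_dma_burstsizes() has been called.
	 */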
2169 ddi_status = ddi_dma_alloc_handle(nxgep->dip, &nxge_tx_dma_attr,
2170 DDI_DMA_DONTWAIT, 0,
2171 &nxgep->dmasparehandle);
2172 if (ddi_status != DDI_SUCCESS) {
2173 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2174 "ddi_dma_alloc_handle: failed "
2175 " status 0x%x", ddi_status));
2176 goto nxge_get_soft_properties_exit;
2177 }
2178
2179 ddi_status = ddi_dma_addr_bind_handle(nxgep->dmasparehandle, NULL,
2180 (caddr_t)nxgep->dmasparehandle,
2181 sizeof (nxgep->dmasparehandle),
2182 DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
2183 DDI_DMA_DONTWAIT, 0,
2184 &cookie, &count);
2185 if (ddi_status != DDI_DMA_MAPPED) {
2186 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2187 "Binding spare handle to find system"
2188 " burstsize failed."));
2189 ddi_status = DDI_FAILURE;
2190 goto nxge_get_soft_properties_fail1;
2191 }
2192
2193 nxgep->sys_burst_sz = ddi_dma_burstsizes(nxgep->dmasparehandle);
2194 (void) ddi_dma_unbind_handle(nxgep->dmasparehandle);
2195
2196 nxge_get_soft_properties_fail1:
2197 ddi_dma_free_handle(&nxgep->dmasparehandle);
2198
2199 nxge_get_soft_properties_exit:
2200
2201 if (ddi_status != DDI_SUCCESS)
2202 status |= (NXGE_ERROR | NXGE_DDI_FAILED);
2203
2204 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
2205 "<== nxge_setup_system_dma_pages status = 0x%08x", status));
2206 return (status);
2207 }
2208
2209 static nxge_status_t
2210 nxge_alloc_mem_pool(p_nxge_t nxgep)
2211 {
2212 nxge_status_t status = NXGE_OK;
2213
2214 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_alloc_mem_pool"));
2215
2216 status = nxge_alloc_rx_mem_pool(nxgep);
2217 if (status != NXGE_OK) {
2218 return (NXGE_ERROR);
2219 }
2220
2221 status = nxge_alloc_tx_mem_pool(nxgep);
2222 if (status != NXGE_OK) {
2223 nxge_free_rx_mem_pool(nxgep);
2224 return (NXGE_ERROR);
2225 }
2226
2227 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_alloc_mem_pool"));
2228 return (NXGE_OK);
2229 }
2230
2231 static void
2232 nxge_free_mem_pool(p_nxge_t nxgep)
2233 {
2234 NXGE_DEBUG_MSG((nxgep, MEM_CTL, "==> nxge_free_mem_pool"));
2235
2236 nxge_free_rx_mem_pool(nxgep);
2237 nxge_free_tx_mem_pool(nxgep);
2238
2239 NXGE_DEBUG_MSG((nxgep, MEM_CTL, "<== nxge_free_mem_pool"));
2240 }
2241
2242 nxge_status_t
2243 nxge_alloc_rx_mem_pool(p_nxge_t nxgep)
2244 {
2245 uint32_t rdc_max;
2246 p_nxge_dma_pt_cfg_t p_all_cfgp;
2247 p_nxge_hw_pt_cfg_t p_cfgp;
2248 p_nxge_dma_pool_t dma_poolp;
2249 p_nxge_dma_common_t *dma_buf_p;
2250 p_nxge_dma_pool_t dma_cntl_poolp;
2251 p_nxge_dma_common_t *dma_cntl_p;
2252 uint32_t *num_chunks; /* per dma */
2253 nxge_status_t status = NXGE_OK;
2254
2255 uint32_t nxge_port_rbr_size;
2256 uint32_t nxge_port_rbr_spare_size;
2257 uint32_t nxge_port_rcr_size;
2258 uint32_t rx_cntl_alloc_size;
2259
2260 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rx_mem_pool"));
2261
2262 p_all_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;
2263 p_cfgp = (p_nxge_hw_pt_cfg_t)&p_all_cfgp->hw_config;
2264 rdc_max = NXGE_MAX_RDCS;
2265
2266 /*
2267 * Allocate memory for the common DMA data structures.
2268 */
2269 dma_poolp = (p_nxge_dma_pool_t)KMEM_ZALLOC(sizeof (nxge_dma_pool_t),
2270 KM_SLEEP);
2271 dma_buf_p = (p_nxge_dma_common_t *)KMEM_ZALLOC(
2272 sizeof (p_nxge_dma_common_t) * rdc_max, KM_SLEEP);
2273
2274 dma_cntl_poolp = (p_nxge_dma_pool_t)
2275 KMEM_ZALLOC(sizeof (nxge_dma_pool_t), KM_SLEEP);
2276 dma_cntl_p = (p_nxge_dma_common_t *)KMEM_ZALLOC(
2277 sizeof (p_nxge_dma_common_t) * rdc_max, KM_SLEEP);
2278
2279 num_chunks = (uint32_t *)KMEM_ZALLOC(
2280 sizeof (uint32_t) * rdc_max, KM_SLEEP);
2281
2282 /*
2283 * Assume that each DMA channel will be configured with
2284 * the default block size.
2285 	 * RBR block counts are rounded up to a multiple of the batch count (16).
2286 */
2287 nxge_port_rbr_size = p_all_cfgp->rbr_size;
2288 nxge_port_rcr_size = p_all_cfgp->rcr_size;
2289
2290 if (!nxge_port_rbr_size) {
2291 nxge_port_rbr_size = NXGE_RBR_RBB_DEFAULT;
2292 }
2293 if (nxge_port_rbr_size % NXGE_RXDMA_POST_BATCH) {
2294 nxge_port_rbr_size = (NXGE_RXDMA_POST_BATCH *
2295 (nxge_port_rbr_size / NXGE_RXDMA_POST_BATCH + 1));
2296 }
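	/*
	 * Worked example of the rounding above: a configured rbr_size of
	 * 1000 is not a multiple of NXGE_RXDMA_POST_BATCH (16), so it
	 * becomes 16 * (1000 / 16 + 1) = 16 * 63 = 1008.
	 */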
2297
2298 p_all_cfgp->rbr_size = nxge_port_rbr_size;
2299 nxge_port_rbr_spare_size = nxge_rbr_spare_size;
2300
2301 if (nxge_port_rbr_spare_size % NXGE_RXDMA_POST_BATCH) {
2302 nxge_port_rbr_spare_size = (NXGE_RXDMA_POST_BATCH *
2303 (nxge_port_rbr_spare_size / NXGE_RXDMA_POST_BATCH + 1));
2304 }
2305 if (nxge_port_rbr_size > RBR_DEFAULT_MAX_BLKS) {
2306 NXGE_DEBUG_MSG((nxgep, MEM_CTL,
2307 "nxge_alloc_rx_mem_pool: RBR size too high %d, "
2308 "set to default %d",
2309 nxge_port_rbr_size, RBR_DEFAULT_MAX_BLKS));
2310 nxge_port_rbr_size = RBR_DEFAULT_MAX_BLKS;
2311 }
2312 if (nxge_port_rcr_size > RCR_DEFAULT_MAX) {
2313 NXGE_DEBUG_MSG((nxgep, MEM_CTL,
2314 		    "nxge_alloc_rx_mem_pool: RCR size too high %d, "
2315 "set to default %d",
2316 nxge_port_rcr_size, RCR_DEFAULT_MAX));
2317 nxge_port_rcr_size = RCR_DEFAULT_MAX;
2318 }
2319
2320 /*
2321 	 * N2/NIU limits the descriptor sizes: contiguous memory
2322 	 * allocation for data buffers is capped at 4M (contig_mem_alloc),
2323 	 * and control buffers must be little endian (allocated with the
2324 	 * ddi/dki mem alloc functions).
2325 */
2326 #if defined(sun4v) && defined(NIU_LP_WORKAROUND)
2327 if (nxgep->niu_type == N2_NIU) {
2328 nxge_port_rbr_spare_size = 0;
2329 if ((nxge_port_rbr_size > NXGE_NIU_CONTIG_RBR_MAX) ||
2330 (!ISP2(nxge_port_rbr_size))) {
2331 nxge_port_rbr_size = NXGE_NIU_CONTIG_RBR_MAX;
2332 }
2333 if ((nxge_port_rcr_size > NXGE_NIU_CONTIG_RCR_MAX) ||
2334 (!ISP2(nxge_port_rcr_size))) {
2335 nxge_port_rcr_size = NXGE_NIU_CONTIG_RCR_MAX;
2336 }
2337 }
2338 #endif
2339
2340 /*
2341 * Addresses of receive block ring, receive completion ring and the
2342 * mailbox must be all cache-aligned (64 bytes).
2343 */
2344 rx_cntl_alloc_size = nxge_port_rbr_size + nxge_port_rbr_spare_size;
2345 rx_cntl_alloc_size *= (sizeof (rx_desc_t));
2346 rx_cntl_alloc_size += (sizeof (rcr_entry_t) * nxge_port_rcr_size);
2347 rx_cntl_alloc_size += sizeof (rxdma_mailbox_t);
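	/*
	 * For illustration: with nxge_port_rbr_size = 1024, no spares,
	 * and nxge_port_rcr_size = 512, this amounts to
	 * 1024 * sizeof (rx_desc_t) + 512 * sizeof (rcr_entry_t) +
	 * sizeof (rxdma_mailbox_t) bytes; the exact total depends on
	 * those structure sizes.
	 */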
2348
2349 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_alloc_rx_mem_pool: "
2350 "nxge_port_rbr_size = %d nxge_port_rbr_spare_size = %d "
2351 "nxge_port_rcr_size = %d "
2352 "rx_cntl_alloc_size = %d",
2353 nxge_port_rbr_size, nxge_port_rbr_spare_size,
2354 nxge_port_rcr_size,
2355 rx_cntl_alloc_size));
2356
2357 #if defined(sun4v) && defined(NIU_LP_WORKAROUND)
2358 if (nxgep->niu_type == N2_NIU) {
2359 uint32_t rx_buf_alloc_size = (nxgep->rx_default_block_size *
2360 (nxge_port_rbr_size + nxge_port_rbr_spare_size));
2361
2362 if (!ISP2(rx_buf_alloc_size)) {
2363 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2364 "==> nxge_alloc_rx_mem_pool: "
2365 			    "buffer size must be a power of 2"));
2366 status |= (NXGE_ERROR | NXGE_DDI_FAILED);
2367 goto nxge_alloc_rx_mem_pool_exit;
2368 }
2369
2370 if (rx_buf_alloc_size > (1 << 22)) {
2371 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2372 "==> nxge_alloc_rx_mem_pool: "
2373 			    "buffer size limited to 4M"));
2374 status |= (NXGE_ERROR | NXGE_DDI_FAILED);
2375 goto nxge_alloc_rx_mem_pool_exit;
2376 }
2377
2378 if (rx_cntl_alloc_size < 0x2000) {
2379 rx_cntl_alloc_size = 0x2000;
2380 }
2381 }
2382 #endif
2383 nxgep->nxge_port_rbr_size = nxge_port_rbr_size;
2384 nxgep->nxge_port_rcr_size = nxge_port_rcr_size;
2385 nxgep->nxge_port_rbr_spare_size = nxge_port_rbr_spare_size;
2386 nxgep->nxge_port_rx_cntl_alloc_size = rx_cntl_alloc_size;
2387
2388 dma_poolp->ndmas = p_cfgp->max_rdcs;
2389 dma_poolp->num_chunks = num_chunks;
2390 dma_poolp->buf_allocated = B_TRUE;
2391 nxgep->rx_buf_pool_p = dma_poolp;
2392 dma_poolp->dma_buf_pool_p = dma_buf_p;
2393
2394 dma_cntl_poolp->ndmas = p_cfgp->max_rdcs;
2395 dma_cntl_poolp->buf_allocated = B_TRUE;
2396 nxgep->rx_cntl_pool_p = dma_cntl_poolp;
2397 dma_cntl_poolp->dma_buf_pool_p = dma_cntl_p;
2398
2399 /* Allocate the receive rings, too. */
2400 nxgep->rx_rbr_rings =
2401 KMEM_ZALLOC(sizeof (rx_rbr_rings_t), KM_SLEEP);
2402 nxgep->rx_rbr_rings->rbr_rings =
2403 KMEM_ZALLOC(sizeof (p_rx_rbr_ring_t) * rdc_max, KM_SLEEP);
2404 nxgep->rx_rcr_rings =
2405 KMEM_ZALLOC(sizeof (rx_rcr_rings_t), KM_SLEEP);
2406 nxgep->rx_rcr_rings->rcr_rings =
2407 KMEM_ZALLOC(sizeof (p_rx_rcr_ring_t) * rdc_max, KM_SLEEP);
2408 nxgep->rx_mbox_areas_p =
2409 KMEM_ZALLOC(sizeof (rx_mbox_areas_t), KM_SLEEP);
2410 nxgep->rx_mbox_areas_p->rxmbox_areas =
2411 KMEM_ZALLOC(sizeof (p_rx_mbox_t) * rdc_max, KM_SLEEP);
2412
2413 nxgep->rx_rbr_rings->ndmas = nxgep->rx_rcr_rings->ndmas =
2414 p_cfgp->max_rdcs;
2415
2416 NXGE_DEBUG_MSG((nxgep, DMA_CTL,
2417 "<== nxge_alloc_rx_mem_pool:status 0x%08x", status));
2418
2419 nxge_alloc_rx_mem_pool_exit:
2420 return (status);
2421 }
2422
2423 /*
2424 * nxge_alloc_rxb
2425 *
2426 * Allocate buffers for an RDC.
2427 *
2428 * Arguments:
2429 * nxgep
2430 * channel The channel to map into our kernel space.
2431 *
2432 * Notes:
2433 *
2434 * NPI function calls:
2435 *
2436 * NXGE function calls:
2437 *
2438 * Registers accessed:
2439 *
2440 * Context:
2441 *
2442 * Taking apart:
2443 *
2444 * Open questions:
2445 *
2446 */
2447 nxge_status_t
2448 nxge_alloc_rxb(
2449 p_nxge_t nxgep,
2450 int channel)
2451 {
2452 size_t rx_buf_alloc_size;
2453 nxge_status_t status = NXGE_OK;
2454
2455 nxge_dma_common_t **data;
2456 nxge_dma_common_t **control;
2457 uint32_t *num_chunks;
2458
2459 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rxb"));
2460
2461 /*
2462 * Allocate memory for the receive buffers and descriptor rings.
2463 * Replace these allocation functions with the interface functions
2464 * provided by the partition manager if/when they are available.
2465 */
2466
2467 /*
2468 * Allocate memory for the receive buffer blocks.
2469 */
2470 rx_buf_alloc_size = (nxgep->rx_default_block_size *
2471 (nxgep->nxge_port_rbr_size + nxgep->nxge_port_rbr_spare_size));
2472
2473 data = &nxgep->rx_buf_pool_p->dma_buf_pool_p[channel];
2474 num_chunks = &nxgep->rx_buf_pool_p->num_chunks[channel];
2475
2476 if ((status = nxge_alloc_rx_buf_dma(
2477 nxgep, channel, data, rx_buf_alloc_size,
2478 nxgep->rx_default_block_size, num_chunks)) != NXGE_OK) {
2479 return (status);
2480 }
2481
2482 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_alloc_rxb(): "
2483 "dma %d dma_buf_p %llx &dma_buf_p %llx", channel, *data, data));
2484
2485 /*
2486 * Allocate memory for descriptor rings and mailbox.
2487 */
2488 control = &nxgep->rx_cntl_pool_p->dma_buf_pool_p[channel];
2489
2490 if ((status = nxge_alloc_rx_cntl_dma(
2491 nxgep, channel, control, nxgep->nxge_port_rx_cntl_alloc_size))
2492 != NXGE_OK) {
2493 nxge_free_rx_cntl_dma(nxgep, *control);
2494 (*data)->buf_alloc_state |= BUF_ALLOCATED_WAIT_FREE;
2495 nxge_free_rx_buf_dma(nxgep, *data, *num_chunks);
2496 return (status);
2497 }
2498
2499 NXGE_DEBUG_MSG((nxgep, DMA_CTL,
2500 	    "<== nxge_alloc_rxb: status 0x%08x", status));
2501
2502 return (status);
2503 }
2504
2505 void
2506 nxge_free_rxb(
2507 p_nxge_t nxgep,
2508 int channel)
2509 {
2510 nxge_dma_common_t *data;
2511 nxge_dma_common_t *control;
2512 uint32_t num_chunks;
2513
2514 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_free_rxb"));
2515
2516 data = nxgep->rx_buf_pool_p->dma_buf_pool_p[channel];
2517 num_chunks = nxgep->rx_buf_pool_p->num_chunks[channel];
2518 nxge_free_rx_buf_dma(nxgep, data, num_chunks);
2519
2520 nxgep->rx_buf_pool_p->dma_buf_pool_p[channel] = 0;
2521 nxgep->rx_buf_pool_p->num_chunks[channel] = 0;
2522
2523 control = nxgep->rx_cntl_pool_p->dma_buf_pool_p[channel];
2524 nxge_free_rx_cntl_dma(nxgep, control);
2525
2526 nxgep->rx_cntl_pool_p->dma_buf_pool_p[channel] = 0;
2527
2528 KMEM_FREE(data, sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK);
2529 KMEM_FREE(control, sizeof (nxge_dma_common_t));
2530
2531 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_free_rxb"));
2532 }
2533
2534 static void
2535 nxge_free_rx_mem_pool(p_nxge_t nxgep)
2536 {
2537 int rdc_max = NXGE_MAX_RDCS;
2538
2539 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_free_rx_mem_pool"));
2540
2541 if (!nxgep->rx_buf_pool_p || !nxgep->rx_buf_pool_p->buf_allocated) {
2542 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
2543 "<== nxge_free_rx_mem_pool "
2544 		    "(null rx buf pool or buf not allocated)"));
2545 return;
2546 }
2547 if (!nxgep->rx_cntl_pool_p || !nxgep->rx_cntl_pool_p->buf_allocated) {
2548 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
2549 "<== nxge_free_rx_mem_pool "
2550 		    "(null rx cntl buf pool or cntl buf not allocated)"));
2551 return;
2552 }
2553
2554 KMEM_FREE(nxgep->rx_cntl_pool_p->dma_buf_pool_p,
2555 sizeof (p_nxge_dma_common_t) * rdc_max);
2556 KMEM_FREE(nxgep->rx_cntl_pool_p, sizeof (nxge_dma_pool_t));
2557
2558 KMEM_FREE(nxgep->rx_buf_pool_p->num_chunks,
2559 sizeof (uint32_t) * rdc_max);
2560 KMEM_FREE(nxgep->rx_buf_pool_p->dma_buf_pool_p,
2561 sizeof (p_nxge_dma_common_t) * rdc_max);
2562 KMEM_FREE(nxgep->rx_buf_pool_p, sizeof (nxge_dma_pool_t));
2563
2564 nxgep->rx_buf_pool_p = 0;
2565 nxgep->rx_cntl_pool_p = 0;
2566
2567 KMEM_FREE(nxgep->rx_rbr_rings->rbr_rings,
2568 sizeof (p_rx_rbr_ring_t) * rdc_max);
2569 KMEM_FREE(nxgep->rx_rbr_rings, sizeof (rx_rbr_rings_t));
2570 KMEM_FREE(nxgep->rx_rcr_rings->rcr_rings,
2571 sizeof (p_rx_rcr_ring_t) * rdc_max);
2572 KMEM_FREE(nxgep->rx_rcr_rings, sizeof (rx_rcr_rings_t));
2573 KMEM_FREE(nxgep->rx_mbox_areas_p->rxmbox_areas,
2574 sizeof (p_rx_mbox_t) * rdc_max);
2575 KMEM_FREE(nxgep->rx_mbox_areas_p, sizeof (rx_mbox_areas_t));
2576
2577 nxgep->rx_rbr_rings = 0;
2578 nxgep->rx_rcr_rings = 0;
2579 nxgep->rx_mbox_areas_p = 0;
2580
2581 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_free_rx_mem_pool"));
2582 }
2583
2584
2585 static nxge_status_t
2586 nxge_alloc_rx_buf_dma(p_nxge_t nxgep, uint16_t dma_channel,
2587 p_nxge_dma_common_t *dmap,
2588 size_t alloc_size, size_t block_size, uint32_t *num_chunks)
2589 {
2590 p_nxge_dma_common_t rx_dmap;
2591 nxge_status_t status = NXGE_OK;
2592 size_t total_alloc_size;
2593 size_t allocated = 0;
2594 int i, size_index, array_size;
2595 boolean_t use_kmem_alloc = B_FALSE;
2596
2597 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rx_buf_dma"));
2598
2599 rx_dmap = (p_nxge_dma_common_t)
2600 KMEM_ZALLOC(sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK,
2601 KM_SLEEP);
2602
2603 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
2604 " alloc_rx_buf_dma rdc %d asize %x bsize %x bbuf %llx ",
2605 dma_channel, alloc_size, block_size, dmap));
2606
2607 total_alloc_size = alloc_size;
2608
2609 #if defined(RX_USE_RECLAIM_POST)
2610 total_alloc_size = alloc_size + alloc_size/4;
2611 #endif
2612
2613 i = 0;
2614 size_index = 0;
2615 array_size = sizeof (alloc_sizes)/sizeof (size_t);
2616 while ((size_index < array_size) &&
2617 (alloc_sizes[size_index] < alloc_size))
2618 size_index++;
2619 if (size_index >= array_size) {
2620 size_index = array_size - 1;
2621 }
2622
2623 /* For Neptune, use kmem_alloc if the kmem flag is set. */
2624 if (nxgep->niu_type != N2_NIU && nxge_use_kmem_alloc) {
2625 use_kmem_alloc = B_TRUE;
2626 #if defined(__x86)
2627 size_index = 0;
2628 #endif
2629 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
2630 "==> nxge_alloc_rx_buf_dma: "
2631 "Neptune use kmem_alloc() - size_index %d",
2632 size_index));
2633 }
2634
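	/*
	 * The loop below fills the pool in chunks: it starts with the
	 * smallest alloc_sizes[] entry that covers the request and, each
	 * time an allocation fails, falls back to the next smaller size,
	 * accumulating chunks until total_alloc_size is satisfied or
	 * NXGE_DMA_BLOCK chunks are in use.
	 */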
2635 while ((allocated < total_alloc_size) &&
2636 (size_index >= 0) && (i < NXGE_DMA_BLOCK)) {
2637 rx_dmap[i].dma_chunk_index = i;
2638 rx_dmap[i].block_size = block_size;
2639 rx_dmap[i].alength = alloc_sizes[size_index];
2640 rx_dmap[i].orig_alength = rx_dmap[i].alength;
2641 rx_dmap[i].nblocks = alloc_sizes[size_index] / block_size;
2642 rx_dmap[i].dma_channel = dma_channel;
2643 rx_dmap[i].contig_alloc_type = B_FALSE;
2644 rx_dmap[i].kmem_alloc_type = B_FALSE;
2645 rx_dmap[i].buf_alloc_type = DDI_MEM_ALLOC;
2646
2647 /*
2648 * N2/NIU: data buffers must be contiguous as the driver
2649 * needs to call Hypervisor api to set up
2650 * logical pages.
2651 */
2652 if ((nxgep->niu_type == N2_NIU) && (NXGE_DMA_BLOCK == 1)) {
2653 rx_dmap[i].contig_alloc_type = B_TRUE;
2654 rx_dmap[i].buf_alloc_type = CONTIG_MEM_ALLOC;
2655 } else if (use_kmem_alloc) {
2656 /* For Neptune, use kmem_alloc */
2657 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
2658 "==> nxge_alloc_rx_buf_dma: "
2659 "Neptune use kmem_alloc()"));
2660 rx_dmap[i].kmem_alloc_type = B_TRUE;
2661 rx_dmap[i].buf_alloc_type = KMEM_ALLOC;
2662 }
2663
2664 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
2665 "alloc_rx_buf_dma rdc %d chunk %d bufp %llx size %x "
2666 "i %d nblocks %d alength %d",
2667 dma_channel, i, &rx_dmap[i], block_size,
2668 i, rx_dmap[i].nblocks,
2669 rx_dmap[i].alength));
2670 status = nxge_dma_mem_alloc(nxgep, nxge_force_dma,
2671 &nxge_rx_dma_attr,
2672 rx_dmap[i].alength,
2673 &nxge_dev_buf_dma_acc_attr,
2674 DDI_DMA_READ | DDI_DMA_STREAMING,
2675 (p_nxge_dma_common_t)(&rx_dmap[i]));
2676 if (status != NXGE_OK) {
2677 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2678 "nxge_alloc_rx_buf_dma: Alloc Failed: "
2679 "dma %d size_index %d size requested %d",
2680 dma_channel,
2681 size_index,
2682 rx_dmap[i].alength));
2683 size_index--;
2684 } else {
2685 rx_dmap[i].buf_alloc_state = BUF_ALLOCATED;
2686 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
2687 " nxge_alloc_rx_buf_dma DONE alloc mem: "
2688 "dma %d dma_buf_p $%p kaddrp $%p alength %d "
2689 "buf_alloc_state %d alloc_type %d",
2690 dma_channel,
2691 &rx_dmap[i],
2692 rx_dmap[i].kaddrp,
2693 rx_dmap[i].alength,
2694 rx_dmap[i].buf_alloc_state,
2695 rx_dmap[i].buf_alloc_type));
2696 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
2697 " alloc_rx_buf_dma allocated rdc %d "
2698 "chunk %d size %x dvma %x bufp %llx kaddrp $%p",
2699 dma_channel, i, rx_dmap[i].alength,
2700 rx_dmap[i].ioaddr_pp, &rx_dmap[i],
2701 rx_dmap[i].kaddrp));
2702 i++;
2703 allocated += alloc_sizes[size_index];
2704 }
2705 }
2706
2707 if (allocated < total_alloc_size) {
2708 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2709 "==> nxge_alloc_rx_buf_dma: not enough for channel %d "
2710 "allocated 0x%x requested 0x%x",
2711 dma_channel,
2712 allocated, total_alloc_size));
2713 status = NXGE_ERROR;
2714 goto nxge_alloc_rx_mem_fail1;
2715 }
2716
2717 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
2718 "==> nxge_alloc_rx_buf_dma: Allocated for channel %d "
2719 "allocated 0x%x requested 0x%x",
2720 dma_channel,
2721 allocated, total_alloc_size));
2722
2723 NXGE_DEBUG_MSG((nxgep, DMA_CTL,
2724 " alloc_rx_buf_dma rdc %d allocated %d chunks",
2725 dma_channel, i));
2726 *num_chunks = i;
2727 *dmap = rx_dmap;
2728
2729 goto nxge_alloc_rx_mem_exit;
2730
2731 nxge_alloc_rx_mem_fail1:
2732 KMEM_FREE(rx_dmap, sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK);
2733
2734 nxge_alloc_rx_mem_exit:
2735 NXGE_DEBUG_MSG((nxgep, DMA_CTL,
2736 "<== nxge_alloc_rx_buf_dma status 0x%08x", status));
2737
2738 return (status);
2739 }
2740
2741 /*ARGSUSED*/
2742 static void
2743 nxge_free_rx_buf_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap,
2744 uint32_t num_chunks)
2745 {
2746 int i;
2747
2748 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
2749 "==> nxge_free_rx_buf_dma: # of chunks %d", num_chunks));
2750
2751 if (dmap == 0)
2752 return;
2753
2754 for (i = 0; i < num_chunks; i++) {
2755 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
2756 "==> nxge_free_rx_buf_dma: chunk %d dmap 0x%llx",
2757 i, dmap));
2758 nxge_dma_free_rx_data_buf(dmap++);
2759 }
2760
2761 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_free_rx_buf_dma"));
2762 }
2763
2764 /*ARGSUSED*/
2765 static nxge_status_t
2766 nxge_alloc_rx_cntl_dma(p_nxge_t nxgep, uint16_t dma_channel,
2767 p_nxge_dma_common_t *dmap, size_t size)
2768 {
2769 p_nxge_dma_common_t rx_dmap;
2770 nxge_status_t status = NXGE_OK;
2771
2772 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rx_cntl_dma"));
2773
2774 rx_dmap = (p_nxge_dma_common_t)
2775 KMEM_ZALLOC(sizeof (nxge_dma_common_t), KM_SLEEP);
2776
2777 rx_dmap->contig_alloc_type = B_FALSE;
2778 rx_dmap->kmem_alloc_type = B_FALSE;
2779
2780 status = nxge_dma_mem_alloc(nxgep, nxge_force_dma,
2781 &nxge_desc_dma_attr,
2782 size,
2783 &nxge_dev_desc_dma_acc_attr,
2784 DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
2785 rx_dmap);
2786 if (status != NXGE_OK) {
2787 goto nxge_alloc_rx_cntl_dma_fail1;
2788 }
2789
2790 *dmap = rx_dmap;
2791 goto nxge_alloc_rx_cntl_dma_exit;
2792
2793 nxge_alloc_rx_cntl_dma_fail1:
2794 KMEM_FREE(rx_dmap, sizeof (nxge_dma_common_t));
2795
2796 nxge_alloc_rx_cntl_dma_exit:
2797 NXGE_DEBUG_MSG((nxgep, DMA_CTL,
2798 "<== nxge_alloc_rx_cntl_dma status 0x%08x", status));
2799
2800 return (status);
2801 }
2802
2803 /*ARGSUSED*/
2804 static void
2805 nxge_free_rx_cntl_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap)
2806 {
2807 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_free_rx_cntl_dma"));
2808
2809 if (dmap == 0)
2810 return;
2811
2812 nxge_dma_mem_free(dmap);
2813
2814 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_free_rx_cntl_dma"));
2815 }
2816
2817 typedef struct {
2818 size_t tx_size;
2819 size_t cr_size;
2820 size_t threshhold;
2821 } nxge_tdc_sizes_t;
2822
2823 static
2824 nxge_status_t
2825 nxge_tdc_sizes(
2826 nxge_t *nxgep,
2827 nxge_tdc_sizes_t *sizes)
2828 {
2829 uint32_t threshhold; /* The bcopy() threshhold */
2830 size_t tx_size; /* Transmit buffer size */
2831 size_t cr_size; /* Completion ring size */
2832
2833 /*
2834 * Assume that each DMA channel will be configured with the
2835 * default transmit buffer size for copying transmit data.
2836 * (If a packet is bigger than this, it will not be copied.)
2837 */
2838 if (nxgep->niu_type == N2_NIU) {
2839 threshhold = TX_BCOPY_SIZE;
2840 } else {
2841 threshhold = nxge_bcopy_thresh;
2842 }
2843 tx_size = nxge_tx_ring_size * threshhold;
2844
2845 cr_size = nxge_tx_ring_size * sizeof (tx_desc_t);
2846 cr_size += sizeof (txdma_mailbox_t);
2847
2848 #if defined(sun4v) && defined(NIU_LP_WORKAROUND)
2849 if (nxgep->niu_type == N2_NIU) {
2850 if (!ISP2(tx_size)) {
2851 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2852 "==> nxge_tdc_sizes: Tx size"
2853 " must be power of 2"));
2854 return (NXGE_ERROR);
2855 }
2856
2857 if (tx_size > (1 << 22)) {
2858 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2859 "==> nxge_tdc_sizes: Tx size"
2860 " limited to 4M"));
2861 return (NXGE_ERROR);
2862 }
2863
2864 if (cr_size < 0x2000)
2865 cr_size = 0x2000;
2866 }
2867 #endif
2868
2869 sizes->threshhold = threshhold;
2870 sizes->tx_size = tx_size;
2871 sizes->cr_size = cr_size;
2872
2873 return (NXGE_OK);
2874 }
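
/*
 * Worked example for nxge_tdc_sizes() (hypothetical values): with
 * nxge_tx_ring_size = 1024 and a 2K bcopy threshold, tx_size is
 * 1024 * 2048 = 2M, which passes the power-of-2 and 4M checks above;
 * cr_size is 1024 descriptors plus the mailbox, raised to at least
 * 8K (0x2000) on N2/NIU.
 */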
2875 /*
2876 * nxge_alloc_txb
2877 *
2878 * Allocate buffers for an TDC.
2879 *
2880 * Arguments:
2881 * nxgep
2882 * channel The channel to map into our kernel space.
2883 *
2884 * Notes:
2885 *
2886 * NPI function calls:
2887 *
2888 * NXGE function calls:
2889 *
2890 * Registers accessed:
2891 *
2892 * Context:
2893 *
2894 * Taking apart:
2895 *
2896 * Open questions:
2897 *
2898 */
2899 nxge_status_t
2900 nxge_alloc_txb(
2901 p_nxge_t nxgep,
2902 int channel)
2903 {
2904 nxge_dma_common_t **dma_buf_p;
2905 nxge_dma_common_t **dma_cntl_p;
2906 uint32_t *num_chunks;
2907 nxge_status_t status = NXGE_OK;
2908
2909 nxge_tdc_sizes_t sizes;
2910
2911 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_txb"));
2912
2913 if (nxge_tdc_sizes(nxgep, &sizes) != NXGE_OK)
2914 return (NXGE_ERROR);
2915
2916 /*
2917 * Allocate memory for transmit buffers and descriptor rings.
2918 * Replace these allocation functions with the interface functions
2919 * provided by the partition manager Real Soon Now.
2920 */
2921 dma_buf_p = &nxgep->tx_buf_pool_p->dma_buf_pool_p[channel];
2922 num_chunks = &nxgep->tx_buf_pool_p->num_chunks[channel];
2923
2924 dma_cntl_p = &nxgep->tx_cntl_pool_p->dma_buf_pool_p[channel];
2925
2926 	/*
2927 	 * Allocate memory for the transmit buffer pool.
2928 	 */
2933 NXGE_DEBUG_MSG((nxgep, DMA_CTL,
2934 "sizes: tx: %ld, cr:%ld, th:%ld",
2935 sizes.tx_size, sizes.cr_size, sizes.threshhold));
2936
2937 *num_chunks = 0;
2938 status = nxge_alloc_tx_buf_dma(nxgep, channel, dma_buf_p,
2939 sizes.tx_size, sizes.threshhold, num_chunks);
2940 if (status != NXGE_OK) {
2941 cmn_err(CE_NOTE, "nxge_alloc_tx_buf_dma failed!");
2942 return (status);
2943 }
2944
2945 /*
2946 * Allocate memory for descriptor rings and mailbox.
2947 */
2948 status = nxge_alloc_tx_cntl_dma(nxgep, channel, dma_cntl_p,
2949 sizes.cr_size);
2950 if (status != NXGE_OK) {
2951 nxge_free_tx_buf_dma(nxgep, *dma_buf_p, *num_chunks);
2952 cmn_err(CE_NOTE, "nxge_alloc_tx_cntl_dma failed!");
2953 return (status);
2954 }
2955
2956 return (NXGE_OK);
2957 }
2958
2959 void
2960 nxge_free_txb(
2961 p_nxge_t nxgep,
2962 int channel)
2963 {
2964 nxge_dma_common_t *data;
2965 nxge_dma_common_t *control;
2966 uint32_t num_chunks;
2967
2968 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_free_txb"));
2969
2970 data = nxgep->tx_buf_pool_p->dma_buf_pool_p[channel];
2971 num_chunks = nxgep->tx_buf_pool_p->num_chunks[channel];
2972 nxge_free_tx_buf_dma(nxgep, data, num_chunks);
2973
2974 nxgep->tx_buf_pool_p->dma_buf_pool_p[channel] = 0;
2975 nxgep->tx_buf_pool_p->num_chunks[channel] = 0;
2976
2977 control = nxgep->tx_cntl_pool_p->dma_buf_pool_p[channel];
2978 nxge_free_tx_cntl_dma(nxgep, control);
2979
2980 nxgep->tx_cntl_pool_p->dma_buf_pool_p[channel] = 0;
2981
2982 KMEM_FREE(data, sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK);
2983 KMEM_FREE(control, sizeof (nxge_dma_common_t));
2984
2985 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_free_txb"));
2986 }
2987
2988 /*
2989 * nxge_alloc_tx_mem_pool
2990 *
2991 * This function allocates all of the per-port TDC control data structures.
2992 * The per-channel (TDC) data structures are allocated when needed.
2993 *
2994 * Arguments:
2995 * nxgep
2996 *
2997 * Notes:
2998 *
2999 * Context:
3000 * Any domain
3001 */
3002 nxge_status_t
3003 nxge_alloc_tx_mem_pool(p_nxge_t nxgep)
3004 {
3005 nxge_hw_pt_cfg_t *p_cfgp;
3006 nxge_dma_pool_t *dma_poolp;
3007 nxge_dma_common_t **dma_buf_p;
3008 nxge_dma_pool_t *dma_cntl_poolp;
3009 nxge_dma_common_t **dma_cntl_p;
3010 uint32_t *num_chunks; /* per dma */
3011 int tdc_max;
3012
3013 NXGE_DEBUG_MSG((nxgep, MEM_CTL, "==> nxge_alloc_tx_mem_pool"));
3014
3015 p_cfgp = &nxgep->pt_config.hw_config;
3016 tdc_max = NXGE_MAX_TDCS;
3017
3018 /*
3019 * Allocate memory for each transmit DMA channel.
3020 */
3021 dma_poolp = (p_nxge_dma_pool_t)KMEM_ZALLOC(sizeof (nxge_dma_pool_t),
3022 KM_SLEEP);
3023 dma_buf_p = (p_nxge_dma_common_t *)KMEM_ZALLOC(
3024 sizeof (p_nxge_dma_common_t) * tdc_max, KM_SLEEP);
3025
3026 dma_cntl_poolp = (p_nxge_dma_pool_t)
3027 KMEM_ZALLOC(sizeof (nxge_dma_pool_t), KM_SLEEP);
3028 dma_cntl_p = (p_nxge_dma_common_t *)KMEM_ZALLOC(
3029 sizeof (p_nxge_dma_common_t) * tdc_max, KM_SLEEP);
3030
3031 if (nxge_tx_ring_size > TDC_DEFAULT_MAX) {
3032 NXGE_DEBUG_MSG((nxgep, MEM_CTL,
3033 		    "nxge_alloc_tx_mem_pool: TX ring size too high %d, "
3034 "set to default %d",
3035 nxge_tx_ring_size, TDC_DEFAULT_MAX));
3036 nxge_tx_ring_size = TDC_DEFAULT_MAX;
3037 }
3038
3039 #if defined(sun4v) && defined(NIU_LP_WORKAROUND)
3040 /*
3041 	 * N2/NIU limits the descriptor sizes: contiguous memory
3042 	 * allocation for data buffers is capped at 4M (contig_mem_alloc),
3043 	 * and control buffers must be little endian (allocated with the
3044 	 * ddi/dki mem alloc functions). The transmit ring is limited
3045 	 * to 8K (including the mailbox).
3046 */
3047 if (nxgep->niu_type == N2_NIU) {
3048 if ((nxge_tx_ring_size > NXGE_NIU_CONTIG_TX_MAX) ||
3049 (!ISP2(nxge_tx_ring_size))) {
3050 nxge_tx_ring_size = NXGE_NIU_CONTIG_TX_MAX;
3051 }
3052 }
3053 #endif
3054
3055 nxgep->nxge_port_tx_ring_size = nxge_tx_ring_size;
3056
3057 num_chunks = (uint32_t *)KMEM_ZALLOC(
3058 sizeof (uint32_t) * tdc_max, KM_SLEEP);
3059
3060 dma_poolp->ndmas = p_cfgp->tdc.owned;
3061 dma_poolp->num_chunks = num_chunks;
3062 dma_poolp->dma_buf_pool_p = dma_buf_p;
3063 nxgep->tx_buf_pool_p = dma_poolp;
3064
3065 dma_poolp->buf_allocated = B_TRUE;
3066
3067 dma_cntl_poolp->ndmas = p_cfgp->tdc.owned;
3068 dma_cntl_poolp->dma_buf_pool_p = dma_cntl_p;
3069 nxgep->tx_cntl_pool_p = dma_cntl_poolp;
3070
3071 dma_cntl_poolp->buf_allocated = B_TRUE;
3072
3073 nxgep->tx_rings =
3074 KMEM_ZALLOC(sizeof (tx_rings_t), KM_SLEEP);
3075 nxgep->tx_rings->rings =
3076 KMEM_ZALLOC(sizeof (p_tx_ring_t) * tdc_max, KM_SLEEP);
3077 nxgep->tx_mbox_areas_p =
3078 KMEM_ZALLOC(sizeof (tx_mbox_areas_t), KM_SLEEP);
3079 nxgep->tx_mbox_areas_p->txmbox_areas_p =
3080 KMEM_ZALLOC(sizeof (p_tx_mbox_t) * tdc_max, KM_SLEEP);
3081
3082 nxgep->tx_rings->ndmas = p_cfgp->tdc.owned;
3083
3084 NXGE_DEBUG_MSG((nxgep, MEM_CTL,
3085 "==> nxge_alloc_tx_mem_pool: ndmas %d poolp->ndmas %d",
3086 tdc_max, dma_poolp->ndmas));
3087
3088 return (NXGE_OK);
3089 }
3090
3091 nxge_status_t
3092 nxge_alloc_tx_buf_dma(p_nxge_t nxgep, uint16_t dma_channel,
3093 p_nxge_dma_common_t *dmap, size_t alloc_size,
3094 size_t block_size, uint32_t *num_chunks)
3095 {
3096 p_nxge_dma_common_t tx_dmap;
3097 nxge_status_t status = NXGE_OK;
3098 size_t total_alloc_size;
3099 size_t allocated = 0;
3100 int i, size_index, array_size;
3101
3102 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_tx_buf_dma"));
3103
3104 tx_dmap = (p_nxge_dma_common_t)
3105 KMEM_ZALLOC(sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK,
3106 KM_SLEEP);
3107
3108 total_alloc_size = alloc_size;
3109 i = 0;
3110 size_index = 0;
3111 array_size = sizeof (alloc_sizes) / sizeof (size_t);
3112 while ((size_index < array_size) &&
3113 (alloc_sizes[size_index] < alloc_size))
3114 size_index++;
3115 if (size_index >= array_size) {
3116 size_index = array_size - 1;
3117 }
3118
3119 while ((allocated < total_alloc_size) &&
3120 (size_index >= 0) && (i < NXGE_DMA_BLOCK)) {
3121
3122 tx_dmap[i].dma_chunk_index = i;
3123 tx_dmap[i].block_size = block_size;
3124 tx_dmap[i].alength = alloc_sizes[size_index];
3125 tx_dmap[i].orig_alength = tx_dmap[i].alength;
3126 tx_dmap[i].nblocks = alloc_sizes[size_index] / block_size;
3127 tx_dmap[i].dma_channel = dma_channel;
3128 tx_dmap[i].contig_alloc_type = B_FALSE;
3129 tx_dmap[i].kmem_alloc_type = B_FALSE;
3130
3131 /*
3132 * N2/NIU: data buffers must be contiguous as the driver
3133 * needs to call Hypervisor api to set up
3134 * logical pages.
3135 */
3136 if ((nxgep->niu_type == N2_NIU) && (NXGE_DMA_BLOCK == 1)) {
3137 tx_dmap[i].contig_alloc_type = B_TRUE;
3138 }
3139
3140 status = nxge_dma_mem_alloc(nxgep, nxge_force_dma,
3141 &nxge_tx_dma_attr,
3142 tx_dmap[i].alength,
3143 &nxge_dev_buf_dma_acc_attr,
3144 DDI_DMA_WRITE | DDI_DMA_STREAMING,
3145 (p_nxge_dma_common_t)(&tx_dmap[i]));
3146 if (status != NXGE_OK) {
3147 size_index--;
3148 } else {
3149 i++;
3150 allocated += alloc_sizes[size_index];
3151 }
3152 }
3153
3154 if (allocated < total_alloc_size) {
3155 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3156 		    "==> nxge_alloc_tx_buf_dma: not enough for channel %d: "
3157 "allocated 0x%x requested 0x%x",
3158 dma_channel,
3159 allocated, total_alloc_size));
3160 status = NXGE_ERROR;
3161 goto nxge_alloc_tx_mem_fail1;
3162 }
3163
3164 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3165 "==> nxge_alloc_tx_buf_dma: Allocated for channel %d: "
3166 "allocated 0x%x requested 0x%x",
3167 dma_channel,
3168 allocated, total_alloc_size));
3169
3170 *num_chunks = i;
3171 *dmap = tx_dmap;
3172 NXGE_DEBUG_MSG((nxgep, DMA_CTL,
3173 "==> nxge_alloc_tx_buf_dma dmap 0x%016llx num chunks %d",
3174 *dmap, i));
3175 goto nxge_alloc_tx_mem_exit;
3176
3177 nxge_alloc_tx_mem_fail1:
3178 KMEM_FREE(tx_dmap, sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK);
3179
3180 nxge_alloc_tx_mem_exit:
3181 NXGE_DEBUG_MSG((nxgep, DMA_CTL,
3182 "<== nxge_alloc_tx_buf_dma status 0x%08x", status));
3183
3184 return (status);
3185 }
3186
3187 /*ARGSUSED*/
3188 static void
3189 nxge_free_tx_buf_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap,
3190 uint32_t num_chunks)
3191 {
3192 int i;
3193
3194 NXGE_DEBUG_MSG((nxgep, MEM_CTL, "==> nxge_free_tx_buf_dma"));
3195
3196 if (dmap == 0)
3197 return;
3198
3199 for (i = 0; i < num_chunks; i++) {
3200 nxge_dma_mem_free(dmap++);
3201 }
3202
3203 NXGE_DEBUG_MSG((nxgep, MEM_CTL, "<== nxge_free_tx_buf_dma"));
3204 }
3205
3206 /*ARGSUSED*/
3207 nxge_status_t
3208 nxge_alloc_tx_cntl_dma(p_nxge_t nxgep, uint16_t dma_channel,
3209 p_nxge_dma_common_t *dmap, size_t size)
3210 {
3211 p_nxge_dma_common_t tx_dmap;
3212 nxge_status_t status = NXGE_OK;
3213
3214 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_tx_cntl_dma"));
3215 tx_dmap = (p_nxge_dma_common_t)
3216 KMEM_ZALLOC(sizeof (nxge_dma_common_t), KM_SLEEP);
3217
3218 tx_dmap->contig_alloc_type = B_FALSE;
3219 tx_dmap->kmem_alloc_type = B_FALSE;
3220
3221 status = nxge_dma_mem_alloc(nxgep, nxge_force_dma,
3222 &nxge_desc_dma_attr,
3223 size,
3224 &nxge_dev_desc_dma_acc_attr,
3225 DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
3226 tx_dmap);
3227 if (status != NXGE_OK) {
3228 goto nxge_alloc_tx_cntl_dma_fail1;
3229 }
3230
3231 *dmap = tx_dmap;
3232 goto nxge_alloc_tx_cntl_dma_exit;
3233
3234 nxge_alloc_tx_cntl_dma_fail1:
3235 KMEM_FREE(tx_dmap, sizeof (nxge_dma_common_t));
3236
3237 nxge_alloc_tx_cntl_dma_exit:
3238 NXGE_DEBUG_MSG((nxgep, DMA_CTL,
3239 "<== nxge_alloc_tx_cntl_dma status 0x%08x", status));
3240
3241 return (status);
3242 }
3243
3244 /*ARGSUSED*/
3245 static void
3246 nxge_free_tx_cntl_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap)
3247 {
3248 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_free_tx_cntl_dma"));
3249
3250 if (dmap == 0)
3251 return;
3252
3253 nxge_dma_mem_free(dmap);
3254
3255 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_free_tx_cntl_dma"));
3256 }
3257
3258 /*
3259 * nxge_free_tx_mem_pool
3260 *
3261 * This function frees all of the per-port TDC control data structures.
3262 * The per-channel (TDC) data structures are freed when the channel
3263 * is stopped.
3264 *
3265 * Arguments:
3266 * nxgep
3267 *
3268 * Notes:
3269 *
3270 * Context:
3271 * Any domain
3272 */
3273 static void
3274 nxge_free_tx_mem_pool(p_nxge_t nxgep)
3275 {
3276 int tdc_max = NXGE_MAX_TDCS;
3277
3278 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_free_tx_mem_pool"));
3279
3280 if (!nxgep->tx_buf_pool_p || !nxgep->tx_buf_pool_p->buf_allocated) {
3281 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3282 "<== nxge_free_tx_mem_pool "
3283 		    "(null tx buf pool or buf not allocated)"));
3284 return;
3285 }
3286 if (!nxgep->tx_cntl_pool_p || !nxgep->tx_cntl_pool_p->buf_allocated) {
3287 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3288 "<== nxge_free_tx_mem_pool "
3289 		    "(null tx cntl buf pool or cntl buf not allocated)"));
3290 return;
3291 }
3292
3293 /* 1. Free the mailboxes. */
3294 KMEM_FREE(nxgep->tx_mbox_areas_p->txmbox_areas_p,
3295 sizeof (p_tx_mbox_t) * tdc_max);
3296 KMEM_FREE(nxgep->tx_mbox_areas_p, sizeof (tx_mbox_areas_t));
3297
3298 nxgep->tx_mbox_areas_p = 0;
3299
3300 /* 2. Free the transmit ring arrays. */
3301 KMEM_FREE(nxgep->tx_rings->rings,
3302 sizeof (p_tx_ring_t) * tdc_max);
3303 KMEM_FREE(nxgep->tx_rings, sizeof (tx_rings_t));
3304
3305 nxgep->tx_rings = 0;
3306
3307 /* 3. Free the completion ring data structures. */
3308 KMEM_FREE(nxgep->tx_cntl_pool_p->dma_buf_pool_p,
3309 sizeof (p_nxge_dma_common_t) * tdc_max);
3310 KMEM_FREE(nxgep->tx_cntl_pool_p, sizeof (nxge_dma_pool_t));
3311
3312 nxgep->tx_cntl_pool_p = 0;
3313
3314 /* 4. Free the data ring data structures. */
3315 KMEM_FREE(nxgep->tx_buf_pool_p->num_chunks,
3316 sizeof (uint32_t) * tdc_max);
3317 KMEM_FREE(nxgep->tx_buf_pool_p->dma_buf_pool_p,
3318 sizeof (p_nxge_dma_common_t) * tdc_max);
3319 KMEM_FREE(nxgep->tx_buf_pool_p, sizeof (nxge_dma_pool_t));
3320
3321 nxgep->tx_buf_pool_p = 0;
3322
3323 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_free_tx_mem_pool"));
3324 }
3325
3326 /*ARGSUSED*/
3327 static nxge_status_t
3328 nxge_dma_mem_alloc(p_nxge_t nxgep, dma_method_t method,
3329 struct ddi_dma_attr *dma_attrp,
3330 size_t length, ddi_device_acc_attr_t *acc_attr_p, uint_t xfer_flags,
3331 p_nxge_dma_common_t dma_p)
3332 {
3333 caddr_t kaddrp;
3334 int ddi_status = DDI_SUCCESS;
3335 boolean_t contig_alloc_type;
3336 boolean_t kmem_alloc_type;
3337
3338 contig_alloc_type = dma_p->contig_alloc_type;
3339
3340 if (contig_alloc_type && (nxgep->niu_type != N2_NIU)) {
3341 /*
3342 * contig_alloc_type for contiguous memory only allowed
3343 * for N2/NIU.
3344 */
3345 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3346 "nxge_dma_mem_alloc: alloc type not allowed (%d)",
3347 dma_p->contig_alloc_type));
3348 return (NXGE_ERROR | NXGE_DDI_FAILED);
3349 }
3350
3351 dma_p->dma_handle = NULL;
3352 dma_p->acc_handle = NULL;
3353 dma_p->kaddrp = dma_p->last_kaddrp = NULL;
3354 dma_p->first_ioaddr_pp = dma_p->last_ioaddr_pp = NULL;
3355 ddi_status = ddi_dma_alloc_handle(nxgep->dip, dma_attrp,
3356 DDI_DMA_DONTWAIT, NULL, &dma_p->dma_handle);
3357 if (ddi_status != DDI_SUCCESS) {
3358 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3359 "nxge_dma_mem_alloc:ddi_dma_alloc_handle failed."));
3360 return (NXGE_ERROR | NXGE_DDI_FAILED);
3361 }
3362
3363 kmem_alloc_type = dma_p->kmem_alloc_type;
3364
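	/*
	 * Three allocation paths follow, selected by flags the caller
	 * set in dma_p: ddi_dma_mem_alloc() is the default;
	 * kmem_alloc() is used on Neptune when nxge_use_kmem_alloc is
	 * set; contig_mem_alloc() is used for N2/NIU data buffers
	 * (sun4v only).  Every path must end up with exactly one DMA
	 * cookie bound.
	 */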
3365 switch (contig_alloc_type) {
3366 case B_FALSE:
3367 switch (kmem_alloc_type) {
3368 case B_FALSE:
3369 ddi_status = ddi_dma_mem_alloc(dma_p->dma_handle,
3370 length,
3371 acc_attr_p,
3372 xfer_flags,
3373 DDI_DMA_DONTWAIT, 0, &kaddrp, &dma_p->alength,
3374 &dma_p->acc_handle);
3375 if (ddi_status != DDI_SUCCESS) {
3376 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3377 "nxge_dma_mem_alloc: "
3378 "ddi_dma_mem_alloc failed"));
3379 ddi_dma_free_handle(&dma_p->dma_handle);
3380 dma_p->dma_handle = NULL;
3381 return (NXGE_ERROR | NXGE_DDI_FAILED);
3382 }
3383 if (dma_p->alength < length) {
3384 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3385 				    "nxge_dma_mem_alloc: ddi_dma_mem_alloc "
3386 "< length."));
3387 ddi_dma_mem_free(&dma_p->acc_handle);
3388 ddi_dma_free_handle(&dma_p->dma_handle);
3389 dma_p->acc_handle = NULL;
3390 dma_p->dma_handle = NULL;
3391 return (NXGE_ERROR);
3392 }
3393
3394 ddi_status = ddi_dma_addr_bind_handle(dma_p->dma_handle,
3395 NULL,
3396 kaddrp, dma_p->alength, xfer_flags,
3397 DDI_DMA_DONTWAIT,
3398 0, &dma_p->dma_cookie, &dma_p->ncookies);
3399 if (ddi_status != DDI_DMA_MAPPED) {
3400 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3401 "nxge_dma_mem_alloc: ddi_dma_addr_bind "
3402 "failed "
3403 				    "(status 0x%x ncookies %d.)", ddi_status,
3404 dma_p->ncookies));
3405 if (dma_p->acc_handle) {
3406 ddi_dma_mem_free(&dma_p->acc_handle);
3407 dma_p->acc_handle = NULL;
3408 }
3409 ddi_dma_free_handle(&dma_p->dma_handle);
3410 dma_p->dma_handle = NULL;
3411 return (NXGE_ERROR | NXGE_DDI_FAILED);
3412 }
3413
3414 if (dma_p->ncookies != 1) {
3415 NXGE_DEBUG_MSG((nxgep, DMA_CTL,
3416 "nxge_dma_mem_alloc:ddi_dma_addr_bind "
3417 "> 1 cookie"
3418 				    "(status 0x%x ncookies %d.)", ddi_status,
3419 dma_p->ncookies));
3420 (void) ddi_dma_unbind_handle(dma_p->dma_handle);
3421 if (dma_p->acc_handle) {
3422 ddi_dma_mem_free(&dma_p->acc_handle);
3423 dma_p->acc_handle = NULL;
3424 }
3425 ddi_dma_free_handle(&dma_p->dma_handle);
3426 dma_p->dma_handle = NULL;
3427 dma_p->acc_handle = NULL;
3428 return (NXGE_ERROR);
3429 }
3430 break;
3431
3432 case B_TRUE:
3433 kaddrp = KMEM_ALLOC(length, KM_NOSLEEP);
3434 if (kaddrp == NULL) {
3435 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3436 				    "nxge_dma_mem_alloc: "
3437 				    "kmem_alloc failed"));
3438 return (NXGE_ERROR);
3439 }
3440
3441 dma_p->alength = length;
3442 ddi_status = ddi_dma_addr_bind_handle(dma_p->dma_handle,
3443 NULL, kaddrp, dma_p->alength, xfer_flags,
3444 DDI_DMA_DONTWAIT, 0,
3445 &dma_p->dma_cookie, &dma_p->ncookies);
3446 if (ddi_status != DDI_DMA_MAPPED) {
3447 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3448 "nxge_dma_mem_alloc:ddi_dma_addr_bind: "
3449 "(kmem_alloc) failed kaddrp $%p length %d "
3450 				    "(status 0x%x (%d) ncookies %d.)",
3451 kaddrp, length,
3452 ddi_status, ddi_status, dma_p->ncookies));
3453 KMEM_FREE(kaddrp, length);
3454 dma_p->acc_handle = NULL;
3455 ddi_dma_free_handle(&dma_p->dma_handle);
3456 dma_p->dma_handle = NULL;
3457 dma_p->kaddrp = NULL;
3458 return (NXGE_ERROR | NXGE_DDI_FAILED);
3459 }
3460
3461 if (dma_p->ncookies != 1) {
3462 NXGE_DEBUG_MSG((nxgep, DMA_CTL,
3463 "nxge_dma_mem_alloc:ddi_dma_addr_bind "
3464 "(kmem_alloc) > 1 cookie"
3465 				    "(status 0x%x ncookies %d.)", ddi_status,
3466 dma_p->ncookies));
3467 (void) ddi_dma_unbind_handle(dma_p->dma_handle);
3468 KMEM_FREE(kaddrp, length);
3469 ddi_dma_free_handle(&dma_p->dma_handle);
3470 dma_p->dma_handle = NULL;
3471 dma_p->acc_handle = NULL;
3472 dma_p->kaddrp = NULL;
3473 return (NXGE_ERROR);
3474 }
3475
3476 dma_p->kaddrp = kaddrp;
3477
3478 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
3479 "nxge_dma_mem_alloc: kmem_alloc dmap $%p "
3480 "kaddr $%p alength %d",
3481 dma_p,
3482 kaddrp,
3483 dma_p->alength));
3484 break;
3485 }
3486 break;
3487
3488 #if defined(sun4v) && defined(NIU_LP_WORKAROUND)
3489 case B_TRUE:
3490 kaddrp = (caddr_t)contig_mem_alloc(length);
3491 if (kaddrp == NULL) {
3492 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3493 "nxge_dma_mem_alloc:contig_mem_alloc failed."));
3494 ddi_dma_free_handle(&dma_p->dma_handle);
3495 return (NXGE_ERROR | NXGE_DDI_FAILED);
3496 }
3497
3498 dma_p->alength = length;
3499 ddi_status = ddi_dma_addr_bind_handle(dma_p->dma_handle, NULL,
3500 kaddrp, dma_p->alength, xfer_flags, DDI_DMA_DONTWAIT, 0,
3501 &dma_p->dma_cookie, &dma_p->ncookies);
3502 if (ddi_status != DDI_DMA_MAPPED) {
3503 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3504 			    "nxge_dma_mem_alloc: ddi_dma_addr_bind failed "
3505 "(status 0x%x ncookies %d.)", ddi_status,
3506 dma_p->ncookies));
3507
3508 NXGE_DEBUG_MSG((nxgep, DMA_CTL,
3509 "==> nxge_dma_mem_alloc: (not mapped)"
3510 "length %lu (0x%x) "
3511 "free contig kaddrp $%p "
3512 "va_to_pa $%p",
3513 length, length,
3514 kaddrp,
3515 va_to_pa(kaddrp)));
3516
3517
3518 contig_mem_free((void *)kaddrp, length);
3519 ddi_dma_free_handle(&dma_p->dma_handle);
3520
3521 dma_p->dma_handle = NULL;
3522 dma_p->acc_handle = NULL;
3523 dma_p->alength = 0;
3524 dma_p->kaddrp = NULL;
3525
3526 return (NXGE_ERROR | NXGE_DDI_FAILED);
3527 }
3528
3529 if (dma_p->ncookies != 1 ||
3530 (dma_p->dma_cookie.dmac_laddress == 0)) {
3531 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3532 			    "nxge_dma_mem_alloc: ddi_dma_addr_bind > 1 "
3533 			    "cookie or dmac_laddress is NULL "
3534 			    "(status 0x%x dmac_laddress $%p "
3535 			    "dmac_size %d ncookies %d.)",
3536 ddi_status,
3537 dma_p->dma_cookie.dmac_laddress,
3538 dma_p->dma_cookie.dmac_size,
3539 dma_p->ncookies));
3540
3541 contig_mem_free((void *)kaddrp, length);
3542 (void) ddi_dma_unbind_handle(dma_p->dma_handle);
3543 ddi_dma_free_handle(&dma_p->dma_handle);
3544
3545 dma_p->alength = 0;
3546 dma_p->dma_handle = NULL;
3547 dma_p->acc_handle = NULL;
3548 dma_p->kaddrp = NULL;
3549
3550 return (NXGE_ERROR | NXGE_DDI_FAILED);
3551 }
3552 break;
3553
3554 #else
3555 case B_TRUE:
3556 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3557 "nxge_dma_mem_alloc: invalid alloc type for !sun4v"));
3558 return (NXGE_ERROR | NXGE_DDI_FAILED);
3559 #endif
3560 }
3561
3562 dma_p->kaddrp = kaddrp;
3563 dma_p->last_kaddrp = (unsigned char *)kaddrp +
3564 dma_p->alength - RXBUF_64B_ALIGNED;
3565 dma_p->ioaddr_pp = (unsigned char *)dma_p->dma_cookie.dmac_laddress;
3566 dma_p->last_ioaddr_pp =
3567 (unsigned char *)dma_p->dma_cookie.dmac_laddress +
3568 dma_p->alength - RXBUF_64B_ALIGNED;
3569
3570 NPI_DMA_ACC_HANDLE_SET(dma_p, dma_p->acc_handle);
3571
3572 #if defined(sun4v) && defined(NIU_LP_WORKAROUND)
3573 dma_p->orig_ioaddr_pp =
3574 (unsigned char *)dma_p->dma_cookie.dmac_laddress;
3575 dma_p->orig_alength = length;
3576 dma_p->orig_kaddrp = kaddrp;
3577 dma_p->orig_vatopa = (uint64_t)va_to_pa(kaddrp);
3578 #endif
3579
3580 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_dma_mem_alloc: "
3581 "dma buffer allocated: dma_p $%p "
3582 	    "return dmac_laddress from cookie $%p cookie dmac_size %d "
3583 "dma_p->ioaddr_p $%p "
3584 "dma_p->orig_ioaddr_p $%p "
3585 "orig_vatopa $%p "
3586 "alength %d (0x%x) "
3587 "kaddrp $%p "
3588 "length %d (0x%x)",
3589 dma_p,
3590 dma_p->dma_cookie.dmac_laddress, dma_p->dma_cookie.dmac_size,
3591 dma_p->ioaddr_pp,
3592 dma_p->orig_ioaddr_pp,
3593 dma_p->orig_vatopa,
3594 dma_p->alength, dma_p->alength,
3595 kaddrp,
3596 length, length));
3597
3598 return (NXGE_OK);
3599 }
3600
3601 static void
3602 nxge_dma_mem_free(p_nxge_dma_common_t dma_p)
3603 {
3604 if (dma_p->dma_handle != NULL) {
3605 if (dma_p->ncookies) {
3606 (void) ddi_dma_unbind_handle(dma_p->dma_handle);
3607 dma_p->ncookies = 0;
3608 }
3609 ddi_dma_free_handle(&dma_p->dma_handle);
3610 dma_p->dma_handle = NULL;
3611 }
3612
3613 if (dma_p->acc_handle != NULL) {
3614 ddi_dma_mem_free(&dma_p->acc_handle);
3615 dma_p->acc_handle = NULL;
3616 NPI_DMA_ACC_HANDLE_SET(dma_p, NULL);
3617 }
3618
3619 #if defined(sun4v) && defined(NIU_LP_WORKAROUND)
3620 if (dma_p->contig_alloc_type &&
3621 dma_p->orig_kaddrp && dma_p->orig_alength) {
3622 NXGE_DEBUG_MSG((NULL, DMA_CTL, "nxge_dma_mem_free: "
3623 "kaddrp $%p (orig_kaddrp $%p)"
3624 		    "mem type %d "
3625 "orig_alength %d "
3626 "alength 0x%x (%d)",
3627 dma_p->kaddrp,
3628 dma_p->orig_kaddrp,
3629 dma_p->contig_alloc_type,
3630 dma_p->orig_alength,
3631 dma_p->alength, dma_p->alength));
3632
3633 contig_mem_free(dma_p->orig_kaddrp, dma_p->orig_alength);
3634 dma_p->orig_alength = 0;
3635 dma_p->orig_kaddrp = NULL;
3636 dma_p->contig_alloc_type = B_FALSE;
3637 }
3638 #endif
3639 dma_p->kaddrp = NULL;
3640 dma_p->alength = 0;
3641 }
3642
3643 static void
3644 nxge_dma_free_rx_data_buf(p_nxge_dma_common_t dma_p)
3645 {
3646 uint64_t kaddr;
3647 uint32_t buf_size;
3648
3649 NXGE_DEBUG_MSG((NULL, DMA_CTL, "==> nxge_dma_free_rx_data_buf"));
3650
3651 if (dma_p->dma_handle != NULL) {
3652 if (dma_p->ncookies) {
3653 (void) ddi_dma_unbind_handle(dma_p->dma_handle);
3654 dma_p->ncookies = 0;
3655 }
3656 ddi_dma_free_handle(&dma_p->dma_handle);
3657 dma_p->dma_handle = NULL;
3658 }
3659
3660 if (dma_p->acc_handle != NULL) {
3661 ddi_dma_mem_free(&dma_p->acc_handle);
3662 dma_p->acc_handle = NULL;
3663 NPI_DMA_ACC_HANDLE_SET(dma_p, NULL);
3664 }
3665
3666 NXGE_DEBUG_MSG((NULL, DMA_CTL,
3667 "==> nxge_dma_free_rx_data_buf: dmap $%p buf_alloc_state %d",
3668 dma_p,
3669 dma_p->buf_alloc_state));
3670
3671 if (!(dma_p->buf_alloc_state & BUF_ALLOCATED_WAIT_FREE)) {
3672 NXGE_DEBUG_MSG((NULL, DMA_CTL,
3673 "<== nxge_dma_free_rx_data_buf: "
3674 "outstanding data buffers"));
3675 return;
3676 }
3677
3678 #if defined(sun4v) && defined(NIU_LP_WORKAROUND)
3679 if (dma_p->contig_alloc_type &&
3680 dma_p->orig_kaddrp && dma_p->orig_alength) {
3681 NXGE_DEBUG_MSG((NULL, DMA_CTL, "nxge_dma_free_rx_data_buf: "
3682 "kaddrp $%p (orig_kaddrp $%p)"
3683 		    "mem type %d "
3684 "orig_alength %d "
3685 "alength 0x%x (%d)",
3686 dma_p->kaddrp,
3687 dma_p->orig_kaddrp,
3688 dma_p->contig_alloc_type,
3689 dma_p->orig_alength,
3690 dma_p->alength, dma_p->alength));
3691
3692 kaddr = (uint64_t)dma_p->orig_kaddrp;
3693 buf_size = dma_p->orig_alength;
3694 nxge_free_buf(CONTIG_MEM_ALLOC, kaddr, buf_size);
3695 dma_p->orig_alength = 0;
3696 dma_p->orig_kaddrp = NULL;
3697 dma_p->contig_alloc_type = B_FALSE;
3698 dma_p->kaddrp = NULL;
3699 dma_p->alength = 0;
3700 return;
3701 }
3702 #endif
3703
3704 if (dma_p->kmem_alloc_type) {
3705 NXGE_DEBUG_MSG((NULL, DMA_CTL,
3706 "nxge_dma_free_rx_data_buf: free kmem "
3707 "kaddrp $%p (orig_kaddrp $%p)"
3708 "alloc type %d "
3709 "orig_alength %d "
3710 "alength 0x%x (%d)",
3711 dma_p->kaddrp,
3712 dma_p->orig_kaddrp,
3713 dma_p->kmem_alloc_type,
3714 dma_p->orig_alength,
3715 dma_p->alength, dma_p->alength));
3716 kaddr = (uint64_t)dma_p->kaddrp;
3717 buf_size = dma_p->orig_alength;
3718 NXGE_DEBUG_MSG((NULL, DMA_CTL,
3719 "nxge_dma_free_rx_data_buf: free dmap $%p "
3720 "kaddr $%p buf_size %d",
3721 dma_p,
3722 kaddr, buf_size));
3723 nxge_free_buf(KMEM_ALLOC, kaddr, buf_size);
3724 dma_p->alength = 0;
3725 dma_p->orig_alength = 0;
3726 dma_p->kaddrp = NULL;
3727 dma_p->kmem_alloc_type = B_FALSE;
3728 }
3729
3730 NXGE_DEBUG_MSG((NULL, DMA_CTL, "<== nxge_dma_free_rx_data_buf"));
3731 }
3732
3733 /*
3734 * nxge_m_start() -- start transmitting and receiving.
3735 *
3736	 * This function is called by the MAC layer when the first
3737	 * stream is opened, to prepare the hardware for sending
3738	 * and receiving packets.
3739 */
3740 static int
3741	nxge_m_start(void *arg)
3742 {
3743 p_nxge_t nxgep = (p_nxge_t)arg;
3744
3745 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_start"));
3746
3747 /*
3748 * Are we already started?
3749 */
3750 if (nxgep->nxge_mac_state == NXGE_MAC_STARTED) {
3751 return (0);
3752 }
3753
3754 if (nxge_peu_reset_enable && !nxgep->nxge_link_poll_timerid) {
3755 (void) nxge_link_monitor(nxgep, LINK_MONITOR_START);
3756 }
3757
3758 /*
3759 * Make sure RX MAC is disabled while we initialize.
3760 */
3761 if (!isLDOMguest(nxgep)) {
3762 (void) nxge_rx_mac_disable(nxgep);
3763 }
3764
3765 /*
3766 * Grab the global lock.
3767 */
3768 MUTEX_ENTER(nxgep->genlock);
3769
3770 /*
3771 * Initialize the driver and hardware.
3772 */
3773 if (nxge_init(nxgep) != NXGE_OK) {
3774 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3775 "<== nxge_m_start: initialization failed"));
3776 MUTEX_EXIT(nxgep->genlock);
3777 return (EIO);
3778 }
3779
3780 /*
3781	 * Start a timer to check for system errors and tx hangs.
3782 */
3783 if (!isLDOMguest(nxgep))
3784 nxgep->nxge_timerid = nxge_start_timer(nxgep,
3785 nxge_check_hw_state, NXGE_CHECK_TIMER);
3786 #if defined(sun4v)
3787 else
3788 nxge_hio_start_timer(nxgep);
3789 #endif
3790
3791 nxgep->link_notify = B_TRUE;
3792 nxgep->link_check_count = 0;
3793 nxgep->nxge_mac_state = NXGE_MAC_STARTED;
3794
3795 /*
3796	 * Let the global lock go, since we are initialized.
3797 */
3798 MUTEX_EXIT(nxgep->genlock);
3799
3800 /*
3801 * Let the MAC start receiving packets, now that
3802 * we are initialized.
3803 */
3804 if (!isLDOMguest(nxgep)) {
3805 if (nxge_rx_mac_enable(nxgep) != NXGE_OK) {
3806 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3807 "<== nxge_m_start: enable of RX mac failed"));
3808 return (EIO);
3809 }
3810
3811 /*
3812 * Enable hardware interrupts.
3813 */
3814 nxge_intr_hw_enable(nxgep);
3815 }
3816 #if defined(sun4v)
3817 else {
3818 /*
3819 * In guest domain we enable RDCs and their interrupts as
3820 * the last step.
3821 */
3822 if (nxge_hio_rdc_enable(nxgep) != NXGE_OK) {
3823 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3824 "<== nxge_m_start: enable of RDCs failed"));
3825 return (EIO);
3826 }
3827
3828 if (nxge_hio_rdc_intr_arm(nxgep, B_TRUE) != NXGE_OK) {
3829 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3830 "<== nxge_m_start: intrs enable for RDCs failed"));
3831 return (EIO);
3832 }
3833 }
3834 #endif
3835 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_start"));
3836 return (0);
3837 }
3838
3839 static boolean_t
3840	nxge_check_groups_stopped(p_nxge_t nxgep)
3841 {
3842 int i;
3843
3844 for (i = 0; i < NXGE_MAX_RDC_GROUPS; i++) {
3845 if (nxgep->rx_hio_groups[i].started)
3846 return (B_FALSE);
3847 }
3848
3849 return (B_TRUE);
3850 }
3851
3852 /*
3853 * nxge_m_stop(): stop transmitting and receiving.
3854 */
3855 static void
3856	nxge_m_stop(void *arg)
3857 {
3858 p_nxge_t nxgep = (p_nxge_t)arg;
3859 boolean_t groups_stopped;
3860
3861 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_stop"));
3862
3863 /*
3864 * Are the groups stopped?
3865 */
3866 groups_stopped = nxge_check_groups_stopped(nxgep);
3867 ASSERT(groups_stopped == B_TRUE);
3868 if (!groups_stopped) {
3869 cmn_err(CE_WARN, "nxge(%d): groups are not stopped!\n",
3870 nxgep->instance);
3871 return;
3872 }
3873
3874 if (!isLDOMguest(nxgep)) {
3875 /*
3876 * Disable the RX mac.
3877 */
3878 (void) nxge_rx_mac_disable(nxgep);
3879
3880 /*
3881 * Wait for the IPP to drain.
3882 */
3883 (void) nxge_ipp_drain(nxgep);
3884
3885 /*
3886 * Disable hardware interrupts.
3887 */
3888 nxge_intr_hw_disable(nxgep);
3889 }
3890 #if defined(sun4v)
3891 else {
3892 (void) nxge_hio_rdc_intr_arm(nxgep, B_FALSE);
3893 }
3894 #endif
3895
3896 /*
3897 * Grab the global lock.
3898 */
3899 MUTEX_ENTER(nxgep->genlock);
3900
3901 nxgep->nxge_mac_state = NXGE_MAC_STOPPING;
3902 if (nxgep->nxge_timerid) {
3903 nxge_stop_timer(nxgep, nxgep->nxge_timerid);
3904 nxgep->nxge_timerid = 0;
3905 }
3906
3907 /*
3908 * Clean up.
3909 */
3910 nxge_uninit(nxgep);
3911
3912 nxgep->nxge_mac_state = NXGE_MAC_STOPPED;
3913
3914 /*
3915 * Let go of the global lock.
3916 */
3917 MUTEX_EXIT(nxgep->genlock);
3918 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_stop"));
3919 }
3920
3921 static int
3922	nxge_m_multicst(void *arg, boolean_t add, const uint8_t *mca)
3923 {
3924 p_nxge_t nxgep = (p_nxge_t)arg;
3925 struct ether_addr addrp;
3926
3927 NXGE_DEBUG_MSG((nxgep, MAC_CTL,
3928 "==> nxge_m_multicst: add %d", add));
3929
3930 bcopy(mca, (uint8_t *)&addrp, ETHERADDRL);
3931 if (add) {
3932 if (nxge_add_mcast_addr(nxgep, &addrp)) {
3933 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3934 "<== nxge_m_multicst: add multicast failed"));
3935 return (EINVAL);
3936 }
3937 } else {
3938 if (nxge_del_mcast_addr(nxgep, &addrp)) {
3939 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3940 "<== nxge_m_multicst: del multicast failed"));
3941 return (EINVAL);
3942 }
3943 }
3944
3945 NXGE_DEBUG_MSG((nxgep, MAC_CTL, "<== nxge_m_multicst"));
3946
3947 return (0);
3948 }
3949
3950 static int
3951	nxge_m_promisc(void *arg, boolean_t on)
3952 {
3953 p_nxge_t nxgep = (p_nxge_t)arg;
3954
3955 NXGE_DEBUG_MSG((nxgep, MAC_CTL,
3956 "==> nxge_m_promisc: on %d", on));
3957
3958 if (nxge_set_promisc(nxgep, on)) {
3959 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3960 "<== nxge_m_promisc: set promisc failed"));
3961 return (EINVAL);
3962 }
3963
3964 NXGE_DEBUG_MSG((nxgep, MAC_CTL,
3965 "<== nxge_m_promisc: on %d", on));
3966
3967 return (0);
3968 }
3969
3970 static void
3971	nxge_m_ioctl(void *arg, queue_t *wq, mblk_t *mp)
3972 {
3973 p_nxge_t nxgep = (p_nxge_t)arg;
3974 struct iocblk *iocp;
3975 boolean_t need_privilege;
3976 int err;
3977 int cmd;
3978
3979 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_ioctl"));
3980
3981 iocp = (struct iocblk *)mp->b_rptr;
3982 iocp->ioc_error = 0;
3983 need_privilege = B_TRUE;
3984 cmd = iocp->ioc_cmd;
3985 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_ioctl: cmd 0x%08x", cmd));
3986 switch (cmd) {
3987 default:
3988 miocnak(wq, mp, 0, EINVAL);
3989 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_ioctl: invalid"));
3990 return;
3991
3992 case LB_GET_INFO_SIZE:
3993 case LB_GET_INFO:
3994 case LB_GET_MODE:
3995 need_privilege = B_FALSE;
3996 break;
3997 case LB_SET_MODE:
3998 break;
3999
4000
4001 case NXGE_GET_MII:
4002 case NXGE_PUT_MII:
4003 case NXGE_GET64:
4004 case NXGE_PUT64:
4005 case NXGE_GET_TX_RING_SZ:
4006 case NXGE_GET_TX_DESC:
4007 case NXGE_TX_SIDE_RESET:
4008 case NXGE_RX_SIDE_RESET:
4009 case NXGE_GLOBAL_RESET:
4010 case NXGE_RESET_MAC:
4011 case NXGE_TX_REGS_DUMP:
4012 case NXGE_RX_REGS_DUMP:
4013 case NXGE_INT_REGS_DUMP:
4014 case NXGE_VIR_INT_REGS_DUMP:
4015 case NXGE_PUT_TCAM:
4016 case NXGE_GET_TCAM:
4017 case NXGE_RTRACE:
4018 case NXGE_RDUMP:
4019 case NXGE_RX_CLASS:
4020 case NXGE_RX_HASH:
4021
4022 need_privilege = B_FALSE;
4023 break;
4024 case NXGE_INJECT_ERR:
4025 cmn_err(CE_NOTE, "!nxge_m_ioctl: Inject error\n");
4026 nxge_err_inject(nxgep, wq, mp);
4027 break;
4028 }
4029
4030 if (need_privilege) {
4031 err = secpolicy_net_config(iocp->ioc_cr, B_FALSE);
4032 if (err != 0) {
4033 miocnak(wq, mp, 0, err);
4034 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4035 "<== nxge_m_ioctl: no priv"));
4036 return;
4037 }
4038 }
4039
4040 switch (cmd) {
4041
4042 case LB_GET_MODE:
4043 case LB_SET_MODE:
4044 case LB_GET_INFO_SIZE:
4045 case LB_GET_INFO:
4046 nxge_loopback_ioctl(nxgep, wq, mp, iocp);
4047 break;
4048
4049 case NXGE_GET_MII:
4050 case NXGE_PUT_MII:
4051 case NXGE_PUT_TCAM:
4052 case NXGE_GET_TCAM:
4053 case NXGE_GET64:
4054 case NXGE_PUT64:
4055 case NXGE_GET_TX_RING_SZ:
4056 case NXGE_GET_TX_DESC:
4057 case NXGE_TX_SIDE_RESET:
4058 case NXGE_RX_SIDE_RESET:
4059 case NXGE_GLOBAL_RESET:
4060 case NXGE_RESET_MAC:
4061 case NXGE_TX_REGS_DUMP:
4062 case NXGE_RX_REGS_DUMP:
4063 case NXGE_INT_REGS_DUMP:
4064 case NXGE_VIR_INT_REGS_DUMP:
4065 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4066 "==> nxge_m_ioctl: cmd 0x%x", cmd));
4067 nxge_hw_ioctl(nxgep, wq, mp, iocp);
4068 break;
4069 case NXGE_RX_CLASS:
4070 if (nxge_rxclass_ioctl(nxgep, wq, mp->b_cont) < 0)
4071 miocnak(wq, mp, 0, EINVAL);
4072 else
4073 miocack(wq, mp, sizeof (rx_class_cfg_t), 0);
4074 break;
4075 case NXGE_RX_HASH:
4076
4077 if (nxge_rxhash_ioctl(nxgep, wq, mp->b_cont) < 0)
4078 miocnak(wq, mp, 0, EINVAL);
4079 else
4080 miocack(wq, mp, sizeof (cfg_cmd_t), 0);
4081 break;
4082 }
4083
4084 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_ioctl"));
4085 }
4086
4087 extern void nxge_rx_hw_blank(void *arg, time_t ticks, uint_t count);
4088
4089 void
4090	nxge_mmac_kstat_update(p_nxge_t nxgep, int slot, boolean_t factory)
4091 {
4092 p_nxge_mmac_stats_t mmac_stats;
4093 int i;
4094 nxge_mmac_t *mmac_info;
4095
4096 mmac_info = &nxgep->nxge_mmac_info;
4097
4098 mmac_stats = &nxgep->statsp->mmac_stats;
4099 mmac_stats->mmac_max_cnt = mmac_info->num_mmac;
4100 mmac_stats->mmac_avail_cnt = mmac_info->naddrfree;
4101
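	/*
	 * The address pools evidently store the octets in reverse
	 * order; the loop below copies them back into canonical
	 * transmission order for the kstat. For example, a pool entry
	 * holding 02:39:a8:4f:14:00 is reported as 00:14:4f:a8:39:02.
	 */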
4102 for (i = 0; i < ETHERADDRL; i++) {
4103 if (factory) {
4104 mmac_stats->mmac_avail_pool[slot-1].ether_addr_octet[i]
4105 = mmac_info->factory_mac_pool[slot][
4106 (ETHERADDRL-1) - i];
4107 } else {
4108 mmac_stats->mmac_avail_pool[slot-1].ether_addr_octet[i]
4109 = mmac_info->mac_pool[slot].addr[
4110 (ETHERADDRL - 1) - i];
4111 }
4112 }
4113 }
4114
4115 /*
4116 * nxge_altmac_set() -- Set an alternate MAC address
4117 */
4118 static int
4119	nxge_altmac_set(p_nxge_t nxgep, uint8_t *maddr, int slot,
4120 int rdctbl, boolean_t usetbl)
4121 {
4122 uint8_t addrn;
4123 uint8_t portn;
4124 npi_mac_addr_t altmac;
4125 hostinfo_t mac_rdc;
4126 p_nxge_class_pt_cfg_t clscfgp;
4127
4128
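	/*
	 * Pack the 48-bit MAC address big-endian into the three 16-bit
	 * register words. For example, 00:14:4f:a8:39:02 becomes:
	 *
	 *	altmac.w2 = 0x0014	(maddr[0], maddr[1])
	 *	altmac.w1 = 0x4fa8	(maddr[2], maddr[3])
	 *	altmac.w0 = 0x3902	(maddr[4], maddr[5])
	 */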
4129 altmac.w2 = ((uint16_t)maddr[0] << 8) | ((uint16_t)maddr[1] & 0x0ff);
4130 altmac.w1 = ((uint16_t)maddr[2] << 8) | ((uint16_t)maddr[3] & 0x0ff);
4131 altmac.w0 = ((uint16_t)maddr[4] << 8) | ((uint16_t)maddr[5] & 0x0ff);
4132
4133 portn = nxgep->mac.portnum;
4134 addrn = (uint8_t)slot - 1;
4135
4136 if (npi_mac_altaddr_entry(nxgep->npi_handle, OP_SET,
4137 nxgep->function_num, addrn, &altmac) != NPI_SUCCESS)
4138 return (EIO);
4139
4140 /*
4141 * Set the rdc table number for the host info entry
4142 * for this mac address slot.
4143 */
4144 clscfgp = (p_nxge_class_pt_cfg_t)&nxgep->class_config;
4145 mac_rdc.value = 0;
4146 if (usetbl)
4147 mac_rdc.bits.w0.rdc_tbl_num = rdctbl;
4148 else
4149 mac_rdc.bits.w0.rdc_tbl_num =
4150 clscfgp->mac_host_info[addrn].rdctbl;
4151 mac_rdc.bits.w0.mac_pref = clscfgp->mac_host_info[addrn].mpr_npr;
4152
4153 if (npi_mac_hostinfo_entry(nxgep->npi_handle, OP_SET,
4154 nxgep->function_num, addrn, &mac_rdc) != NPI_SUCCESS) {
4155 return (EIO);
4156 }
4157
4158 /*
4159 * Enable comparison with the alternate MAC address.
4160	 * The first alternate address is enabled by bit 1 of the
4161	 * BMAC_ALTAD_CMPEN register but by bit 0 of the XMAC_ADDR_CMPEN
4162	 * register, so slot needs to be converted to addrn accordingly
4163	 * before calling npi_mac_altaddr_enable.
4164 */
4165 if (portn == XMAC_PORT_0 || portn == XMAC_PORT_1)
4166 addrn = (uint8_t)slot - 1;
4167 else
4168 addrn = (uint8_t)slot;
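	/*
	 * For example, slot 1 becomes addrn 0 on an XMAC port but stays
	 * addrn 1 on a BMAC port.
	 */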
4169
4170 if (npi_mac_altaddr_enable(nxgep->npi_handle,
4171 nxgep->function_num, addrn) != NPI_SUCCESS) {
4172 return (EIO);
4173 }
4174
4175 return (0);
4176 }
4177
4178 /*
4179	 * nxge_m_mmac_add_g() - find an unused address slot, set the address
4180 * value to the one specified, enable the port to start filtering on
4181 * the new MAC address. Returns 0 on success.
4182 */
4183 int
4184	nxge_m_mmac_add_g(void *arg, const uint8_t *maddr, int rdctbl,
4185 boolean_t usetbl)
4186 {
4187 p_nxge_t nxgep = arg;
4188 int slot;
4189 nxge_mmac_t *mmac_info;
4190 int err;
4191 nxge_status_t status;
4192
4193 mutex_enter(nxgep->genlock);
4194
4195 /*
4196	 * Make sure that nxge is initialized if _start() has
4197 * not been called.
4198 */
4199 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
4200 status = nxge_init(nxgep);
4201 if (status != NXGE_OK) {
4202 mutex_exit(nxgep->genlock);
4203 return (ENXIO);
4204 }
4205 }
4206
4207 mmac_info = &nxgep->nxge_mmac_info;
4208 if (mmac_info->naddrfree == 0) {
4209 mutex_exit(nxgep->genlock);
4210 return (ENOSPC);
4211 }
4212
4213 /*
4214 * Search for the first available slot. Because naddrfree
4215 * is not zero, we are guaranteed to find one.
4216 * Each of the first two ports of Neptune has 16 alternate
4217 * MAC slots but only the first 7 (of 15) slots have assigned factory
4218	 * MAC addresses. The linear scan below may therefore land on
4219	 * a slot with a bundled factory MAC; that factory MAC is
4220	 * wasted while the slot is used with a user MAC address, but
4221	 * the slot can be used by its factory MAC again after
4222	 * nxge_m_mmac_remove and nxge_m_mmac_reserve are
4223	 * called.
4224 */
4225 for (slot = 0; slot <= mmac_info->num_mmac; slot++) {
4226 if (!(mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED))
4227 break;
4228 }
4229
4230 ASSERT(slot <= mmac_info->num_mmac);
4231
4232 if ((err = nxge_altmac_set(nxgep, (uint8_t *)maddr, slot, rdctbl,
4233 usetbl)) != 0) {
4234 mutex_exit(nxgep->genlock);
4235 return (err);
4236 }
4237
4238 bcopy(maddr, mmac_info->mac_pool[slot].addr, ETHERADDRL);
4239 mmac_info->mac_pool[slot].flags |= MMAC_SLOT_USED;
4240 mmac_info->mac_pool[slot].flags &= ~MMAC_VENDOR_ADDR;
4241 mmac_info->naddrfree--;
4242 nxge_mmac_kstat_update(nxgep, slot, B_FALSE);
4243
4244 mutex_exit(nxgep->genlock);
4245 return (0);
4246 }
4247
4248 /*
4249 * Remove the specified mac address and update the HW not to filter
4250 * the mac address anymore.
4251 */
4252 int
4253	nxge_m_mmac_remove(void *arg, int slot)
4254 {
4255 p_nxge_t nxgep = arg;
4256 nxge_mmac_t *mmac_info;
4257 uint8_t addrn;
4258 uint8_t portn;
4259 int err = 0;
4260 nxge_status_t status;
4261
4262 mutex_enter(nxgep->genlock);
4263
4264 /*
4265	 * Make sure that nxge is initialized if _start() has
4266 * not been called.
4267 */
4268 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
4269 status = nxge_init(nxgep);
4270 if (status != NXGE_OK) {
4271 mutex_exit(nxgep->genlock);
4272 return (ENXIO);
4273 }
4274 }
4275
4276 mmac_info = &nxgep->nxge_mmac_info;
4277 if (slot < 1 || slot > mmac_info->num_mmac) {
4278 mutex_exit(nxgep->genlock);
4279 return (EINVAL);
4280 }
4281
4282 portn = nxgep->mac.portnum;
4283 if (portn == XMAC_PORT_0 || portn == XMAC_PORT_1)
4284 addrn = (uint8_t)slot - 1;
4285 else
4286 addrn = (uint8_t)slot;
4287
4288 if (mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED) {
4289 if (npi_mac_altaddr_disable(nxgep->npi_handle, portn, addrn)
4290 == NPI_SUCCESS) {
4291 mmac_info->naddrfree++;
4292 mmac_info->mac_pool[slot].flags &= ~MMAC_SLOT_USED;
4293 /*
4294			 * Regardless of whether the MAC we just stopped
4295			 * filtering is a user addr or a factory addr, we must
4296			 * set the MMAC_VENDOR_ADDR flag if this slot has an
4297 * associated factory MAC to indicate that a factory
4298 * MAC is available.
4299 */
4300 if (slot <= mmac_info->num_factory_mmac) {
4301 mmac_info->mac_pool[slot].flags
4302 |= MMAC_VENDOR_ADDR;
4303 }
4304 /*
4305 * Clear mac_pool[slot].addr so that kstat shows 0
4306 * alternate MAC address if the slot is not used.
4307 * (But nxge_m_mmac_get returns the factory MAC even
4308 * when the slot is not used!)
4309 */
4310 bzero(mmac_info->mac_pool[slot].addr, ETHERADDRL);
4311 nxge_mmac_kstat_update(nxgep, slot, B_FALSE);
4312 } else {
4313 err = EIO;
4314 }
4315 } else {
4316 err = EINVAL;
4317 }
4318
4319 mutex_exit(nxgep->genlock);
4320 return (err);
4321 }
4322
4323 /*
4324 * The callback to query all the factory addresses. naddr must be the same as
4325 * the number of factory addresses (returned by MAC_CAPAB_MULTIFACTADDR), and
4326	 * mcm_addr is the space allocated for keeping all the addresses; its size is
4327 * naddr * MAXMACADDRLEN.
4328 */
4329 static void
4330	nxge_m_getfactaddr(void *arg, uint_t naddr, uint8_t *addr)
4331 {
4332 nxge_t *nxgep = arg;
4333 nxge_mmac_t *mmac_info;
4334 int i;
4335
4336 mutex_enter(nxgep->genlock);
4337
4338 mmac_info = &nxgep->nxge_mmac_info;
4339 ASSERT(naddr == mmac_info->num_factory_mmac);
4340
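	/*
	 * factory_mac_pool[] is indexed by slot number, which starts
	 * at 1 (hence the i + 1 below); entry 0 is apparently unused.
	 */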
4341 for (i = 0; i < naddr; i++) {
4342 bcopy(mmac_info->factory_mac_pool[i + 1],
4343 addr + i * MAXMACADDRLEN, ETHERADDRL);
4344 }
4345
4346 mutex_exit(nxgep->genlock);
4347 }
4348
4349
4350 static boolean_t
4351	nxge_m_getcapab(void *arg, mac_capab_t cap, void *cap_data)
4352 {
4353 nxge_t *nxgep = arg;
4354 uint32_t *txflags = cap_data;
4355
4356 switch (cap) {
4357 case MAC_CAPAB_HCKSUM:
4358 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4359 "==> nxge_m_getcapab: checksum %d", nxge_cksum_offload));
4360 if (nxge_cksum_offload <= 1) {
4361 *txflags = HCKSUM_INET_PARTIAL;
4362 }
4363 break;
4364
4365 case MAC_CAPAB_MULTIFACTADDR: {
4366 mac_capab_multifactaddr_t *mfacp = cap_data;
4367
4368 if (!isLDOMguest(nxgep)) {
4369 mutex_enter(nxgep->genlock);
4370 mfacp->mcm_naddr =
4371 nxgep->nxge_mmac_info.num_factory_mmac;
4372 mfacp->mcm_getaddr = nxge_m_getfactaddr;
4373 mutex_exit(nxgep->genlock);
4374 }
4375 break;
4376 }
4377
4378 case MAC_CAPAB_LSO: {
4379 mac_capab_lso_t *cap_lso = cap_data;
4380
4381 if (nxgep->soft_lso_enable) {
4382 if (nxge_cksum_offload <= 1) {
4383 cap_lso->lso_flags = LSO_TX_BASIC_TCP_IPV4;
4384 if (nxge_lso_max > NXGE_LSO_MAXLEN) {
4385 nxge_lso_max = NXGE_LSO_MAXLEN;
4386 }
4387 cap_lso->lso_basic_tcp_ipv4.lso_max =
4388 nxge_lso_max;
4389 }
4390 break;
4391 } else {
4392 return (B_FALSE);
4393 }
4394 }
4395
4396 case MAC_CAPAB_RINGS: {
4397 mac_capab_rings_t *cap_rings = cap_data;
4398 p_nxge_hw_pt_cfg_t p_cfgp = &nxgep->pt_config.hw_config;
4399
4400 mutex_enter(nxgep->genlock);
4401 if (cap_rings->mr_type == MAC_RING_TYPE_RX) {
4402 if (isLDOMguest(nxgep)) {
4403 cap_rings->mr_group_type =
4404 MAC_GROUP_TYPE_STATIC;
4405 cap_rings->mr_rnum =
4406 NXGE_HIO_SHARE_MAX_CHANNELS;
4407 cap_rings->mr_rget = nxge_fill_ring;
4408 cap_rings->mr_gnum = 1;
4409 cap_rings->mr_gget = nxge_hio_group_get;
4410 cap_rings->mr_gaddring = NULL;
4411 cap_rings->mr_gremring = NULL;
4412 } else {
4413 /*
4414 * Service Domain.
4415 */
4416 cap_rings->mr_group_type =
4417 MAC_GROUP_TYPE_DYNAMIC;
4418 cap_rings->mr_rnum = p_cfgp->max_rdcs;
4419 cap_rings->mr_rget = nxge_fill_ring;
4420 cap_rings->mr_gnum = p_cfgp->max_rdc_grpids;
4421 cap_rings->mr_gget = nxge_hio_group_get;
4422 cap_rings->mr_gaddring = nxge_group_add_ring;
4423 cap_rings->mr_gremring = nxge_group_rem_ring;
4424 }
4425
4426 NXGE_DEBUG_MSG((nxgep, RX_CTL,
4427 "==> nxge_m_getcapab: rx nrings[%d] ngroups[%d]",
4428 p_cfgp->max_rdcs, p_cfgp->max_rdc_grpids));
4429 } else {
4430 /*
4431 * TX Rings.
4432 */
4433 if (isLDOMguest(nxgep)) {
4434 cap_rings->mr_group_type =
4435 MAC_GROUP_TYPE_STATIC;
4436 cap_rings->mr_rnum =
4437 NXGE_HIO_SHARE_MAX_CHANNELS;
4438 cap_rings->mr_rget = nxge_fill_ring;
4439 cap_rings->mr_gnum = 0;
4440 cap_rings->mr_gget = NULL;
4441 cap_rings->mr_gaddring = NULL;
4442 cap_rings->mr_gremring = NULL;
4443 } else {
4444 /*
4445 * Service Domain.
4446 */
4447 cap_rings->mr_group_type =
4448 MAC_GROUP_TYPE_DYNAMIC;
4449 cap_rings->mr_rnum = p_cfgp->tdc.count;
4450 cap_rings->mr_rget = nxge_fill_ring;
4451
4452 /*
4453 * Share capable.
4454 *
4455 * Do not report the default group: hence -1
4456 */
4457 cap_rings->mr_gnum =
4458 NXGE_MAX_TDC_GROUPS / nxgep->nports - 1;
4459 cap_rings->mr_gget = nxge_hio_group_get;
4460 cap_rings->mr_gaddring = nxge_group_add_ring;
4461 cap_rings->mr_gremring = nxge_group_rem_ring;
4462 }
4463
4464 NXGE_DEBUG_MSG((nxgep, TX_CTL,
4465 "==> nxge_m_getcapab: tx rings # of rings %d",
4466 p_cfgp->tdc.count));
4467 }
4468 mutex_exit(nxgep->genlock);
4469 break;
4470 }
4471
4472 #if defined(sun4v)
4473 case MAC_CAPAB_SHARES: {
4474 mac_capab_share_t *mshares = (mac_capab_share_t *)cap_data;
4475
4476 /*
4477 * Only the service domain driver responds to
4478 * this capability request.
4479 */
4480 mutex_enter(nxgep->genlock);
4481 if (isLDOMservice(nxgep)) {
4482 mshares->ms_snum = 3;
4483 mshares->ms_handle = (void *)nxgep;
4484 mshares->ms_salloc = nxge_hio_share_alloc;
4485 mshares->ms_sfree = nxge_hio_share_free;
4486 mshares->ms_sadd = nxge_hio_share_add_group;
4487 mshares->ms_sremove = nxge_hio_share_rem_group;
4488 mshares->ms_squery = nxge_hio_share_query;
4489 mshares->ms_sbind = nxge_hio_share_bind;
4490 mshares->ms_sunbind = nxge_hio_share_unbind;
4491 mutex_exit(nxgep->genlock);
4492 } else {
4493 mutex_exit(nxgep->genlock);
4494 return (B_FALSE);
4495 }
4496 break;
4497 }
4498 #endif
4499 default:
4500 return (B_FALSE);
4501 }
4502 return (B_TRUE);
4503 }
4504
4505 static boolean_t
4506	nxge_param_locked(mac_prop_id_t pr_num)
4507 {
4508 /*
4509 * All adv_* parameters are locked (read-only) while
4510 * the device is in any sort of loopback mode ...
4511 */
4512 switch (pr_num) {
4513 case MAC_PROP_ADV_1000FDX_CAP:
4514 case MAC_PROP_EN_1000FDX_CAP:
4515 case MAC_PROP_ADV_1000HDX_CAP:
4516 case MAC_PROP_EN_1000HDX_CAP:
4517 case MAC_PROP_ADV_100FDX_CAP:
4518 case MAC_PROP_EN_100FDX_CAP:
4519 case MAC_PROP_ADV_100HDX_CAP:
4520 case MAC_PROP_EN_100HDX_CAP:
4521 case MAC_PROP_ADV_10FDX_CAP:
4522 case MAC_PROP_EN_10FDX_CAP:
4523 case MAC_PROP_ADV_10HDX_CAP:
4524 case MAC_PROP_EN_10HDX_CAP:
4525 case MAC_PROP_AUTONEG:
4526 case MAC_PROP_FLOWCTRL:
4527 return (B_TRUE);
4528 }
4529 return (B_FALSE);
4530 }
4531
4532 /*
4533 * callback functions for set/get of properties
4534 */
4535 static int
4536	nxge_m_setprop(void *barg, const char *pr_name, mac_prop_id_t pr_num,
4537 uint_t pr_valsize, const void *pr_val)
4538 {
4539 nxge_t *nxgep = barg;
4540 p_nxge_param_t param_arr = nxgep->param_arr;
4541 p_nxge_stats_t statsp = nxgep->statsp;
4542 int err = 0;
4543
4544 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_setprop"));
4545
4546 mutex_enter(nxgep->genlock);
4547 if (statsp->port_stats.lb_mode != nxge_lb_normal &&
4548 nxge_param_locked(pr_num)) {
4549 /*
4550 * All adv_* parameters are locked (read-only)
4551 * while the device is in any sort of loopback mode.
4552 */
4553 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4554 "==> nxge_m_setprop: loopback mode: read only"));
4555 mutex_exit(nxgep->genlock);
4556 return (EBUSY);
4557 }
4558
4559 switch (pr_num) {
4560 case MAC_PROP_EN_1000FDX_CAP:
4561 nxgep->param_en_1000fdx =
4562 param_arr[param_anar_1000fdx].value = *(uint8_t *)pr_val;
4563 goto reprogram;
4564
4565 case MAC_PROP_EN_100FDX_CAP:
4566 nxgep->param_en_100fdx =
4567 param_arr[param_anar_100fdx].value = *(uint8_t *)pr_val;
4568 goto reprogram;
4569
4570 case MAC_PROP_EN_10FDX_CAP:
4571 nxgep->param_en_10fdx =
4572 param_arr[param_anar_10fdx].value = *(uint8_t *)pr_val;
4573 goto reprogram;
4574
4575 case MAC_PROP_AUTONEG:
4576 param_arr[param_autoneg].value = *(uint8_t *)pr_val;
4577 goto reprogram;
4578
4579 case MAC_PROP_MTU: {
4580 uint32_t cur_mtu, new_mtu, old_framesize;
4581
4582 cur_mtu = nxgep->mac.default_mtu;
4583 ASSERT(pr_valsize >= sizeof (new_mtu));
4584 bcopy(pr_val, &new_mtu, sizeof (new_mtu));
4585
4586 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4587 "==> nxge_m_setprop: set MTU: %d is_jumbo %d",
4588 new_mtu, nxgep->mac.is_jumbo));
4589
4590 if (new_mtu == cur_mtu) {
4591 err = 0;
4592 break;
4593 }
4594
4595 if (nxgep->nxge_mac_state == NXGE_MAC_STARTED) {
4596 err = EBUSY;
4597 break;
4598 }
4599
4600 if ((new_mtu < NXGE_DEFAULT_MTU) ||
4601 (new_mtu > NXGE_MAXIMUM_MTU)) {
4602 err = EINVAL;
4603 break;
4604 }
4605
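		/*
		 * The maximum frame size is the MTU plus the Ethernet
		 * header, VLAN tag and CRC (NXGE_EHEADER_VLAN_CRC,
		 * presumably 14 + 4 + 4 = 22 bytes); e.g. an MTU of
		 * 1500 yields a maxframesize of 1522.
		 */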
4606 old_framesize = (uint32_t)nxgep->mac.maxframesize;
4607 nxgep->mac.maxframesize = (uint16_t)
4608 (new_mtu + NXGE_EHEADER_VLAN_CRC);
4609 if (nxge_mac_set_framesize(nxgep)) {
4610 nxgep->mac.maxframesize =
4611 (uint16_t)old_framesize;
4612 err = EINVAL;
4613 break;
4614 }
4615
4616 nxgep->mac.default_mtu = new_mtu;
4617 nxgep->mac.is_jumbo = (new_mtu > NXGE_DEFAULT_MTU);
4618
4619 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4620 "==> nxge_m_setprop: set MTU: %d maxframe %d",
4621 new_mtu, nxgep->mac.maxframesize));
4622 break;
4623 }
4624
4625 case MAC_PROP_FLOWCTRL: {
4626 link_flowctrl_t fl;
4627
4628 ASSERT(pr_valsize >= sizeof (fl));
4629 bcopy(pr_val, &fl, sizeof (fl));
4630
4631 switch (fl) {
4632 case LINK_FLOWCTRL_NONE:
4633 param_arr[param_anar_pause].value = 0;
4634 break;
4635
4636 case LINK_FLOWCTRL_RX:
4637 param_arr[param_anar_pause].value = 1;
4638 break;
4639
4640 case LINK_FLOWCTRL_TX:
4641 case LINK_FLOWCTRL_BI:
4642 err = EINVAL;
4643 break;
4644 default:
4645 err = EINVAL;
4646 break;
4647 }
4648 reprogram:
4649 if ((err == 0) && !isLDOMguest(nxgep)) {
4650 if (!nxge_param_link_update(nxgep)) {
4651 err = EINVAL;
4652 }
4653 } else {
4654 err = EINVAL;
4655 }
4656 break;
4657 }
4658
4659 case MAC_PROP_PRIVATE:
4660 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4661 "==> nxge_m_setprop: private property"));
4662 err = nxge_set_priv_prop(nxgep, pr_name, pr_valsize, pr_val);
4663 break;
4664
4665 default:
4666 err = ENOTSUP;
4667 break;
4668 }
4669
4670 mutex_exit(nxgep->genlock);
4671
4672 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4673 "<== nxge_m_setprop (return %d)", err));
4674 return (err);
4675 }
4676
4677 static int
4678	nxge_m_getprop(void *barg, const char *pr_name, mac_prop_id_t pr_num,
4679 uint_t pr_valsize, void *pr_val)
4680 {
4681 nxge_t *nxgep = barg;
4682 p_nxge_param_t param_arr = nxgep->param_arr;
4683 p_nxge_stats_t statsp = nxgep->statsp;
4684
4685 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4686 "==> nxge_m_getprop: pr_num %d", pr_num));
4687
4688 switch (pr_num) {
4689 case MAC_PROP_DUPLEX:
4690 *(uint8_t *)pr_val = statsp->mac_stats.link_duplex;
4691 break;
4692
4693 case MAC_PROP_SPEED: {
4694 uint64_t val = statsp->mac_stats.link_speed * 1000000ull;
4695
4696 ASSERT(pr_valsize >= sizeof (val));
4697 bcopy(&val, pr_val, sizeof (val));
4698 break;
4699 }
4700
4701 case MAC_PROP_STATUS: {
4702 link_state_t state = statsp->mac_stats.link_up ?
4703 LINK_STATE_UP : LINK_STATE_DOWN;
4704
4705 ASSERT(pr_valsize >= sizeof (state));
4706 bcopy(&state, pr_val, sizeof (state));
4707 break;
4708 }
4709
4710 case MAC_PROP_AUTONEG:
4711 *(uint8_t *)pr_val = param_arr[param_autoneg].value;
4712 break;
4713
4714 case MAC_PROP_FLOWCTRL: {
4715 link_flowctrl_t fl = param_arr[param_anar_pause].value != 0 ?
4716 LINK_FLOWCTRL_RX : LINK_FLOWCTRL_NONE;
4717
4718 ASSERT(pr_valsize >= sizeof (fl));
4719 bcopy(&fl, pr_val, sizeof (fl));
4720 break;
4721 }
4722
4723 case MAC_PROP_ADV_1000FDX_CAP:
4724 *(uint8_t *)pr_val = param_arr[param_anar_1000fdx].value;
4725 break;
4726
4727 case MAC_PROP_EN_1000FDX_CAP:
4728 *(uint8_t *)pr_val = nxgep->param_en_1000fdx;
4729 break;
4730
4731 case MAC_PROP_ADV_100FDX_CAP:
4732 *(uint8_t *)pr_val = param_arr[param_anar_100fdx].value;
4733 break;
4734
4735 case MAC_PROP_EN_100FDX_CAP:
4736 *(uint8_t *)pr_val = nxgep->param_en_100fdx;
4737 break;
4738
4739 case MAC_PROP_ADV_10FDX_CAP:
4740 *(uint8_t *)pr_val = param_arr[param_anar_10fdx].value;
4741 break;
4742
4743 case MAC_PROP_EN_10FDX_CAP:
4744 *(uint8_t *)pr_val = nxgep->param_en_10fdx;
4745 break;
4746
4747 case MAC_PROP_PRIVATE:
4748 return (nxge_get_priv_prop(nxgep, pr_name, pr_valsize,
4749 pr_val));
4750
4751 default:
4752 return (ENOTSUP);
4753 }
4754
4755 return (0);
4756 }
4757
4758 static void
4759	nxge_m_propinfo(void *barg, const char *pr_name, mac_prop_id_t pr_num,
4760 mac_prop_info_handle_t prh)
4761 {
4762 nxge_t *nxgep = barg;
4763 p_nxge_stats_t statsp = nxgep->statsp;
4764
4765 /*
4766 * By default permissions are read/write unless specified
4767 * otherwise by the driver.
4768 */
4769
4770 switch (pr_num) {
4771 case MAC_PROP_DUPLEX:
4772 case MAC_PROP_SPEED:
4773 case MAC_PROP_STATUS:
4774 case MAC_PROP_EN_1000HDX_CAP:
4775 case MAC_PROP_EN_100HDX_CAP:
4776 case MAC_PROP_EN_10HDX_CAP:
4777 case MAC_PROP_ADV_1000FDX_CAP:
4778 case MAC_PROP_ADV_1000HDX_CAP:
4779 case MAC_PROP_ADV_100FDX_CAP:
4780 case MAC_PROP_ADV_100HDX_CAP:
4781 case MAC_PROP_ADV_10FDX_CAP:
4782 case MAC_PROP_ADV_10HDX_CAP:
4783 /*
4784 * Note that read-only properties don't need to
4785 * provide default values since they cannot be
4786 * changed by the administrator.
4787 */
4788 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
4789 break;
4790
4791 case MAC_PROP_EN_1000FDX_CAP:
4792 case MAC_PROP_EN_100FDX_CAP:
4793 case MAC_PROP_EN_10FDX_CAP:
4794 mac_prop_info_set_default_uint8(prh, 1);
4795 break;
4796
4797 case MAC_PROP_AUTONEG:
4798 mac_prop_info_set_default_uint8(prh, 1);
4799 break;
4800
4801 case MAC_PROP_FLOWCTRL:
4802 mac_prop_info_set_default_link_flowctrl(prh, LINK_FLOWCTRL_RX);
4803 break;
4804
4805 case MAC_PROP_MTU:
4806 mac_prop_info_set_range_uint32(prh,
4807 NXGE_DEFAULT_MTU, NXGE_MAXIMUM_MTU);
4808 break;
4809
4810 case MAC_PROP_PRIVATE:
4811 nxge_priv_propinfo(pr_name, prh);
4812 break;
4813 }
4814
4815 mutex_enter(nxgep->genlock);
4816 if (statsp->port_stats.lb_mode != nxge_lb_normal &&
4817 nxge_param_locked(pr_num)) {
4818 /*
4819 * Some properties are locked (read-only) while the
4820 * device is in any sort of loopback mode.
4821 */
4822 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
4823 }
4824 mutex_exit(nxgep->genlock);
4825 }
4826
4827 static void
4828	nxge_priv_propinfo(const char *pr_name, mac_prop_info_handle_t prh)
4829 {
4830 char valstr[64];
4831
4832 bzero(valstr, sizeof (valstr));
4833
4834 if (strcmp(pr_name, "_function_number") == 0 ||
4835 strcmp(pr_name, "_fw_version") == 0 ||
4836 strcmp(pr_name, "_port_mode") == 0 ||
4837 strcmp(pr_name, "_hot_swap_phy") == 0) {
4838 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
4839
4840 } else if (strcmp(pr_name, "_rxdma_intr_time") == 0) {
4841 (void) snprintf(valstr, sizeof (valstr),
4842 "%d", RXDMA_RCR_TO_DEFAULT);
4843
4844 } else if (strcmp(pr_name, "_rxdma_intr_pkts") == 0) {
4845 (void) snprintf(valstr, sizeof (valstr),
4846 "%d", RXDMA_RCR_PTHRES_DEFAULT);
4847
4848 } else if (strcmp(pr_name, "_class_opt_ipv4_tcp") == 0 ||
4849 strcmp(pr_name, "_class_opt_ipv4_udp") == 0 ||
4850 strcmp(pr_name, "_class_opt_ipv4_ah") == 0 ||
4851 strcmp(pr_name, "_class_opt_ipv4_sctp") == 0 ||
4852 strcmp(pr_name, "_class_opt_ipv6_tcp") == 0 ||
4853 strcmp(pr_name, "_class_opt_ipv6_udp") == 0 ||
4854 strcmp(pr_name, "_class_opt_ipv6_ah") == 0 ||
4855 strcmp(pr_name, "_class_opt_ipv6_sctp") == 0) {
4856 (void) snprintf(valstr, sizeof (valstr), "%x",
4857 NXGE_CLASS_FLOW_GEN_SERVER);
4858
4859 } else if (strcmp(pr_name, "_soft_lso_enable") == 0) {
4860 (void) snprintf(valstr, sizeof (valstr), "%d", 0);
4861
4862 } else if (strcmp(pr_name, "_adv_10gfdx_cap") == 0) {
4863 (void) snprintf(valstr, sizeof (valstr), "%d", 1);
4864
4865 } else if (strcmp(pr_name, "_adv_pause_cap") == 0) {
4866 (void) snprintf(valstr, sizeof (valstr), "%d", 1);
4867 }
4868
4869 if (strlen(valstr) > 0)
4870 mac_prop_info_set_default_str(prh, valstr);
4871 }
4872
4873 /* ARGSUSED */
4874 static int
4875	nxge_set_priv_prop(p_nxge_t nxgep, const char *pr_name, uint_t pr_valsize,
4876 const void *pr_val)
4877 {
4878 p_nxge_param_t param_arr = nxgep->param_arr;
4879 int err = 0;
4880 long result;
4881
4882 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4883 "==> nxge_set_priv_prop: name %s", pr_name));
4884
4885 /* Blanking */
4886 if (strcmp(pr_name, "_rxdma_intr_time") == 0) {
4887 err = nxge_param_rx_intr_time(nxgep, NULL, NULL,
4888 (char *)pr_val,
4889		    (caddr_t)&param_arr[param_rxdma_intr_time]);
4890 if (err) {
4891 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4892 "<== nxge_set_priv_prop: "
4893 "unable to set (%s)", pr_name));
4894 err = EINVAL;
4895 } else {
4896 err = 0;
4897 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4898 "<== nxge_set_priv_prop: "
4899 "set (%s)", pr_name));
4900 }
4901
4902		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4903		    "<== nxge_set_priv_prop: name %s",
4904		    pr_name));
4905
4906 return (err);
4907 }
4908
4909 if (strcmp(pr_name, "_rxdma_intr_pkts") == 0) {
4910 err = nxge_param_rx_intr_pkts(nxgep, NULL, NULL,
4911 (char *)pr_val,
4912		    (caddr_t)&param_arr[param_rxdma_intr_pkts]);
4913 if (err) {
4914 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4915 "<== nxge_set_priv_prop: "
4916 "unable to set (%s)", pr_name));
4917 err = EINVAL;
4918 } else {
4919 err = 0;
4920 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4921 "<== nxge_set_priv_prop: "
4922 "set (%s)", pr_name));
4923 }
4924
4925		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4926		    "<== nxge_set_priv_prop: name %s",
4927		    pr_name));
4928
4929 return (err);
4930 }
4931
4932 /* Classification */
4933 if (strcmp(pr_name, "_class_opt_ipv4_tcp") == 0) {
4934 if (pr_val == NULL) {
4935 err = EINVAL;
4936 return (err);
4937 }
4938 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
4939
4940 err = nxge_param_set_ip_opt(nxgep, NULL,
4941 NULL, (char *)pr_val,
4942		    (caddr_t)&param_arr[param_class_opt_ipv4_tcp]);
4943
4944 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4945 "<== nxge_set_priv_prop: name %s (value 0x%x)",
4946 pr_name, result));
4947
4948 return (err);
4949 }
4950
4951 if (strcmp(pr_name, "_class_opt_ipv4_udp") == 0) {
4952 if (pr_val == NULL) {
4953 err = EINVAL;
4954 return (err);
4955 }
4956 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
4957
4958 err = nxge_param_set_ip_opt(nxgep, NULL,
4959 NULL, (char *)pr_val,
4960		    (caddr_t)&param_arr[param_class_opt_ipv4_udp]);
4961
4962 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4963 "<== nxge_set_priv_prop: name %s (value 0x%x)",
4964 pr_name, result));
4965
4966 return (err);
4967 }
4968 if (strcmp(pr_name, "_class_opt_ipv4_ah") == 0) {
4969 if (pr_val == NULL) {
4970 err = EINVAL;
4971 return (err);
4972 }
4973 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
4974
4975 err = nxge_param_set_ip_opt(nxgep, NULL,
4976 NULL, (char *)pr_val,
4977		    (caddr_t)&param_arr[param_class_opt_ipv4_ah]);
4978
4979 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4980 "<== nxge_set_priv_prop: name %s (value 0x%x)",
4981 pr_name, result));
4982
4983 return (err);
4984 }
4985 if (strcmp(pr_name, "_class_opt_ipv4_sctp") == 0) {
4986 if (pr_val == NULL) {
4987 err = EINVAL;
4988 return (err);
4989 }
4990 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
4991
4992 err = nxge_param_set_ip_opt(nxgep, NULL,
4993 NULL, (char *)pr_val,
4994		    (caddr_t)&param_arr[param_class_opt_ipv4_sctp]);
4995
4996 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4997 "<== nxge_set_priv_prop: name %s (value 0x%x)",
4998 pr_name, result));
4999
5000 return (err);
5001 }
5002
5003 if (strcmp(pr_name, "_class_opt_ipv6_tcp") == 0) {
5004 if (pr_val == NULL) {
5005 err = EINVAL;
5006 return (err);
5007 }
5008 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
5009
5010 err = nxge_param_set_ip_opt(nxgep, NULL,
5011 NULL, (char *)pr_val,
5012		    (caddr_t)&param_arr[param_class_opt_ipv6_tcp]);
5013
5014 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5015 "<== nxge_set_priv_prop: name %s (value 0x%x)",
5016 pr_name, result));
5017
5018 return (err);
5019 }
5020
5021 if (strcmp(pr_name, "_class_opt_ipv6_udp") == 0) {
5022 if (pr_val == NULL) {
5023 err = EINVAL;
5024 return (err);
5025 }
5026 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
5027
5028 err = nxge_param_set_ip_opt(nxgep, NULL,
5029 NULL, (char *)pr_val,
5030		    (caddr_t)&param_arr[param_class_opt_ipv6_udp]);
5031
5032 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5033 "<== nxge_set_priv_prop: name %s (value 0x%x)",
5034 pr_name, result));
5035
5036 return (err);
5037 }
5038 if (strcmp(pr_name, "_class_opt_ipv6_ah") == 0) {
5039 if (pr_val == NULL) {
5040 err = EINVAL;
5041 return (err);
5042 }
5043 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
5044
5045 err = nxge_param_set_ip_opt(nxgep, NULL,
5046 NULL, (char *)pr_val,
5047		    (caddr_t)&param_arr[param_class_opt_ipv6_ah]);
5048
5049 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5050 "<== nxge_set_priv_prop: name %s (value 0x%x)",
5051 pr_name, result));
5052
5053 return (err);
5054 }
5055 if (strcmp(pr_name, "_class_opt_ipv6_sctp") == 0) {
5056 if (pr_val == NULL) {
5057 err = EINVAL;
5058 return (err);
5059 }
5060 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
5061
5062 err = nxge_param_set_ip_opt(nxgep, NULL,
5063 NULL, (char *)pr_val,
5064		    (caddr_t)&param_arr[param_class_opt_ipv6_sctp]);
5065
5066 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5067 "<== nxge_set_priv_prop: name %s (value 0x%x)",
5068 pr_name, result));
5069
5070 return (err);
5071 }
5072
5073 if (strcmp(pr_name, "_soft_lso_enable") == 0) {
5074 if (pr_val == NULL) {
5075 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5076 "==> nxge_set_priv_prop: name %s (null)", pr_name));
5077 err = EINVAL;
5078 return (err);
5079 }
5080
5081 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
5082 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5083 "<== nxge_set_priv_prop: name %s "
5084 "(lso %d pr_val %s value %d)",
5085 pr_name, nxgep->soft_lso_enable, pr_val, result));
5086
5087		if (result > 1 || result < 0) {
5088			err = EINVAL;
5089		} else {
5090			if (nxgep->soft_lso_enable == (uint32_t)result) {
5091				NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5092				    "no change (%d %d)",
5093				    nxgep->soft_lso_enable, result));
5094				return (0);
5095			}
5096			/* Commit the new value only when it is valid. */
5097			nxgep->soft_lso_enable = (int)result;
5098		}
5099
5100 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5101 "<== nxge_set_priv_prop: name %s (value %d)",
5102 pr_name, result));
5103
5104 return (err);
5105 }
5106 /*
5107 * Commands like "ndd -set /dev/nxge0 adv_10gfdx_cap 1" cause the
5108 * following code to be executed.
5109 */
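	/*
	 * On stacks where ndd(8) has been retired, the same private
	 * properties can be driven through dladm(8) instead, e.g.
	 * (illustrative, assuming instance nxge0):
	 *
	 *	dladm set-linkprop -p _adv_pause_cap=1 nxge0
	 *	dladm show-linkprop -p _soft_lso_enable nxge0
	 */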
5110 if (strcmp(pr_name, "_adv_10gfdx_cap") == 0) {
5111 err = nxge_param_set_mac(nxgep, NULL, NULL, (char *)pr_val,
5112		    (caddr_t)&param_arr[param_anar_10gfdx]);
5113 return (err);
5114 }
5115 if (strcmp(pr_name, "_adv_pause_cap") == 0) {
5116 err = nxge_param_set_mac(nxgep, NULL, NULL, (char *)pr_val,
5117		    (caddr_t)&param_arr[param_anar_pause]);
5118 return (err);
5119 }
5120
5121 return (ENOTSUP);
5122 }
5123
5124 static int
5125	nxge_get_priv_prop(p_nxge_t nxgep, const char *pr_name, uint_t pr_valsize,
5126 void *pr_val)
5127 {
5128 p_nxge_param_t param_arr = nxgep->param_arr;
5129 char valstr[MAXNAMELEN];
5130 int err = ENOTSUP;
5131 uint_t strsize;
5132
5133 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5134 "==> nxge_get_priv_prop: property %s", pr_name));
5135
5136 /* function number */
5137 if (strcmp(pr_name, "_function_number") == 0) {
5138 (void) snprintf(valstr, sizeof (valstr), "%d",
5139 nxgep->function_num);
5140 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5141 "==> nxge_get_priv_prop: name %s "
5142 "(value %d valstr %s)",
5143 pr_name, nxgep->function_num, valstr));
5144
5145 err = 0;
5146 goto done;
5147 }
5148
5149 /* Neptune firmware version */
5150 if (strcmp(pr_name, "_fw_version") == 0) {
5151 (void) snprintf(valstr, sizeof (valstr), "%s",
5152 nxgep->vpd_info.ver);
5153 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5154 "==> nxge_get_priv_prop: name %s "
5155		    "(value %s valstr %s)",
5156 pr_name, nxgep->vpd_info.ver, valstr));
5157
5158 err = 0;
5159 goto done;
5160 }
5161
5162 /* port PHY mode */
5163 if (strcmp(pr_name, "_port_mode") == 0) {
5164 switch (nxgep->mac.portmode) {
5165 case PORT_1G_COPPER:
5166 (void) snprintf(valstr, sizeof (valstr), "1G copper %s",
5167 nxgep->hot_swappable_phy ?
5168 "[Hot Swappable]" : "");
5169 break;
5170 case PORT_1G_FIBER:
5171 (void) snprintf(valstr, sizeof (valstr), "1G fiber %s",
5172 nxgep->hot_swappable_phy ?
5173 "[hot swappable]" : "");
5174 break;
5175 case PORT_10G_COPPER:
5176 (void) snprintf(valstr, sizeof (valstr),
5177 "10G copper %s",
5178 nxgep->hot_swappable_phy ?
5179 "[hot swappable]" : "");
5180 break;
5181 case PORT_10G_FIBER:
5182 (void) snprintf(valstr, sizeof (valstr), "10G fiber %s",
5183 nxgep->hot_swappable_phy ?
5184 "[hot swappable]" : "");
5185 break;
5186 case PORT_10G_SERDES:
5187 (void) snprintf(valstr, sizeof (valstr),
5188 "10G serdes %s", nxgep->hot_swappable_phy ?
5189 "[hot swappable]" : "");
5190 break;
5191 case PORT_1G_SERDES:
5192 (void) snprintf(valstr, sizeof (valstr), "1G serdes %s",
5193 nxgep->hot_swappable_phy ?
5194 "[hot swappable]" : "");
5195 break;
5196 case PORT_1G_TN1010:
5197 (void) snprintf(valstr, sizeof (valstr),
5198 "1G TN1010 copper %s", nxgep->hot_swappable_phy ?
5199 "[hot swappable]" : "");
5200 break;
5201 case PORT_10G_TN1010:
5202 (void) snprintf(valstr, sizeof (valstr),
5203 "10G TN1010 copper %s", nxgep->hot_swappable_phy ?
5204 "[hot swappable]" : "");
5205 break;
5206 case PORT_1G_RGMII_FIBER:
5207 (void) snprintf(valstr, sizeof (valstr),
5208 "1G rgmii fiber %s", nxgep->hot_swappable_phy ?
5209 "[hot swappable]" : "");
5210 break;
5211 case PORT_HSP_MODE:
5212 (void) snprintf(valstr, sizeof (valstr),
5213 "phy not present[hot swappable]");
5214 break;
5215 default:
5216 (void) snprintf(valstr, sizeof (valstr), "unknown %s",
5217 nxgep->hot_swappable_phy ?
5218 "[hot swappable]" : "");
5219 break;
5220 }
5221
5222 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5223 "==> nxge_get_priv_prop: name %s (value %s)",
5224 pr_name, valstr));
5225
5226 err = 0;
5227 goto done;
5228 }
5229
5230 /* Hot swappable PHY */
5231 if (strcmp(pr_name, "_hot_swap_phy") == 0) {
5232 (void) snprintf(valstr, sizeof (valstr), "%s",
5233 nxgep->hot_swappable_phy ?
5234 "yes" : "no");
5235
5236 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5237 "==> nxge_get_priv_prop: name %s "
5238 "(value %d valstr %s)",
5239 pr_name, nxgep->hot_swappable_phy, valstr));
5240
5241 err = 0;
5242 goto done;
5243 }
5244
5245
5246 /* Receive Interrupt Blanking Parameters */
5247 if (strcmp(pr_name, "_rxdma_intr_time") == 0) {
5248 err = 0;
5249 (void) snprintf(valstr, sizeof (valstr), "%d",
5250 nxgep->intr_timeout);
5251 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5252 "==> nxge_get_priv_prop: name %s (value %d)",
5253 pr_name,
5254 (uint32_t)nxgep->intr_timeout));
5255 goto done;
5256 }
5257
5258 if (strcmp(pr_name, "_rxdma_intr_pkts") == 0) {
5259 err = 0;
5260 (void) snprintf(valstr, sizeof (valstr), "%d",
5261 nxgep->intr_threshold);
5262 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5263 "==> nxge_get_priv_prop: name %s (value %d)",
5264 pr_name, (uint32_t)nxgep->intr_threshold));
5265
5266 goto done;
5267 }
5268
5269 /* Classification and Load Distribution Configuration */
5270 if (strcmp(pr_name, "_class_opt_ipv4_tcp") == 0) {
5271 err = nxge_dld_get_ip_opt(nxgep,
5272		    (caddr_t)&param_arr[param_class_opt_ipv4_tcp]);
5273
5274 (void) snprintf(valstr, sizeof (valstr), "%x",
5275 (int)param_arr[param_class_opt_ipv4_tcp].value);
5276
5277 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5278 "==> nxge_get_priv_prop: %s", valstr));
5279 goto done;
5280 }
5281
5282 if (strcmp(pr_name, "_class_opt_ipv4_udp") == 0) {
5283 err = nxge_dld_get_ip_opt(nxgep,
5284		    (caddr_t)&param_arr[param_class_opt_ipv4_udp]);
5285
5286 (void) snprintf(valstr, sizeof (valstr), "%x",
5287 (int)param_arr[param_class_opt_ipv4_udp].value);
5288
5289 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5290 "==> nxge_get_priv_prop: %s", valstr));
5291 goto done;
5292 }
5293 if (strcmp(pr_name, "_class_opt_ipv4_ah") == 0) {
5294 err = nxge_dld_get_ip_opt(nxgep,
5295		    (caddr_t)&param_arr[param_class_opt_ipv4_ah]);
5296
5297 (void) snprintf(valstr, sizeof (valstr), "%x",
5298 (int)param_arr[param_class_opt_ipv4_ah].value);
5299
5300 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5301 "==> nxge_get_priv_prop: %s", valstr));
5302 goto done;
5303 }
5304
5305 if (strcmp(pr_name, "_class_opt_ipv4_sctp") == 0) {
5306 err = nxge_dld_get_ip_opt(nxgep,
5307		    (caddr_t)&param_arr[param_class_opt_ipv4_sctp]);
5308
5309 (void) snprintf(valstr, sizeof (valstr), "%x",
5310 (int)param_arr[param_class_opt_ipv4_sctp].value);
5311
5312 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5313 "==> nxge_get_priv_prop: %s", valstr));
5314 goto done;
5315 }
5316
5317 if (strcmp(pr_name, "_class_opt_ipv6_tcp") == 0) {
5318 err = nxge_dld_get_ip_opt(nxgep,
5319		    (caddr_t)&param_arr[param_class_opt_ipv6_tcp]);
5320
5321 (void) snprintf(valstr, sizeof (valstr), "%x",
5322 (int)param_arr[param_class_opt_ipv6_tcp].value);
5323
5324 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5325 "==> nxge_get_priv_prop: %s", valstr));
5326 goto done;
5327 }
5328
5329 if (strcmp(pr_name, "_class_opt_ipv6_udp") == 0) {
5330 err = nxge_dld_get_ip_opt(nxgep,
5331		    (caddr_t)&param_arr[param_class_opt_ipv6_udp]);
5332
5333 (void) snprintf(valstr, sizeof (valstr), "%x",
5334 (int)param_arr[param_class_opt_ipv6_udp].value);
5335
5336 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5337 "==> nxge_get_priv_prop: %s", valstr));
5338 goto done;
5339 }
5340
5341 if (strcmp(pr_name, "_class_opt_ipv6_ah") == 0) {
5342 err = nxge_dld_get_ip_opt(nxgep,
5343		    (caddr_t)&param_arr[param_class_opt_ipv6_ah]);
5344
5345 (void) snprintf(valstr, sizeof (valstr), "%x",
5346 (int)param_arr[param_class_opt_ipv6_ah].value);
5347
5348 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5349 "==> nxge_get_priv_prop: %s", valstr));
5350 goto done;
5351 }
5352
5353 if (strcmp(pr_name, "_class_opt_ipv6_sctp") == 0) {
5354 err = nxge_dld_get_ip_opt(nxgep,
5355		    (caddr_t)&param_arr[param_class_opt_ipv6_sctp]);
5356
5357 (void) snprintf(valstr, sizeof (valstr), "%x",
5358 (int)param_arr[param_class_opt_ipv6_sctp].value);
5359
5360 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5361 "==> nxge_get_priv_prop: %s", valstr));
5362 goto done;
5363 }
5364
5365 /* Software LSO */
5366 if (strcmp(pr_name, "_soft_lso_enable") == 0) {
5367 (void) snprintf(valstr, sizeof (valstr),
5368 "%d", nxgep->soft_lso_enable);
5369 err = 0;
5370 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5371 "==> nxge_get_priv_prop: name %s (value %d)",
5372 pr_name, nxgep->soft_lso_enable));
5373
5374 goto done;
5375 }
5376 if (strcmp(pr_name, "_adv_10gfdx_cap") == 0) {
5377 err = 0;
5378 if (nxgep->param_arr[param_anar_10gfdx].value != 0) {
5379 (void) snprintf(valstr, sizeof (valstr), "%d", 1);
5380 goto done;
5381 } else {
5382 (void) snprintf(valstr, sizeof (valstr), "%d", 0);
5383 goto done;
5384 }
5385 }
5386 if (strcmp(pr_name, "_adv_pause_cap") == 0) {
5387 err = 0;
5388 if (nxgep->param_arr[param_anar_pause].value != 0) {
5389 (void) snprintf(valstr, sizeof (valstr), "%d", 1);
5390 goto done;
5391 } else {
5392 (void) snprintf(valstr, sizeof (valstr), "%d", 0);
5393 goto done;
5394 }
5395 }
5396
5397 done:
5398 if (err == 0) {
5399 strsize = (uint_t)strlen(valstr);
5400		if (pr_valsize < strsize + 1) {	/* room for the NUL */
5401 err = ENOBUFS;
5402 } else {
5403 (void) strlcpy(pr_val, valstr, pr_valsize);
5404 }
5405 }
5406
5407 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5408 "<== nxge_get_priv_prop: return %d", err));
5409 return (err);
5410 }
5411
5412 /*
5413 * Module loading and removing entry points.
5414 */
5415
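/*
 * _init() registers the driver with the mac and DDI frameworks and
 * initializes the per-instance soft state; _fini() refuses to unload
 * while any mblks are still loaned to the stack (nxge_mblks_pending)
 * and otherwise unwinds _init() in reverse order.
 */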
5416 DDI_DEFINE_STREAM_OPS(nxge_dev_ops, nulldev, nulldev, nxge_attach, nxge_detach,
5417 nodev, NULL, D_MP, NULL, nxge_quiesce);
5418
5419 #define NXGE_DESC_VER "Sun NIU 10Gb Ethernet"
5420
5421 /*
5422 * Module linkage information for the kernel.
5423 */
5424 static struct modldrv nxge_modldrv = {
5425 &mod_driverops,
5426 NXGE_DESC_VER,
5427 &nxge_dev_ops
5428 };
5429
5430 static struct modlinkage modlinkage = {
5431 MODREV_1, (void *) &nxge_modldrv, NULL
5432 };
5433
5434 int
5435	_init(void)
5436 {
5437 int status;
5438
5439 MUTEX_INIT(&nxgedebuglock, NULL, MUTEX_DRIVER, NULL);
5440
5441 NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _init"));
5442
5443 mac_init_ops(&nxge_dev_ops, "nxge");
5444
5445 status = ddi_soft_state_init(&nxge_list, sizeof (nxge_t), 0);
5446 if (status != 0) {
5447 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL,
5448 "failed to init device soft state"));
5449 goto _init_exit;
5450 }
5451
5452 status = mod_install(&modlinkage);
5453 if (status != 0) {
5454 ddi_soft_state_fini(&nxge_list);
5455 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, "Mod install failed"));
5456 goto _init_exit;
5457 }
5458
5459 MUTEX_INIT(&nxge_common_lock, NULL, MUTEX_DRIVER, NULL);
5460
5461 NXGE_DEBUG_MSG((NULL, MOD_CTL, "<== _init status = 0x%X", status));
5462 return (status);
5463
5464 _init_exit:
5465 NXGE_DEBUG_MSG((NULL, MOD_CTL, "<== _init status = 0x%X", status));
5466 MUTEX_DESTROY(&nxgedebuglock);
5467 return (status);
5468 }
5469
5470 int
5471	_fini(void)
5472 {
5473 int status;
5474
5475 NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _fini"));
5476 NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _fini: mod_remove"));
5477
5478 if (nxge_mblks_pending)
5479 return (EBUSY);
5480
5481 status = mod_remove(&modlinkage);
5482 if (status != DDI_SUCCESS) {
5483 NXGE_DEBUG_MSG((NULL, MOD_CTL,
5484 "Module removal failed 0x%08x",
5485 status));
5486 goto _fini_exit;
5487 }
5488
5489 mac_fini_ops(&nxge_dev_ops);
5490
5491 ddi_soft_state_fini(&nxge_list);
5492
5493 NXGE_DEBUG_MSG((NULL, MOD_CTL, "<== _fini status = 0x%08x", status));
5494
5495 MUTEX_DESTROY(&nxge_common_lock);
5496 MUTEX_DESTROY(&nxgedebuglock);
5497 return (status);
5498
5499 _fini_exit:
5500 NXGE_DEBUG_MSG((NULL, MOD_CTL, "<== _fini status = 0x%08x", status));
5501 return (status);
5502 }
5503
5504 int
5505	_info(struct modinfo *modinfop)
5506 {
5507 int status;
5508
5509 NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _info"));
5510 status = mod_info(&modlinkage, modinfop);
5511 NXGE_DEBUG_MSG((NULL, MOD_CTL, " _info status = 0x%X", status));
5512
5513 return (status);
5514 }
5515
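/*
 * nxge_tx_ring_start()/nxge_tx_ring_stop() publish and withdraw the
 * MAC layer's ring handle on a TX ring; the handle is presumably what
 * the ring later uses to post TX updates back to the MAC layer (e.g.
 * via mac_tx_ring_update()).
 */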
5516 /*ARGSUSED*/
5517 static int
5518	nxge_tx_ring_start(mac_ring_driver_t rdriver, uint64_t mr_gen_num)
5519 {
5520 p_nxge_ring_handle_t rhp = (p_nxge_ring_handle_t)rdriver;
5521 p_nxge_t nxgep = rhp->nxgep;
5522 uint32_t channel;
5523 p_tx_ring_t ring;
5524
5525 channel = nxgep->pt_config.hw_config.tdc.start + rhp->index;
5526 ring = nxgep->tx_rings->rings[channel];
5527
5528 MUTEX_ENTER(&ring->lock);
5529 ASSERT(ring->tx_ring_handle == NULL);
5530 ring->tx_ring_handle = rhp->ring_handle;
5531 MUTEX_EXIT(&ring->lock);
5532
5533 return (0);
5534 }
5535
5536 static void
5537	nxge_tx_ring_stop(mac_ring_driver_t rdriver)
5538 {
5539 p_nxge_ring_handle_t rhp = (p_nxge_ring_handle_t)rdriver;
5540 p_nxge_t nxgep = rhp->nxgep;
5541 uint32_t channel;
5542 p_tx_ring_t ring;
5543
5544 channel = nxgep->pt_config.hw_config.tdc.start + rhp->index;
5545 ring = nxgep->tx_rings->rings[channel];
5546
5547 MUTEX_ENTER(&ring->lock);
5548 ASSERT(ring->tx_ring_handle != NULL);
5549 ring->tx_ring_handle = (mac_ring_handle_t)NULL;
5550 MUTEX_EXIT(&ring->lock);
5551 }
5552
5553 int
5554	nxge_rx_ring_start(mac_ring_driver_t rdriver, uint64_t mr_gen_num)
5555 {
5556 p_nxge_ring_handle_t rhp = (p_nxge_ring_handle_t)rdriver;
5557 p_nxge_t nxgep = rhp->nxgep;
5558 uint32_t channel;
5559 p_rx_rcr_ring_t ring;
5560 int i;
5561
5562 channel = nxgep->pt_config.hw_config.start_rdc + rhp->index;
5563 ring = nxgep->rx_rcr_rings->rcr_rings[channel];
5564
5565 MUTEX_ENTER(&ring->lock);
5566
5567 if (ring->started) {
5568 ASSERT(ring->started == B_FALSE);
5569 MUTEX_EXIT(&ring->lock);
5570 return (0);
5571 }
5572
5573 /* set rcr_ring */
5574 for (i = 0; i < nxgep->ldgvp->maxldvs; i++) {
5575 if ((nxgep->ldgvp->ldvp[i].is_rxdma) &&
5576 (nxgep->ldgvp->ldvp[i].channel == channel)) {
5577 ring->ldvp = &nxgep->ldgvp->ldvp[i];
5578 ring->ldgp = nxgep->ldgvp->ldvp[i].ldgp;
5579 }
5580 }
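	/*
	 * The loop above bound the ring to the logical device (ldv)
	 * servicing this RX channel, so the poll entry points can arm
	 * and disarm the correct interrupt group for the ring.
	 */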
5581
5582 ring->rcr_mac_handle = rhp->ring_handle;
5583 ring->rcr_gen_num = mr_gen_num;
5584 ring->started = B_TRUE;
5585 rhp->ring_gen_num = mr_gen_num;
5586 MUTEX_EXIT(&ring->lock);
5587
5588 return (0);
5589 }
5590
5591 static void
5592	nxge_rx_ring_stop(mac_ring_driver_t rdriver)
5593 {
5594 p_nxge_ring_handle_t rhp = (p_nxge_ring_handle_t)rdriver;
5595 p_nxge_t nxgep = rhp->nxgep;
5596 uint32_t channel;
5597 p_rx_rcr_ring_t ring;
5598
5599 channel = nxgep->pt_config.hw_config.start_rdc + rhp->index;
5600 ring = nxgep->rx_rcr_rings->rcr_rings[channel];
5601
5602 MUTEX_ENTER(&ring->lock);
5603 ASSERT(ring->started == B_TRUE);
5604 ring->rcr_mac_handle = NULL;
5605 ring->ldvp = NULL;
5606 ring->ldgp = NULL;
5607 ring->started = B_FALSE;
5608 MUTEX_EXIT(&ring->lock);
5609 }
5610
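/*
 * nxge_ring_get_htable_idx() -- map a DMA channel to the index of its
 * interrupt in the driver's interrupt handle table so that the MAC
 * layer can be handed the corresponding DDI interrupt handle (see the
 * mi_ddi_handle assignments in nxge_fill_ring()).
 */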
5611 static int
5612	nxge_ring_get_htable_idx(p_nxge_t nxgep, mac_ring_type_t type, uint32_t channel)
5613 {
5614 int i;
5615
5616 #if defined(sun4v)
5617 if (isLDOMguest(nxgep)) {
5618 return (nxge_hio_get_dc_htable_idx(nxgep,
5619 (type == MAC_RING_TYPE_TX) ? VP_BOUND_TX : VP_BOUND_RX,
5620 channel));
5621 }
5622 #endif
5623
5624 ASSERT(nxgep->ldgvp != NULL);
5625
5626 switch (type) {
5627 case MAC_RING_TYPE_TX:
5628 for (i = 0; i < nxgep->ldgvp->maxldvs; i++) {
5629 if ((nxgep->ldgvp->ldvp[i].is_txdma) &&
5630 (nxgep->ldgvp->ldvp[i].channel == channel)) {
5631 return ((int)
5632 nxgep->ldgvp->ldvp[i].ldgp->htable_idx);
5633 }
5634 }
5635 break;
5636
5637 case MAC_RING_TYPE_RX:
5638 for (i = 0; i < nxgep->ldgvp->maxldvs; i++) {
5639 if ((nxgep->ldgvp->ldvp[i].is_rxdma) &&
5640 (nxgep->ldgvp->ldvp[i].channel == channel)) {
5641 return ((int)
5642 nxgep->ldgvp->ldvp[i].ldgp->htable_idx);
5643 }
5644 }
5645 }
5646
5647 return (-1);
5648 }
5649
5650 /*
5651	 * Callback function for the MAC layer to register all rings.
5652 */
5653 static void
5654	nxge_fill_ring(void *arg, mac_ring_type_t rtype, const int rg_index,
5655 const int index, mac_ring_info_t *infop, mac_ring_handle_t rh)
5656 {
5657 p_nxge_t nxgep = (p_nxge_t)arg;
5658 p_nxge_hw_pt_cfg_t p_cfgp = &nxgep->pt_config.hw_config;
5659 p_nxge_intr_t intrp;
5660 uint32_t channel;
5661 int htable_idx;
5662 p_nxge_ring_handle_t rhandlep;
5663
5664 ASSERT(nxgep != NULL);
5665 ASSERT(p_cfgp != NULL);
5666 ASSERT(infop != NULL);
5667
5668 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
5669 "==> nxge_fill_ring 0x%x index %d", rtype, index));
5670
5671
5672 switch (rtype) {
5673 case MAC_RING_TYPE_TX: {
5674 mac_intr_t *mintr = &infop->mri_intr;
5675
5676 NXGE_DEBUG_MSG((nxgep, TX_CTL,
5677 "==> nxge_fill_ring (TX) 0x%x index %d ntdcs %d",
5678 rtype, index, p_cfgp->tdc.count));
5679
5680 ASSERT((index >= 0) && (index < p_cfgp->tdc.count));
5681 rhandlep = &nxgep->tx_ring_handles[index];
5682 rhandlep->nxgep = nxgep;
5683 rhandlep->index = index;
5684 rhandlep->ring_handle = rh;
5685
5686 channel = nxgep->pt_config.hw_config.tdc.start + index;
5687 rhandlep->channel = channel;
5688 intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;
5689 htable_idx = nxge_ring_get_htable_idx(nxgep, rtype,
5690 channel);
5691 if (htable_idx >= 0)
5692 mintr->mi_ddi_handle = intrp->htable[htable_idx];
5693 else
5694 mintr->mi_ddi_handle = NULL;
5695
5696 infop->mri_driver = (mac_ring_driver_t)rhandlep;
5697 infop->mri_start = nxge_tx_ring_start;
5698 infop->mri_stop = nxge_tx_ring_stop;
5699 infop->mri_tx = nxge_tx_ring_send;
5700 infop->mri_stat = nxge_tx_ring_stat;
5701 infop->mri_flags = MAC_RING_TX_SERIALIZE;
5702 break;
5703 }
5704
5705 case MAC_RING_TYPE_RX: {
5706 mac_intr_t nxge_mac_intr;
5707 int nxge_rindex;
5708 p_nxge_intr_t intrp;
5709
5710 intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;
5711
5712 NXGE_DEBUG_MSG((nxgep, RX_CTL,
5713 "==> nxge_fill_ring (RX) 0x%x index %d nrdcs %d",
5714 rtype, index, p_cfgp->max_rdcs));
5715
5716 /*
5717 * 'index' is the ring index within the group.
5718 * Find the ring index in the nxge instance.
5719 */
5720 nxge_rindex = nxge_get_rxring_index(nxgep, rg_index, index);
5721 channel = nxgep->pt_config.hw_config.start_rdc + index;
5722 intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;
5723
5724 ASSERT((nxge_rindex >= 0) && (nxge_rindex < p_cfgp->max_rdcs));
5725 rhandlep = &nxgep->rx_ring_handles[nxge_rindex];
5726 rhandlep->nxgep = nxgep;
5727 rhandlep->index = nxge_rindex;
5728 rhandlep->ring_handle = rh;
5729 rhandlep->channel = channel;
5730
5731 /*
5732 * Entrypoint to enable interrupt (disable poll) and
5733 * disable interrupt (enable poll).
5734 */
5735 bzero(&nxge_mac_intr, sizeof (nxge_mac_intr));
5736 nxge_mac_intr.mi_handle = (mac_intr_handle_t)rhandlep;
5737 nxge_mac_intr.mi_enable = (mac_intr_enable_t)nxge_disable_poll;
5738 nxge_mac_intr.mi_disable = (mac_intr_disable_t)nxge_enable_poll;
5739
5740 htable_idx = nxge_ring_get_htable_idx(nxgep, rtype,
5741 channel);
5742 if (htable_idx >= 0)
5743 nxge_mac_intr.mi_ddi_handle = intrp->htable[htable_idx];
5744 else
5745 nxge_mac_intr.mi_ddi_handle = NULL;
5746
5747 infop->mri_driver = (mac_ring_driver_t)rhandlep;
5748 infop->mri_start = nxge_rx_ring_start;
5749 infop->mri_stop = nxge_rx_ring_stop;
5750 infop->mri_intr = nxge_mac_intr;
5751 infop->mri_poll = nxge_rx_poll;
5752 infop->mri_stat = nxge_rx_ring_stat;
5753 infop->mri_flags = MAC_RING_RX_ENQUEUE;
5754 break;
5755 }
5756
5757 default:
5758 break;
5759 }
5760
5761 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_fill_ring 0x%x", rtype));
5762 }
5763
5764 static void
5765 nxge_group_add_ring(mac_group_driver_t gh, mac_ring_driver_t rh,
5766 mac_ring_type_t type)
5767 {
5768 nxge_ring_group_t *rgroup = (nxge_ring_group_t *)gh;
5769 nxge_ring_handle_t *rhandle = (nxge_ring_handle_t *)rh;
5770 nxge_t *nxge;
5771 nxge_grp_t *grp;
5772 nxge_rdc_grp_t *rdc_grp;
5773 	uint16_t		channel;	/* device-wide ring id */
5774 int dev_gindex;
5775 int rv;
5776
5777 nxge = rgroup->nxgep;
5778
5779 switch (type) {
5780 case MAC_RING_TYPE_TX:
5781 /*
5782 		 * nxge_grp_dc_add takes a channel number, which is a
5783 		 * device-wide ring ID.
5784 */
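		/*
		 * For example (hypothetical values): with tdc.start = 8,
		 * ring index 1 maps to device channel 9.
		 */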
5785 channel = nxge->pt_config.hw_config.tdc.start + rhandle->index;
5786
5787 /*
5788 * Remove the ring from the default group
5789 */
5790 if (rgroup->gindex != 0) {
5791 (void) nxge_grp_dc_remove(nxge, VP_BOUND_TX, channel);
5792 }
5793
5794 /*
5795 * nxge->tx_set.group[] is an array of groups indexed by
5796 * a "port" group ID.
5797 */
5798 grp = nxge->tx_set.group[rgroup->gindex];
5799 rv = nxge_grp_dc_add(nxge, grp, VP_BOUND_TX, channel);
5800 if (rv != 0) {
5801 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
5802 "nxge_group_add_ring: nxge_grp_dc_add failed"));
5803 }
5804 break;
5805
5806 case MAC_RING_TYPE_RX:
5807 /*
5808 * nxge->rx_set.group[] is an array of groups indexed by
5809 * a "port" group ID.
5810 */
5811 grp = nxge->rx_set.group[rgroup->gindex];
5812
5813 dev_gindex = nxge->pt_config.hw_config.def_mac_rxdma_grpid +
5814 rgroup->gindex;
5815 rdc_grp = &nxge->pt_config.rdc_grps[dev_gindex];
5816
5817 /*
5818 		 * nxge_grp_dc_add takes a channel number, which is a
5819 		 * device-wide ring ID.
5820 */
5821 channel = nxge->pt_config.hw_config.start_rdc + rhandle->index;
5822 rv = nxge_grp_dc_add(nxge, grp, VP_BOUND_RX, channel);
5823 if (rv != 0) {
5824 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
5825 "nxge_group_add_ring: nxge_grp_dc_add failed"));
5826 }
5827
5828 rdc_grp->map |= (1 << channel);
5829 rdc_grp->max_rdcs++;
5830
5831 (void) nxge_init_fzc_rdc_tbl(nxge, rdc_grp, rgroup->rdctbl);
5832 break;
5833 }
5834 }
5835
5836 static void
5837 nxge_group_rem_ring(mac_group_driver_t gh, mac_ring_driver_t rh,
5838 mac_ring_type_t type)
5839 {
5840 nxge_ring_group_t *rgroup = (nxge_ring_group_t *)gh;
5841 nxge_ring_handle_t *rhandle = (nxge_ring_handle_t *)rh;
5842 nxge_t *nxge;
5843 	uint16_t		channel;	/* device-wide ring id */
5844 nxge_rdc_grp_t *rdc_grp;
5845 int dev_gindex;
5846
5847 nxge = rgroup->nxgep;
5848
5849 switch (type) {
5850 case MAC_RING_TYPE_TX:
5851 dev_gindex = nxge->pt_config.hw_config.def_mac_txdma_grpid +
5852 rgroup->gindex;
5853 channel = nxge->pt_config.hw_config.tdc.start + rhandle->index;
5854 nxge_grp_dc_remove(nxge, VP_BOUND_TX, channel);
5855
5856 /*
5857 * Add the ring back to the default group
5858 */
5859 if (rgroup->gindex != 0) {
5860 nxge_grp_t *grp;
5861 grp = nxge->tx_set.group[0];
5862 (void) nxge_grp_dc_add(nxge, grp, VP_BOUND_TX, channel);
5863 }
5864 break;
5865
5866 case MAC_RING_TYPE_RX:
5867 dev_gindex = nxge->pt_config.hw_config.def_mac_rxdma_grpid +
5868 rgroup->gindex;
5869 rdc_grp = &nxge->pt_config.rdc_grps[dev_gindex];
5870 channel = rdc_grp->start_rdc + rhandle->index;
5871 nxge_grp_dc_remove(nxge, VP_BOUND_RX, channel);
5872
5873 rdc_grp->map &= ~(1 << channel);
5874 rdc_grp->max_rdcs--;
5875
5876 (void) nxge_init_fzc_rdc_tbl(nxge, rdc_grp, rgroup->rdctbl);
5877 break;
5878 }
5879 }
5880
5881
5882 /*ARGSUSED*/
5883 static nxge_status_t
5884 nxge_add_intrs(p_nxge_t nxgep)
5885 {
5886
5887 int intr_types;
5888 int type = 0;
5889 int ddi_status = DDI_SUCCESS;
5890 nxge_status_t status = NXGE_OK;
5891
5892 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs"));
5893
5894 nxgep->nxge_intr_type.intr_registered = B_FALSE;
5895 nxgep->nxge_intr_type.intr_enabled = B_FALSE;
5896 nxgep->nxge_intr_type.msi_intx_cnt = 0;
5897 nxgep->nxge_intr_type.intr_added = 0;
5898 nxgep->nxge_intr_type.niu_msi_enable = B_FALSE;
5899 nxgep->nxge_intr_type.intr_type = 0;
5900
5901 if (nxgep->niu_type == N2_NIU) {
5902 nxgep->nxge_intr_type.niu_msi_enable = B_TRUE;
5903 } else if (nxge_msi_enable) {
5904 nxgep->nxge_intr_type.niu_msi_enable = B_TRUE;
5905 }
5906
5907 /* Get the supported interrupt types */
5908 if ((ddi_status = ddi_intr_get_supported_types(nxgep->dip, &intr_types))
5909 != DDI_SUCCESS) {
5910 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_add_intrs: "
5911 "ddi_intr_get_supported_types failed: status 0x%08x",
5912 ddi_status));
5913 return (NXGE_ERROR | NXGE_DDI_FAILED);
5914 }
5915 nxgep->nxge_intr_type.intr_types = intr_types;
5916
5917 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
5918 "ddi_intr_get_supported_types: 0x%08x", intr_types));
5919
5920 	/*
5921 	 * Select the interrupt type to request, based on the
5922 	 * nxge_msi_enable tunable:
5923 	 *	1 - MSI		2 - MSI-X	others - FIXED
5924 	 */
5925 switch (nxge_msi_enable) {
5926 default:
5927 type = DDI_INTR_TYPE_FIXED;
5928 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: "
5929 "use fixed (intx emulation) type %08x",
5930 type));
5931 break;
5932
5933 case 2:
5934 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: "
5935 "ddi_intr_get_supported_types: 0x%08x", intr_types));
5936 if (intr_types & DDI_INTR_TYPE_MSIX) {
5937 type = DDI_INTR_TYPE_MSIX;
5938 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
5939 "ddi_intr_get_supported_types: MSIX 0x%08x",
5940 type));
5941 } else if (intr_types & DDI_INTR_TYPE_MSI) {
5942 type = DDI_INTR_TYPE_MSI;
5943 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
5944 "ddi_intr_get_supported_types: MSI 0x%08x",
5945 type));
5946 } else if (intr_types & DDI_INTR_TYPE_FIXED) {
5947 type = DDI_INTR_TYPE_FIXED;
5948 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: "
5949 			    "ddi_intr_get_supported_types: FIXED 0x%08x",
5950 type));
5951 }
5952 break;
5953
5954 case 1:
5955 if (intr_types & DDI_INTR_TYPE_MSI) {
5956 type = DDI_INTR_TYPE_MSI;
5957 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: "
5958 "ddi_intr_get_supported_types: MSI 0x%08x",
5959 type));
5960 } else if (intr_types & DDI_INTR_TYPE_MSIX) {
5961 type = DDI_INTR_TYPE_MSIX;
5962 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
5963 "ddi_intr_get_supported_types: MSIX 0x%08x",
5964 type));
5965 } else if (intr_types & DDI_INTR_TYPE_FIXED) {
5966 type = DDI_INTR_TYPE_FIXED;
5967 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
5968 			    "ddi_intr_get_supported_types: FIXED 0x%08x",
5969 type));
5970 }
5971 }
5972
5973 nxgep->nxge_intr_type.intr_type = type;
5974 if ((type == DDI_INTR_TYPE_MSIX || type == DDI_INTR_TYPE_MSI ||
5975 type == DDI_INTR_TYPE_FIXED) &&
5976 nxgep->nxge_intr_type.niu_msi_enable) {
5977 if ((status = nxge_add_intrs_adv(nxgep)) != DDI_SUCCESS) {
5978 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
5979 " nxge_add_intrs: "
5980 " nxge_add_intrs_adv failed: status 0x%08x",
5981 status));
5982 return (status);
5983 } else {
5984 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
5985 "interrupts registered : type %d", type));
5986 nxgep->nxge_intr_type.intr_registered = B_TRUE;
5987
5988 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
5989 "\nAdded advanced nxge add_intr_adv "
5990 "intr type 0x%x\n", type));
5991
5992 return (status);
5993 }
5994 }
5995
5996 if (!nxgep->nxge_intr_type.intr_registered) {
5997 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "==> nxge_add_intrs: "
5998 "failed to register interrupts"));
5999 return (NXGE_ERROR | NXGE_DDI_FAILED);
6000 }
6001
6002 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_add_intrs"));
6003 return (status);
6004 }
6005
6006 static nxge_status_t
6007 nxge_add_intrs_adv(p_nxge_t nxgep)
6008 {
6009 int intr_type;
6010 p_nxge_intr_t intrp;
6011
6012 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs_adv"));
6013
6014 intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;
6015 intr_type = intrp->intr_type;
6016 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs_adv: type 0x%x",
6017 intr_type));
6018
6019 switch (intr_type) {
6020 case DDI_INTR_TYPE_MSI: /* 0x2 */
6021 case DDI_INTR_TYPE_MSIX: /* 0x4 */
6022 return (nxge_add_intrs_adv_type(nxgep, intr_type));
6023
6024 case DDI_INTR_TYPE_FIXED: /* 0x1 */
6025 return (nxge_add_intrs_adv_type_fix(nxgep, intr_type));
6026
6027 default:
6028 return (NXGE_ERROR);
6029 }
6030 }
6031
6032
6033 /*ARGSUSED*/
6034 static nxge_status_t
6035 nxge_add_intrs_adv_type(p_nxge_t nxgep, uint32_t int_type)
6036 {
6037 dev_info_t *dip = nxgep->dip;
6038 p_nxge_ldg_t ldgp;
6039 p_nxge_intr_t intrp;
6040 ddi_intr_handler_t *inthandler;
6041 void *arg1, *arg2;
6042 int behavior;
6043 int nintrs, navail, nrequest;
6044 int nactual, nrequired;
6045 int inum = 0;
6046 int x, y;
6047 int ddi_status = DDI_SUCCESS;
6048 nxge_status_t status = NXGE_OK;
6049
6050 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs_adv_type"));
6051 intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;
6052 intrp->start_inum = 0;
6053
6054 ddi_status = ddi_intr_get_nintrs(dip, int_type, &nintrs);
6055 if ((ddi_status != DDI_SUCCESS) || (nintrs == 0)) {
6056 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
6057 		    "ddi_intr_get_nintrs() failed, status: 0x%x, "
6058 "nintrs: %d", ddi_status, nintrs));
6059 return (NXGE_ERROR | NXGE_DDI_FAILED);
6060 }
6061
6062 ddi_status = ddi_intr_get_navail(dip, int_type, &navail);
6063 if ((ddi_status != DDI_SUCCESS) || (navail == 0)) {
6064 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
6065 		    "ddi_intr_get_navail() failed, status: 0x%x, "
6066 		    "navail: %d", ddi_status, navail));
6067 return (NXGE_ERROR | NXGE_DDI_FAILED);
6068 }
6069
6070 NXGE_DEBUG_MSG((nxgep, INT_CTL,
6071 "ddi_intr_get_navail() returned: nintrs %d, navail %d",
6072 nintrs, navail));
6073
6074 /* PSARC/2007/453 MSI-X interrupt limit override */
6075 if (int_type == DDI_INTR_TYPE_MSIX) {
6076 nrequest = nxge_create_msi_property(nxgep);
6077 if (nrequest < navail) {
6078 navail = nrequest;
6079 NXGE_DEBUG_MSG((nxgep, INT_CTL,
6080 "nxge_add_intrs_adv_type: nintrs %d "
6081 "navail %d (nrequest %d)",
6082 nintrs, navail, nrequest));
6083 }
6084 }
6085
6086 if (int_type == DDI_INTR_TYPE_MSI && !ISP2(navail)) {
6087 /* MSI must be power of 2 */
6088 if ((navail & 16) == 16) {
6089 navail = 16;
6090 } else if ((navail & 8) == 8) {
6091 navail = 8;
6092 } else if ((navail & 4) == 4) {
6093 navail = 4;
6094 } else if ((navail & 2) == 2) {
6095 navail = 2;
6096 } else {
6097 navail = 1;
6098 }
6099 NXGE_DEBUG_MSG((nxgep, INT_CTL,
6100 "ddi_intr_get_navail(): (msi power of 2) nintrs %d, "
6101 "navail %d", nintrs, navail));
6102 }
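	/*
	 * Example: navail = 12 (binary 1100) is clamped to 8 above, and
	 * navail = 3 to 2.  The bit tests assume navail < 32; a
	 * non-power-of-two value above 31 would fall through to 1.
	 */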
6103
6104 behavior = ((int_type == DDI_INTR_TYPE_FIXED) ? DDI_INTR_ALLOC_STRICT :
6105 DDI_INTR_ALLOC_NORMAL);
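	/*
	 * DDI_INTR_ALLOC_STRICT asks ddi_intr_alloc(9F) for all "navail"
	 * vectors or none at all; DDI_INTR_ALLOC_NORMAL accepts fewer,
	 * so nactual may come back smaller than navail.
	 */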
6106 intrp->intr_size = navail * sizeof (ddi_intr_handle_t);
6107 intrp->htable = kmem_alloc(intrp->intr_size, KM_SLEEP);
6108 ddi_status = ddi_intr_alloc(dip, intrp->htable, int_type, inum,
6109 navail, &nactual, behavior);
6110 if (ddi_status != DDI_SUCCESS || nactual == 0) {
6111 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
6112 " ddi_intr_alloc() failed: %d",
6113 ddi_status));
6114 kmem_free(intrp->htable, intrp->intr_size);
6115 return (NXGE_ERROR | NXGE_DDI_FAILED);
6116 }
6117
6118 if ((ddi_status = ddi_intr_get_pri(intrp->htable[0],
6119 (uint_t *)&intrp->pri)) != DDI_SUCCESS) {
6120 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
6121 " ddi_intr_get_pri() failed: %d",
6122 ddi_status));
6123 /* Free already allocated interrupts */
6124 for (y = 0; y < nactual; y++) {
6125 (void) ddi_intr_free(intrp->htable[y]);
6126 }
6127
6128 kmem_free(intrp->htable, intrp->intr_size);
6129 return (NXGE_ERROR | NXGE_DDI_FAILED);
6130 }
6131
6132 nrequired = 0;
6133 switch (nxgep->niu_type) {
6134 default:
6135 status = nxge_ldgv_init(nxgep, &nactual, &nrequired);
6136 break;
6137
6138 case N2_NIU:
6139 status = nxge_ldgv_init_n2(nxgep, &nactual, &nrequired);
6140 break;
6141 }
6142
6143 if (status != NXGE_OK) {
6144 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
6145 		    "nxge_add_intrs_adv_type: nxge_ldgv_init "
6146 "failed: 0x%x", status));
6147 /* Free already allocated interrupts */
6148 for (y = 0; y < nactual; y++) {
6149 (void) ddi_intr_free(intrp->htable[y]);
6150 }
6151
6152 kmem_free(intrp->htable, intrp->intr_size);
6153 return (status);
6154 }
6155
6156 ldgp = nxgep->ldgvp->ldgp;
6157 for (x = 0; x < nrequired; x++, ldgp++) {
6158 ldgp->vector = (uint8_t)x;
6159 ldgp->intdata = SID_DATA(ldgp->func, x);
6160 arg1 = ldgp->ldvp;
6161 arg2 = nxgep;
6162 if (ldgp->nldvs == 1) {
6163 inthandler = ldgp->ldvp->ldv_intr_handler;
6164 NXGE_DEBUG_MSG((nxgep, INT_CTL,
6165 "nxge_add_intrs_adv_type: "
6166 "arg1 0x%x arg2 0x%x: "
6167 "1-1 int handler (entry %d intdata 0x%x)\n",
6168 arg1, arg2,
6169 x, ldgp->intdata));
6170 } else if (ldgp->nldvs > 1) {
6171 inthandler = ldgp->sys_intr_handler;
6172 NXGE_DEBUG_MSG((nxgep, INT_CTL,
6173 "nxge_add_intrs_adv_type: "
6174 "arg1 0x%x arg2 0x%x: "
6175 			    "nldvs %d int handler "
6176 "(entry %d intdata 0x%x)\n",
6177 arg1, arg2,
6178 ldgp->nldvs, x, ldgp->intdata));
6179 } else {
6180 inthandler = NULL;
6181 }
6182
6183 NXGE_DEBUG_MSG((nxgep, INT_CTL,
6184 "==> nxge_add_intrs_adv_type: ddi_add_intr(inum) #%d "
6185 "htable 0x%llx", x, intrp->htable[x]));
6186
6187 if ((ddi_status = ddi_intr_add_handler(intrp->htable[x],
6188 inthandler, arg1, arg2)) != DDI_SUCCESS) {
6189 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
6190 "==> nxge_add_intrs_adv_type: failed #%d "
6191 "status 0x%x", x, ddi_status));
6192 for (y = 0; y < intrp->intr_added; y++) {
6193 (void) ddi_intr_remove_handler(
6194 intrp->htable[y]);
6195 }
6196 /* Free already allocated intr */
6197 for (y = 0; y < nactual; y++) {
6198 (void) ddi_intr_free(intrp->htable[y]);
6199 }
6200 kmem_free(intrp->htable, intrp->intr_size);
6201
6202 (void) nxge_ldgv_uninit(nxgep);
6203
6204 return (NXGE_ERROR | NXGE_DDI_FAILED);
6205 }
6206
6207 ldgp->htable_idx = x;
6208 intrp->intr_added++;
6209 }
6210
6211 intrp->msi_intx_cnt = nactual;
6212
6213 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
6214 "Requested: %d, Allowed: %d msi_intx_cnt %d intr_added %d",
6215 navail, nactual,
6216 intrp->msi_intx_cnt,
6217 intrp->intr_added));
6218
6219 (void) ddi_intr_get_cap(intrp->htable[0], &intrp->intr_cap);
6220
6221 (void) nxge_intr_ldgv_init(nxgep);
6222
6223 NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_add_intrs_adv_type"));
6224
6225 return (status);
6226 }
6227
6228 /*ARGSUSED*/
6229 static nxge_status_t
6230 nxge_add_intrs_adv_type_fix(p_nxge_t nxgep, uint32_t int_type)
6231 {
6232 dev_info_t *dip = nxgep->dip;
6233 p_nxge_ldg_t ldgp;
6234 p_nxge_intr_t intrp;
6235 ddi_intr_handler_t *inthandler;
6236 void *arg1, *arg2;
6237 int behavior;
6238 int nintrs, navail;
6239 int nactual, nrequired;
6240 int inum = 0;
6241 int x, y;
6242 int ddi_status = DDI_SUCCESS;
6243 nxge_status_t status = NXGE_OK;
6244
6245 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs_adv_type_fix"));
6246 intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;
6247 intrp->start_inum = 0;
6248
6249 ddi_status = ddi_intr_get_nintrs(dip, int_type, &nintrs);
6250 if ((ddi_status != DDI_SUCCESS) || (nintrs == 0)) {
6251 NXGE_DEBUG_MSG((nxgep, INT_CTL,
6252 		    "ddi_intr_get_nintrs() failed, status: 0x%x, "
6253 		    "nintrs: %d", ddi_status, nintrs));
6254 return (NXGE_ERROR | NXGE_DDI_FAILED);
6255 }
6256
6257 ddi_status = ddi_intr_get_navail(dip, int_type, &navail);
6258 if ((ddi_status != DDI_SUCCESS) || (navail == 0)) {
6259 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
6260 		    "ddi_intr_get_navail() failed, status: 0x%x, "
6261 		    "navail: %d", ddi_status, navail));
6262 return (NXGE_ERROR | NXGE_DDI_FAILED);
6263 }
6264
6265 NXGE_DEBUG_MSG((nxgep, INT_CTL,
6266 	    "ddi_intr_get_navail() returned: nintrs %d, navail %d",
6267 nintrs, navail));
6268
6269 behavior = ((int_type == DDI_INTR_TYPE_FIXED) ? DDI_INTR_ALLOC_STRICT :
6270 DDI_INTR_ALLOC_NORMAL);
6271 intrp->intr_size = navail * sizeof (ddi_intr_handle_t);
6272 intrp->htable = kmem_alloc(intrp->intr_size, KM_SLEEP);
6273 ddi_status = ddi_intr_alloc(dip, intrp->htable, int_type, inum,
6274 navail, &nactual, behavior);
6275 if (ddi_status != DDI_SUCCESS || nactual == 0) {
6276 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
6277 " ddi_intr_alloc() failed: %d",
6278 ddi_status));
6279 kmem_free(intrp->htable, intrp->intr_size);
6280 return (NXGE_ERROR | NXGE_DDI_FAILED);
6281 }
6282
6283 if ((ddi_status = ddi_intr_get_pri(intrp->htable[0],
6284 (uint_t *)&intrp->pri)) != DDI_SUCCESS) {
6285 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
6286 " ddi_intr_get_pri() failed: %d",
6287 ddi_status));
6288 /* Free already allocated interrupts */
6289 for (y = 0; y < nactual; y++) {
6290 (void) ddi_intr_free(intrp->htable[y]);
6291 }
6292
6293 kmem_free(intrp->htable, intrp->intr_size);
6294 return (NXGE_ERROR | NXGE_DDI_FAILED);
6295 }
6296
6297 nrequired = 0;
6298 switch (nxgep->niu_type) {
6299 default:
6300 status = nxge_ldgv_init(nxgep, &nactual, &nrequired);
6301 break;
6302
6303 case N2_NIU:
6304 status = nxge_ldgv_init_n2(nxgep, &nactual, &nrequired);
6305 break;
6306 }
6307
6308 if (status != NXGE_OK) {
6309 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
6310 "nxge_add_intrs_adv_type_fix:nxge_ldgv_init "
6311 "failed: 0x%x", status));
6312 /* Free already allocated interrupts */
6313 for (y = 0; y < nactual; y++) {
6314 (void) ddi_intr_free(intrp->htable[y]);
6315 }
6316
6317 kmem_free(intrp->htable, intrp->intr_size);
6318 return (status);
6319 }
6320
6321 ldgp = nxgep->ldgvp->ldgp;
6322 for (x = 0; x < nrequired; x++, ldgp++) {
6323 ldgp->vector = (uint8_t)x;
6324 if (nxgep->niu_type != N2_NIU) {
6325 ldgp->intdata = SID_DATA(ldgp->func, x);
6326 }
6327
6328 arg1 = ldgp->ldvp;
6329 arg2 = nxgep;
6330 if (ldgp->nldvs == 1) {
6331 inthandler = ldgp->ldvp->ldv_intr_handler;
6332 NXGE_DEBUG_MSG((nxgep, INT_CTL,
6333 "nxge_add_intrs_adv_type_fix: "
6334 "1-1 int handler(%d) ldg %d ldv %d "
6335 "arg1 $%p arg2 $%p\n",
6336 x, ldgp->ldg, ldgp->ldvp->ldv,
6337 arg1, arg2));
6338 } else if (ldgp->nldvs > 1) {
6339 inthandler = ldgp->sys_intr_handler;
6340 NXGE_DEBUG_MSG((nxgep, INT_CTL,
6341 "nxge_add_intrs_adv_type_fix: "
6342 			    "shared ldv %d int handler(%d) ldv %d ldg %d "
6343 "arg1 0x%016llx arg2 0x%016llx\n",
6344 x, ldgp->nldvs, ldgp->ldg, ldgp->ldvp->ldv,
6345 arg1, arg2));
6346 } else {
6347 inthandler = NULL;
6348 }
6349
6350 if ((ddi_status = ddi_intr_add_handler(intrp->htable[x],
6351 inthandler, arg1, arg2)) != DDI_SUCCESS) {
6352 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
6353 "==> nxge_add_intrs_adv_type_fix: failed #%d "
6354 "status 0x%x", x, ddi_status));
6355 for (y = 0; y < intrp->intr_added; y++) {
6356 (void) ddi_intr_remove_handler(
6357 intrp->htable[y]);
6358 }
6359 			/* Free already allocated intr */
6360 			for (y = 0; y < nactual; y++) {
6361 				(void) ddi_intr_free(intrp->htable[y]);
6362 			}
6363 			kmem_free(intrp->htable, intrp->intr_size);
6364
6365 (void) nxge_ldgv_uninit(nxgep);
6366
6367 return (NXGE_ERROR | NXGE_DDI_FAILED);
6368 }
6369
6370 ldgp->htable_idx = x;
6371 intrp->intr_added++;
6372 }
6373
6374 intrp->msi_intx_cnt = nactual;
6375
6376 (void) ddi_intr_get_cap(intrp->htable[0], &intrp->intr_cap);
6377
6378 status = nxge_intr_ldgv_init(nxgep);
6379 NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_add_intrs_adv_type_fix"));
6380
6381 return (status);
6382 }
6383
6384 static void
6385 nxge_remove_intrs(p_nxge_t nxgep)
6386 {
6387 int i, inum;
6388 p_nxge_intr_t intrp;
6389
6390 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_remove_intrs"));
6391 intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;
6392 if (!intrp->intr_registered) {
6393 NXGE_DEBUG_MSG((nxgep, INT_CTL,
6394 "<== nxge_remove_intrs: interrupts not registered"));
6395 return;
6396 }
6397
6398 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_remove_intrs:advanced"));
6399
6400 if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) {
6401 (void) ddi_intr_block_disable(intrp->htable,
6402 intrp->intr_added);
6403 } else {
6404 for (i = 0; i < intrp->intr_added; i++) {
6405 (void) ddi_intr_disable(intrp->htable[i]);
6406 }
6407 }
6408
6409 for (inum = 0; inum < intrp->intr_added; inum++) {
6410 if (intrp->htable[inum]) {
6411 (void) ddi_intr_remove_handler(intrp->htable[inum]);
6412 }
6413 }
6414
6415 for (inum = 0; inum < intrp->msi_intx_cnt; inum++) {
6416 if (intrp->htable[inum]) {
6417 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
6418 "nxge_remove_intrs: ddi_intr_free inum %d "
6419 "msi_intx_cnt %d intr_added %d",
6420 inum,
6421 intrp->msi_intx_cnt,
6422 intrp->intr_added));
6423
6424 (void) ddi_intr_free(intrp->htable[inum]);
6425 }
6426 }
6427
6428 kmem_free(intrp->htable, intrp->intr_size);
6429 intrp->intr_registered = B_FALSE;
6430 intrp->intr_enabled = B_FALSE;
6431 intrp->msi_intx_cnt = 0;
6432 intrp->intr_added = 0;
6433
6434 (void) nxge_ldgv_uninit(nxgep);
6435
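	/*
	 * Remove the "#msix-request" property created earlier by
	 * nxge_create_msi_property().
	 */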
6436 (void) ddi_prop_remove(DDI_DEV_T_NONE, nxgep->dip,
6437 "#msix-request");
6438
6439 NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_remove_intrs"));
6440 }
6441
6442 /*ARGSUSED*/
6443 static void
6444 nxge_intrs_enable(p_nxge_t nxgep)
6445 {
6446 p_nxge_intr_t intrp;
6447 int i;
6448 int status;
6449
6450 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_enable"));
6451
6452 intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;
6453
6454 if (!intrp->intr_registered) {
6455 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_intrs_enable: "
6456 "interrupts are not registered"));
6457 return;
6458 }
6459
6460 if (intrp->intr_enabled) {
6461 NXGE_DEBUG_MSG((nxgep, INT_CTL,
6462 "<== nxge_intrs_enable: already enabled"));
6463 return;
6464 }
6465
6466 if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) {
6467 status = ddi_intr_block_enable(intrp->htable,
6468 intrp->intr_added);
6469 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_enable "
6470 "block enable - status 0x%x total inums #%d\n",
6471 status, intrp->intr_added));
6472 } else {
6473 for (i = 0; i < intrp->intr_added; i++) {
6474 status = ddi_intr_enable(intrp->htable[i]);
6475 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_enable "
6476 "ddi_intr_enable:enable - status 0x%x "
6477 "total inums %d enable inum #%d\n",
6478 status, intrp->intr_added, i));
6479 if (status == DDI_SUCCESS) {
6480 intrp->intr_enabled = B_TRUE;
6481 }
6482 }
6483 }
6484
6485 NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intrs_enable"));
6486 }
6487
6488 /*ARGSUSED*/
6489 static void
6490 nxge_intrs_disable(p_nxge_t nxgep)
6491 {
6492 p_nxge_intr_t intrp;
6493 int i;
6494
6495 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_disable"));
6496
6497 intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;
6498
6499 if (!intrp->intr_registered) {
6500 NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intrs_disable: "
6501 "interrupts are not registered"));
6502 return;
6503 }
6504
6505 if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) {
6506 (void) ddi_intr_block_disable(intrp->htable,
6507 intrp->intr_added);
6508 } else {
6509 for (i = 0; i < intrp->intr_added; i++) {
6510 (void) ddi_intr_disable(intrp->htable[i]);
6511 }
6512 }
6513
6514 intrp->intr_enabled = B_FALSE;
6515 NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intrs_disable"));
6516 }
6517
6518 nxge_status_t
6519 nxge_mac_register(p_nxge_t nxgep)
6520 {
6521 mac_register_t *macp;
6522 int status;
6523
6524 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_mac_register"));
6525
6526 if ((macp = mac_alloc(MAC_VERSION)) == NULL)
6527 return (NXGE_ERROR);
6528
6529 macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
6530 macp->m_driver = nxgep;
6531 macp->m_dip = nxgep->dip;
6532 if (!isLDOMguest(nxgep)) {
6533 macp->m_src_addr = nxgep->ouraddr.ether_addr_octet;
6534 } else {
6535 macp->m_src_addr = KMEM_ZALLOC(MAXMACADDRLEN, KM_SLEEP);
6536 macp->m_dst_addr = KMEM_ZALLOC(MAXMACADDRLEN, KM_SLEEP);
6537 (void) memset(macp->m_src_addr, 0xff, MAXMACADDRLEN);
6538 }
6539 macp->m_callbacks = &nxge_m_callbacks;
6540 macp->m_min_sdu = 0;
6541 nxgep->mac.default_mtu = nxgep->mac.maxframesize -
6542 NXGE_EHEADER_VLAN_CRC;
6543 macp->m_max_sdu = nxgep->mac.default_mtu;
6544 macp->m_margin = VLAN_TAGSZ;
6545 macp->m_priv_props = nxge_priv_props;
6546 if (isLDOMguest(nxgep))
6547 macp->m_v12n = MAC_VIRT_LEVEL1;
6548 else
6549 macp->m_v12n = MAC_VIRT_HIO | MAC_VIRT_LEVEL1;
6550
6551 NXGE_DEBUG_MSG((nxgep, MAC_CTL,
6552 "==> nxge_mac_register: instance %d "
6553 "max_sdu %d margin %d maxframe %d (header %d)",
6554 nxgep->instance,
6555 macp->m_max_sdu, macp->m_margin,
6556 nxgep->mac.maxframesize,
6557 NXGE_EHEADER_VLAN_CRC));
6558
6559 status = mac_register(macp, &nxgep->mach);
6560 if (isLDOMguest(nxgep)) {
6561 KMEM_FREE(macp->m_src_addr, MAXMACADDRLEN);
6562 KMEM_FREE(macp->m_dst_addr, MAXMACADDRLEN);
6563 }
6564 mac_free(macp);
6565
6566 if (status != 0) {
6567 cmn_err(CE_WARN,
6568 "!nxge_mac_register failed (status %d instance %d)",
6569 status, nxgep->instance);
6570 return (NXGE_ERROR);
6571 }
6572
6573 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_mac_register success "
6574 "(instance %d)", nxgep->instance));
6575
6576 return (NXGE_OK);
6577 }
6578
6579 void
6580 nxge_err_inject(p_nxge_t nxgep, queue_t *wq, mblk_t *mp)
6581 {
6582 ssize_t size;
6583 mblk_t *nmp;
6584 uint8_t blk_id;
6585 uint8_t chan;
6586 uint32_t err_id;
6587 err_inject_t *eip;
6588
6589 NXGE_DEBUG_MSG((nxgep, STR_CTL, "==> nxge_err_inject"));
6590
6591 size = 1024;
6592 nmp = mp->b_cont;
6593 eip = (err_inject_t *)nmp->b_rptr;
6594 blk_id = eip->blk_id;
6595 err_id = eip->err_id;
6596 chan = eip->chan;
6597 cmn_err(CE_NOTE, "!blk_id = 0x%x\n", blk_id);
6598 cmn_err(CE_NOTE, "!err_id = 0x%x\n", err_id);
6599 cmn_err(CE_NOTE, "!chan = 0x%x\n", chan);
6600 switch (blk_id) {
6601 case MAC_BLK_ID:
6602 break;
6603 case TXMAC_BLK_ID:
6604 break;
6605 case RXMAC_BLK_ID:
6606 break;
6607 case MIF_BLK_ID:
6608 break;
6609 case IPP_BLK_ID:
6610 nxge_ipp_inject_err(nxgep, err_id);
6611 break;
6612 case TXC_BLK_ID:
6613 nxge_txc_inject_err(nxgep, err_id);
6614 break;
6615 case TXDMA_BLK_ID:
6616 nxge_txdma_inject_err(nxgep, err_id, chan);
6617 break;
6618 case RXDMA_BLK_ID:
6619 nxge_rxdma_inject_err(nxgep, err_id, chan);
6620 break;
6621 case ZCP_BLK_ID:
6622 nxge_zcp_inject_err(nxgep, err_id);
6623 break;
6624 case ESPC_BLK_ID:
6625 break;
6626 case FFLP_BLK_ID:
6627 break;
6628 case PHY_BLK_ID:
6629 break;
6630 case ETHER_SERDES_BLK_ID:
6631 break;
6632 case PCIE_SERDES_BLK_ID:
6633 break;
6634 case VIR_BLK_ID:
6635 break;
6636 }
6637
6638 nmp->b_wptr = nmp->b_rptr + size;
6639 NXGE_DEBUG_MSG((nxgep, STR_CTL, "<== nxge_err_inject"));
6640
6641 miocack(wq, mp, (int)size, 0);
6642 }
6643
6644 static int
6645 nxge_init_common_dev(p_nxge_t nxgep)
6646 {
6647 p_nxge_hw_list_t hw_p;
6648 dev_info_t *p_dip;
6649
6650 ASSERT(nxgep != NULL);
6651
6652 NXGE_DEBUG_MSG((nxgep, MOD_CTL, "==> nxge_init_common_device"));
6653
6654 p_dip = nxgep->p_dip;
6655 MUTEX_ENTER(&nxge_common_lock);
6656 NXGE_DEBUG_MSG((nxgep, MOD_CTL,
6657 "==> nxge_init_common_dev:func # %d",
6658 nxgep->function_num));
6659 /*
6660 * Loop through existing per neptune hardware list.
6661 */
6662 for (hw_p = nxge_hw_list; hw_p; hw_p = hw_p->next) {
6663 NXGE_DEBUG_MSG((nxgep, MOD_CTL,
6664 "==> nxge_init_common_device:func # %d "
6665 "hw_p $%p parent dip $%p",
6666 nxgep->function_num,
6667 hw_p,
6668 p_dip));
6669 if (hw_p->parent_devp == p_dip) {
6670 nxgep->nxge_hw_p = hw_p;
6671 hw_p->ndevs++;
6672 hw_p->nxge_p[nxgep->function_num] = nxgep;
6673 NXGE_DEBUG_MSG((nxgep, MOD_CTL,
6674 "==> nxge_init_common_device:func # %d "
6675 "hw_p $%p parent dip $%p "
6676 "ndevs %d (found)",
6677 nxgep->function_num,
6678 hw_p,
6679 p_dip,
6680 hw_p->ndevs));
6681 break;
6682 }
6683 }
6684
6685 if (hw_p == NULL) {
6686
6687 char **prop_val;
6688 uint_t prop_len;
6689 int i;
6690
6691 NXGE_DEBUG_MSG((nxgep, MOD_CTL,
6692 "==> nxge_init_common_device:func # %d "
6693 "parent dip $%p (new)",
6694 nxgep->function_num,
6695 p_dip));
6696 hw_p = kmem_zalloc(sizeof (nxge_hw_list_t), KM_SLEEP);
6697 hw_p->parent_devp = p_dip;
6698 hw_p->magic = NXGE_NEPTUNE_MAGIC;
6699 nxgep->nxge_hw_p = hw_p;
6700 hw_p->ndevs++;
6701 hw_p->nxge_p[nxgep->function_num] = nxgep;
6702 hw_p->next = nxge_hw_list;
6703 if (nxgep->niu_type == N2_NIU) {
6704 hw_p->niu_type = N2_NIU;
6705 hw_p->platform_type = P_NEPTUNE_NIU;
6706 hw_p->tcam_size = TCAM_NIU_TCAM_MAX_ENTRY;
6707 } else {
6708 hw_p->niu_type = NIU_TYPE_NONE;
6709 hw_p->platform_type = P_NEPTUNE_NONE;
6710 hw_p->tcam_size = TCAM_NXGE_TCAM_MAX_ENTRY;
6711 }
6712
6713 hw_p->tcam = KMEM_ZALLOC(sizeof (tcam_flow_spec_t) *
6714 hw_p->tcam_size, KM_SLEEP);
6715
6716 MUTEX_INIT(&hw_p->nxge_cfg_lock, NULL, MUTEX_DRIVER, NULL);
6717 MUTEX_INIT(&hw_p->nxge_tcam_lock, NULL, MUTEX_DRIVER, NULL);
6718 MUTEX_INIT(&hw_p->nxge_vlan_lock, NULL, MUTEX_DRIVER, NULL);
6719 MUTEX_INIT(&hw_p->nxge_mdio_lock, NULL, MUTEX_DRIVER, NULL);
6720
6721 nxge_hw_list = hw_p;
6722
6723 if (ddi_prop_lookup_string_array(DDI_DEV_T_ANY, nxgep->dip, 0,
6724 "compatible", &prop_val, &prop_len) == DDI_PROP_SUCCESS) {
6725 for (i = 0; i < prop_len; i++) {
6726 if ((strcmp((caddr_t)prop_val[i],
6727 NXGE_ROCK_COMPATIBLE) == 0)) {
6728 hw_p->platform_type = P_NEPTUNE_ROCK;
6729 NXGE_DEBUG_MSG((nxgep, MOD_CTL,
6730 "ROCK hw_p->platform_type %d",
6731 hw_p->platform_type));
6732 break;
6733 }
6734 NXGE_DEBUG_MSG((nxgep, MOD_CTL,
6735 "nxge_init_common_dev: read compatible"
6736 " property[%d] val[%s]",
6737 i, (caddr_t)prop_val[i]));
6738 }
6739 			ddi_prop_free(prop_val);
6740 		}
6741 
6742
6743 (void) nxge_scan_ports_phy(nxgep, nxge_hw_list);
6744 }
6745
6746 MUTEX_EXIT(&nxge_common_lock);
6747
6748 nxgep->platform_type = hw_p->platform_type;
6749 NXGE_DEBUG_MSG((nxgep, MOD_CTL, "nxgep->platform_type %d",
6750 nxgep->platform_type));
6751 if (nxgep->niu_type != N2_NIU) {
6752 nxgep->niu_type = hw_p->niu_type;
6753 }
6754
6755 NXGE_DEBUG_MSG((nxgep, MOD_CTL,
6756 "==> nxge_init_common_device (nxge_hw_list) $%p",
6757 nxge_hw_list));
6758 NXGE_DEBUG_MSG((nxgep, MOD_CTL, "<== nxge_init_common_device"));
6759
6760 return (NXGE_OK);
6761 }
6762
6763 static void
6764 nxge_uninit_common_dev(p_nxge_t nxgep)
6765 {
6766 p_nxge_hw_list_t hw_p, h_hw_p;
6767 p_nxge_dma_pt_cfg_t p_dma_cfgp;
6768 p_nxge_hw_pt_cfg_t p_cfgp;
6769 dev_info_t *p_dip;
6770
6771 ASSERT(nxgep != NULL);
6772
6773 NXGE_DEBUG_MSG((nxgep, MOD_CTL, "==> nxge_uninit_common_device"));
6774 if (nxgep->nxge_hw_p == NULL) {
6775 NXGE_DEBUG_MSG((nxgep, MOD_CTL,
6776 "<== nxge_uninit_common_device (no common)"));
6777 return;
6778 }
6779
6780 MUTEX_ENTER(&nxge_common_lock);
6781 h_hw_p = nxge_hw_list;
6782 for (hw_p = nxge_hw_list; hw_p; hw_p = hw_p->next) {
6783 p_dip = hw_p->parent_devp;
6784 if (nxgep->nxge_hw_p == hw_p &&
6785 p_dip == nxgep->p_dip &&
6786 nxgep->nxge_hw_p->magic == NXGE_NEPTUNE_MAGIC &&
6787 hw_p->magic == NXGE_NEPTUNE_MAGIC) {
6788
6789 NXGE_DEBUG_MSG((nxgep, MOD_CTL,
6790 "==> nxge_uninit_common_device:func # %d "
6791 "hw_p $%p parent dip $%p "
6792 "ndevs %d (found)",
6793 nxgep->function_num,
6794 hw_p,
6795 p_dip,
6796 hw_p->ndevs));
6797
6798 /*
6799 			 * Release the RDC table, a shared resource
6800 * of the nxge hardware. The RDC table was
6801 * assigned to this instance of nxge in
6802 * nxge_use_cfg_dma_config().
6803 */
6804 if (!isLDOMguest(nxgep)) {
6805 p_dma_cfgp =
6806 (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;
6807 p_cfgp =
6808 (p_nxge_hw_pt_cfg_t)&p_dma_cfgp->hw_config;
6809 (void) nxge_fzc_rdc_tbl_unbind(nxgep,
6810 p_cfgp->def_mac_rxdma_grpid);
6811
6812 /* Cleanup any outstanding groups. */
6813 nxge_grp_cleanup(nxgep);
6814 }
6815
6816 if (hw_p->ndevs) {
6817 hw_p->ndevs--;
6818 }
6819 hw_p->nxge_p[nxgep->function_num] = NULL;
6820 if (!hw_p->ndevs) {
6821 KMEM_FREE(hw_p->tcam,
6822 sizeof (tcam_flow_spec_t) *
6823 hw_p->tcam_size);
6824 MUTEX_DESTROY(&hw_p->nxge_vlan_lock);
6825 MUTEX_DESTROY(&hw_p->nxge_tcam_lock);
6826 MUTEX_DESTROY(&hw_p->nxge_cfg_lock);
6827 MUTEX_DESTROY(&hw_p->nxge_mdio_lock);
6828 NXGE_DEBUG_MSG((nxgep, MOD_CTL,
6829 "==> nxge_uninit_common_device: "
6830 "func # %d "
6831 "hw_p $%p parent dip $%p "
6832 "ndevs %d (last)",
6833 nxgep->function_num,
6834 hw_p,
6835 p_dip,
6836 hw_p->ndevs));
6837
6838 nxge_hio_uninit(nxgep);
6839
6840 if (hw_p == nxge_hw_list) {
6841 NXGE_DEBUG_MSG((nxgep, MOD_CTL,
6842 "==> nxge_uninit_common_device:"
6843 "remove head func # %d "
6844 "hw_p $%p parent dip $%p "
6845 "ndevs %d (head)",
6846 nxgep->function_num,
6847 hw_p,
6848 p_dip,
6849 hw_p->ndevs));
6850 nxge_hw_list = hw_p->next;
6851 } else {
6852 NXGE_DEBUG_MSG((nxgep, MOD_CTL,
6853 "==> nxge_uninit_common_device:"
6854 "remove middle func # %d "
6855 "hw_p $%p parent dip $%p "
6856 "ndevs %d (middle)",
6857 nxgep->function_num,
6858 hw_p,
6859 p_dip,
6860 hw_p->ndevs));
6861 h_hw_p->next = hw_p->next;
6862 }
6863
6864 nxgep->nxge_hw_p = NULL;
6865 KMEM_FREE(hw_p, sizeof (nxge_hw_list_t));
6866 }
6867 break;
6868 } else {
6869 h_hw_p = hw_p;
6870 }
6871 }
6872
6873 MUTEX_EXIT(&nxge_common_lock);
6874 NXGE_DEBUG_MSG((nxgep, MOD_CTL,
6875 "==> nxge_uninit_common_device (nxge_hw_list) $%p",
6876 nxge_hw_list));
6877
6878 	NXGE_DEBUG_MSG((nxgep, MOD_CTL, "<== nxge_uninit_common_device"));
6879 }
6880
6881 /*
6882 * Determines the number of ports from the niu_type or the platform type.
6883 * Returns the number of ports, or returns zero on failure.
6884 */
6885
6886 int
6887 nxge_get_nports(p_nxge_t nxgep)
6888 {
6889 int nports = 0;
6890
6891 switch (nxgep->niu_type) {
6892 case N2_NIU:
6893 case NEPTUNE_2_10GF:
6894 nports = 2;
6895 break;
6896 case NEPTUNE_4_1GC:
6897 case NEPTUNE_2_10GF_2_1GC:
6898 case NEPTUNE_1_10GF_3_1GC:
6899 case NEPTUNE_1_1GC_1_10GF_2_1GC:
6900 case NEPTUNE_2_10GF_2_1GRF:
6901 nports = 4;
6902 break;
6903 default:
6904 switch (nxgep->platform_type) {
6905 case P_NEPTUNE_NIU:
6906 case P_NEPTUNE_ATLAS_2PORT:
6907 nports = 2;
6908 break;
6909 case P_NEPTUNE_ATLAS_4PORT:
6910 case P_NEPTUNE_MARAMBA_P0:
6911 case P_NEPTUNE_MARAMBA_P1:
6912 case P_NEPTUNE_ROCK:
6913 case P_NEPTUNE_ALONSO:
6914 nports = 4;
6915 break;
6916 default:
6917 break;
6918 }
6919 break;
6920 }
6921
6922 return (nports);
6923 }
6924
6925 /*
6926 * The following two functions are to support
6927 * PSARC/2007/453 MSI-X interrupt limit override.
6928 */
6929 static int
6930 nxge_create_msi_property(p_nxge_t nxgep)
6931 {
6932 int nmsi;
6933 extern int ncpus;
6934
6935 NXGE_DEBUG_MSG((nxgep, MOD_CTL, "==>nxge_create_msi_property"));
6936
6937 switch (nxgep->mac.portmode) {
6938 case PORT_10G_COPPER:
6939 case PORT_10G_FIBER:
6940 case PORT_10G_TN1010:
6941 (void) ddi_prop_create(DDI_DEV_T_NONE, nxgep->dip,
6942 DDI_PROP_CANSLEEP, "#msix-request", NULL, 0);
6943 		/*
6944 		 * At most 8 MSI-X vectors are requested by default.
6945 		 * If there are fewer than 8 CPUs, the request is
6946 		 * scaled down to the number of CPUs.
6947 		 */
6948 NXGE_DEBUG_MSG((nxgep, MOD_CTL,
6949 "==>nxge_create_msi_property (10G): nxge_msix_10g_intrs %d",
6950 nxge_msix_10g_intrs));
6951 if ((nxge_msix_10g_intrs == 0) ||
6952 (nxge_msix_10g_intrs > NXGE_MSIX_MAX_ALLOWED)) {
6953 nmsi = NXGE_MSIX_REQUEST_10G;
6954 NXGE_DEBUG_MSG((nxgep, MOD_CTL,
6955 "==>nxge_create_msi_property (10G): reset to 8"));
6956 } else {
6957 nmsi = nxge_msix_10g_intrs;
6958 }
6959
6960 		/*
6961 		 * If the number of interrupts requested is the
6962 		 * default (8), it is still capped at the number
6963 		 * of CPUs.
6964 		 */
6965 if ((nmsi == NXGE_MSIX_REQUEST_10G) &&
6966 (ncpus < nmsi)) {
6967 NXGE_DEBUG_MSG((nxgep, MOD_CTL,
6968 			    "==>nxge_create_msi_property (10G): cap at ncpus"));
6969 nmsi = ncpus;
6970 }
6971 NXGE_DEBUG_MSG((nxgep, MOD_CTL,
6972 "==>nxge_create_msi_property(10G): exists 0x%x (nmsi %d)",
6973 ddi_prop_exists(DDI_DEV_T_NONE, nxgep->dip,
6974 DDI_PROP_CANSLEEP, "#msix-request"), nmsi));
6975 break;
6976
6977 default:
6978 (void) ddi_prop_create(DDI_DEV_T_NONE, nxgep->dip,
6979 DDI_PROP_CANSLEEP, "#msix-request", NULL, 0);
6980 NXGE_DEBUG_MSG((nxgep, MOD_CTL,
6981 "==>nxge_create_msi_property (1G): nxge_msix_1g_intrs %d",
6982 nxge_msix_1g_intrs));
6983 if ((nxge_msix_1g_intrs == 0) ||
6984 (nxge_msix_1g_intrs > NXGE_MSIX_MAX_ALLOWED)) {
6985 nmsi = NXGE_MSIX_REQUEST_1G;
6986 NXGE_DEBUG_MSG((nxgep, MOD_CTL,
6987 "==>nxge_create_msi_property (1G): reset to 2"));
6988 } else {
6989 nmsi = nxge_msix_1g_intrs;
6990 }
6991 NXGE_DEBUG_MSG((nxgep, MOD_CTL,
6992 "==>nxge_create_msi_property(1G): exists 0x%x (nmsi %d)",
6993 ddi_prop_exists(DDI_DEV_T_NONE, nxgep->dip,
6994 DDI_PROP_CANSLEEP, "#msix-request"), nmsi));
6995 break;
6996 }
6997
6998 NXGE_DEBUG_MSG((nxgep, MOD_CTL, "<==nxge_create_msi_property"));
6999 return (nmsi);
7000 }
7001
7002 /*
7003  * The following is a software workaround for a Neptune hardware
7004  * interrupt bug: the hardware may generate spurious interrupts after
7005  * an interrupt handler has been removed.
7006 */
7007 #define NXGE_PCI_PORT_LOGIC_OFFSET 0x98
7008 #define NXGE_PIM_RESET (1ULL << 29)
7009 #define NXGE_GLU_RESET (1ULL << 30)
7010 #define NXGE_NIU_RESET (1ULL << 31)
7011 #define NXGE_PCI_RESET_ALL (NXGE_PIM_RESET | \
7012 NXGE_GLU_RESET | \
7013 NXGE_NIU_RESET)
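
/*
 * These bits are OR-ed into the PEU port logic register with a single
 * read-modify-write, as nxge_niu_peu_reset() does below:
 *
 *	v = pci_config_get32(handle, NXGE_PCI_PORT_LOGIC_OFFSET);
 *	pci_config_put32(handle, NXGE_PCI_PORT_LOGIC_OFFSET,
 *	    v | NXGE_PCI_RESET_ALL);
 */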
7014
7015 #define	NXGE_WAIT_QUITE_TIME	200000		/* 200 milliseconds */
7016 #define NXGE_WAIT_QUITE_RETRY 40
7017 #define NXGE_PCI_RESET_WAIT 1000000 /* one second */
7018
7019 static void
7020 nxge_niu_peu_reset(p_nxge_t nxgep)
7021 {
7022 uint32_t rvalue;
7023 p_nxge_hw_list_t hw_p;
7024 p_nxge_t fnxgep;
7025 int i, j;
7026
7027 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, "==> nxge_niu_peu_reset"));
7028 if ((hw_p = nxgep->nxge_hw_p) == NULL) {
7029 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
7030 "==> nxge_niu_peu_reset: NULL hardware pointer"));
7031 return;
7032 }
7033
7034 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
7035 "==> nxge_niu_peu_reset: flags 0x%x link timer id %d timer id %d",
7036 hw_p->flags, nxgep->nxge_link_poll_timerid,
7037 nxgep->nxge_timerid));
7038
7039 MUTEX_ENTER(&hw_p->nxge_cfg_lock);
7040 /*
7041 	 * Make sure the other instances on the same hardware have
7042 	 * stopped sending PIOs and are in a quiescent state.
7043 */
7044 for (i = 0; i < NXGE_MAX_PORTS; i++) {
7045 fnxgep = hw_p->nxge_p[i];
7046 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
7047 "==> nxge_niu_peu_reset: checking entry %d "
7048 "nxgep $%p", i, fnxgep));
7049 #ifdef NXGE_DEBUG
7050 if (fnxgep) {
7051 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
7052 "==> nxge_niu_peu_reset: entry %d (function %d) "
7053 "link timer id %d hw timer id %d",
7054 i, fnxgep->function_num,
7055 fnxgep->nxge_link_poll_timerid,
7056 fnxgep->nxge_timerid));
7057 }
7058 #endif
7059 if (fnxgep && fnxgep != nxgep &&
7060 (fnxgep->nxge_timerid || fnxgep->nxge_link_poll_timerid)) {
7061 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
7062 "==> nxge_niu_peu_reset: checking $%p "
7063 "(function %d) timer ids",
7064 fnxgep, fnxgep->function_num));
7065 for (j = 0; j < NXGE_WAIT_QUITE_RETRY; j++) {
7066 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
7067 "==> nxge_niu_peu_reset: waiting"));
7068 NXGE_DELAY(NXGE_WAIT_QUITE_TIME);
7069 if (!fnxgep->nxge_timerid &&
7070 !fnxgep->nxge_link_poll_timerid) {
7071 break;
7072 }
7073 }
7074 NXGE_DELAY(NXGE_WAIT_QUITE_TIME);
7075 if (fnxgep->nxge_timerid ||
7076 fnxgep->nxge_link_poll_timerid) {
7077 MUTEX_EXIT(&hw_p->nxge_cfg_lock);
7078 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
7079 "<== nxge_niu_peu_reset: cannot reset "
7080 "hardware (devices are still in use)"));
7081 return;
7082 }
7083 }
7084 }
7085
7086 if ((hw_p->flags & COMMON_RESET_NIU_PCI) != COMMON_RESET_NIU_PCI) {
7087 hw_p->flags |= COMMON_RESET_NIU_PCI;
7088 rvalue = pci_config_get32(nxgep->dev_regs->nxge_pciregh,
7089 NXGE_PCI_PORT_LOGIC_OFFSET);
7090 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
7091 "nxge_niu_peu_reset: read offset 0x%x (%d) "
7092 "(data 0x%x)",
7093 NXGE_PCI_PORT_LOGIC_OFFSET,
7094 NXGE_PCI_PORT_LOGIC_OFFSET,
7095 rvalue));
7096
7097 rvalue |= NXGE_PCI_RESET_ALL;
7098 pci_config_put32(nxgep->dev_regs->nxge_pciregh,
7099 NXGE_PCI_PORT_LOGIC_OFFSET, rvalue);
7100 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
7101 "nxge_niu_peu_reset: RESETTING NIU: write NIU reset 0x%x",
7102 rvalue));
7103
7104 NXGE_DELAY(NXGE_PCI_RESET_WAIT);
7105 }
7106
7107 MUTEX_EXIT(&hw_p->nxge_cfg_lock);
7108 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_niu_peu_reset"));
7109 }
7110
7111 static void
7112 nxge_set_pci_replay_timeout(p_nxge_t nxgep)
7113 {
7114 p_dev_regs_t dev_regs;
7115 uint32_t value;
7116
7117 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_set_pci_replay_timeout"));
7118
7119 if (!nxge_set_replay_timer) {
7120 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
7121 "==> nxge_set_pci_replay_timeout: will not change "
7122 "the timeout"));
7123 return;
7124 }
7125
7126 dev_regs = nxgep->dev_regs;
7127 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
7128 "==> nxge_set_pci_replay_timeout: dev_regs 0x%p pcireg 0x%p",
7129 dev_regs, dev_regs->nxge_pciregh));
7130
7131 if (dev_regs == NULL || (dev_regs->nxge_pciregh == NULL)) {
7132 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
7133 "==> nxge_set_pci_replay_timeout: NULL dev_regs $%p or "
7134 "no PCI handle",
7135 dev_regs));
7136 return;
7137 }
7138 value = (pci_config_get32(dev_regs->nxge_pciregh,
7139 PCI_REPLAY_TIMEOUT_CFG_OFFSET) |
7140 (nxge_replay_timeout << PCI_REPLAY_TIMEOUT_SHIFT));
7141
7142 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
7143 "nxge_set_pci_replay_timeout: replay timeout value before set 0x%x "
7144 "(timeout value to set 0x%x at offset 0x%x) value 0x%x",
7145 pci_config_get32(dev_regs->nxge_pciregh,
7146 PCI_REPLAY_TIMEOUT_CFG_OFFSET), nxge_replay_timeout,
7147 PCI_REPLAY_TIMEOUT_CFG_OFFSET, value));
7148
7149 pci_config_put32(dev_regs->nxge_pciregh, PCI_REPLAY_TIMEOUT_CFG_OFFSET,
7150 value);
7151
7152 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
7153 "nxge_set_pci_replay_timeout: replay timeout value after set 0x%x",
7154 pci_config_get32(dev_regs->nxge_pciregh,
7155 PCI_REPLAY_TIMEOUT_CFG_OFFSET)));
7156
7157 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_set_pci_replay_timeout"));
7158 }
7159
7160 /*
7161 * quiesce(9E) entry point.
7162 *
7163 * This function is called when the system is single-threaded at high
7164  * PIL with preemption disabled.  Therefore, this function must not
7165  * block.
7166 *
7167 * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
7168 * DDI_FAILURE indicates an error condition and should almost never happen.
7169 */
7170 static int
7171 nxge_quiesce(dev_info_t *dip)
7172 {
7173 int instance = ddi_get_instance(dip);
7174 p_nxge_t nxgep = (p_nxge_t)ddi_get_soft_state(nxge_list, instance);
7175
7176 if (nxgep == NULL)
7177 return (DDI_FAILURE);
7178
7179 /* Turn off debugging */
7180 nxge_debug_level = NO_DEBUG;
7181 nxgep->nxge_debug_level = NO_DEBUG;
7182 npi_debug_level = NO_DEBUG;
7183
7184 /*
7185 	 * Stop the link monitor only when linkchkmode is interrupt based.
7186 */
7187 if (nxgep->mac.linkchkmode == LINKCHK_INTR) {
7188 (void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);
7189 }
7190
7191 (void) nxge_intr_hw_disable(nxgep);
7192
7193 /*
7194 * Reset the receive MAC side.
7195 */
7196 (void) nxge_rx_mac_disable(nxgep);
7197
7198 /* Disable and soft reset the IPP */
7199 if (!isLDOMguest(nxgep))
7200 (void) nxge_ipp_disable(nxgep);
7201
7202 /*
7203 * Reset the transmit/receive DMA side.
7204 */
7205 (void) nxge_txdma_hw_mode(nxgep, NXGE_DMA_STOP);
7206 (void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_STOP);
7207
7208 /*
7209 * Reset the transmit MAC side.
7210 */
7211 (void) nxge_tx_mac_disable(nxgep);
7212
7213 return (DDI_SUCCESS);
7214 }
7215