1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21 /*
22 * Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved.
23 * Copyright 2016 OmniTI Computer Consulting, Inc. All rights reserved.
24 */
25
26 /*
27 * SunOs MT STREAMS NIU/Neptune 10Gb Ethernet Device Driver.
28 */
29 #include <sys/nxge/nxge_impl.h>
30 #include <sys/nxge/nxge_hio.h>
31 #include <sys/nxge/nxge_rxdma.h>
32 #include <sys/pcie.h>
33
34 uint32_t nxge_use_partition = 0; /* debug partition flag */
35 uint32_t nxge_dma_obp_props_only = 1; /* use obp published props */
36 uint32_t nxge_use_rdc_intr = 1; /* debug to assign rdc intr */
37 /*
38 * PSARC/2007/453 MSI-X interrupt limit override
39 */
40 uint32_t nxge_msi_enable = 2;
41
42 /*
43 * Software workaround for a Neptune (PCI-E)
44 * hardware interrupt bug which the hardware
45 * may generate spurious interrupts after the
46 * device interrupt handler was removed. If this flag
47 * is enabled, the driver will reset the
48 * hardware when devices are being detached.
49 */
50 uint32_t nxge_peu_reset_enable = 0;
51
52 /*
53 * Software workaround for the hardware
54 * checksum bugs that affect packet transmission
55 * and receive:
56 *
57 * Usage of nxge_cksum_offload:
58 *
59 * (1) nxge_cksum_offload = 0 (default):
60 * - transmits packets:
61 * TCP: uses the hardware checksum feature.
62 * UDP: driver will compute the software checksum
63 * based on the partial checksum computed
64 * by the IP layer.
65 * - receives packets
66 * TCP: marks packets checksum flags based on hardware result.
67 * UDP: will not mark checksum flags.
68 *
69 * (2) nxge_cksum_offload = 1:
70 * - transmit packets:
71 * TCP/UDP: uses the hardware checksum feature.
72 * - receives packets
73 * TCP/UDP: marks packet checksum flags based on hardware result.
74 *
75 * (3) nxge_cksum_offload = 2:
76 * - The driver will not register its checksum capability.
77 * Checksum for both TCP and UDP will be computed
78 * by the stack.
79 * - The software LSO is not allowed in this case.
80 *
81 * (4) nxge_cksum_offload > 2:
82 * - Will be treated as it is set to 2
83 * (stack will compute the checksum).
84 *
85 * (5) If the hardware bug is fixed, this workaround
86 * needs to be updated accordingly to reflect
87 * the new hardware revision.
88 */
89 uint32_t nxge_cksum_offload = 0;
90
91 /*
92 * Globals: tunable parameters (/etc/system or adb)
93 *
94 */
95 uint32_t nxge_rbr_size = NXGE_RBR_RBB_DEFAULT;
96 uint32_t nxge_rbr_spare_size = 0;
97 uint32_t nxge_rcr_size = NXGE_RCR_DEFAULT;
98 uint16_t nxge_rdc_buf_offset = SW_OFFSET_NO_OFFSET;
99 uint32_t nxge_tx_ring_size = NXGE_TX_RING_DEFAULT;
100 boolean_t nxge_no_msg = B_TRUE; /* control message display */
101 uint32_t nxge_no_link_notify = 0; /* control DL_NOTIFY */
102 uint32_t nxge_bcopy_thresh = TX_BCOPY_MAX;
103 uint32_t nxge_dvma_thresh = TX_FASTDVMA_MIN;
104 uint32_t nxge_dma_stream_thresh = TX_STREAM_MIN;
105 uint32_t nxge_jumbo_mtu = TX_JUMBO_MTU;
106 nxge_tx_mode_t nxge_tx_scheme = NXGE_USE_SERIAL;
107
108 /* MAX LSO size */
109 #define NXGE_LSO_MAXLEN 65535
110 uint32_t nxge_lso_max = NXGE_LSO_MAXLEN;
111
112
113 /*
114 * Add tunable to reduce the amount of time spent in the
115 * ISR doing Rx Processing.
116 */
117 uint32_t nxge_max_rx_pkts = 1024;
118
119 /*
120 * Tunables to manage the receive buffer blocks.
121 *
122 * nxge_rx_threshold_hi: copy all buffers.
123 * nxge_rx_bcopy_size_type: receive buffer block size type.
124 * nxge_rx_threshold_lo: copy only up to tunable block size type.
125 */
126 nxge_rxbuf_threshold_t nxge_rx_threshold_hi = NXGE_RX_COPY_6;
127 nxge_rxbuf_type_t nxge_rx_buf_size_type = RCR_PKTBUFSZ_0;
128 nxge_rxbuf_threshold_t nxge_rx_threshold_lo = NXGE_RX_COPY_3;
129
130 /* Use kmem_alloc() to allocate data buffers. */
131 #if defined(__sparc)
132 uint32_t nxge_use_kmem_alloc = 1;
133 #elif defined(__i386)
134 uint32_t nxge_use_kmem_alloc = 0;
135 #else
136 uint32_t nxge_use_kmem_alloc = 1;
137 #endif
138
139 rtrace_t npi_rtracebuf;
140
141 /*
142 * The hardware sometimes fails to allow enough time for the link partner
143 * to send an acknowledgement for packets that the hardware sent to it. The
144 * hardware resends the packets earlier than it should be in those instances.
145 * This behavior caused some switches to acknowledge the wrong packets
146 * and it triggered the fatal error.
147 * This software workaround is to set the replay timer to a value
148 * suggested by the hardware team.
149 *
150 * PCI config space replay timer register:
151 * The following replay timeout value is 0xc
152 * for bit 14:18.
153 */
154 #define PCI_REPLAY_TIMEOUT_CFG_OFFSET 0xb8
155 #define PCI_REPLAY_TIMEOUT_SHIFT 14
156
157 uint32_t nxge_set_replay_timer = 1;
158 uint32_t nxge_replay_timeout = 0xc;
159
160 /*
161 * The transmit serialization sometimes causes
162 * longer sleep before calling the driver transmit
163 * function as it sleeps longer than it should.
164 * The performace group suggests that a time wait tunable
165 * can be used to set the maximum wait time when needed
166 * and the default is set to 1 tick.
167 */
168 uint32_t nxge_tx_serial_maxsleep = 1;
169
170 #if defined(sun4v)
171 /*
172 * Hypervisor N2/NIU services information.
173 */
174 /*
175 * The following is the default API supported:
176 * major 1 and minor 1.
177 *
178 * Please update the MAX_NIU_MAJORS,
179 * MAX_NIU_MINORS, and minor number supported
180 * when the newer Hypervior API interfaces
181 * are added. Also, please update nxge_hsvc_register()
182 * if needed.
183 */
184 static hsvc_info_t niu_hsvc = {
185 HSVC_REV_1, NULL, HSVC_GROUP_NIU, NIU_MAJOR_VER,
186 NIU_MINOR_VER, "nxge"
187 };
188
189 static int nxge_hsvc_register(p_nxge_t);
190 #endif
191
192 /*
193 * Function Prototypes
194 */
195 static int nxge_attach(dev_info_t *, ddi_attach_cmd_t);
196 static int nxge_detach(dev_info_t *, ddi_detach_cmd_t);
197 static void nxge_unattach(p_nxge_t);
198 static int nxge_quiesce(dev_info_t *);
199
200 #if NXGE_PROPERTY
201 static void nxge_remove_hard_properties(p_nxge_t);
202 #endif
203
204 /*
205 * These two functions are required by nxge_hio.c
206 */
207 extern int nxge_m_mmac_remove(void *arg, int slot);
208 extern void nxge_grp_cleanup(p_nxge_t nxge);
209
210 static nxge_status_t nxge_setup_system_dma_pages(p_nxge_t);
211
212 static nxge_status_t nxge_setup_mutexes(p_nxge_t);
213 static void nxge_destroy_mutexes(p_nxge_t);
214
215 static nxge_status_t nxge_map_regs(p_nxge_t nxgep);
216 static void nxge_unmap_regs(p_nxge_t nxgep);
217 #ifdef NXGE_DEBUG
218 static void nxge_test_map_regs(p_nxge_t nxgep);
219 #endif
220
221 static nxge_status_t nxge_add_intrs(p_nxge_t nxgep);
222 static void nxge_remove_intrs(p_nxge_t nxgep);
223
224 static nxge_status_t nxge_add_intrs_adv(p_nxge_t nxgep);
225 static nxge_status_t nxge_add_intrs_adv_type(p_nxge_t, uint32_t);
226 static nxge_status_t nxge_add_intrs_adv_type_fix(p_nxge_t, uint32_t);
227 static void nxge_intrs_enable(p_nxge_t nxgep);
228 static void nxge_intrs_disable(p_nxge_t nxgep);
229
230 static void nxge_suspend(p_nxge_t);
231 static nxge_status_t nxge_resume(p_nxge_t);
232
233 static nxge_status_t nxge_setup_dev(p_nxge_t);
234 static void nxge_destroy_dev(p_nxge_t);
235
236 static nxge_status_t nxge_alloc_mem_pool(p_nxge_t);
237 static void nxge_free_mem_pool(p_nxge_t);
238
239 nxge_status_t nxge_alloc_rx_mem_pool(p_nxge_t);
240 static void nxge_free_rx_mem_pool(p_nxge_t);
241
242 nxge_status_t nxge_alloc_tx_mem_pool(p_nxge_t);
243 static void nxge_free_tx_mem_pool(p_nxge_t);
244
245 static nxge_status_t nxge_dma_mem_alloc(p_nxge_t, dma_method_t,
246 struct ddi_dma_attr *,
247 size_t, ddi_device_acc_attr_t *, uint_t,
248 p_nxge_dma_common_t);
249
250 static void nxge_dma_mem_free(p_nxge_dma_common_t);
251 static void nxge_dma_free_rx_data_buf(p_nxge_dma_common_t);
252
253 static nxge_status_t nxge_alloc_rx_buf_dma(p_nxge_t, uint16_t,
254 p_nxge_dma_common_t *, size_t, size_t, uint32_t *);
255 static void nxge_free_rx_buf_dma(p_nxge_t, p_nxge_dma_common_t, uint32_t);
256
257 static nxge_status_t nxge_alloc_rx_cntl_dma(p_nxge_t, uint16_t,
258 p_nxge_dma_common_t *, size_t);
259 static void nxge_free_rx_cntl_dma(p_nxge_t, p_nxge_dma_common_t);
260
261 extern nxge_status_t nxge_alloc_tx_buf_dma(p_nxge_t, uint16_t,
262 p_nxge_dma_common_t *, size_t, size_t, uint32_t *);
263 static void nxge_free_tx_buf_dma(p_nxge_t, p_nxge_dma_common_t, uint32_t);
264
265 extern nxge_status_t nxge_alloc_tx_cntl_dma(p_nxge_t, uint16_t,
266 p_nxge_dma_common_t *,
267 size_t);
268 static void nxge_free_tx_cntl_dma(p_nxge_t, p_nxge_dma_common_t);
269
270 static int nxge_init_common_dev(p_nxge_t);
271 static void nxge_uninit_common_dev(p_nxge_t);
272 extern int nxge_param_set_mac(p_nxge_t, queue_t *, mblk_t *,
273 char *, caddr_t);
274 #if defined(sun4v)
275 extern nxge_status_t nxge_hio_rdc_enable(p_nxge_t nxgep);
276 extern nxge_status_t nxge_hio_rdc_intr_arm(p_nxge_t nxge, boolean_t arm);
277 #endif
278
279 /*
280 * The next declarations are for the GLDv3 interface.
281 */
282 static int nxge_m_start(void *);
283 static void nxge_m_stop(void *);
284 static int nxge_m_multicst(void *, boolean_t, const uint8_t *);
285 static int nxge_m_promisc(void *, boolean_t);
286 static void nxge_m_ioctl(void *, queue_t *, mblk_t *);
287 nxge_status_t nxge_mac_register(p_nxge_t);
288 static int nxge_altmac_set(p_nxge_t nxgep, uint8_t *mac_addr,
289 int slot, int rdctbl, boolean_t usetbl);
290 void nxge_mmac_kstat_update(p_nxge_t nxgep, int slot,
291 boolean_t factory);
292
293 static void nxge_m_getfactaddr(void *, uint_t, uint8_t *);
294 static boolean_t nxge_m_getcapab(void *, mac_capab_t, void *);
295 static int nxge_m_setprop(void *, const char *, mac_prop_id_t,
296 uint_t, const void *);
297 static int nxge_m_getprop(void *, const char *, mac_prop_id_t,
298 uint_t, void *);
299 static void nxge_m_propinfo(void *, const char *, mac_prop_id_t,
300 mac_prop_info_handle_t);
301 static void nxge_priv_propinfo(const char *, mac_prop_info_handle_t);
302 static int nxge_set_priv_prop(nxge_t *, const char *, uint_t,
303 const void *);
304 static int nxge_get_priv_prop(nxge_t *, const char *, uint_t, void *);
305 static void nxge_fill_ring(void *, mac_ring_type_t, const int, const int,
306 mac_ring_info_t *, mac_ring_handle_t);
307 static void nxge_group_add_ring(mac_group_driver_t, mac_ring_driver_t,
308 mac_ring_type_t);
309 static void nxge_group_rem_ring(mac_group_driver_t, mac_ring_driver_t,
310 mac_ring_type_t);
311
312 static void nxge_niu_peu_reset(p_nxge_t nxgep);
313 static void nxge_set_pci_replay_timeout(nxge_t *);
314
315 char *nxge_priv_props[] = {
316 "_adv_10gfdx_cap",
317 "_adv_pause_cap",
318 "_function_number",
319 "_fw_version",
320 "_port_mode",
321 "_hot_swap_phy",
322 "_rxdma_intr_time",
323 "_rxdma_intr_pkts",
324 "_class_opt_ipv4_tcp",
325 "_class_opt_ipv4_udp",
326 "_class_opt_ipv4_ah",
327 "_class_opt_ipv4_sctp",
328 "_class_opt_ipv6_tcp",
329 "_class_opt_ipv6_udp",
330 "_class_opt_ipv6_ah",
331 "_class_opt_ipv6_sctp",
332 "_soft_lso_enable",
333 NULL
334 };
335
336 #define NXGE_NEPTUNE_MAGIC 0x4E584745UL
337 #define MAX_DUMP_SZ 256
338
339 #define NXGE_M_CALLBACK_FLAGS \
340 (MC_IOCTL | MC_GETCAPAB | MC_SETPROP | MC_GETPROP | MC_PROPINFO)
341
342 mac_callbacks_t nxge_m_callbacks = {
343 NXGE_M_CALLBACK_FLAGS,
344 nxge_m_stat,
345 nxge_m_start,
346 nxge_m_stop,
347 nxge_m_promisc,
348 nxge_m_multicst,
349 NULL,
350 NULL,
351 NULL,
352 nxge_m_ioctl,
353 nxge_m_getcapab,
354 NULL,
355 NULL,
356 nxge_m_setprop,
357 nxge_m_getprop,
358 nxge_m_propinfo
359 };
360
361 void
362 nxge_err_inject(p_nxge_t, queue_t *, mblk_t *);
363
364 /* PSARC/2007/453 MSI-X interrupt limit override. */
365 #define NXGE_MSIX_REQUEST_10G 8
366 #define NXGE_MSIX_REQUEST_1G 2
367 static int nxge_create_msi_property(p_nxge_t);
368 /*
369 * For applications that care about the
370 * latency, it was requested by PAE and the
371 * customers that the driver has tunables that
372 * allow the user to tune it to a higher number
373 * interrupts to spread the interrupts among
374 * multiple channels. The DDI framework limits
375 * the maximum number of MSI-X resources to allocate
376 * to 8 (ddi_msix_alloc_limit). If more than 8
377 * is set, ddi_msix_alloc_limit must be set accordingly.
378 * The default number of MSI interrupts are set to
379 * 8 for 10G and 2 for 1G link.
380 */
381 #define NXGE_MSIX_MAX_ALLOWED 32
382 uint32_t nxge_msix_10g_intrs = NXGE_MSIX_REQUEST_10G;
383 uint32_t nxge_msix_1g_intrs = NXGE_MSIX_REQUEST_1G;
384
385 /*
386 * These global variables control the message
387 * output.
388 */
389 out_dbgmsg_t nxge_dbgmsg_out = DBG_CONSOLE | STR_LOG;
390 uint64_t nxge_debug_level;
391
392 /*
393 * This list contains the instance structures for the Neptune
394 * devices present in the system. The lock exists to guarantee
395 * mutually exclusive access to the list.
396 */
397 void *nxge_list = NULL;
398 void *nxge_hw_list = NULL;
399 nxge_os_mutex_t nxge_common_lock;
400 nxge_os_mutex_t nxgedebuglock;
401
402 extern uint64_t npi_debug_level;
403
404 extern nxge_status_t nxge_ldgv_init(p_nxge_t, int *, int *);
405 extern nxge_status_t nxge_ldgv_init_n2(p_nxge_t, int *, int *);
406 extern nxge_status_t nxge_ldgv_uninit(p_nxge_t);
407 extern nxge_status_t nxge_intr_ldgv_init(p_nxge_t);
408 extern void nxge_fm_init(p_nxge_t,
409 ddi_device_acc_attr_t *,
410 ddi_dma_attr_t *);
411 extern void nxge_fm_fini(p_nxge_t);
412 extern npi_status_t npi_mac_altaddr_disable(npi_handle_t, uint8_t, uint8_t);
413
414 /*
415 * Count used to maintain the number of buffers being used
416 * by Neptune instances and loaned up to the upper layers.
417 */
418 uint32_t nxge_mblks_pending = 0;
419
420 /*
421 * Device register access attributes for PIO.
422 */
423 static ddi_device_acc_attr_t nxge_dev_reg_acc_attr = {
424 DDI_DEVICE_ATTR_V1,
425 DDI_STRUCTURE_LE_ACC,
426 DDI_STRICTORDER_ACC,
427 DDI_DEFAULT_ACC
428 };
429
430 /*
431 * Device descriptor access attributes for DMA.
432 */
433 static ddi_device_acc_attr_t nxge_dev_desc_dma_acc_attr = {
434 DDI_DEVICE_ATTR_V0,
435 DDI_STRUCTURE_LE_ACC,
436 DDI_STRICTORDER_ACC
437 };
438
439 /*
440 * Device buffer access attributes for DMA.
441 */
442 static ddi_device_acc_attr_t nxge_dev_buf_dma_acc_attr = {
443 DDI_DEVICE_ATTR_V0,
444 DDI_STRUCTURE_BE_ACC,
445 DDI_STRICTORDER_ACC
446 };
447
448 ddi_dma_attr_t nxge_desc_dma_attr = {
449 DMA_ATTR_V0, /* version number. */
450 0, /* low address */
451 0xffffffffffffffff, /* high address */
452 0xffffffffffffffff, /* address counter max */
453 #ifndef NIU_PA_WORKAROUND
454 0x100000, /* alignment */
455 #else
456 0x2000,
457 #endif
458 0xfc00fc, /* dlim_burstsizes */
459 0x1, /* minimum transfer size */
460 0xffffffffffffffff, /* maximum transfer size */
461 0xffffffffffffffff, /* maximum segment size */
462 1, /* scatter/gather list length */
463 (unsigned int) 1, /* granularity */
464 0 /* attribute flags */
465 };
466
467 ddi_dma_attr_t nxge_tx_dma_attr = {
468 DMA_ATTR_V0, /* version number. */
469 0, /* low address */
470 0xffffffffffffffff, /* high address */
471 0xffffffffffffffff, /* address counter max */
472 #if defined(_BIG_ENDIAN)
473 0x2000, /* alignment */
474 #else
475 0x1000, /* alignment */
476 #endif
477 0xfc00fc, /* dlim_burstsizes */
478 0x1, /* minimum transfer size */
479 0xffffffffffffffff, /* maximum transfer size */
480 0xffffffffffffffff, /* maximum segment size */
481 5, /* scatter/gather list length */
482 (unsigned int) 1, /* granularity */
483 0 /* attribute flags */
484 };
485
486 ddi_dma_attr_t nxge_rx_dma_attr = {
487 DMA_ATTR_V0, /* version number. */
488 0, /* low address */
489 0xffffffffffffffff, /* high address */
490 0xffffffffffffffff, /* address counter max */
491 0x2000, /* alignment */
492 0xfc00fc, /* dlim_burstsizes */
493 0x1, /* minimum transfer size */
494 0xffffffffffffffff, /* maximum transfer size */
495 0xffffffffffffffff, /* maximum segment size */
496 1, /* scatter/gather list length */
497 (unsigned int) 1, /* granularity */
498 DDI_DMA_RELAXED_ORDERING /* attribute flags */
499 };
500
501 ddi_dma_lim_t nxge_dma_limits = {
502 (uint_t)0, /* dlim_addr_lo */
503 (uint_t)0xffffffff, /* dlim_addr_hi */
504 (uint_t)0xffffffff, /* dlim_cntr_max */
505 (uint_t)0xfc00fc, /* dlim_burstsizes for 32 and 64 bit xfers */
506 0x1, /* dlim_minxfer */
507 1024 /* dlim_speed */
508 };
509
510 dma_method_t nxge_force_dma = DVMA;
511
512 /*
513 * dma chunk sizes.
514 *
515 * Try to allocate the largest possible size
516 * so that fewer number of dma chunks would be managed
517 */
518 #ifdef NIU_PA_WORKAROUND
519 size_t alloc_sizes [] = {0x2000};
520 #else
521 size_t alloc_sizes [] = {0x1000, 0x2000, 0x4000, 0x8000,
522 0x10000, 0x20000, 0x40000, 0x80000,
523 0x100000, 0x200000, 0x400000, 0x800000,
524 0x1000000, 0x2000000, 0x4000000};
525 #endif
526
527 /*
528 * Translate "dev_t" to a pointer to the associated "dev_info_t".
529 */
530
531 extern void nxge_get_environs(nxge_t *);
532
533 static int
nxge_attach(dev_info_t * dip,ddi_attach_cmd_t cmd)534 nxge_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
535 {
536 p_nxge_t nxgep = NULL;
537 int instance;
538 int status = DDI_SUCCESS;
539 uint8_t portn;
540 nxge_mmac_t *mmac_info;
541
542 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_attach"));
543
544 /*
545 * Get the device instance since we'll need to setup
546 * or retrieve a soft state for this instance.
547 */
548 instance = ddi_get_instance(dip);
549
550 switch (cmd) {
551 case DDI_ATTACH:
552 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_ATTACH"));
553 break;
554
555 case DDI_RESUME:
556 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_RESUME"));
557 nxgep = (p_nxge_t)ddi_get_soft_state(nxge_list, instance);
558 if (nxgep == NULL) {
559 status = DDI_FAILURE;
560 break;
561 }
562 if (nxgep->dip != dip) {
563 status = DDI_FAILURE;
564 break;
565 }
566 if (nxgep->suspended == DDI_PM_SUSPEND) {
567 status = ddi_dev_is_needed(nxgep->dip, 0, 1);
568 } else {
569 status = nxge_resume(nxgep);
570 }
571 goto nxge_attach_exit;
572
573 case DDI_PM_RESUME:
574 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_PM_RESUME"));
575 nxgep = (p_nxge_t)ddi_get_soft_state(nxge_list, instance);
576 if (nxgep == NULL) {
577 status = DDI_FAILURE;
578 break;
579 }
580 if (nxgep->dip != dip) {
581 status = DDI_FAILURE;
582 break;
583 }
584 status = nxge_resume(nxgep);
585 goto nxge_attach_exit;
586
587 default:
588 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing unknown"));
589 status = DDI_FAILURE;
590 goto nxge_attach_exit;
591 }
592
593
594 if (ddi_soft_state_zalloc(nxge_list, instance) == DDI_FAILURE) {
595 status = DDI_FAILURE;
596 goto nxge_attach_exit;
597 }
598
599 nxgep = ddi_get_soft_state(nxge_list, instance);
600 if (nxgep == NULL) {
601 status = NXGE_ERROR;
602 goto nxge_attach_fail2;
603 }
604
605 nxgep->nxge_magic = NXGE_MAGIC;
606
607 nxgep->drv_state = 0;
608 nxgep->dip = dip;
609 nxgep->instance = instance;
610 nxgep->p_dip = ddi_get_parent(dip);
611 nxgep->nxge_debug_level = nxge_debug_level;
612 npi_debug_level = nxge_debug_level;
613
614 /* Are we a guest running in a Hybrid I/O environment? */
615 nxge_get_environs(nxgep);
616
617 status = nxge_map_regs(nxgep);
618
619 if (status != NXGE_OK) {
620 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "nxge_map_regs failed"));
621 goto nxge_attach_fail3;
622 }
623
624 nxge_fm_init(nxgep, &nxge_dev_reg_acc_attr, &nxge_rx_dma_attr);
625
626 /* Create & initialize the per-Neptune data structure */
627 /* (even if we're a guest). */
628 status = nxge_init_common_dev(nxgep);
629 if (status != NXGE_OK) {
630 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
631 "nxge_init_common_dev failed"));
632 goto nxge_attach_fail4;
633 }
634
635 /*
636 * Software workaround: set the replay timer.
637 */
638 if (nxgep->niu_type != N2_NIU) {
639 nxge_set_pci_replay_timeout(nxgep);
640 }
641
642 #if defined(sun4v)
643 /* This is required by nxge_hio_init(), which follows. */
644 if ((status = nxge_hsvc_register(nxgep)) != DDI_SUCCESS)
645 goto nxge_attach_fail4;
646 #endif
647
648 if ((status = nxge_hio_init(nxgep)) != NXGE_OK) {
649 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
650 "nxge_hio_init failed"));
651 goto nxge_attach_fail4;
652 }
653
654 if (nxgep->niu_type == NEPTUNE_2_10GF) {
655 if (nxgep->function_num > 1) {
656 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "Unsupported"
657 " function %d. Only functions 0 and 1 are "
658 "supported for this card.", nxgep->function_num));
659 status = NXGE_ERROR;
660 goto nxge_attach_fail4;
661 }
662 }
663
664 if (isLDOMguest(nxgep)) {
665 /*
666 * Use the function number here.
667 */
668 nxgep->mac.portnum = nxgep->function_num;
669 nxgep->mac.porttype = PORT_TYPE_LOGICAL;
670
671 /* XXX We'll set the MAC address counts to 1 for now. */
672 mmac_info = &nxgep->nxge_mmac_info;
673 mmac_info->num_mmac = 1;
674 mmac_info->naddrfree = 1;
675 } else {
676 portn = NXGE_GET_PORT_NUM(nxgep->function_num);
677 nxgep->mac.portnum = portn;
678 if ((portn == 0) || (portn == 1))
679 nxgep->mac.porttype = PORT_TYPE_XMAC;
680 else
681 nxgep->mac.porttype = PORT_TYPE_BMAC;
682 /*
683 * Neptune has 4 ports, the first 2 ports use XMAC (10G MAC)
684 * internally, the rest 2 ports use BMAC (1G "Big" MAC).
685 * The two types of MACs have different characterizations.
686 */
687 mmac_info = &nxgep->nxge_mmac_info;
688 if (nxgep->function_num < 2) {
689 mmac_info->num_mmac = XMAC_MAX_ALT_ADDR_ENTRY;
690 mmac_info->naddrfree = XMAC_MAX_ALT_ADDR_ENTRY;
691 } else {
692 mmac_info->num_mmac = BMAC_MAX_ALT_ADDR_ENTRY;
693 mmac_info->naddrfree = BMAC_MAX_ALT_ADDR_ENTRY;
694 }
695 }
696 /*
697 * Setup the Ndd parameters for the this instance.
698 */
699 nxge_init_param(nxgep);
700
701 /*
702 * Setup Register Tracing Buffer.
703 */
704 npi_rtrace_buf_init((rtrace_t *)&npi_rtracebuf);
705
706 /* init stats ptr */
707 nxge_init_statsp(nxgep);
708
709 /*
710 * Copy the vpd info from eeprom to a local data
711 * structure, and then check its validity.
712 */
713 if (!isLDOMguest(nxgep)) {
714 int *regp;
715 uint_t reglen;
716 int rv;
717
718 nxge_vpd_info_get(nxgep);
719
720 /* Find the NIU config handle. */
721 rv = ddi_prop_lookup_int_array(DDI_DEV_T_ANY,
722 ddi_get_parent(nxgep->dip), DDI_PROP_DONTPASS,
723 "reg", ®p, ®len);
724
725 if (rv != DDI_PROP_SUCCESS) {
726 goto nxge_attach_fail5;
727 }
728 /*
729 * The address_hi, that is the first int, in the reg
730 * property consists of config handle, but need to remove
731 * the bits 28-31 which are OBP specific info.
732 */
733 nxgep->niu_cfg_hdl = (*regp) & 0xFFFFFFF;
734 ddi_prop_free(regp);
735 }
736
737 /*
738 * Set the defaults for the MTU size.
739 */
740 nxge_hw_id_init(nxgep);
741
742 if (isLDOMguest(nxgep)) {
743 uchar_t *prop_val;
744 uint_t prop_len;
745 uint32_t max_frame_size;
746
747 extern void nxge_get_logical_props(p_nxge_t);
748
749 nxgep->statsp->mac_stats.xcvr_inuse = LOGICAL_XCVR;
750 nxgep->mac.portmode = PORT_LOGICAL;
751 (void) ddi_prop_update_string(DDI_DEV_T_NONE, nxgep->dip,
752 "phy-type", "virtual transceiver");
753
754 nxgep->nports = 1;
755 nxgep->board_ver = 0; /* XXX What? */
756
757 /*
758 * local-mac-address property gives us info on which
759 * specific MAC address the Hybrid resource is associated
760 * with.
761 */
762 if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, nxgep->dip, 0,
763 "local-mac-address", &prop_val,
764 &prop_len) != DDI_PROP_SUCCESS) {
765 goto nxge_attach_fail5;
766 }
767 if (prop_len != ETHERADDRL) {
768 ddi_prop_free(prop_val);
769 goto nxge_attach_fail5;
770 }
771 ether_copy(prop_val, nxgep->hio_mac_addr);
772 ddi_prop_free(prop_val);
773 nxge_get_logical_props(nxgep);
774
775 /*
776 * Enable Jumbo property based on the "max-frame-size"
777 * property value.
778 */
779 max_frame_size = ddi_prop_get_int(DDI_DEV_T_ANY,
780 nxgep->dip, DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
781 "max-frame-size", NXGE_MTU_DEFAULT_MAX);
782 if ((max_frame_size > NXGE_MTU_DEFAULT_MAX) &&
783 (max_frame_size <= TX_JUMBO_MTU)) {
784 nxgep->mac.is_jumbo = B_TRUE;
785 nxgep->mac.maxframesize = (uint16_t)max_frame_size;
786 nxgep->mac.default_mtu = nxgep->mac.maxframesize -
787 NXGE_EHEADER_VLAN_CRC;
788 }
789 } else {
790 status = nxge_xcvr_find(nxgep);
791
792 if (status != NXGE_OK) {
793 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "nxge_attach: "
794 " Couldn't determine card type"
795 " .... exit "));
796 goto nxge_attach_fail5;
797 }
798
799 status = nxge_get_config_properties(nxgep);
800
801 if (status != NXGE_OK) {
802 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
803 "get_hw create failed"));
804 goto nxge_attach_fail;
805 }
806 }
807
808 /*
809 * Setup the Kstats for the driver.
810 */
811 nxge_setup_kstats(nxgep);
812
813 if (!isLDOMguest(nxgep))
814 nxge_setup_param(nxgep);
815
816 status = nxge_setup_system_dma_pages(nxgep);
817 if (status != NXGE_OK) {
818 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "set dma page failed"));
819 goto nxge_attach_fail;
820 }
821
822
823 if (!isLDOMguest(nxgep))
824 nxge_hw_init_niu_common(nxgep);
825
826 status = nxge_setup_mutexes(nxgep);
827 if (status != NXGE_OK) {
828 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "set mutex failed"));
829 goto nxge_attach_fail;
830 }
831
832 #if defined(sun4v)
833 if (isLDOMguest(nxgep)) {
834 /* Find our VR & channel sets. */
835 status = nxge_hio_vr_add(nxgep);
836 if (status != DDI_SUCCESS) {
837 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
838 "nxge_hio_vr_add failed"));
839 (void) hsvc_unregister(&nxgep->niu_hsvc);
840 nxgep->niu_hsvc_available = B_FALSE;
841 goto nxge_attach_fail;
842 }
843 goto nxge_attach_exit;
844 }
845 #endif
846
847 status = nxge_setup_dev(nxgep);
848 if (status != DDI_SUCCESS) {
849 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "set dev failed"));
850 goto nxge_attach_fail;
851 }
852
853 status = nxge_add_intrs(nxgep);
854 if (status != DDI_SUCCESS) {
855 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "add_intr failed"));
856 goto nxge_attach_fail;
857 }
858
859 /* If a guest, register with vio_net instead. */
860 if ((status = nxge_mac_register(nxgep)) != NXGE_OK) {
861 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
862 "unable to register to mac layer (%d)", status));
863 goto nxge_attach_fail;
864 }
865
866 mac_link_update(nxgep->mach, LINK_STATE_UNKNOWN);
867
868 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
869 "registered to mac (instance %d)", instance));
870
871 /* nxge_link_monitor calls xcvr.check_link recursively */
872 (void) nxge_link_monitor(nxgep, LINK_MONITOR_START);
873
874 goto nxge_attach_exit;
875
876 nxge_attach_fail:
877 nxge_unattach(nxgep);
878 goto nxge_attach_fail1;
879
880 nxge_attach_fail5:
881 /*
882 * Tear down the ndd parameters setup.
883 */
884 nxge_destroy_param(nxgep);
885
886 /*
887 * Tear down the kstat setup.
888 */
889 nxge_destroy_kstats(nxgep);
890
891 nxge_attach_fail4:
892 if (nxgep->nxge_hw_p) {
893 nxge_uninit_common_dev(nxgep);
894 nxgep->nxge_hw_p = NULL;
895 }
896
897 nxge_attach_fail3:
898 /*
899 * Unmap the register setup.
900 */
901 nxge_unmap_regs(nxgep);
902
903 nxge_fm_fini(nxgep);
904
905 nxge_attach_fail2:
906 ddi_soft_state_free(nxge_list, nxgep->instance);
907
908 nxge_attach_fail1:
909 if (status != NXGE_OK)
910 status = (NXGE_ERROR | NXGE_DDI_FAILED);
911 nxgep = NULL;
912
913 nxge_attach_exit:
914 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_attach status = 0x%08x",
915 status));
916
917 return (status);
918 }
919
920 static int
nxge_detach(dev_info_t * dip,ddi_detach_cmd_t cmd)921 nxge_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
922 {
923 int status = DDI_SUCCESS;
924 int instance;
925 p_nxge_t nxgep = NULL;
926
927 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_detach"));
928 instance = ddi_get_instance(dip);
929 nxgep = ddi_get_soft_state(nxge_list, instance);
930 if (nxgep == NULL) {
931 status = DDI_FAILURE;
932 goto nxge_detach_exit;
933 }
934
935 switch (cmd) {
936 case DDI_DETACH:
937 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_DETACH"));
938 break;
939
940 case DDI_PM_SUSPEND:
941 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_PM_SUSPEND"));
942 nxgep->suspended = DDI_PM_SUSPEND;
943 nxge_suspend(nxgep);
944 break;
945
946 case DDI_SUSPEND:
947 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_SUSPEND"));
948 if (nxgep->suspended != DDI_PM_SUSPEND) {
949 nxgep->suspended = DDI_SUSPEND;
950 nxge_suspend(nxgep);
951 }
952 break;
953
954 default:
955 status = DDI_FAILURE;
956 }
957
958 if (cmd != DDI_DETACH)
959 goto nxge_detach_exit;
960
961 /*
962 * Stop the xcvr polling.
963 */
964 nxgep->suspended = cmd;
965
966 (void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);
967
968 if (nxgep->mach && (status = mac_unregister(nxgep->mach)) != 0) {
969 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
970 "<== nxge_detach status = 0x%08X", status));
971 return (DDI_FAILURE);
972 }
973
974 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
975 "<== nxge_detach (mac_unregister) status = 0x%08X", status));
976
977 nxge_unattach(nxgep);
978 nxgep = NULL;
979
980 nxge_detach_exit:
981 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_detach status = 0x%08X",
982 status));
983
984 return (status);
985 }
986
987 static void
nxge_unattach(p_nxge_t nxgep)988 nxge_unattach(p_nxge_t nxgep)
989 {
990 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_unattach"));
991
992 if (nxgep == NULL || nxgep->dev_regs == NULL) {
993 return;
994 }
995
996 nxgep->nxge_magic = 0;
997
998 if (nxgep->nxge_timerid) {
999 nxge_stop_timer(nxgep, nxgep->nxge_timerid);
1000 nxgep->nxge_timerid = 0;
1001 }
1002
1003 /*
1004 * If this flag is set, it will affect the Neptune
1005 * only.
1006 */
1007 if ((nxgep->niu_type != N2_NIU) && nxge_peu_reset_enable) {
1008 nxge_niu_peu_reset(nxgep);
1009 }
1010
1011 #if defined(sun4v)
1012 if (isLDOMguest(nxgep)) {
1013 (void) nxge_hio_vr_release(nxgep);
1014 }
1015 #endif
1016
1017 if (nxgep->nxge_hw_p) {
1018 nxge_uninit_common_dev(nxgep);
1019 nxgep->nxge_hw_p = NULL;
1020 }
1021
1022 #if defined(sun4v)
1023 if (nxgep->niu_type == N2_NIU && nxgep->niu_hsvc_available == B_TRUE) {
1024 (void) hsvc_unregister(&nxgep->niu_hsvc);
1025 nxgep->niu_hsvc_available = B_FALSE;
1026 }
1027 #endif
1028 /*
1029 * Stop any further interrupts.
1030 */
1031 nxge_remove_intrs(nxgep);
1032
1033 /*
1034 * Stop the device and free resources.
1035 */
1036 if (!isLDOMguest(nxgep)) {
1037 nxge_destroy_dev(nxgep);
1038 }
1039
1040 /*
1041 * Tear down the ndd parameters setup.
1042 */
1043 nxge_destroy_param(nxgep);
1044
1045 /*
1046 * Tear down the kstat setup.
1047 */
1048 nxge_destroy_kstats(nxgep);
1049
1050 /*
1051 * Free any memory allocated for PHY properties
1052 */
1053 if (nxgep->phy_prop.cnt > 0) {
1054 KMEM_FREE(nxgep->phy_prop.arr,
1055 sizeof (nxge_phy_mdio_val_t) * nxgep->phy_prop.cnt);
1056 nxgep->phy_prop.cnt = 0;
1057 }
1058
1059 /*
1060 * Destroy all mutexes.
1061 */
1062 nxge_destroy_mutexes(nxgep);
1063
1064 /*
1065 * Remove the list of ndd parameters which
1066 * were setup during attach.
1067 */
1068 if (nxgep->dip) {
1069 NXGE_DEBUG_MSG((nxgep, OBP_CTL,
1070 " nxge_unattach: remove all properties"));
1071
1072 (void) ddi_prop_remove_all(nxgep->dip);
1073 }
1074
1075 #if NXGE_PROPERTY
1076 nxge_remove_hard_properties(nxgep);
1077 #endif
1078
1079 /*
1080 * Unmap the register setup.
1081 */
1082 nxge_unmap_regs(nxgep);
1083
1084 nxge_fm_fini(nxgep);
1085
1086 ddi_soft_state_free(nxge_list, nxgep->instance);
1087
1088 NXGE_DEBUG_MSG((NULL, DDI_CTL, "<== nxge_unattach"));
1089 }
1090
1091 #if defined(sun4v)
1092 int
nxge_hsvc_register(nxge_t * nxgep)1093 nxge_hsvc_register(nxge_t *nxgep)
1094 {
1095 nxge_status_t status;
1096 int i, j;
1097
1098 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_hsvc_register"));
1099 if (nxgep->niu_type != N2_NIU) {
1100 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_hsvc_register"));
1101 return (DDI_SUCCESS);
1102 }
1103
1104 /*
1105 * Currently, the NIU Hypervisor API supports two major versions:
1106 * version 1 and 2.
1107 * If Hypervisor introduces a higher major or minor version,
1108 * please update NIU_MAJOR_HI and NIU_MINOR_HI accordingly.
1109 */
1110 nxgep->niu_hsvc_available = B_FALSE;
1111 bcopy(&niu_hsvc, &nxgep->niu_hsvc,
1112 sizeof (hsvc_info_t));
1113
1114 for (i = NIU_MAJOR_HI; i > 0; i--) {
1115 nxgep->niu_hsvc.hsvc_major = i;
1116 for (j = NIU_MINOR_HI; j >= 0; j--) {
1117 nxgep->niu_hsvc.hsvc_minor = j;
1118 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1119 "nxge_hsvc_register: %s: negotiating "
1120 "hypervisor services revision %d "
1121 "group: 0x%lx major: 0x%lx "
1122 "minor: 0x%lx",
1123 nxgep->niu_hsvc.hsvc_modname,
1124 nxgep->niu_hsvc.hsvc_rev,
1125 nxgep->niu_hsvc.hsvc_group,
1126 nxgep->niu_hsvc.hsvc_major,
1127 nxgep->niu_hsvc.hsvc_minor,
1128 nxgep->niu_min_ver));
1129
1130 if ((status = hsvc_register(&nxgep->niu_hsvc,
1131 &nxgep->niu_min_ver)) == 0) {
1132 /* Use the supported minor */
1133 nxgep->niu_hsvc.hsvc_minor = nxgep->niu_min_ver;
1134 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1135 "nxge_hsvc_register: %s: negotiated "
1136 "hypervisor services revision %d "
1137 "group: 0x%lx major: 0x%lx "
1138 "minor: 0x%lx (niu_min_ver 0x%lx)",
1139 nxgep->niu_hsvc.hsvc_modname,
1140 nxgep->niu_hsvc.hsvc_rev,
1141 nxgep->niu_hsvc.hsvc_group,
1142 nxgep->niu_hsvc.hsvc_major,
1143 nxgep->niu_hsvc.hsvc_minor,
1144 nxgep->niu_min_ver));
1145
1146 nxgep->niu_hsvc_available = B_TRUE;
1147 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1148 "<== nxge_hsvc_register: "
1149 "NIU Hypervisor service enabled"));
1150 return (DDI_SUCCESS);
1151 }
1152
1153 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1154 "nxge_hsvc_register: %s: negotiated failed - "
1155 "try lower major number "
1156 "hypervisor services revision %d "
1157 "group: 0x%lx major: 0x%lx minor: 0x%lx "
1158 "errno: %d",
1159 nxgep->niu_hsvc.hsvc_modname,
1160 nxgep->niu_hsvc.hsvc_rev,
1161 nxgep->niu_hsvc.hsvc_group,
1162 nxgep->niu_hsvc.hsvc_major,
1163 nxgep->niu_hsvc.hsvc_minor, status));
1164 }
1165 }
1166
1167 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1168 "nxge_hsvc_register: %s: cannot negotiate "
1169 "hypervisor services revision %d group: 0x%lx "
1170 "major: 0x%lx minor: 0x%lx errno: %d",
1171 niu_hsvc.hsvc_modname, niu_hsvc.hsvc_rev,
1172 niu_hsvc.hsvc_group, niu_hsvc.hsvc_major,
1173 niu_hsvc.hsvc_minor, status));
1174
1175 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1176 "<== nxge_hsvc_register: Register to NIU Hypervisor failed"));
1177
1178 return (DDI_FAILURE);
1179 }
1180 #endif
1181
1182 static char n2_siu_name[] = "niu";
1183
1184 static nxge_status_t
nxge_map_regs(p_nxge_t nxgep)1185 nxge_map_regs(p_nxge_t nxgep)
1186 {
1187 int ddi_status = DDI_SUCCESS;
1188 p_dev_regs_t dev_regs;
1189 char buf[MAXPATHLEN + 1];
1190 char *devname;
1191 #ifdef NXGE_DEBUG
1192 char *sysname;
1193 #endif
1194 off_t regsize;
1195 nxge_status_t status = NXGE_OK;
1196 #if !defined(_BIG_ENDIAN)
1197 off_t pci_offset;
1198 uint16_t pcie_devctl;
1199 #endif
1200
1201 if (isLDOMguest(nxgep)) {
1202 return (nxge_guest_regs_map(nxgep));
1203 }
1204
1205 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_map_regs"));
1206 nxgep->dev_regs = NULL;
1207 dev_regs = KMEM_ZALLOC(sizeof (dev_regs_t), KM_SLEEP);
1208 dev_regs->nxge_regh = NULL;
1209 dev_regs->nxge_pciregh = NULL;
1210 dev_regs->nxge_msix_regh = NULL;
1211 dev_regs->nxge_vir_regh = NULL;
1212 dev_regs->nxge_vir2_regh = NULL;
1213 nxgep->niu_type = NIU_TYPE_NONE;
1214
1215 devname = ddi_pathname(nxgep->dip, buf);
1216 ASSERT(strlen(devname) > 0);
1217 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1218 "nxge_map_regs: pathname devname %s", devname));
1219
1220 /*
1221 * The driver is running on a N2-NIU system if devname is something
1222 * like "/niu@80/network@0"
1223 */
1224 if (strstr(devname, n2_siu_name)) {
1225 /* N2/NIU */
1226 nxgep->niu_type = N2_NIU;
1227 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1228 "nxge_map_regs: N2/NIU devname %s", devname));
1229 /*
1230 * Get function number:
1231 * - N2/NIU: "/niu@80/network@0" and "/niu@80/network@1"
1232 */
1233 nxgep->function_num =
1234 (devname[strlen(devname) -1] == '1' ? 1 : 0);
1235 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1236 "nxge_map_regs: N2/NIU function number %d",
1237 nxgep->function_num));
1238 } else {
1239 int *prop_val;
1240 uint_t prop_len;
1241 uint8_t func_num;
1242
1243 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, nxgep->dip,
1244 0, "reg",
1245 &prop_val, &prop_len) != DDI_PROP_SUCCESS) {
1246 NXGE_DEBUG_MSG((nxgep, VPD_CTL,
1247 "Reg property not found"));
1248 ddi_status = DDI_FAILURE;
1249 goto nxge_map_regs_fail0;
1250
1251 } else {
1252 func_num = (prop_val[0] >> 8) & 0x7;
1253 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1254 "Reg property found: fun # %d",
1255 func_num));
1256 nxgep->function_num = func_num;
1257 if (isLDOMguest(nxgep)) {
1258 nxgep->function_num /= 2;
1259 return (NXGE_OK);
1260 }
1261 ddi_prop_free(prop_val);
1262 }
1263 }
1264
1265 switch (nxgep->niu_type) {
1266 default:
1267 (void) ddi_dev_regsize(nxgep->dip, 0, ®size);
1268 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1269 "nxge_map_regs: pci config size 0x%x", regsize));
1270
1271 ddi_status = ddi_regs_map_setup(nxgep->dip, 0,
1272 (caddr_t *)&(dev_regs->nxge_pciregp), 0, 0,
1273 &nxge_dev_reg_acc_attr, &dev_regs->nxge_pciregh);
1274 if (ddi_status != DDI_SUCCESS) {
1275 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1276 "ddi_map_regs, nxge bus config regs failed"));
1277 goto nxge_map_regs_fail0;
1278 }
1279 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1280 "nxge_map_reg: PCI config addr 0x%0llx "
1281 " handle 0x%0llx", dev_regs->nxge_pciregp,
1282 dev_regs->nxge_pciregh));
1283 /*
1284 * IMP IMP
1285 * workaround for bit swapping bug in HW
1286 * which ends up in no-snoop = yes
1287 * resulting, in DMA not synched properly
1288 */
1289 #if !defined(_BIG_ENDIAN)
1290 /* workarounds for x86 systems */
1291 pci_offset = 0x80 + PCIE_DEVCTL;
1292 pcie_devctl = pci_config_get16(dev_regs->nxge_pciregh,
1293 pci_offset);
1294 pcie_devctl &= ~PCIE_DEVCTL_ENABLE_NO_SNOOP;
1295 pcie_devctl |= PCIE_DEVCTL_RO_EN;
1296 pci_config_put16(dev_regs->nxge_pciregh, pci_offset,
1297 pcie_devctl);
1298 #endif
1299
1300 (void) ddi_dev_regsize(nxgep->dip, 1, ®size);
1301 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1302 "nxge_map_regs: pio size 0x%x", regsize));
1303 /* set up the device mapped register */
1304 ddi_status = ddi_regs_map_setup(nxgep->dip, 1,
1305 (caddr_t *)&(dev_regs->nxge_regp), 0, 0,
1306 &nxge_dev_reg_acc_attr, &dev_regs->nxge_regh);
1307 if (ddi_status != DDI_SUCCESS) {
1308 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1309 "ddi_map_regs for Neptune global reg failed"));
1310 goto nxge_map_regs_fail1;
1311 }
1312
1313 /* set up the msi/msi-x mapped register */
1314 (void) ddi_dev_regsize(nxgep->dip, 2, ®size);
1315 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1316 "nxge_map_regs: msix size 0x%x", regsize));
1317 ddi_status = ddi_regs_map_setup(nxgep->dip, 2,
1318 (caddr_t *)&(dev_regs->nxge_msix_regp), 0, 0,
1319 &nxge_dev_reg_acc_attr, &dev_regs->nxge_msix_regh);
1320 if (ddi_status != DDI_SUCCESS) {
1321 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1322 "ddi_map_regs for msi reg failed"));
1323 goto nxge_map_regs_fail2;
1324 }
1325
1326 /* set up the vio region mapped register */
1327 (void) ddi_dev_regsize(nxgep->dip, 3, ®size);
1328 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1329 "nxge_map_regs: vio size 0x%x", regsize));
1330 ddi_status = ddi_regs_map_setup(nxgep->dip, 3,
1331 (caddr_t *)&(dev_regs->nxge_vir_regp), 0, 0,
1332 &nxge_dev_reg_acc_attr, &dev_regs->nxge_vir_regh);
1333
1334 if (ddi_status != DDI_SUCCESS) {
1335 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1336 "ddi_map_regs for nxge vio reg failed"));
1337 goto nxge_map_regs_fail3;
1338 }
1339 nxgep->dev_regs = dev_regs;
1340
1341 NPI_PCI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_pciregh);
1342 NPI_PCI_ADD_HANDLE_SET(nxgep,
1343 (npi_reg_ptr_t)dev_regs->nxge_pciregp);
1344 NPI_MSI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_msix_regh);
1345 NPI_MSI_ADD_HANDLE_SET(nxgep,
1346 (npi_reg_ptr_t)dev_regs->nxge_msix_regp);
1347
1348 NPI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh);
1349 NPI_ADD_HANDLE_SET(nxgep, (npi_reg_ptr_t)dev_regs->nxge_regp);
1350
1351 NPI_REG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh);
1352 NPI_REG_ADD_HANDLE_SET(nxgep,
1353 (npi_reg_ptr_t)dev_regs->nxge_regp);
1354
1355 NPI_VREG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_vir_regh);
1356 NPI_VREG_ADD_HANDLE_SET(nxgep,
1357 (npi_reg_ptr_t)dev_regs->nxge_vir_regp);
1358
1359 break;
1360
1361 case N2_NIU:
1362 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "ddi_map_regs, NIU"));
1363 /*
1364 * Set up the device mapped register (FWARC 2006/556)
1365 * (changed back to 1: reg starts at 1!)
1366 */
1367 (void) ddi_dev_regsize(nxgep->dip, 1, ®size);
1368 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1369 "nxge_map_regs: dev size 0x%x", regsize));
1370 ddi_status = ddi_regs_map_setup(nxgep->dip, 1,
1371 (caddr_t *)&(dev_regs->nxge_regp), 0, 0,
1372 &nxge_dev_reg_acc_attr, &dev_regs->nxge_regh);
1373
1374 if (ddi_status != DDI_SUCCESS) {
1375 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1376 "ddi_map_regs for N2/NIU, global reg failed "));
1377 goto nxge_map_regs_fail1;
1378 }
1379
1380 /* set up the first vio region mapped register */
1381 (void) ddi_dev_regsize(nxgep->dip, 2, ®size);
1382 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1383 "nxge_map_regs: vio (1) size 0x%x", regsize));
1384 ddi_status = ddi_regs_map_setup(nxgep->dip, 2,
1385 (caddr_t *)&(dev_regs->nxge_vir_regp), 0, 0,
1386 &nxge_dev_reg_acc_attr, &dev_regs->nxge_vir_regh);
1387
1388 if (ddi_status != DDI_SUCCESS) {
1389 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1390 "ddi_map_regs for nxge vio reg failed"));
1391 goto nxge_map_regs_fail2;
1392 }
1393 /* set up the second vio region mapped register */
1394 (void) ddi_dev_regsize(nxgep->dip, 3, ®size);
1395 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1396 "nxge_map_regs: vio (3) size 0x%x", regsize));
1397 ddi_status = ddi_regs_map_setup(nxgep->dip, 3,
1398 (caddr_t *)&(dev_regs->nxge_vir2_regp), 0, 0,
1399 &nxge_dev_reg_acc_attr, &dev_regs->nxge_vir2_regh);
1400
1401 if (ddi_status != DDI_SUCCESS) {
1402 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1403 "ddi_map_regs for nxge vio2 reg failed"));
1404 goto nxge_map_regs_fail3;
1405 }
1406 nxgep->dev_regs = dev_regs;
1407
1408 NPI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh);
1409 NPI_ADD_HANDLE_SET(nxgep, (npi_reg_ptr_t)dev_regs->nxge_regp);
1410
1411 NPI_REG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh);
1412 NPI_REG_ADD_HANDLE_SET(nxgep,
1413 (npi_reg_ptr_t)dev_regs->nxge_regp);
1414
1415 NPI_VREG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_vir_regh);
1416 NPI_VREG_ADD_HANDLE_SET(nxgep,
1417 (npi_reg_ptr_t)dev_regs->nxge_vir_regp);
1418
1419 NPI_V2REG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_vir2_regh);
1420 NPI_V2REG_ADD_HANDLE_SET(nxgep,
1421 (npi_reg_ptr_t)dev_regs->nxge_vir2_regp);
1422
1423 break;
1424 }
1425
1426 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "nxge_map_reg: hardware addr 0x%0llx "
1427 " handle 0x%0llx", dev_regs->nxge_regp, dev_regs->nxge_regh));
1428
1429 goto nxge_map_regs_exit;
1430 nxge_map_regs_fail3:
1431 if (dev_regs->nxge_msix_regh) {
1432 ddi_regs_map_free(&dev_regs->nxge_msix_regh);
1433 }
1434 if (dev_regs->nxge_vir_regh) {
1435 ddi_regs_map_free(&dev_regs->nxge_regh);
1436 }
1437 nxge_map_regs_fail2:
1438 if (dev_regs->nxge_regh) {
1439 ddi_regs_map_free(&dev_regs->nxge_regh);
1440 }
1441 nxge_map_regs_fail1:
1442 if (dev_regs->nxge_pciregh) {
1443 ddi_regs_map_free(&dev_regs->nxge_pciregh);
1444 }
1445 nxge_map_regs_fail0:
1446 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "Freeing register set memory"));
1447 kmem_free(dev_regs, sizeof (dev_regs_t));
1448
1449 nxge_map_regs_exit:
1450 if (ddi_status != DDI_SUCCESS)
1451 status |= (NXGE_ERROR | NXGE_DDI_FAILED);
1452 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_map_regs"));
1453 return (status);
1454 }
1455
1456 static void
nxge_unmap_regs(p_nxge_t nxgep)1457 nxge_unmap_regs(p_nxge_t nxgep)
1458 {
1459 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_unmap_regs"));
1460
1461 if (isLDOMguest(nxgep)) {
1462 nxge_guest_regs_map_free(nxgep);
1463 return;
1464 }
1465
1466 if (nxgep->dev_regs) {
1467 if (nxgep->dev_regs->nxge_pciregh) {
1468 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1469 "==> nxge_unmap_regs: bus"));
1470 ddi_regs_map_free(&nxgep->dev_regs->nxge_pciregh);
1471 nxgep->dev_regs->nxge_pciregh = NULL;
1472 }
1473 if (nxgep->dev_regs->nxge_regh) {
1474 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1475 "==> nxge_unmap_regs: device registers"));
1476 ddi_regs_map_free(&nxgep->dev_regs->nxge_regh);
1477 nxgep->dev_regs->nxge_regh = NULL;
1478 }
1479 if (nxgep->dev_regs->nxge_msix_regh) {
1480 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1481 "==> nxge_unmap_regs: device interrupts"));
1482 ddi_regs_map_free(&nxgep->dev_regs->nxge_msix_regh);
1483 nxgep->dev_regs->nxge_msix_regh = NULL;
1484 }
1485 if (nxgep->dev_regs->nxge_vir_regh) {
1486 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1487 "==> nxge_unmap_regs: vio region"));
1488 ddi_regs_map_free(&nxgep->dev_regs->nxge_vir_regh);
1489 nxgep->dev_regs->nxge_vir_regh = NULL;
1490 }
1491 if (nxgep->dev_regs->nxge_vir2_regh) {
1492 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1493 "==> nxge_unmap_regs: vio2 region"));
1494 ddi_regs_map_free(&nxgep->dev_regs->nxge_vir2_regh);
1495 nxgep->dev_regs->nxge_vir2_regh = NULL;
1496 }
1497
1498 kmem_free(nxgep->dev_regs, sizeof (dev_regs_t));
1499 nxgep->dev_regs = NULL;
1500 }
1501
1502 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_unmap_regs"));
1503 }
1504
1505 static nxge_status_t
nxge_setup_mutexes(p_nxge_t nxgep)1506 nxge_setup_mutexes(p_nxge_t nxgep)
1507 {
1508 int ddi_status = DDI_SUCCESS;
1509 nxge_status_t status = NXGE_OK;
1510 nxge_classify_t *classify_ptr;
1511 int partition;
1512
1513 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_setup_mutexes"));
1514
1515 /*
1516 * Get the interrupt cookie so the mutexes can be
1517 * Initialized.
1518 */
1519 if (isLDOMguest(nxgep)) {
1520 nxgep->interrupt_cookie = 0;
1521 } else {
1522 ddi_status = ddi_get_iblock_cookie(nxgep->dip, 0,
1523 &nxgep->interrupt_cookie);
1524
1525 if (ddi_status != DDI_SUCCESS) {
1526 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1527 "<== nxge_setup_mutexes: failed 0x%x",
1528 ddi_status));
1529 goto nxge_setup_mutexes_exit;
1530 }
1531 }
1532
1533 cv_init(&nxgep->poll_cv, NULL, CV_DRIVER, NULL);
1534 MUTEX_INIT(&nxgep->poll_lock, NULL,
1535 MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
1536
1537 /*
1538 * Initialize mutexes for this device.
1539 */
1540 MUTEX_INIT(nxgep->genlock, NULL,
1541 MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
1542 MUTEX_INIT(&nxgep->ouraddr_lock, NULL,
1543 MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
1544 MUTEX_INIT(&nxgep->mif_lock, NULL,
1545 MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
1546 MUTEX_INIT(&nxgep->group_lock, NULL,
1547 MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
1548 RW_INIT(&nxgep->filter_lock, NULL,
1549 RW_DRIVER, (void *)nxgep->interrupt_cookie);
1550
1551 classify_ptr = &nxgep->classifier;
1552 /*
1553 * FFLP Mutexes are never used in interrupt context
1554 * as fflp operation can take very long time to
1555 * complete and hence not suitable to invoke from interrupt
1556 * handlers.
1557 */
1558 MUTEX_INIT(&classify_ptr->tcam_lock, NULL,
1559 NXGE_MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
1560 if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep)) {
1561 MUTEX_INIT(&classify_ptr->fcram_lock, NULL,
1562 NXGE_MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
1563 for (partition = 0; partition < MAX_PARTITION; partition++) {
1564 MUTEX_INIT(&classify_ptr->hash_lock[partition], NULL,
1565 NXGE_MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
1566 }
1567 }
1568
1569 nxge_setup_mutexes_exit:
1570 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1571 "<== nxge_setup_mutexes status = %x", status));
1572
1573 if (ddi_status != DDI_SUCCESS)
1574 status |= (NXGE_ERROR | NXGE_DDI_FAILED);
1575
1576 return (status);
1577 }
1578
1579 static void
nxge_destroy_mutexes(p_nxge_t nxgep)1580 nxge_destroy_mutexes(p_nxge_t nxgep)
1581 {
1582 int partition;
1583 nxge_classify_t *classify_ptr;
1584
1585 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_destroy_mutexes"));
1586 RW_DESTROY(&nxgep->filter_lock);
1587 MUTEX_DESTROY(&nxgep->group_lock);
1588 MUTEX_DESTROY(&nxgep->mif_lock);
1589 MUTEX_DESTROY(&nxgep->ouraddr_lock);
1590 MUTEX_DESTROY(nxgep->genlock);
1591
1592 classify_ptr = &nxgep->classifier;
1593 MUTEX_DESTROY(&classify_ptr->tcam_lock);
1594
1595 /* Destroy all polling resources. */
1596 MUTEX_DESTROY(&nxgep->poll_lock);
1597 cv_destroy(&nxgep->poll_cv);
1598
1599 /* free data structures, based on HW type */
1600 if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep)) {
1601 MUTEX_DESTROY(&classify_ptr->fcram_lock);
1602 for (partition = 0; partition < MAX_PARTITION; partition++) {
1603 MUTEX_DESTROY(&classify_ptr->hash_lock[partition]);
1604 }
1605 }
1606
1607 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_destroy_mutexes"));
1608 }
1609
1610 nxge_status_t
nxge_init(p_nxge_t nxgep)1611 nxge_init(p_nxge_t nxgep)
1612 {
1613 nxge_status_t status = NXGE_OK;
1614
1615 NXGE_DEBUG_MSG((nxgep, STR_CTL, "==> nxge_init"));
1616
1617 if (nxgep->drv_state & STATE_HW_INITIALIZED) {
1618 return (status);
1619 }
1620
1621 /*
1622 * Allocate system memory for the receive/transmit buffer blocks
1623 * and receive/transmit descriptor rings.
1624 */
1625 status = nxge_alloc_mem_pool(nxgep);
1626 if (status != NXGE_OK) {
1627 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "alloc mem failed\n"));
1628 goto nxge_init_fail1;
1629 }
1630
1631 if (!isLDOMguest(nxgep)) {
1632 /*
1633 * Initialize and enable the TXC registers.
1634 * (Globally enable the Tx controller,
1635 * enable the port, configure the dma channel bitmap,
1636 * configure the max burst size).
1637 */
1638 status = nxge_txc_init(nxgep);
1639 if (status != NXGE_OK) {
1640 NXGE_ERROR_MSG((nxgep,
1641 NXGE_ERR_CTL, "init txc failed\n"));
1642 goto nxge_init_fail2;
1643 }
1644 }
1645
1646 /*
1647 * Initialize and enable TXDMA channels.
1648 */
1649 status = nxge_init_txdma_channels(nxgep);
1650 if (status != NXGE_OK) {
1651 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init txdma failed\n"));
1652 goto nxge_init_fail3;
1653 }
1654
1655 /*
1656 * Initialize and enable RXDMA channels.
1657 */
1658 status = nxge_init_rxdma_channels(nxgep);
1659 if (status != NXGE_OK) {
1660 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init rxdma failed\n"));
1661 goto nxge_init_fail4;
1662 }
1663
1664 /*
1665 * The guest domain is now done.
1666 */
1667 if (isLDOMguest(nxgep)) {
1668 nxgep->drv_state |= STATE_HW_INITIALIZED;
1669 goto nxge_init_exit;
1670 }
1671
1672 /*
1673 * Initialize TCAM and FCRAM (Neptune).
1674 */
1675 status = nxge_classify_init(nxgep);
1676 if (status != NXGE_OK) {
1677 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init classify failed\n"));
1678 goto nxge_init_fail5;
1679 }
1680
1681 /*
1682 * Initialize ZCP
1683 */
1684 status = nxge_zcp_init(nxgep);
1685 if (status != NXGE_OK) {
1686 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init ZCP failed\n"));
1687 goto nxge_init_fail5;
1688 }
1689
1690 /*
1691 * Initialize IPP.
1692 */
1693 status = nxge_ipp_init(nxgep);
1694 if (status != NXGE_OK) {
1695 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init IPP failed\n"));
1696 goto nxge_init_fail5;
1697 }
1698
1699 /*
1700 * Initialize the MAC block.
1701 */
1702 status = nxge_mac_init(nxgep);
1703 if (status != NXGE_OK) {
1704 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init MAC failed\n"));
1705 goto nxge_init_fail5;
1706 }
1707
1708 /*
1709 * Enable the interrrupts for DDI.
1710 */
1711 nxge_intrs_enable(nxgep);
1712
1713 nxgep->drv_state |= STATE_HW_INITIALIZED;
1714
1715 goto nxge_init_exit;
1716
1717 nxge_init_fail5:
1718 nxge_uninit_rxdma_channels(nxgep);
1719 nxge_init_fail4:
1720 nxge_uninit_txdma_channels(nxgep);
1721 nxge_init_fail3:
1722 if (!isLDOMguest(nxgep)) {
1723 (void) nxge_txc_uninit(nxgep);
1724 }
1725 nxge_init_fail2:
1726 nxge_free_mem_pool(nxgep);
1727 nxge_init_fail1:
1728 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1729 "<== nxge_init status (failed) = 0x%08x", status));
1730 return (status);
1731
1732 nxge_init_exit:
1733 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_init status = 0x%08x",
1734 status));
1735 return (status);
1736 }
1737
1738
1739 timeout_id_t
nxge_start_timer(p_nxge_t nxgep,fptrv_t func,int msec)1740 nxge_start_timer(p_nxge_t nxgep, fptrv_t func, int msec)
1741 {
1742 if ((nxgep->suspended == 0) || (nxgep->suspended == DDI_RESUME)) {
1743 return (timeout(func, (caddr_t)nxgep,
1744 drv_usectohz(1000 * msec)));
1745 }
1746 return (NULL);
1747 }
1748
1749 /*ARGSUSED*/
1750 void
nxge_stop_timer(p_nxge_t nxgep,timeout_id_t timerid)1751 nxge_stop_timer(p_nxge_t nxgep, timeout_id_t timerid)
1752 {
1753 if (timerid) {
1754 (void) untimeout(timerid);
1755 }
1756 }
1757
1758 void
nxge_uninit(p_nxge_t nxgep)1759 nxge_uninit(p_nxge_t nxgep)
1760 {
1761 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_uninit"));
1762
1763 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
1764 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1765 "==> nxge_uninit: not initialized"));
1766 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1767 "<== nxge_uninit"));
1768 return;
1769 }
1770
1771 if (!isLDOMguest(nxgep)) {
1772 /*
1773 * Reset the receive MAC side.
1774 */
1775 (void) nxge_rx_mac_disable(nxgep);
1776
1777 /*
1778 * Drain the IPP.
1779 */
1780 (void) nxge_ipp_drain(nxgep);
1781 }
1782
1783 /* stop timer */
1784 if (nxgep->nxge_timerid) {
1785 nxge_stop_timer(nxgep, nxgep->nxge_timerid);
1786 nxgep->nxge_timerid = 0;
1787 }
1788
1789 (void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);
1790 (void) nxge_intr_hw_disable(nxgep);
1791
1792
1793 /* Disable and soft reset the IPP */
1794 if (!isLDOMguest(nxgep))
1795 (void) nxge_ipp_disable(nxgep);
1796
1797 /* Free classification resources */
1798 (void) nxge_classify_uninit(nxgep);
1799
1800 /*
1801 * Reset the transmit/receive DMA side.
1802 */
1803 (void) nxge_txdma_hw_mode(nxgep, NXGE_DMA_STOP);
1804 (void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_STOP);
1805
1806 nxge_uninit_txdma_channels(nxgep);
1807 nxge_uninit_rxdma_channels(nxgep);
1808
1809 /*
1810 * Reset the transmit MAC side.
1811 */
1812 (void) nxge_tx_mac_disable(nxgep);
1813
1814 nxge_free_mem_pool(nxgep);
1815
1816 /*
1817 * Start the timer if the reset flag is not set.
1818 * If this reset flag is set, the link monitor
1819 * will not be started in order to stop furthur bus
1820 * activities coming from this interface.
1821 * The driver will start the monitor function
1822 * if the interface was initialized again later.
1823 */
1824 if (!nxge_peu_reset_enable) {
1825 (void) nxge_link_monitor(nxgep, LINK_MONITOR_START);
1826 }
1827
1828 nxgep->drv_state &= ~STATE_HW_INITIALIZED;
1829
1830 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_uninit: "
1831 "nxge_mblks_pending %d", nxge_mblks_pending));
1832 }
1833
1834 void
nxge_get64(p_nxge_t nxgep,p_mblk_t mp)1835 nxge_get64(p_nxge_t nxgep, p_mblk_t mp)
1836 {
1837 uint64_t reg;
1838 uint64_t regdata;
1839 int i, retry;
1840
1841 bcopy((char *)mp->b_rptr, (char *)®, sizeof (uint64_t));
1842 regdata = 0;
1843 retry = 1;
1844
1845 for (i = 0; i < retry; i++) {
1846 NXGE_REG_RD64(nxgep->npi_handle, reg, ®data);
1847 }
1848 bcopy((char *)®data, (char *)mp->b_rptr, sizeof (uint64_t));
1849 }
1850
1851 void
nxge_put64(p_nxge_t nxgep,p_mblk_t mp)1852 nxge_put64(p_nxge_t nxgep, p_mblk_t mp)
1853 {
1854 uint64_t reg;
1855 uint64_t buf[2];
1856
1857 bcopy((char *)mp->b_rptr, (char *)&buf[0], 2 * sizeof (uint64_t));
1858 reg = buf[0];
1859
1860 NXGE_NPI_PIO_WRITE64(nxgep->npi_handle, reg, buf[1]);
1861 }
1862
1863 /*ARGSUSED*/
1864 /*VARARGS*/
1865 void
nxge_debug_msg(p_nxge_t nxgep,uint64_t level,char * fmt,...)1866 nxge_debug_msg(p_nxge_t nxgep, uint64_t level, char *fmt, ...)
1867 {
1868 char msg_buffer[1048];
1869 char prefix_buffer[32];
1870 int instance;
1871 uint64_t debug_level;
1872 int cmn_level = CE_CONT;
1873 va_list ap;
1874
1875 if (nxgep && nxgep->nxge_debug_level != nxge_debug_level) {
1876 /* In case a developer has changed nxge_debug_level. */
1877 if (nxgep->nxge_debug_level != nxge_debug_level)
1878 nxgep->nxge_debug_level = nxge_debug_level;
1879 }
1880
1881 debug_level = (nxgep == NULL) ? nxge_debug_level :
1882 nxgep->nxge_debug_level;
1883
1884 if ((level & debug_level) ||
1885 (level == NXGE_NOTE) ||
1886 (level == NXGE_ERR_CTL)) {
1887 /* do the msg processing */
1888 MUTEX_ENTER(&nxgedebuglock);
1889
1890 if ((level & NXGE_NOTE)) {
1891 cmn_level = CE_NOTE;
1892 }
1893
1894 if (level & NXGE_ERR_CTL) {
1895 cmn_level = CE_WARN;
1896 }
1897
1898 va_start(ap, fmt);
1899 (void) vsprintf(msg_buffer, fmt, ap);
1900 va_end(ap);
1901 if (nxgep == NULL) {
1902 instance = -1;
1903 (void) sprintf(prefix_buffer, "%s :", "nxge");
1904 } else {
1905 instance = nxgep->instance;
1906 (void) sprintf(prefix_buffer,
1907 "%s%d :", "nxge", instance);
1908 }
1909
1910 MUTEX_EXIT(&nxgedebuglock);
1911 cmn_err(cmn_level, "!%s %s\n",
1912 prefix_buffer, msg_buffer);
1913
1914 }
1915 }
1916
1917 char *
nxge_dump_packet(char * addr,int size)1918 nxge_dump_packet(char *addr, int size)
1919 {
1920 uchar_t *ap = (uchar_t *)addr;
1921 int i;
1922 static char etherbuf[1024];
1923 char *cp = etherbuf;
1924 char digits[] = "0123456789abcdef";
1925
1926 if (!size)
1927 size = 60;
1928
1929 if (size > MAX_DUMP_SZ) {
1930 /* Dump the leading bytes */
1931 for (i = 0; i < MAX_DUMP_SZ/2; i++) {
1932 if (*ap > 0x0f)
1933 *cp++ = digits[*ap >> 4];
1934 *cp++ = digits[*ap++ & 0xf];
1935 *cp++ = ':';
1936 }
1937 for (i = 0; i < 20; i++)
1938 *cp++ = '.';
1939 /* Dump the last MAX_DUMP_SZ/2 bytes */
1940 ap = (uchar_t *)(addr + (size - MAX_DUMP_SZ/2));
1941 for (i = 0; i < MAX_DUMP_SZ/2; i++) {
1942 if (*ap > 0x0f)
1943 *cp++ = digits[*ap >> 4];
1944 *cp++ = digits[*ap++ & 0xf];
1945 *cp++ = ':';
1946 }
1947 } else {
1948 for (i = 0; i < size; i++) {
1949 if (*ap > 0x0f)
1950 *cp++ = digits[*ap >> 4];
1951 *cp++ = digits[*ap++ & 0xf];
1952 *cp++ = ':';
1953 }
1954 }
1955 *--cp = 0;
1956 return (etherbuf);
1957 }
1958
1959 #ifdef NXGE_DEBUG
1960 static void
nxge_test_map_regs(p_nxge_t nxgep)1961 nxge_test_map_regs(p_nxge_t nxgep)
1962 {
1963 ddi_acc_handle_t cfg_handle;
1964 p_pci_cfg_t cfg_ptr;
1965 ddi_acc_handle_t dev_handle;
1966 char *dev_ptr;
1967 ddi_acc_handle_t pci_config_handle;
1968 uint32_t regval;
1969 int i;
1970
1971 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_test_map_regs"));
1972
1973 dev_handle = nxgep->dev_regs->nxge_regh;
1974 dev_ptr = (char *)nxgep->dev_regs->nxge_regp;
1975
1976 if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep)) {
1977 cfg_handle = nxgep->dev_regs->nxge_pciregh;
1978 cfg_ptr = (void *)nxgep->dev_regs->nxge_pciregp;
1979
1980 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1981 "Neptune PCI regp cfg_ptr 0x%llx", (char *)cfg_ptr));
1982 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1983 "Neptune PCI cfg_ptr vendor id ptr 0x%llx",
1984 &cfg_ptr->vendorid));
1985 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1986 "\tvendorid 0x%x devid 0x%x",
1987 NXGE_PIO_READ16(cfg_handle, &cfg_ptr->vendorid, 0),
1988 NXGE_PIO_READ16(cfg_handle, &cfg_ptr->devid, 0)));
1989 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1990 "PCI BAR: base 0x%x base14 0x%x base 18 0x%x "
1991 "bar1c 0x%x",
1992 NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base, 0),
1993 NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base14, 0),
1994 NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base18, 0),
1995 NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base1c, 0)));
1996 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1997 "\nNeptune PCI BAR: base20 0x%x base24 0x%x "
1998 "base 28 0x%x bar2c 0x%x\n",
1999 NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base20, 0),
2000 NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base24, 0),
2001 NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base28, 0),
2002 NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base2c, 0)));
2003 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
2004 "\nNeptune PCI BAR: base30 0x%x\n",
2005 NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base30, 0)));
2006
2007 cfg_handle = nxgep->dev_regs->nxge_pciregh;
2008 cfg_ptr = (void *)nxgep->dev_regs->nxge_pciregp;
2009 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
2010 "first 0x%llx second 0x%llx third 0x%llx "
2011 "last 0x%llx ",
2012 NXGE_PIO_READ64(dev_handle,
2013 (uint64_t *)(dev_ptr + 0), 0),
2014 NXGE_PIO_READ64(dev_handle,
2015 (uint64_t *)(dev_ptr + 8), 0),
2016 NXGE_PIO_READ64(dev_handle,
2017 (uint64_t *)(dev_ptr + 16), 0),
2018 NXGE_PIO_READ64(cfg_handle,
2019 (uint64_t *)(dev_ptr + 24), 0)));
2020 }
2021 }
2022
2023 #endif
2024
2025 static void
nxge_suspend(p_nxge_t nxgep)2026 nxge_suspend(p_nxge_t nxgep)
2027 {
2028 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_suspend"));
2029
2030 nxge_intrs_disable(nxgep);
2031 nxge_destroy_dev(nxgep);
2032
2033 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_suspend"));
2034 }
2035
2036 static nxge_status_t
nxge_resume(p_nxge_t nxgep)2037 nxge_resume(p_nxge_t nxgep)
2038 {
2039 nxge_status_t status = NXGE_OK;
2040
2041 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_resume"));
2042
2043 nxgep->suspended = DDI_RESUME;
2044 (void) nxge_link_monitor(nxgep, LINK_MONITOR_START);
2045 (void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_START);
2046 (void) nxge_txdma_hw_mode(nxgep, NXGE_DMA_START);
2047 (void) nxge_rx_mac_enable(nxgep);
2048 (void) nxge_tx_mac_enable(nxgep);
2049 nxge_intrs_enable(nxgep);
2050 nxgep->suspended = 0;
2051
2052 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
2053 "<== nxge_resume status = 0x%x", status));
2054 return (status);
2055 }
2056
2057 static nxge_status_t
nxge_setup_dev(p_nxge_t nxgep)2058 nxge_setup_dev(p_nxge_t nxgep)
2059 {
2060 nxge_status_t status = NXGE_OK;
2061
2062 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_setup_dev port %d",
2063 nxgep->mac.portnum));
2064
2065 status = nxge_link_init(nxgep);
2066
2067 if (fm_check_acc_handle(nxgep->dev_regs->nxge_regh) != DDI_FM_OK) {
2068 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2069 "port%d Bad register acc handle", nxgep->mac.portnum));
2070 status = NXGE_ERROR;
2071 }
2072
2073 if (status != NXGE_OK) {
2074 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2075 " nxge_setup_dev status "
2076 "(xcvr init 0x%08x)", status));
2077 goto nxge_setup_dev_exit;
2078 }
2079
2080 nxge_setup_dev_exit:
2081 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
2082 "<== nxge_setup_dev port %d status = 0x%08x",
2083 nxgep->mac.portnum, status));
2084
2085 return (status);
2086 }
2087
2088 static void
nxge_destroy_dev(p_nxge_t nxgep)2089 nxge_destroy_dev(p_nxge_t nxgep)
2090 {
2091 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_destroy_dev"));
2092
2093 (void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);
2094
2095 (void) nxge_hw_stop(nxgep);
2096
2097 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_destroy_dev"));
2098 }
2099
2100 static nxge_status_t
nxge_setup_system_dma_pages(p_nxge_t nxgep)2101 nxge_setup_system_dma_pages(p_nxge_t nxgep)
2102 {
2103 int ddi_status = DDI_SUCCESS;
2104 uint_t count;
2105 ddi_dma_cookie_t cookie;
2106 uint_t iommu_pagesize;
2107 nxge_status_t status = NXGE_OK;
2108
2109 NXGE_ERROR_MSG((nxgep, DDI_CTL, "==> nxge_setup_system_dma_pages"));
2110 nxgep->sys_page_sz = ddi_ptob(nxgep->dip, (ulong_t)1);
2111 if (nxgep->niu_type != N2_NIU) {
2112 iommu_pagesize = dvma_pagesize(nxgep->dip);
2113 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
2114 " nxge_setup_system_dma_pages: page %d (ddi_ptob %d) "
2115 " default_block_size %d iommu_pagesize %d",
2116 nxgep->sys_page_sz,
2117 ddi_ptob(nxgep->dip, (ulong_t)1),
2118 nxgep->rx_default_block_size,
2119 iommu_pagesize));
2120
2121 if (iommu_pagesize != 0) {
2122 if (nxgep->sys_page_sz == iommu_pagesize) {
2123 if (iommu_pagesize > 0x4000)
2124 nxgep->sys_page_sz = 0x4000;
2125 } else {
2126 if (nxgep->sys_page_sz > iommu_pagesize)
2127 nxgep->sys_page_sz = iommu_pagesize;
2128 }
2129 }
2130 }
2131 nxgep->sys_page_mask = ~(nxgep->sys_page_sz - 1);
2132 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
2133 "==> nxge_setup_system_dma_pages: page %d (ddi_ptob %d) "
2134 "default_block_size %d page mask %d",
2135 nxgep->sys_page_sz,
2136 ddi_ptob(nxgep->dip, (ulong_t)1),
2137 nxgep->rx_default_block_size,
2138 nxgep->sys_page_mask));
2139
2140
2141 switch (nxgep->sys_page_sz) {
2142 default:
2143 nxgep->sys_page_sz = 0x1000;
2144 nxgep->sys_page_mask = ~(nxgep->sys_page_sz - 1);
2145 nxgep->rx_default_block_size = 0x1000;
2146 nxgep->rx_bksize_code = RBR_BKSIZE_4K;
2147 break;
2148 case 0x1000:
2149 nxgep->rx_default_block_size = 0x1000;
2150 nxgep->rx_bksize_code = RBR_BKSIZE_4K;
2151 break;
2152 case 0x2000:
2153 nxgep->rx_default_block_size = 0x2000;
2154 nxgep->rx_bksize_code = RBR_BKSIZE_8K;
2155 break;
2156 case 0x4000:
2157 nxgep->rx_default_block_size = 0x4000;
2158 nxgep->rx_bksize_code = RBR_BKSIZE_16K;
2159 break;
2160 case 0x8000:
2161 nxgep->rx_default_block_size = 0x8000;
2162 nxgep->rx_bksize_code = RBR_BKSIZE_32K;
2163 break;
2164 }
2165
2166 #ifndef USE_RX_BIG_BUF
2167 nxge_rx_dma_attr.dma_attr_align = nxgep->sys_page_sz;
2168 #else
2169 nxgep->rx_default_block_size = 0x2000;
2170 nxgep->rx_bksize_code = RBR_BKSIZE_8K;
2171 #endif
2172 /*
2173 * Get the system DMA burst size.
2174 */
2175 ddi_status = ddi_dma_alloc_handle(nxgep->dip, &nxge_tx_dma_attr,
2176 DDI_DMA_DONTWAIT, 0,
2177 &nxgep->dmasparehandle);
2178 if (ddi_status != DDI_SUCCESS) {
2179 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2180 "ddi_dma_alloc_handle: failed "
2181 " status 0x%x", ddi_status));
2182 goto nxge_get_soft_properties_exit;
2183 }
2184
2185 ddi_status = ddi_dma_addr_bind_handle(nxgep->dmasparehandle, NULL,
2186 (caddr_t)nxgep->dmasparehandle,
2187 sizeof (nxgep->dmasparehandle),
2188 DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
2189 DDI_DMA_DONTWAIT, 0,
2190 &cookie, &count);
2191 if (ddi_status != DDI_DMA_MAPPED) {
2192 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2193 "Binding spare handle to find system"
2194 " burstsize failed."));
2195 ddi_status = DDI_FAILURE;
2196 goto nxge_get_soft_properties_fail1;
2197 }
2198
2199 nxgep->sys_burst_sz = ddi_dma_burstsizes(nxgep->dmasparehandle);
2200 (void) ddi_dma_unbind_handle(nxgep->dmasparehandle);
2201
2202 nxge_get_soft_properties_fail1:
2203 ddi_dma_free_handle(&nxgep->dmasparehandle);
2204
2205 nxge_get_soft_properties_exit:
2206
2207 if (ddi_status != DDI_SUCCESS)
2208 status |= (NXGE_ERROR | NXGE_DDI_FAILED);
2209
2210 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
2211 "<== nxge_setup_system_dma_pages status = 0x%08x", status));
2212 return (status);
2213 }
2214
2215 static nxge_status_t
nxge_alloc_mem_pool(p_nxge_t nxgep)2216 nxge_alloc_mem_pool(p_nxge_t nxgep)
2217 {
2218 nxge_status_t status = NXGE_OK;
2219
2220 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_alloc_mem_pool"));
2221
2222 status = nxge_alloc_rx_mem_pool(nxgep);
2223 if (status != NXGE_OK) {
2224 return (NXGE_ERROR);
2225 }
2226
2227 status = nxge_alloc_tx_mem_pool(nxgep);
2228 if (status != NXGE_OK) {
2229 nxge_free_rx_mem_pool(nxgep);
2230 return (NXGE_ERROR);
2231 }
2232
2233 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_alloc_mem_pool"));
2234 return (NXGE_OK);
2235 }
2236
2237 static void
nxge_free_mem_pool(p_nxge_t nxgep)2238 nxge_free_mem_pool(p_nxge_t nxgep)
2239 {
2240 NXGE_DEBUG_MSG((nxgep, MEM_CTL, "==> nxge_free_mem_pool"));
2241
2242 nxge_free_rx_mem_pool(nxgep);
2243 nxge_free_tx_mem_pool(nxgep);
2244
2245 NXGE_DEBUG_MSG((nxgep, MEM_CTL, "<== nxge_free_mem_pool"));
2246 }
2247
2248 nxge_status_t
nxge_alloc_rx_mem_pool(p_nxge_t nxgep)2249 nxge_alloc_rx_mem_pool(p_nxge_t nxgep)
2250 {
2251 uint32_t rdc_max;
2252 p_nxge_dma_pt_cfg_t p_all_cfgp;
2253 p_nxge_hw_pt_cfg_t p_cfgp;
2254 p_nxge_dma_pool_t dma_poolp;
2255 p_nxge_dma_common_t *dma_buf_p;
2256 p_nxge_dma_pool_t dma_cntl_poolp;
2257 p_nxge_dma_common_t *dma_cntl_p;
2258 uint32_t *num_chunks; /* per dma */
2259 nxge_status_t status = NXGE_OK;
2260
2261 uint32_t nxge_port_rbr_size;
2262 uint32_t nxge_port_rbr_spare_size;
2263 uint32_t nxge_port_rcr_size;
2264 uint32_t rx_cntl_alloc_size;
2265
2266 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rx_mem_pool"));
2267
2268 p_all_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;
2269 p_cfgp = (p_nxge_hw_pt_cfg_t)&p_all_cfgp->hw_config;
2270 rdc_max = NXGE_MAX_RDCS;
2271
2272 /*
2273 * Allocate memory for the common DMA data structures.
2274 */
2275 dma_poolp = (p_nxge_dma_pool_t)KMEM_ZALLOC(sizeof (nxge_dma_pool_t),
2276 KM_SLEEP);
2277 dma_buf_p = (p_nxge_dma_common_t *)KMEM_ZALLOC(
2278 sizeof (p_nxge_dma_common_t) * rdc_max, KM_SLEEP);
2279
2280 dma_cntl_poolp = (p_nxge_dma_pool_t)
2281 KMEM_ZALLOC(sizeof (nxge_dma_pool_t), KM_SLEEP);
2282 dma_cntl_p = (p_nxge_dma_common_t *)KMEM_ZALLOC(
2283 sizeof (p_nxge_dma_common_t) * rdc_max, KM_SLEEP);
2284
2285 num_chunks = (uint32_t *)KMEM_ZALLOC(
2286 sizeof (uint32_t) * rdc_max, KM_SLEEP);
2287
2288 /*
2289 * Assume that each DMA channel will be configured with
2290 * the default block size.
2291 * rbr block counts are modulo the batch count (16).
2292 */
2293 nxge_port_rbr_size = p_all_cfgp->rbr_size;
2294 nxge_port_rcr_size = p_all_cfgp->rcr_size;
2295
2296 if (!nxge_port_rbr_size) {
2297 nxge_port_rbr_size = NXGE_RBR_RBB_DEFAULT;
2298 }
2299 if (nxge_port_rbr_size % NXGE_RXDMA_POST_BATCH) {
2300 nxge_port_rbr_size = (NXGE_RXDMA_POST_BATCH *
2301 (nxge_port_rbr_size / NXGE_RXDMA_POST_BATCH + 1));
2302 }
2303
2304 p_all_cfgp->rbr_size = nxge_port_rbr_size;
2305 nxge_port_rbr_spare_size = nxge_rbr_spare_size;
2306
2307 if (nxge_port_rbr_spare_size % NXGE_RXDMA_POST_BATCH) {
2308 nxge_port_rbr_spare_size = (NXGE_RXDMA_POST_BATCH *
2309 (nxge_port_rbr_spare_size / NXGE_RXDMA_POST_BATCH + 1));
2310 }
2311 if (nxge_port_rbr_size > RBR_DEFAULT_MAX_BLKS) {
2312 NXGE_DEBUG_MSG((nxgep, MEM_CTL,
2313 "nxge_alloc_rx_mem_pool: RBR size too high %d, "
2314 "set to default %d",
2315 nxge_port_rbr_size, RBR_DEFAULT_MAX_BLKS));
2316 nxge_port_rbr_size = RBR_DEFAULT_MAX_BLKS;
2317 }
2318 if (nxge_port_rcr_size > RCR_DEFAULT_MAX) {
2319 NXGE_DEBUG_MSG((nxgep, MEM_CTL,
2320 "nxge_alloc_rx_mem_pool: RCR too high %d, "
2321 "set to default %d",
2322 nxge_port_rcr_size, RCR_DEFAULT_MAX));
2323 nxge_port_rcr_size = RCR_DEFAULT_MAX;
2324 }
2325
2326 /*
2327 * N2/NIU has limitation on the descriptor sizes (contiguous
2328 * memory allocation on data buffers to 4M (contig_mem_alloc)
2329 * and little endian for control buffers (must use the ddi/dki mem alloc
2330 * function).
2331 */
2332 #if defined(sun4v) && defined(NIU_LP_WORKAROUND)
2333 if (nxgep->niu_type == N2_NIU) {
2334 nxge_port_rbr_spare_size = 0;
2335 if ((nxge_port_rbr_size > NXGE_NIU_CONTIG_RBR_MAX) ||
2336 (!ISP2(nxge_port_rbr_size))) {
2337 nxge_port_rbr_size = NXGE_NIU_CONTIG_RBR_MAX;
2338 }
2339 if ((nxge_port_rcr_size > NXGE_NIU_CONTIG_RCR_MAX) ||
2340 (!ISP2(nxge_port_rcr_size))) {
2341 nxge_port_rcr_size = NXGE_NIU_CONTIG_RCR_MAX;
2342 }
2343 }
2344 #endif
2345
2346 /*
2347 * Addresses of receive block ring, receive completion ring and the
2348 * mailbox must be all cache-aligned (64 bytes).
2349 */
2350 rx_cntl_alloc_size = nxge_port_rbr_size + nxge_port_rbr_spare_size;
2351 rx_cntl_alloc_size *= (sizeof (rx_desc_t));
2352 rx_cntl_alloc_size += (sizeof (rcr_entry_t) * nxge_port_rcr_size);
2353 rx_cntl_alloc_size += sizeof (rxdma_mailbox_t);
2354
2355 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_alloc_rx_mem_pool: "
2356 "nxge_port_rbr_size = %d nxge_port_rbr_spare_size = %d "
2357 "nxge_port_rcr_size = %d "
2358 "rx_cntl_alloc_size = %d",
2359 nxge_port_rbr_size, nxge_port_rbr_spare_size,
2360 nxge_port_rcr_size,
2361 rx_cntl_alloc_size));
2362
2363 #if defined(sun4v) && defined(NIU_LP_WORKAROUND)
2364 if (nxgep->niu_type == N2_NIU) {
2365 uint32_t rx_buf_alloc_size = (nxgep->rx_default_block_size *
2366 (nxge_port_rbr_size + nxge_port_rbr_spare_size));
2367
2368 if (!ISP2(rx_buf_alloc_size)) {
2369 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2370 "==> nxge_alloc_rx_mem_pool: "
2371 " must be power of 2"));
2372 status |= (NXGE_ERROR | NXGE_DDI_FAILED);
2373 goto nxge_alloc_rx_mem_pool_exit;
2374 }
2375
2376 if (rx_buf_alloc_size > (1 << 22)) {
2377 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2378 "==> nxge_alloc_rx_mem_pool: "
2379 " limit size to 4M"));
2380 status |= (NXGE_ERROR | NXGE_DDI_FAILED);
2381 goto nxge_alloc_rx_mem_pool_exit;
2382 }
2383
2384 if (rx_cntl_alloc_size < 0x2000) {
2385 rx_cntl_alloc_size = 0x2000;
2386 }
2387 }
2388 #endif
2389 nxgep->nxge_port_rbr_size = nxge_port_rbr_size;
2390 nxgep->nxge_port_rcr_size = nxge_port_rcr_size;
2391 nxgep->nxge_port_rbr_spare_size = nxge_port_rbr_spare_size;
2392 nxgep->nxge_port_rx_cntl_alloc_size = rx_cntl_alloc_size;
2393
2394 dma_poolp->ndmas = p_cfgp->max_rdcs;
2395 dma_poolp->num_chunks = num_chunks;
2396 dma_poolp->buf_allocated = B_TRUE;
2397 nxgep->rx_buf_pool_p = dma_poolp;
2398 dma_poolp->dma_buf_pool_p = dma_buf_p;
2399
2400 dma_cntl_poolp->ndmas = p_cfgp->max_rdcs;
2401 dma_cntl_poolp->buf_allocated = B_TRUE;
2402 nxgep->rx_cntl_pool_p = dma_cntl_poolp;
2403 dma_cntl_poolp->dma_buf_pool_p = dma_cntl_p;
2404
2405 /* Allocate the receive rings, too. */
2406 nxgep->rx_rbr_rings =
2407 KMEM_ZALLOC(sizeof (rx_rbr_rings_t), KM_SLEEP);
2408 nxgep->rx_rbr_rings->rbr_rings =
2409 KMEM_ZALLOC(sizeof (p_rx_rbr_ring_t) * rdc_max, KM_SLEEP);
2410 nxgep->rx_rcr_rings =
2411 KMEM_ZALLOC(sizeof (rx_rcr_rings_t), KM_SLEEP);
2412 nxgep->rx_rcr_rings->rcr_rings =
2413 KMEM_ZALLOC(sizeof (p_rx_rcr_ring_t) * rdc_max, KM_SLEEP);
2414 nxgep->rx_mbox_areas_p =
2415 KMEM_ZALLOC(sizeof (rx_mbox_areas_t), KM_SLEEP);
2416 nxgep->rx_mbox_areas_p->rxmbox_areas =
2417 KMEM_ZALLOC(sizeof (p_rx_mbox_t) * rdc_max, KM_SLEEP);
2418
2419 nxgep->rx_rbr_rings->ndmas = nxgep->rx_rcr_rings->ndmas =
2420 p_cfgp->max_rdcs;
2421
2422 NXGE_DEBUG_MSG((nxgep, DMA_CTL,
2423 "<== nxge_alloc_rx_mem_pool:status 0x%08x", status));
2424
2425 nxge_alloc_rx_mem_pool_exit:
2426 return (status);
2427 }
2428
2429 /*
2430 * nxge_alloc_rxb
2431 *
2432 * Allocate buffers for an RDC.
2433 *
2434 * Arguments:
2435 * nxgep
2436 * channel The channel to map into our kernel space.
2437 *
2438 * Notes:
2439 *
2440 * NPI function calls:
2441 *
2442 * NXGE function calls:
2443 *
2444 * Registers accessed:
2445 *
2446 * Context:
2447 *
2448 * Taking apart:
2449 *
2450 * Open questions:
2451 *
2452 */
2453 nxge_status_t
nxge_alloc_rxb(p_nxge_t nxgep,int channel)2454 nxge_alloc_rxb(
2455 p_nxge_t nxgep,
2456 int channel)
2457 {
2458 size_t rx_buf_alloc_size;
2459 nxge_status_t status = NXGE_OK;
2460
2461 nxge_dma_common_t **data;
2462 nxge_dma_common_t **control;
2463 uint32_t *num_chunks;
2464
2465 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rbb"));
2466
2467 /*
2468 * Allocate memory for the receive buffers and descriptor rings.
2469 * Replace these allocation functions with the interface functions
2470 * provided by the partition manager if/when they are available.
2471 */
2472
2473 /*
2474 * Allocate memory for the receive buffer blocks.
2475 */
2476 rx_buf_alloc_size = (nxgep->rx_default_block_size *
2477 (nxgep->nxge_port_rbr_size + nxgep->nxge_port_rbr_spare_size));
2478
2479 data = &nxgep->rx_buf_pool_p->dma_buf_pool_p[channel];
2480 num_chunks = &nxgep->rx_buf_pool_p->num_chunks[channel];
2481
2482 if ((status = nxge_alloc_rx_buf_dma(
2483 nxgep, channel, data, rx_buf_alloc_size,
2484 nxgep->rx_default_block_size, num_chunks)) != NXGE_OK) {
2485 return (status);
2486 }
2487
2488 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_alloc_rxb(): "
2489 "dma %d dma_buf_p %llx &dma_buf_p %llx", channel, *data, data));
2490
2491 /*
2492 * Allocate memory for descriptor rings and mailbox.
2493 */
2494 control = &nxgep->rx_cntl_pool_p->dma_buf_pool_p[channel];
2495
2496 if ((status = nxge_alloc_rx_cntl_dma(
2497 nxgep, channel, control, nxgep->nxge_port_rx_cntl_alloc_size))
2498 != NXGE_OK) {
2499 nxge_free_rx_cntl_dma(nxgep, *control);
2500 (*data)->buf_alloc_state |= BUF_ALLOCATED_WAIT_FREE;
2501 nxge_free_rx_buf_dma(nxgep, *data, *num_chunks);
2502 return (status);
2503 }
2504
2505 NXGE_DEBUG_MSG((nxgep, DMA_CTL,
2506 "<== nxge_alloc_rx_mem_pool:status 0x%08x", status));
2507
2508 return (status);
2509 }
2510
2511 void
nxge_free_rxb(p_nxge_t nxgep,int channel)2512 nxge_free_rxb(
2513 p_nxge_t nxgep,
2514 int channel)
2515 {
2516 nxge_dma_common_t *data;
2517 nxge_dma_common_t *control;
2518 uint32_t num_chunks;
2519
2520 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rbb"));
2521
2522 data = nxgep->rx_buf_pool_p->dma_buf_pool_p[channel];
2523 num_chunks = nxgep->rx_buf_pool_p->num_chunks[channel];
2524 nxge_free_rx_buf_dma(nxgep, data, num_chunks);
2525
2526 nxgep->rx_buf_pool_p->dma_buf_pool_p[channel] = 0;
2527 nxgep->rx_buf_pool_p->num_chunks[channel] = 0;
2528
2529 control = nxgep->rx_cntl_pool_p->dma_buf_pool_p[channel];
2530 nxge_free_rx_cntl_dma(nxgep, control);
2531
2532 nxgep->rx_cntl_pool_p->dma_buf_pool_p[channel] = 0;
2533
2534 KMEM_FREE(data, sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK);
2535 KMEM_FREE(control, sizeof (nxge_dma_common_t));
2536
2537 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_alloc_rbb"));
2538 }
2539
2540 static void
nxge_free_rx_mem_pool(p_nxge_t nxgep)2541 nxge_free_rx_mem_pool(p_nxge_t nxgep)
2542 {
2543 int rdc_max = NXGE_MAX_RDCS;
2544
2545 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_free_rx_mem_pool"));
2546
2547 if (!nxgep->rx_buf_pool_p || !nxgep->rx_buf_pool_p->buf_allocated) {
2548 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
2549 "<== nxge_free_rx_mem_pool "
2550 "(null rx buf pool or buf not allocated"));
2551 return;
2552 }
2553 if (!nxgep->rx_cntl_pool_p || !nxgep->rx_cntl_pool_p->buf_allocated) {
2554 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
2555 "<== nxge_free_rx_mem_pool "
2556 "(null rx cntl buf pool or cntl buf not allocated"));
2557 return;
2558 }
2559
2560 KMEM_FREE(nxgep->rx_cntl_pool_p->dma_buf_pool_p,
2561 sizeof (p_nxge_dma_common_t) * rdc_max);
2562 KMEM_FREE(nxgep->rx_cntl_pool_p, sizeof (nxge_dma_pool_t));
2563
2564 KMEM_FREE(nxgep->rx_buf_pool_p->num_chunks,
2565 sizeof (uint32_t) * rdc_max);
2566 KMEM_FREE(nxgep->rx_buf_pool_p->dma_buf_pool_p,
2567 sizeof (p_nxge_dma_common_t) * rdc_max);
2568 KMEM_FREE(nxgep->rx_buf_pool_p, sizeof (nxge_dma_pool_t));
2569
2570 nxgep->rx_buf_pool_p = 0;
2571 nxgep->rx_cntl_pool_p = 0;
2572
2573 KMEM_FREE(nxgep->rx_rbr_rings->rbr_rings,
2574 sizeof (p_rx_rbr_ring_t) * rdc_max);
2575 KMEM_FREE(nxgep->rx_rbr_rings, sizeof (rx_rbr_rings_t));
2576 KMEM_FREE(nxgep->rx_rcr_rings->rcr_rings,
2577 sizeof (p_rx_rcr_ring_t) * rdc_max);
2578 KMEM_FREE(nxgep->rx_rcr_rings, sizeof (rx_rcr_rings_t));
2579 KMEM_FREE(nxgep->rx_mbox_areas_p->rxmbox_areas,
2580 sizeof (p_rx_mbox_t) * rdc_max);
2581 KMEM_FREE(nxgep->rx_mbox_areas_p, sizeof (rx_mbox_areas_t));
2582
2583 nxgep->rx_rbr_rings = 0;
2584 nxgep->rx_rcr_rings = 0;
2585 nxgep->rx_mbox_areas_p = 0;
2586
2587 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_free_rx_mem_pool"));
2588 }
2589
2590
2591 static nxge_status_t
nxge_alloc_rx_buf_dma(p_nxge_t nxgep,uint16_t dma_channel,p_nxge_dma_common_t * dmap,size_t alloc_size,size_t block_size,uint32_t * num_chunks)2592 nxge_alloc_rx_buf_dma(p_nxge_t nxgep, uint16_t dma_channel,
2593 p_nxge_dma_common_t *dmap,
2594 size_t alloc_size, size_t block_size, uint32_t *num_chunks)
2595 {
2596 p_nxge_dma_common_t rx_dmap;
2597 nxge_status_t status = NXGE_OK;
2598 size_t total_alloc_size;
2599 size_t allocated = 0;
2600 int i, size_index, array_size;
2601 boolean_t use_kmem_alloc = B_FALSE;
2602
2603 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rx_buf_dma"));
2604
2605 rx_dmap = (p_nxge_dma_common_t)
2606 KMEM_ZALLOC(sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK,
2607 KM_SLEEP);
2608
2609 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
2610 " alloc_rx_buf_dma rdc %d asize %x bsize %x bbuf %llx ",
2611 dma_channel, alloc_size, block_size, dmap));
2612
2613 total_alloc_size = alloc_size;
2614
2615 #if defined(RX_USE_RECLAIM_POST)
2616 total_alloc_size = alloc_size + alloc_size/4;
2617 #endif
2618
2619 i = 0;
2620 size_index = 0;
2621 array_size = sizeof (alloc_sizes)/sizeof (size_t);
2622 while ((size_index < array_size) &&
2623 (alloc_sizes[size_index] < alloc_size))
2624 size_index++;
2625 if (size_index >= array_size) {
2626 size_index = array_size - 1;
2627 }
2628
2629 /* For Neptune, use kmem_alloc if the kmem flag is set. */
2630 if (nxgep->niu_type != N2_NIU && nxge_use_kmem_alloc) {
2631 use_kmem_alloc = B_TRUE;
2632 #if defined(__i386) || defined(__amd64)
2633 size_index = 0;
2634 #endif
2635 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
2636 "==> nxge_alloc_rx_buf_dma: "
2637 "Neptune use kmem_alloc() - size_index %d",
2638 size_index));
2639 }
2640
2641 while ((allocated < total_alloc_size) &&
2642 (size_index >= 0) && (i < NXGE_DMA_BLOCK)) {
2643 rx_dmap[i].dma_chunk_index = i;
2644 rx_dmap[i].block_size = block_size;
2645 rx_dmap[i].alength = alloc_sizes[size_index];
2646 rx_dmap[i].orig_alength = rx_dmap[i].alength;
2647 rx_dmap[i].nblocks = alloc_sizes[size_index] / block_size;
2648 rx_dmap[i].dma_channel = dma_channel;
2649 rx_dmap[i].contig_alloc_type = B_FALSE;
2650 rx_dmap[i].kmem_alloc_type = B_FALSE;
2651 rx_dmap[i].buf_alloc_type = DDI_MEM_ALLOC;
2652
2653 /*
2654 * N2/NIU: data buffers must be contiguous as the driver
2655 * needs to call Hypervisor api to set up
2656 * logical pages.
2657 */
2658 if ((nxgep->niu_type == N2_NIU) && (NXGE_DMA_BLOCK == 1)) {
2659 rx_dmap[i].contig_alloc_type = B_TRUE;
2660 rx_dmap[i].buf_alloc_type = CONTIG_MEM_ALLOC;
2661 } else if (use_kmem_alloc) {
2662 /* For Neptune, use kmem_alloc */
2663 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
2664 "==> nxge_alloc_rx_buf_dma: "
2665 "Neptune use kmem_alloc()"));
2666 rx_dmap[i].kmem_alloc_type = B_TRUE;
2667 rx_dmap[i].buf_alloc_type = KMEM_ALLOC;
2668 }
2669
2670 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
2671 "alloc_rx_buf_dma rdc %d chunk %d bufp %llx size %x "
2672 "i %d nblocks %d alength %d",
2673 dma_channel, i, &rx_dmap[i], block_size,
2674 i, rx_dmap[i].nblocks,
2675 rx_dmap[i].alength));
2676 status = nxge_dma_mem_alloc(nxgep, nxge_force_dma,
2677 &nxge_rx_dma_attr,
2678 rx_dmap[i].alength,
2679 &nxge_dev_buf_dma_acc_attr,
2680 DDI_DMA_READ | DDI_DMA_STREAMING,
2681 (p_nxge_dma_common_t)(&rx_dmap[i]));
2682 if (status != NXGE_OK) {
2683 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2684 "nxge_alloc_rx_buf_dma: Alloc Failed: "
2685 "dma %d size_index %d size requested %d",
2686 dma_channel,
2687 size_index,
2688 rx_dmap[i].alength));
2689 size_index--;
2690 } else {
2691 rx_dmap[i].buf_alloc_state = BUF_ALLOCATED;
2692 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
2693 " nxge_alloc_rx_buf_dma DONE alloc mem: "
2694 "dma %d dma_buf_p $%p kaddrp $%p alength %d "
2695 "buf_alloc_state %d alloc_type %d",
2696 dma_channel,
2697 &rx_dmap[i],
2698 rx_dmap[i].kaddrp,
2699 rx_dmap[i].alength,
2700 rx_dmap[i].buf_alloc_state,
2701 rx_dmap[i].buf_alloc_type));
2702 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
2703 " alloc_rx_buf_dma allocated rdc %d "
2704 "chunk %d size %x dvma %x bufp %llx kaddrp $%p",
2705 dma_channel, i, rx_dmap[i].alength,
2706 rx_dmap[i].ioaddr_pp, &rx_dmap[i],
2707 rx_dmap[i].kaddrp));
2708 i++;
2709 allocated += alloc_sizes[size_index];
2710 }
2711 }
2712
2713 if (allocated < total_alloc_size) {
2714 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2715 "==> nxge_alloc_rx_buf_dma: not enough for channel %d "
2716 "allocated 0x%x requested 0x%x",
2717 dma_channel,
2718 allocated, total_alloc_size));
2719 status = NXGE_ERROR;
2720 goto nxge_alloc_rx_mem_fail1;
2721 }
2722
2723 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
2724 "==> nxge_alloc_rx_buf_dma: Allocated for channel %d "
2725 "allocated 0x%x requested 0x%x",
2726 dma_channel,
2727 allocated, total_alloc_size));
2728
2729 NXGE_DEBUG_MSG((nxgep, DMA_CTL,
2730 " alloc_rx_buf_dma rdc %d allocated %d chunks",
2731 dma_channel, i));
2732 *num_chunks = i;
2733 *dmap = rx_dmap;
2734
2735 goto nxge_alloc_rx_mem_exit;
2736
2737 nxge_alloc_rx_mem_fail1:
2738 KMEM_FREE(rx_dmap, sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK);
2739
2740 nxge_alloc_rx_mem_exit:
2741 NXGE_DEBUG_MSG((nxgep, DMA_CTL,
2742 "<== nxge_alloc_rx_buf_dma status 0x%08x", status));
2743
2744 return (status);
2745 }
2746
2747 /*ARGSUSED*/
2748 static void
nxge_free_rx_buf_dma(p_nxge_t nxgep,p_nxge_dma_common_t dmap,uint32_t num_chunks)2749 nxge_free_rx_buf_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap,
2750 uint32_t num_chunks)
2751 {
2752 int i;
2753
2754 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
2755 "==> nxge_free_rx_buf_dma: # of chunks %d", num_chunks));
2756
2757 if (dmap == 0)
2758 return;
2759
2760 for (i = 0; i < num_chunks; i++) {
2761 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
2762 "==> nxge_free_rx_buf_dma: chunk %d dmap 0x%llx",
2763 i, dmap));
2764 nxge_dma_free_rx_data_buf(dmap++);
2765 }
2766
2767 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_free_rx_buf_dma"));
2768 }
2769
2770 /*ARGSUSED*/
2771 static nxge_status_t
nxge_alloc_rx_cntl_dma(p_nxge_t nxgep,uint16_t dma_channel,p_nxge_dma_common_t * dmap,size_t size)2772 nxge_alloc_rx_cntl_dma(p_nxge_t nxgep, uint16_t dma_channel,
2773 p_nxge_dma_common_t *dmap, size_t size)
2774 {
2775 p_nxge_dma_common_t rx_dmap;
2776 nxge_status_t status = NXGE_OK;
2777
2778 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rx_cntl_dma"));
2779
2780 rx_dmap = (p_nxge_dma_common_t)
2781 KMEM_ZALLOC(sizeof (nxge_dma_common_t), KM_SLEEP);
2782
2783 rx_dmap->contig_alloc_type = B_FALSE;
2784 rx_dmap->kmem_alloc_type = B_FALSE;
2785
2786 status = nxge_dma_mem_alloc(nxgep, nxge_force_dma,
2787 &nxge_desc_dma_attr,
2788 size,
2789 &nxge_dev_desc_dma_acc_attr,
2790 DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
2791 rx_dmap);
2792 if (status != NXGE_OK) {
2793 goto nxge_alloc_rx_cntl_dma_fail1;
2794 }
2795
2796 *dmap = rx_dmap;
2797 goto nxge_alloc_rx_cntl_dma_exit;
2798
2799 nxge_alloc_rx_cntl_dma_fail1:
2800 KMEM_FREE(rx_dmap, sizeof (nxge_dma_common_t));
2801
2802 nxge_alloc_rx_cntl_dma_exit:
2803 NXGE_DEBUG_MSG((nxgep, DMA_CTL,
2804 "<== nxge_alloc_rx_cntl_dma status 0x%08x", status));
2805
2806 return (status);
2807 }
2808
2809 /*ARGSUSED*/
2810 static void
nxge_free_rx_cntl_dma(p_nxge_t nxgep,p_nxge_dma_common_t dmap)2811 nxge_free_rx_cntl_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap)
2812 {
2813 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_free_rx_cntl_dma"));
2814
2815 if (dmap == 0)
2816 return;
2817
2818 nxge_dma_mem_free(dmap);
2819
2820 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_free_rx_cntl_dma"));
2821 }
2822
2823 typedef struct {
2824 size_t tx_size;
2825 size_t cr_size;
2826 size_t threshhold;
2827 } nxge_tdc_sizes_t;
2828
2829 static
2830 nxge_status_t
nxge_tdc_sizes(nxge_t * nxgep,nxge_tdc_sizes_t * sizes)2831 nxge_tdc_sizes(
2832 nxge_t *nxgep,
2833 nxge_tdc_sizes_t *sizes)
2834 {
2835 uint32_t threshhold; /* The bcopy() threshhold */
2836 size_t tx_size; /* Transmit buffer size */
2837 size_t cr_size; /* Completion ring size */
2838
2839 /*
2840 * Assume that each DMA channel will be configured with the
2841 * default transmit buffer size for copying transmit data.
2842 * (If a packet is bigger than this, it will not be copied.)
2843 */
2844 if (nxgep->niu_type == N2_NIU) {
2845 threshhold = TX_BCOPY_SIZE;
2846 } else {
2847 threshhold = nxge_bcopy_thresh;
2848 }
2849 tx_size = nxge_tx_ring_size * threshhold;
2850
2851 cr_size = nxge_tx_ring_size * sizeof (tx_desc_t);
2852 cr_size += sizeof (txdma_mailbox_t);
2853
2854 #if defined(sun4v) && defined(NIU_LP_WORKAROUND)
2855 if (nxgep->niu_type == N2_NIU) {
2856 if (!ISP2(tx_size)) {
2857 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2858 "==> nxge_tdc_sizes: Tx size"
2859 " must be power of 2"));
2860 return (NXGE_ERROR);
2861 }
2862
2863 if (tx_size > (1 << 22)) {
2864 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2865 "==> nxge_tdc_sizes: Tx size"
2866 " limited to 4M"));
2867 return (NXGE_ERROR);
2868 }
2869
2870 if (cr_size < 0x2000)
2871 cr_size = 0x2000;
2872 }
2873 #endif
2874
2875 sizes->threshhold = threshhold;
2876 sizes->tx_size = tx_size;
2877 sizes->cr_size = cr_size;
2878
2879 return (NXGE_OK);
2880 }
2881 /*
2882 * nxge_alloc_txb
2883 *
2884 * Allocate buffers for an TDC.
2885 *
2886 * Arguments:
2887 * nxgep
2888 * channel The channel to map into our kernel space.
2889 *
2890 * Notes:
2891 *
2892 * NPI function calls:
2893 *
2894 * NXGE function calls:
2895 *
2896 * Registers accessed:
2897 *
2898 * Context:
2899 *
2900 * Taking apart:
2901 *
2902 * Open questions:
2903 *
2904 */
2905 nxge_status_t
nxge_alloc_txb(p_nxge_t nxgep,int channel)2906 nxge_alloc_txb(
2907 p_nxge_t nxgep,
2908 int channel)
2909 {
2910 nxge_dma_common_t **dma_buf_p;
2911 nxge_dma_common_t **dma_cntl_p;
2912 uint32_t *num_chunks;
2913 nxge_status_t status = NXGE_OK;
2914
2915 nxge_tdc_sizes_t sizes;
2916
2917 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_tbb"));
2918
2919 if (nxge_tdc_sizes(nxgep, &sizes) != NXGE_OK)
2920 return (NXGE_ERROR);
2921
2922 /*
2923 * Allocate memory for transmit buffers and descriptor rings.
2924 * Replace these allocation functions with the interface functions
2925 * provided by the partition manager Real Soon Now.
2926 */
2927 dma_buf_p = &nxgep->tx_buf_pool_p->dma_buf_pool_p[channel];
2928 num_chunks = &nxgep->tx_buf_pool_p->num_chunks[channel];
2929
2930 dma_cntl_p = &nxgep->tx_cntl_pool_p->dma_buf_pool_p[channel];
2931
2932 /*
2933 * Allocate memory for transmit buffers and descriptor rings.
2934 * Replace allocation functions with interface functions provided
2935 * by the partition manager when it is available.
2936 *
2937 * Allocate memory for the transmit buffer pool.
2938 */
2939 NXGE_DEBUG_MSG((nxgep, DMA_CTL,
2940 "sizes: tx: %ld, cr:%ld, th:%ld",
2941 sizes.tx_size, sizes.cr_size, sizes.threshhold));
2942
2943 *num_chunks = 0;
2944 status = nxge_alloc_tx_buf_dma(nxgep, channel, dma_buf_p,
2945 sizes.tx_size, sizes.threshhold, num_chunks);
2946 if (status != NXGE_OK) {
2947 cmn_err(CE_NOTE, "nxge_alloc_tx_buf_dma failed!");
2948 return (status);
2949 }
2950
2951 /*
2952 * Allocate memory for descriptor rings and mailbox.
2953 */
2954 status = nxge_alloc_tx_cntl_dma(nxgep, channel, dma_cntl_p,
2955 sizes.cr_size);
2956 if (status != NXGE_OK) {
2957 nxge_free_tx_buf_dma(nxgep, *dma_buf_p, *num_chunks);
2958 cmn_err(CE_NOTE, "nxge_alloc_tx_cntl_dma failed!");
2959 return (status);
2960 }
2961
2962 return (NXGE_OK);
2963 }
2964
2965 void
nxge_free_txb(p_nxge_t nxgep,int channel)2966 nxge_free_txb(
2967 p_nxge_t nxgep,
2968 int channel)
2969 {
2970 nxge_dma_common_t *data;
2971 nxge_dma_common_t *control;
2972 uint32_t num_chunks;
2973
2974 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_free_txb"));
2975
2976 data = nxgep->tx_buf_pool_p->dma_buf_pool_p[channel];
2977 num_chunks = nxgep->tx_buf_pool_p->num_chunks[channel];
2978 nxge_free_tx_buf_dma(nxgep, data, num_chunks);
2979
2980 nxgep->tx_buf_pool_p->dma_buf_pool_p[channel] = 0;
2981 nxgep->tx_buf_pool_p->num_chunks[channel] = 0;
2982
2983 control = nxgep->tx_cntl_pool_p->dma_buf_pool_p[channel];
2984 nxge_free_tx_cntl_dma(nxgep, control);
2985
2986 nxgep->tx_cntl_pool_p->dma_buf_pool_p[channel] = 0;
2987
2988 KMEM_FREE(data, sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK);
2989 KMEM_FREE(control, sizeof (nxge_dma_common_t));
2990
2991 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_free_txb"));
2992 }
2993
2994 /*
2995 * nxge_alloc_tx_mem_pool
2996 *
2997 * This function allocates all of the per-port TDC control data structures.
2998 * The per-channel (TDC) data structures are allocated when needed.
2999 *
3000 * Arguments:
3001 * nxgep
3002 *
3003 * Notes:
3004 *
3005 * Context:
3006 * Any domain
3007 */
3008 nxge_status_t
nxge_alloc_tx_mem_pool(p_nxge_t nxgep)3009 nxge_alloc_tx_mem_pool(p_nxge_t nxgep)
3010 {
3011 nxge_hw_pt_cfg_t *p_cfgp;
3012 nxge_dma_pool_t *dma_poolp;
3013 nxge_dma_common_t **dma_buf_p;
3014 nxge_dma_pool_t *dma_cntl_poolp;
3015 nxge_dma_common_t **dma_cntl_p;
3016 uint32_t *num_chunks; /* per dma */
3017 int tdc_max;
3018
3019 NXGE_DEBUG_MSG((nxgep, MEM_CTL, "==> nxge_alloc_tx_mem_pool"));
3020
3021 p_cfgp = &nxgep->pt_config.hw_config;
3022 tdc_max = NXGE_MAX_TDCS;
3023
3024 /*
3025 * Allocate memory for each transmit DMA channel.
3026 */
3027 dma_poolp = (p_nxge_dma_pool_t)KMEM_ZALLOC(sizeof (nxge_dma_pool_t),
3028 KM_SLEEP);
3029 dma_buf_p = (p_nxge_dma_common_t *)KMEM_ZALLOC(
3030 sizeof (p_nxge_dma_common_t) * tdc_max, KM_SLEEP);
3031
3032 dma_cntl_poolp = (p_nxge_dma_pool_t)
3033 KMEM_ZALLOC(sizeof (nxge_dma_pool_t), KM_SLEEP);
3034 dma_cntl_p = (p_nxge_dma_common_t *)KMEM_ZALLOC(
3035 sizeof (p_nxge_dma_common_t) * tdc_max, KM_SLEEP);
3036
3037 if (nxge_tx_ring_size > TDC_DEFAULT_MAX) {
3038 NXGE_DEBUG_MSG((nxgep, MEM_CTL,
3039 "nxge_alloc_tx_mem_pool: TDC too high %d, "
3040 "set to default %d",
3041 nxge_tx_ring_size, TDC_DEFAULT_MAX));
3042 nxge_tx_ring_size = TDC_DEFAULT_MAX;
3043 }
3044
3045 #if defined(sun4v) && defined(NIU_LP_WORKAROUND)
3046 /*
3047 * N2/NIU has limitation on the descriptor sizes (contiguous
3048 * memory allocation on data buffers to 4M (contig_mem_alloc)
3049 * and little endian for control buffers (must use the ddi/dki mem alloc
3050 * function). The transmit ring is limited to 8K (includes the
3051 * mailbox).
3052 */
3053 if (nxgep->niu_type == N2_NIU) {
3054 if ((nxge_tx_ring_size > NXGE_NIU_CONTIG_TX_MAX) ||
3055 (!ISP2(nxge_tx_ring_size))) {
3056 nxge_tx_ring_size = NXGE_NIU_CONTIG_TX_MAX;
3057 }
3058 }
3059 #endif
3060
3061 nxgep->nxge_port_tx_ring_size = nxge_tx_ring_size;
3062
3063 num_chunks = (uint32_t *)KMEM_ZALLOC(
3064 sizeof (uint32_t) * tdc_max, KM_SLEEP);
3065
3066 dma_poolp->ndmas = p_cfgp->tdc.owned;
3067 dma_poolp->num_chunks = num_chunks;
3068 dma_poolp->dma_buf_pool_p = dma_buf_p;
3069 nxgep->tx_buf_pool_p = dma_poolp;
3070
3071 dma_poolp->buf_allocated = B_TRUE;
3072
3073 dma_cntl_poolp->ndmas = p_cfgp->tdc.owned;
3074 dma_cntl_poolp->dma_buf_pool_p = dma_cntl_p;
3075 nxgep->tx_cntl_pool_p = dma_cntl_poolp;
3076
3077 dma_cntl_poolp->buf_allocated = B_TRUE;
3078
3079 nxgep->tx_rings =
3080 KMEM_ZALLOC(sizeof (tx_rings_t), KM_SLEEP);
3081 nxgep->tx_rings->rings =
3082 KMEM_ZALLOC(sizeof (p_tx_ring_t) * tdc_max, KM_SLEEP);
3083 nxgep->tx_mbox_areas_p =
3084 KMEM_ZALLOC(sizeof (tx_mbox_areas_t), KM_SLEEP);
3085 nxgep->tx_mbox_areas_p->txmbox_areas_p =
3086 KMEM_ZALLOC(sizeof (p_tx_mbox_t) * tdc_max, KM_SLEEP);
3087
3088 nxgep->tx_rings->ndmas = p_cfgp->tdc.owned;
3089
3090 NXGE_DEBUG_MSG((nxgep, MEM_CTL,
3091 "==> nxge_alloc_tx_mem_pool: ndmas %d poolp->ndmas %d",
3092 tdc_max, dma_poolp->ndmas));
3093
3094 return (NXGE_OK);
3095 }
3096
3097 nxge_status_t
nxge_alloc_tx_buf_dma(p_nxge_t nxgep,uint16_t dma_channel,p_nxge_dma_common_t * dmap,size_t alloc_size,size_t block_size,uint32_t * num_chunks)3098 nxge_alloc_tx_buf_dma(p_nxge_t nxgep, uint16_t dma_channel,
3099 p_nxge_dma_common_t *dmap, size_t alloc_size,
3100 size_t block_size, uint32_t *num_chunks)
3101 {
3102 p_nxge_dma_common_t tx_dmap;
3103 nxge_status_t status = NXGE_OK;
3104 size_t total_alloc_size;
3105 size_t allocated = 0;
3106 int i, size_index, array_size;
3107
3108 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_tx_buf_dma"));
3109
3110 tx_dmap = (p_nxge_dma_common_t)
3111 KMEM_ZALLOC(sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK,
3112 KM_SLEEP);
3113
3114 total_alloc_size = alloc_size;
3115 i = 0;
3116 size_index = 0;
3117 array_size = sizeof (alloc_sizes) / sizeof (size_t);
3118 while ((size_index < array_size) &&
3119 (alloc_sizes[size_index] < alloc_size))
3120 size_index++;
3121 if (size_index >= array_size) {
3122 size_index = array_size - 1;
3123 }
3124
3125 while ((allocated < total_alloc_size) &&
3126 (size_index >= 0) && (i < NXGE_DMA_BLOCK)) {
3127
3128 tx_dmap[i].dma_chunk_index = i;
3129 tx_dmap[i].block_size = block_size;
3130 tx_dmap[i].alength = alloc_sizes[size_index];
3131 tx_dmap[i].orig_alength = tx_dmap[i].alength;
3132 tx_dmap[i].nblocks = alloc_sizes[size_index] / block_size;
3133 tx_dmap[i].dma_channel = dma_channel;
3134 tx_dmap[i].contig_alloc_type = B_FALSE;
3135 tx_dmap[i].kmem_alloc_type = B_FALSE;
3136
3137 /*
3138 * N2/NIU: data buffers must be contiguous as the driver
3139 * needs to call Hypervisor api to set up
3140 * logical pages.
3141 */
3142 if ((nxgep->niu_type == N2_NIU) && (NXGE_DMA_BLOCK == 1)) {
3143 tx_dmap[i].contig_alloc_type = B_TRUE;
3144 }
3145
3146 status = nxge_dma_mem_alloc(nxgep, nxge_force_dma,
3147 &nxge_tx_dma_attr,
3148 tx_dmap[i].alength,
3149 &nxge_dev_buf_dma_acc_attr,
3150 DDI_DMA_WRITE | DDI_DMA_STREAMING,
3151 (p_nxge_dma_common_t)(&tx_dmap[i]));
3152 if (status != NXGE_OK) {
3153 size_index--;
3154 } else {
3155 i++;
3156 allocated += alloc_sizes[size_index];
3157 }
3158 }
3159
3160 if (allocated < total_alloc_size) {
3161 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3162 "==> nxge_alloc_tx_buf_dma: not enough channel %d: "
3163 "allocated 0x%x requested 0x%x",
3164 dma_channel,
3165 allocated, total_alloc_size));
3166 status = NXGE_ERROR;
3167 goto nxge_alloc_tx_mem_fail1;
3168 }
3169
3170 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3171 "==> nxge_alloc_tx_buf_dma: Allocated for channel %d: "
3172 "allocated 0x%x requested 0x%x",
3173 dma_channel,
3174 allocated, total_alloc_size));
3175
3176 *num_chunks = i;
3177 *dmap = tx_dmap;
3178 NXGE_DEBUG_MSG((nxgep, DMA_CTL,
3179 "==> nxge_alloc_tx_buf_dma dmap 0x%016llx num chunks %d",
3180 *dmap, i));
3181 goto nxge_alloc_tx_mem_exit;
3182
3183 nxge_alloc_tx_mem_fail1:
3184 KMEM_FREE(tx_dmap, sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK);
3185
3186 nxge_alloc_tx_mem_exit:
3187 NXGE_DEBUG_MSG((nxgep, DMA_CTL,
3188 "<== nxge_alloc_tx_buf_dma status 0x%08x", status));
3189
3190 return (status);
3191 }
3192
3193 /*ARGSUSED*/
3194 static void
nxge_free_tx_buf_dma(p_nxge_t nxgep,p_nxge_dma_common_t dmap,uint32_t num_chunks)3195 nxge_free_tx_buf_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap,
3196 uint32_t num_chunks)
3197 {
3198 int i;
3199
3200 NXGE_DEBUG_MSG((nxgep, MEM_CTL, "==> nxge_free_tx_buf_dma"));
3201
3202 if (dmap == 0)
3203 return;
3204
3205 for (i = 0; i < num_chunks; i++) {
3206 nxge_dma_mem_free(dmap++);
3207 }
3208
3209 NXGE_DEBUG_MSG((nxgep, MEM_CTL, "<== nxge_free_tx_buf_dma"));
3210 }
3211
3212 /*ARGSUSED*/
3213 nxge_status_t
nxge_alloc_tx_cntl_dma(p_nxge_t nxgep,uint16_t dma_channel,p_nxge_dma_common_t * dmap,size_t size)3214 nxge_alloc_tx_cntl_dma(p_nxge_t nxgep, uint16_t dma_channel,
3215 p_nxge_dma_common_t *dmap, size_t size)
3216 {
3217 p_nxge_dma_common_t tx_dmap;
3218 nxge_status_t status = NXGE_OK;
3219
3220 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_tx_cntl_dma"));
3221 tx_dmap = (p_nxge_dma_common_t)
3222 KMEM_ZALLOC(sizeof (nxge_dma_common_t), KM_SLEEP);
3223
3224 tx_dmap->contig_alloc_type = B_FALSE;
3225 tx_dmap->kmem_alloc_type = B_FALSE;
3226
3227 status = nxge_dma_mem_alloc(nxgep, nxge_force_dma,
3228 &nxge_desc_dma_attr,
3229 size,
3230 &nxge_dev_desc_dma_acc_attr,
3231 DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
3232 tx_dmap);
3233 if (status != NXGE_OK) {
3234 goto nxge_alloc_tx_cntl_dma_fail1;
3235 }
3236
3237 *dmap = tx_dmap;
3238 goto nxge_alloc_tx_cntl_dma_exit;
3239
3240 nxge_alloc_tx_cntl_dma_fail1:
3241 KMEM_FREE(tx_dmap, sizeof (nxge_dma_common_t));
3242
3243 nxge_alloc_tx_cntl_dma_exit:
3244 NXGE_DEBUG_MSG((nxgep, DMA_CTL,
3245 "<== nxge_alloc_tx_cntl_dma status 0x%08x", status));
3246
3247 return (status);
3248 }
3249
3250 /*ARGSUSED*/
3251 static void
nxge_free_tx_cntl_dma(p_nxge_t nxgep,p_nxge_dma_common_t dmap)3252 nxge_free_tx_cntl_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap)
3253 {
3254 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_free_tx_cntl_dma"));
3255
3256 if (dmap == 0)
3257 return;
3258
3259 nxge_dma_mem_free(dmap);
3260
3261 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_free_tx_cntl_dma"));
3262 }
3263
3264 /*
3265 * nxge_free_tx_mem_pool
3266 *
3267 * This function frees all of the per-port TDC control data structures.
3268 * The per-channel (TDC) data structures are freed when the channel
3269 * is stopped.
3270 *
3271 * Arguments:
3272 * nxgep
3273 *
3274 * Notes:
3275 *
3276 * Context:
3277 * Any domain
3278 */
3279 static void
nxge_free_tx_mem_pool(p_nxge_t nxgep)3280 nxge_free_tx_mem_pool(p_nxge_t nxgep)
3281 {
3282 int tdc_max = NXGE_MAX_TDCS;
3283
3284 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_free_tx_mem_pool"));
3285
3286 if (!nxgep->tx_buf_pool_p || !nxgep->tx_buf_pool_p->buf_allocated) {
3287 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3288 "<== nxge_free_tx_mem_pool "
3289 "(null tx buf pool or buf not allocated"));
3290 return;
3291 }
3292 if (!nxgep->tx_cntl_pool_p || !nxgep->tx_cntl_pool_p->buf_allocated) {
3293 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3294 "<== nxge_free_tx_mem_pool "
3295 "(null tx cntl buf pool or cntl buf not allocated"));
3296 return;
3297 }
3298
3299 /* 1. Free the mailboxes. */
3300 KMEM_FREE(nxgep->tx_mbox_areas_p->txmbox_areas_p,
3301 sizeof (p_tx_mbox_t) * tdc_max);
3302 KMEM_FREE(nxgep->tx_mbox_areas_p, sizeof (tx_mbox_areas_t));
3303
3304 nxgep->tx_mbox_areas_p = 0;
3305
3306 /* 2. Free the transmit ring arrays. */
3307 KMEM_FREE(nxgep->tx_rings->rings,
3308 sizeof (p_tx_ring_t) * tdc_max);
3309 KMEM_FREE(nxgep->tx_rings, sizeof (tx_rings_t));
3310
3311 nxgep->tx_rings = 0;
3312
3313 /* 3. Free the completion ring data structures. */
3314 KMEM_FREE(nxgep->tx_cntl_pool_p->dma_buf_pool_p,
3315 sizeof (p_nxge_dma_common_t) * tdc_max);
3316 KMEM_FREE(nxgep->tx_cntl_pool_p, sizeof (nxge_dma_pool_t));
3317
3318 nxgep->tx_cntl_pool_p = 0;
3319
3320 /* 4. Free the data ring data structures. */
3321 KMEM_FREE(nxgep->tx_buf_pool_p->num_chunks,
3322 sizeof (uint32_t) * tdc_max);
3323 KMEM_FREE(nxgep->tx_buf_pool_p->dma_buf_pool_p,
3324 sizeof (p_nxge_dma_common_t) * tdc_max);
3325 KMEM_FREE(nxgep->tx_buf_pool_p, sizeof (nxge_dma_pool_t));
3326
3327 nxgep->tx_buf_pool_p = 0;
3328
3329 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_free_tx_mem_pool"));
3330 }
3331
3332 /*ARGSUSED*/
3333 static nxge_status_t
nxge_dma_mem_alloc(p_nxge_t nxgep,dma_method_t method,struct ddi_dma_attr * dma_attrp,size_t length,ddi_device_acc_attr_t * acc_attr_p,uint_t xfer_flags,p_nxge_dma_common_t dma_p)3334 nxge_dma_mem_alloc(p_nxge_t nxgep, dma_method_t method,
3335 struct ddi_dma_attr *dma_attrp,
3336 size_t length, ddi_device_acc_attr_t *acc_attr_p, uint_t xfer_flags,
3337 p_nxge_dma_common_t dma_p)
3338 {
3339 caddr_t kaddrp;
3340 int ddi_status = DDI_SUCCESS;
3341 boolean_t contig_alloc_type;
3342 boolean_t kmem_alloc_type;
3343
3344 contig_alloc_type = dma_p->contig_alloc_type;
3345
3346 if (contig_alloc_type && (nxgep->niu_type != N2_NIU)) {
3347 /*
3348 * contig_alloc_type for contiguous memory only allowed
3349 * for N2/NIU.
3350 */
3351 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3352 "nxge_dma_mem_alloc: alloc type not allowed (%d)",
3353 dma_p->contig_alloc_type));
3354 return (NXGE_ERROR | NXGE_DDI_FAILED);
3355 }
3356
3357 dma_p->dma_handle = NULL;
3358 dma_p->acc_handle = NULL;
3359 dma_p->kaddrp = dma_p->last_kaddrp = NULL;
3360 dma_p->first_ioaddr_pp = dma_p->last_ioaddr_pp = NULL;
3361 ddi_status = ddi_dma_alloc_handle(nxgep->dip, dma_attrp,
3362 DDI_DMA_DONTWAIT, NULL, &dma_p->dma_handle);
3363 if (ddi_status != DDI_SUCCESS) {
3364 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3365 "nxge_dma_mem_alloc:ddi_dma_alloc_handle failed."));
3366 return (NXGE_ERROR | NXGE_DDI_FAILED);
3367 }
3368
3369 kmem_alloc_type = dma_p->kmem_alloc_type;
3370
3371 switch (contig_alloc_type) {
3372 case B_FALSE:
3373 switch (kmem_alloc_type) {
3374 case B_FALSE:
3375 ddi_status = ddi_dma_mem_alloc(dma_p->dma_handle,
3376 length,
3377 acc_attr_p,
3378 xfer_flags,
3379 DDI_DMA_DONTWAIT, 0, &kaddrp, &dma_p->alength,
3380 &dma_p->acc_handle);
3381 if (ddi_status != DDI_SUCCESS) {
3382 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3383 "nxge_dma_mem_alloc: "
3384 "ddi_dma_mem_alloc failed"));
3385 ddi_dma_free_handle(&dma_p->dma_handle);
3386 dma_p->dma_handle = NULL;
3387 return (NXGE_ERROR | NXGE_DDI_FAILED);
3388 }
3389 if (dma_p->alength < length) {
3390 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3391 "nxge_dma_mem_alloc:di_dma_mem_alloc "
3392 "< length."));
3393 ddi_dma_mem_free(&dma_p->acc_handle);
3394 ddi_dma_free_handle(&dma_p->dma_handle);
3395 dma_p->acc_handle = NULL;
3396 dma_p->dma_handle = NULL;
3397 return (NXGE_ERROR);
3398 }
3399
3400 ddi_status = ddi_dma_addr_bind_handle(dma_p->dma_handle,
3401 NULL,
3402 kaddrp, dma_p->alength, xfer_flags,
3403 DDI_DMA_DONTWAIT,
3404 0, &dma_p->dma_cookie, &dma_p->ncookies);
3405 if (ddi_status != DDI_DMA_MAPPED) {
3406 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3407 "nxge_dma_mem_alloc: ddi_dma_addr_bind "
3408 "failed "
3409 "(staus 0x%x ncookies %d.)", ddi_status,
3410 dma_p->ncookies));
3411 if (dma_p->acc_handle) {
3412 ddi_dma_mem_free(&dma_p->acc_handle);
3413 dma_p->acc_handle = NULL;
3414 }
3415 ddi_dma_free_handle(&dma_p->dma_handle);
3416 dma_p->dma_handle = NULL;
3417 return (NXGE_ERROR | NXGE_DDI_FAILED);
3418 }
3419
3420 if (dma_p->ncookies != 1) {
3421 NXGE_DEBUG_MSG((nxgep, DMA_CTL,
3422 "nxge_dma_mem_alloc:ddi_dma_addr_bind "
3423 "> 1 cookie"
3424 "(staus 0x%x ncookies %d.)", ddi_status,
3425 dma_p->ncookies));
3426 (void) ddi_dma_unbind_handle(dma_p->dma_handle);
3427 if (dma_p->acc_handle) {
3428 ddi_dma_mem_free(&dma_p->acc_handle);
3429 dma_p->acc_handle = NULL;
3430 }
3431 ddi_dma_free_handle(&dma_p->dma_handle);
3432 dma_p->dma_handle = NULL;
3433 dma_p->acc_handle = NULL;
3434 return (NXGE_ERROR);
3435 }
3436 break;
3437
3438 case B_TRUE:
3439 kaddrp = KMEM_ALLOC(length, KM_NOSLEEP);
3440 if (kaddrp == NULL) {
3441 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3442 "nxge_dma_mem_alloc:ddi_dma_mem_alloc "
3443 "kmem alloc failed"));
3444 return (NXGE_ERROR);
3445 }
3446
3447 dma_p->alength = length;
3448 ddi_status = ddi_dma_addr_bind_handle(dma_p->dma_handle,
3449 NULL, kaddrp, dma_p->alength, xfer_flags,
3450 DDI_DMA_DONTWAIT, 0,
3451 &dma_p->dma_cookie, &dma_p->ncookies);
3452 if (ddi_status != DDI_DMA_MAPPED) {
3453 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3454 "nxge_dma_mem_alloc:ddi_dma_addr_bind: "
3455 "(kmem_alloc) failed kaddrp $%p length %d "
3456 "(staus 0x%x (%d) ncookies %d.)",
3457 kaddrp, length,
3458 ddi_status, ddi_status, dma_p->ncookies));
3459 KMEM_FREE(kaddrp, length);
3460 dma_p->acc_handle = NULL;
3461 ddi_dma_free_handle(&dma_p->dma_handle);
3462 dma_p->dma_handle = NULL;
3463 dma_p->kaddrp = NULL;
3464 return (NXGE_ERROR | NXGE_DDI_FAILED);
3465 }
3466
3467 if (dma_p->ncookies != 1) {
3468 NXGE_DEBUG_MSG((nxgep, DMA_CTL,
3469 "nxge_dma_mem_alloc:ddi_dma_addr_bind "
3470 "(kmem_alloc) > 1 cookie"
3471 "(staus 0x%x ncookies %d.)", ddi_status,
3472 dma_p->ncookies));
3473 (void) ddi_dma_unbind_handle(dma_p->dma_handle);
3474 KMEM_FREE(kaddrp, length);
3475 ddi_dma_free_handle(&dma_p->dma_handle);
3476 dma_p->dma_handle = NULL;
3477 dma_p->acc_handle = NULL;
3478 dma_p->kaddrp = NULL;
3479 return (NXGE_ERROR);
3480 }
3481
3482 dma_p->kaddrp = kaddrp;
3483
3484 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
3485 "nxge_dma_mem_alloc: kmem_alloc dmap $%p "
3486 "kaddr $%p alength %d",
3487 dma_p,
3488 kaddrp,
3489 dma_p->alength));
3490 break;
3491 }
3492 break;
3493
3494 #if defined(sun4v) && defined(NIU_LP_WORKAROUND)
3495 case B_TRUE:
3496 kaddrp = (caddr_t)contig_mem_alloc(length);
3497 if (kaddrp == NULL) {
3498 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3499 "nxge_dma_mem_alloc:contig_mem_alloc failed."));
3500 ddi_dma_free_handle(&dma_p->dma_handle);
3501 return (NXGE_ERROR | NXGE_DDI_FAILED);
3502 }
3503
3504 dma_p->alength = length;
3505 ddi_status = ddi_dma_addr_bind_handle(dma_p->dma_handle, NULL,
3506 kaddrp, dma_p->alength, xfer_flags, DDI_DMA_DONTWAIT, 0,
3507 &dma_p->dma_cookie, &dma_p->ncookies);
3508 if (ddi_status != DDI_DMA_MAPPED) {
3509 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3510 "nxge_dma_mem_alloc:di_dma_addr_bind failed "
3511 "(status 0x%x ncookies %d.)", ddi_status,
3512 dma_p->ncookies));
3513
3514 NXGE_DEBUG_MSG((nxgep, DMA_CTL,
3515 "==> nxge_dma_mem_alloc: (not mapped)"
3516 "length %lu (0x%x) "
3517 "free contig kaddrp $%p "
3518 "va_to_pa $%p",
3519 length, length,
3520 kaddrp,
3521 va_to_pa(kaddrp)));
3522
3523
3524 contig_mem_free((void *)kaddrp, length);
3525 ddi_dma_free_handle(&dma_p->dma_handle);
3526
3527 dma_p->dma_handle = NULL;
3528 dma_p->acc_handle = NULL;
3529 dma_p->alength = NULL;
3530 dma_p->kaddrp = NULL;
3531
3532 return (NXGE_ERROR | NXGE_DDI_FAILED);
3533 }
3534
3535 if (dma_p->ncookies != 1 ||
3536 (dma_p->dma_cookie.dmac_laddress == NULL)) {
3537 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3538 "nxge_dma_mem_alloc:di_dma_addr_bind > 1 "
3539 "cookie or "
3540 "dmac_laddress is NULL $%p size %d "
3541 " (status 0x%x ncookies %d.)",
3542 ddi_status,
3543 dma_p->dma_cookie.dmac_laddress,
3544 dma_p->dma_cookie.dmac_size,
3545 dma_p->ncookies));
3546
3547 contig_mem_free((void *)kaddrp, length);
3548 (void) ddi_dma_unbind_handle(dma_p->dma_handle);
3549 ddi_dma_free_handle(&dma_p->dma_handle);
3550
3551 dma_p->alength = 0;
3552 dma_p->dma_handle = NULL;
3553 dma_p->acc_handle = NULL;
3554 dma_p->kaddrp = NULL;
3555
3556 return (NXGE_ERROR | NXGE_DDI_FAILED);
3557 }
3558 break;
3559
3560 #else
3561 case B_TRUE:
3562 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3563 "nxge_dma_mem_alloc: invalid alloc type for !sun4v"));
3564 return (NXGE_ERROR | NXGE_DDI_FAILED);
3565 #endif
3566 }
3567
3568 dma_p->kaddrp = kaddrp;
3569 dma_p->last_kaddrp = (unsigned char *)kaddrp +
3570 dma_p->alength - RXBUF_64B_ALIGNED;
3571 #if defined(__i386)
3572 dma_p->ioaddr_pp =
3573 (unsigned char *)(uint32_t)dma_p->dma_cookie.dmac_laddress;
3574 #else
3575 dma_p->ioaddr_pp = (unsigned char *)dma_p->dma_cookie.dmac_laddress;
3576 #endif
3577 dma_p->last_ioaddr_pp =
3578 #if defined(__i386)
3579 (unsigned char *)(uint32_t)dma_p->dma_cookie.dmac_laddress +
3580 #else
3581 (unsigned char *)dma_p->dma_cookie.dmac_laddress +
3582 #endif
3583 dma_p->alength - RXBUF_64B_ALIGNED;
3584
3585 NPI_DMA_ACC_HANDLE_SET(dma_p, dma_p->acc_handle);
3586
3587 #if defined(sun4v) && defined(NIU_LP_WORKAROUND)
3588 dma_p->orig_ioaddr_pp =
3589 (unsigned char *)dma_p->dma_cookie.dmac_laddress;
3590 dma_p->orig_alength = length;
3591 dma_p->orig_kaddrp = kaddrp;
3592 dma_p->orig_vatopa = (uint64_t)va_to_pa(kaddrp);
3593 #endif
3594
3595 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_dma_mem_alloc: "
3596 "dma buffer allocated: dma_p $%p "
3597 "return dmac_ladress from cookie $%p cookie dmac_size %d "
3598 "dma_p->ioaddr_p $%p "
3599 "dma_p->orig_ioaddr_p $%p "
3600 "orig_vatopa $%p "
3601 "alength %d (0x%x) "
3602 "kaddrp $%p "
3603 "length %d (0x%x)",
3604 dma_p,
3605 dma_p->dma_cookie.dmac_laddress, dma_p->dma_cookie.dmac_size,
3606 dma_p->ioaddr_pp,
3607 dma_p->orig_ioaddr_pp,
3608 dma_p->orig_vatopa,
3609 dma_p->alength, dma_p->alength,
3610 kaddrp,
3611 length, length));
3612
3613 return (NXGE_OK);
3614 }
3615
3616 static void
nxge_dma_mem_free(p_nxge_dma_common_t dma_p)3617 nxge_dma_mem_free(p_nxge_dma_common_t dma_p)
3618 {
3619 if (dma_p->dma_handle != NULL) {
3620 if (dma_p->ncookies) {
3621 (void) ddi_dma_unbind_handle(dma_p->dma_handle);
3622 dma_p->ncookies = 0;
3623 }
3624 ddi_dma_free_handle(&dma_p->dma_handle);
3625 dma_p->dma_handle = NULL;
3626 }
3627
3628 if (dma_p->acc_handle != NULL) {
3629 ddi_dma_mem_free(&dma_p->acc_handle);
3630 dma_p->acc_handle = NULL;
3631 NPI_DMA_ACC_HANDLE_SET(dma_p, NULL);
3632 }
3633
3634 #if defined(sun4v) && defined(NIU_LP_WORKAROUND)
3635 if (dma_p->contig_alloc_type &&
3636 dma_p->orig_kaddrp && dma_p->orig_alength) {
3637 NXGE_DEBUG_MSG((NULL, DMA_CTL, "nxge_dma_mem_free: "
3638 "kaddrp $%p (orig_kaddrp $%p)"
3639 "mem type %d ",
3640 "orig_alength %d "
3641 "alength 0x%x (%d)",
3642 dma_p->kaddrp,
3643 dma_p->orig_kaddrp,
3644 dma_p->contig_alloc_type,
3645 dma_p->orig_alength,
3646 dma_p->alength, dma_p->alength));
3647
3648 contig_mem_free(dma_p->orig_kaddrp, dma_p->orig_alength);
3649 dma_p->orig_alength = NULL;
3650 dma_p->orig_kaddrp = NULL;
3651 dma_p->contig_alloc_type = B_FALSE;
3652 }
3653 #endif
3654 dma_p->kaddrp = NULL;
3655 dma_p->alength = NULL;
3656 }
3657
3658 static void
nxge_dma_free_rx_data_buf(p_nxge_dma_common_t dma_p)3659 nxge_dma_free_rx_data_buf(p_nxge_dma_common_t dma_p)
3660 {
3661 uint64_t kaddr;
3662 uint32_t buf_size;
3663
3664 NXGE_DEBUG_MSG((NULL, DMA_CTL, "==> nxge_dma_free_rx_data_buf"));
3665
3666 if (dma_p->dma_handle != NULL) {
3667 if (dma_p->ncookies) {
3668 (void) ddi_dma_unbind_handle(dma_p->dma_handle);
3669 dma_p->ncookies = 0;
3670 }
3671 ddi_dma_free_handle(&dma_p->dma_handle);
3672 dma_p->dma_handle = NULL;
3673 }
3674
3675 if (dma_p->acc_handle != NULL) {
3676 ddi_dma_mem_free(&dma_p->acc_handle);
3677 dma_p->acc_handle = NULL;
3678 NPI_DMA_ACC_HANDLE_SET(dma_p, NULL);
3679 }
3680
3681 NXGE_DEBUG_MSG((NULL, DMA_CTL,
3682 "==> nxge_dma_free_rx_data_buf: dmap $%p buf_alloc_state %d",
3683 dma_p,
3684 dma_p->buf_alloc_state));
3685
3686 if (!(dma_p->buf_alloc_state & BUF_ALLOCATED_WAIT_FREE)) {
3687 NXGE_DEBUG_MSG((NULL, DMA_CTL,
3688 "<== nxge_dma_free_rx_data_buf: "
3689 "outstanding data buffers"));
3690 return;
3691 }
3692
3693 #if defined(sun4v) && defined(NIU_LP_WORKAROUND)
3694 if (dma_p->contig_alloc_type &&
3695 dma_p->orig_kaddrp && dma_p->orig_alength) {
3696 NXGE_DEBUG_MSG((NULL, DMA_CTL, "nxge_dma_free_rx_data_buf: "
3697 "kaddrp $%p (orig_kaddrp $%p)"
3698 "mem type %d ",
3699 "orig_alength %d "
3700 "alength 0x%x (%d)",
3701 dma_p->kaddrp,
3702 dma_p->orig_kaddrp,
3703 dma_p->contig_alloc_type,
3704 dma_p->orig_alength,
3705 dma_p->alength, dma_p->alength));
3706
3707 kaddr = (uint64_t)dma_p->orig_kaddrp;
3708 buf_size = dma_p->orig_alength;
3709 nxge_free_buf(CONTIG_MEM_ALLOC, kaddr, buf_size);
3710 dma_p->orig_alength = NULL;
3711 dma_p->orig_kaddrp = NULL;
3712 dma_p->contig_alloc_type = B_FALSE;
3713 dma_p->kaddrp = NULL;
3714 dma_p->alength = NULL;
3715 return;
3716 }
3717 #endif
3718
3719 if (dma_p->kmem_alloc_type) {
3720 NXGE_DEBUG_MSG((NULL, DMA_CTL,
3721 "nxge_dma_free_rx_data_buf: free kmem "
3722 "kaddrp $%p (orig_kaddrp $%p)"
3723 "alloc type %d "
3724 "orig_alength %d "
3725 "alength 0x%x (%d)",
3726 dma_p->kaddrp,
3727 dma_p->orig_kaddrp,
3728 dma_p->kmem_alloc_type,
3729 dma_p->orig_alength,
3730 dma_p->alength, dma_p->alength));
3731 #if defined(__i386)
3732 kaddr = (uint64_t)(uint32_t)dma_p->kaddrp;
3733 #else
3734 kaddr = (uint64_t)dma_p->kaddrp;
3735 #endif
3736 buf_size = dma_p->orig_alength;
3737 NXGE_DEBUG_MSG((NULL, DMA_CTL,
3738 "nxge_dma_free_rx_data_buf: free dmap $%p "
3739 "kaddr $%p buf_size %d",
3740 dma_p,
3741 kaddr, buf_size));
3742 nxge_free_buf(KMEM_ALLOC, kaddr, buf_size);
3743 dma_p->alength = 0;
3744 dma_p->orig_alength = 0;
3745 dma_p->kaddrp = NULL;
3746 dma_p->kmem_alloc_type = B_FALSE;
3747 }
3748
3749 NXGE_DEBUG_MSG((NULL, DMA_CTL, "<== nxge_dma_free_rx_data_buf"));
3750 }
3751
3752 /*
3753 * nxge_m_start() -- start transmitting and receiving.
3754 *
3755 * This function is called by the MAC layer when the first
3756 * stream is open to prepare the hardware ready for sending
3757 * and transmitting packets.
3758 */
3759 static int
nxge_m_start(void * arg)3760 nxge_m_start(void *arg)
3761 {
3762 p_nxge_t nxgep = (p_nxge_t)arg;
3763
3764 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_start"));
3765
3766 /*
3767 * Are we already started?
3768 */
3769 if (nxgep->nxge_mac_state == NXGE_MAC_STARTED) {
3770 return (0);
3771 }
3772
3773 if (nxge_peu_reset_enable && !nxgep->nxge_link_poll_timerid) {
3774 (void) nxge_link_monitor(nxgep, LINK_MONITOR_START);
3775 }
3776
3777 /*
3778 * Make sure RX MAC is disabled while we initialize.
3779 */
3780 if (!isLDOMguest(nxgep)) {
3781 (void) nxge_rx_mac_disable(nxgep);
3782 }
3783
3784 /*
3785 * Grab the global lock.
3786 */
3787 MUTEX_ENTER(nxgep->genlock);
3788
3789 /*
3790 * Initialize the driver and hardware.
3791 */
3792 if (nxge_init(nxgep) != NXGE_OK) {
3793 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3794 "<== nxge_m_start: initialization failed"));
3795 MUTEX_EXIT(nxgep->genlock);
3796 return (EIO);
3797 }
3798
3799 /*
3800 * Start timer to check the system error and tx hangs
3801 */
3802 if (!isLDOMguest(nxgep))
3803 nxgep->nxge_timerid = nxge_start_timer(nxgep,
3804 nxge_check_hw_state, NXGE_CHECK_TIMER);
3805 #if defined(sun4v)
3806 else
3807 nxge_hio_start_timer(nxgep);
3808 #endif
3809
3810 nxgep->link_notify = B_TRUE;
3811 nxgep->link_check_count = 0;
3812 nxgep->nxge_mac_state = NXGE_MAC_STARTED;
3813
3814 /*
3815 * Let the global lock go, since we are intialized.
3816 */
3817 MUTEX_EXIT(nxgep->genlock);
3818
3819 /*
3820 * Let the MAC start receiving packets, now that
3821 * we are initialized.
3822 */
3823 if (!isLDOMguest(nxgep)) {
3824 if (nxge_rx_mac_enable(nxgep) != NXGE_OK) {
3825 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3826 "<== nxge_m_start: enable of RX mac failed"));
3827 return (EIO);
3828 }
3829
3830 /*
3831 * Enable hardware interrupts.
3832 */
3833 nxge_intr_hw_enable(nxgep);
3834 }
3835 #if defined(sun4v)
3836 else {
3837 /*
3838 * In guest domain we enable RDCs and their interrupts as
3839 * the last step.
3840 */
3841 if (nxge_hio_rdc_enable(nxgep) != NXGE_OK) {
3842 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3843 "<== nxge_m_start: enable of RDCs failed"));
3844 return (EIO);
3845 }
3846
3847 if (nxge_hio_rdc_intr_arm(nxgep, B_TRUE) != NXGE_OK) {
3848 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3849 "<== nxge_m_start: intrs enable for RDCs failed"));
3850 return (EIO);
3851 }
3852 }
3853 #endif
3854 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_start"));
3855 return (0);
3856 }
3857
3858 static boolean_t
nxge_check_groups_stopped(p_nxge_t nxgep)3859 nxge_check_groups_stopped(p_nxge_t nxgep)
3860 {
3861 int i;
3862
3863 for (i = 0; i < NXGE_MAX_RDC_GROUPS; i++) {
3864 if (nxgep->rx_hio_groups[i].started)
3865 return (B_FALSE);
3866 }
3867
3868 return (B_TRUE);
3869 }
3870
3871 /*
3872 * nxge_m_stop(): stop transmitting and receiving.
3873 */
3874 static void
nxge_m_stop(void * arg)3875 nxge_m_stop(void *arg)
3876 {
3877 p_nxge_t nxgep = (p_nxge_t)arg;
3878 boolean_t groups_stopped;
3879
3880 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_stop"));
3881
3882 /*
3883 * Are the groups stopped?
3884 */
3885 groups_stopped = nxge_check_groups_stopped(nxgep);
3886 ASSERT(groups_stopped == B_TRUE);
3887 if (!groups_stopped) {
3888 cmn_err(CE_WARN, "nxge(%d): groups are not stopped!\n",
3889 nxgep->instance);
3890 return;
3891 }
3892
3893 if (!isLDOMguest(nxgep)) {
3894 /*
3895 * Disable the RX mac.
3896 */
3897 (void) nxge_rx_mac_disable(nxgep);
3898
3899 /*
3900 * Wait for the IPP to drain.
3901 */
3902 (void) nxge_ipp_drain(nxgep);
3903
3904 /*
3905 * Disable hardware interrupts.
3906 */
3907 nxge_intr_hw_disable(nxgep);
3908 }
3909 #if defined(sun4v)
3910 else {
3911 (void) nxge_hio_rdc_intr_arm(nxgep, B_FALSE);
3912 }
3913 #endif
3914
3915 /*
3916 * Grab the global lock.
3917 */
3918 MUTEX_ENTER(nxgep->genlock);
3919
3920 nxgep->nxge_mac_state = NXGE_MAC_STOPPING;
3921 if (nxgep->nxge_timerid) {
3922 nxge_stop_timer(nxgep, nxgep->nxge_timerid);
3923 nxgep->nxge_timerid = 0;
3924 }
3925
3926 /*
3927 * Clean up.
3928 */
3929 nxge_uninit(nxgep);
3930
3931 nxgep->nxge_mac_state = NXGE_MAC_STOPPED;
3932
3933 /*
3934 * Let go of the global lock.
3935 */
3936 MUTEX_EXIT(nxgep->genlock);
3937 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_stop"));
3938 }
3939
3940 static int
nxge_m_multicst(void * arg,boolean_t add,const uint8_t * mca)3941 nxge_m_multicst(void *arg, boolean_t add, const uint8_t *mca)
3942 {
3943 p_nxge_t nxgep = (p_nxge_t)arg;
3944 struct ether_addr addrp;
3945
3946 NXGE_DEBUG_MSG((nxgep, MAC_CTL,
3947 "==> nxge_m_multicst: add %d", add));
3948
3949 bcopy(mca, (uint8_t *)&addrp, ETHERADDRL);
3950 if (add) {
3951 if (nxge_add_mcast_addr(nxgep, &addrp)) {
3952 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3953 "<== nxge_m_multicst: add multicast failed"));
3954 return (EINVAL);
3955 }
3956 } else {
3957 if (nxge_del_mcast_addr(nxgep, &addrp)) {
3958 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3959 "<== nxge_m_multicst: del multicast failed"));
3960 return (EINVAL);
3961 }
3962 }
3963
3964 NXGE_DEBUG_MSG((nxgep, MAC_CTL, "<== nxge_m_multicst"));
3965
3966 return (0);
3967 }
3968
3969 static int
nxge_m_promisc(void * arg,boolean_t on)3970 nxge_m_promisc(void *arg, boolean_t on)
3971 {
3972 p_nxge_t nxgep = (p_nxge_t)arg;
3973
3974 NXGE_DEBUG_MSG((nxgep, MAC_CTL,
3975 "==> nxge_m_promisc: on %d", on));
3976
3977 if (nxge_set_promisc(nxgep, on)) {
3978 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3979 "<== nxge_m_promisc: set promisc failed"));
3980 return (EINVAL);
3981 }
3982
3983 NXGE_DEBUG_MSG((nxgep, MAC_CTL,
3984 "<== nxge_m_promisc: on %d", on));
3985
3986 return (0);
3987 }
3988
3989 static void
nxge_m_ioctl(void * arg,queue_t * wq,mblk_t * mp)3990 nxge_m_ioctl(void *arg, queue_t *wq, mblk_t *mp)
3991 {
3992 p_nxge_t nxgep = (p_nxge_t)arg;
3993 struct iocblk *iocp;
3994 boolean_t need_privilege;
3995 int err;
3996 int cmd;
3997
3998 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_ioctl"));
3999
4000 iocp = (struct iocblk *)mp->b_rptr;
4001 iocp->ioc_error = 0;
4002 need_privilege = B_TRUE;
4003 cmd = iocp->ioc_cmd;
4004 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_ioctl: cmd 0x%08x", cmd));
4005 switch (cmd) {
4006 default:
4007 miocnak(wq, mp, 0, EINVAL);
4008 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_ioctl: invalid"));
4009 return;
4010
4011 case LB_GET_INFO_SIZE:
4012 case LB_GET_INFO:
4013 case LB_GET_MODE:
4014 need_privilege = B_FALSE;
4015 break;
4016 case LB_SET_MODE:
4017 break;
4018
4019
4020 case NXGE_GET_MII:
4021 case NXGE_PUT_MII:
4022 case NXGE_GET64:
4023 case NXGE_PUT64:
4024 case NXGE_GET_TX_RING_SZ:
4025 case NXGE_GET_TX_DESC:
4026 case NXGE_TX_SIDE_RESET:
4027 case NXGE_RX_SIDE_RESET:
4028 case NXGE_GLOBAL_RESET:
4029 case NXGE_RESET_MAC:
4030 case NXGE_TX_REGS_DUMP:
4031 case NXGE_RX_REGS_DUMP:
4032 case NXGE_INT_REGS_DUMP:
4033 case NXGE_VIR_INT_REGS_DUMP:
4034 case NXGE_PUT_TCAM:
4035 case NXGE_GET_TCAM:
4036 case NXGE_RTRACE:
4037 case NXGE_RDUMP:
4038 case NXGE_RX_CLASS:
4039 case NXGE_RX_HASH:
4040
4041 need_privilege = B_FALSE;
4042 break;
4043 case NXGE_INJECT_ERR:
4044 cmn_err(CE_NOTE, "!nxge_m_ioctl: Inject error\n");
4045 nxge_err_inject(nxgep, wq, mp);
4046 break;
4047 }
4048
4049 if (need_privilege) {
4050 err = secpolicy_net_config(iocp->ioc_cr, B_FALSE);
4051 if (err != 0) {
4052 miocnak(wq, mp, 0, err);
4053 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4054 "<== nxge_m_ioctl: no priv"));
4055 return;
4056 }
4057 }
4058
4059 switch (cmd) {
4060
4061 case LB_GET_MODE:
4062 case LB_SET_MODE:
4063 case LB_GET_INFO_SIZE:
4064 case LB_GET_INFO:
4065 nxge_loopback_ioctl(nxgep, wq, mp, iocp);
4066 break;
4067
4068 case NXGE_GET_MII:
4069 case NXGE_PUT_MII:
4070 case NXGE_PUT_TCAM:
4071 case NXGE_GET_TCAM:
4072 case NXGE_GET64:
4073 case NXGE_PUT64:
4074 case NXGE_GET_TX_RING_SZ:
4075 case NXGE_GET_TX_DESC:
4076 case NXGE_TX_SIDE_RESET:
4077 case NXGE_RX_SIDE_RESET:
4078 case NXGE_GLOBAL_RESET:
4079 case NXGE_RESET_MAC:
4080 case NXGE_TX_REGS_DUMP:
4081 case NXGE_RX_REGS_DUMP:
4082 case NXGE_INT_REGS_DUMP:
4083 case NXGE_VIR_INT_REGS_DUMP:
4084 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4085 "==> nxge_m_ioctl: cmd 0x%x", cmd));
4086 nxge_hw_ioctl(nxgep, wq, mp, iocp);
4087 break;
4088 case NXGE_RX_CLASS:
4089 if (nxge_rxclass_ioctl(nxgep, wq, mp->b_cont) < 0)
4090 miocnak(wq, mp, 0, EINVAL);
4091 else
4092 miocack(wq, mp, sizeof (rx_class_cfg_t), 0);
4093 break;
4094 case NXGE_RX_HASH:
4095
4096 if (nxge_rxhash_ioctl(nxgep, wq, mp->b_cont) < 0)
4097 miocnak(wq, mp, 0, EINVAL);
4098 else
4099 miocack(wq, mp, sizeof (cfg_cmd_t), 0);
4100 break;
4101 }
4102
4103 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_ioctl"));
4104 }
4105
4106 extern void nxge_rx_hw_blank(void *arg, time_t ticks, uint_t count);
4107
4108 void
nxge_mmac_kstat_update(p_nxge_t nxgep,int slot,boolean_t factory)4109 nxge_mmac_kstat_update(p_nxge_t nxgep, int slot, boolean_t factory)
4110 {
4111 p_nxge_mmac_stats_t mmac_stats;
4112 int i;
4113 nxge_mmac_t *mmac_info;
4114
4115 mmac_info = &nxgep->nxge_mmac_info;
4116
4117 mmac_stats = &nxgep->statsp->mmac_stats;
4118 mmac_stats->mmac_max_cnt = mmac_info->num_mmac;
4119 mmac_stats->mmac_avail_cnt = mmac_info->naddrfree;
4120
4121 for (i = 0; i < ETHERADDRL; i++) {
4122 if (factory) {
4123 mmac_stats->mmac_avail_pool[slot-1].ether_addr_octet[i]
4124 = mmac_info->factory_mac_pool[slot][
4125 (ETHERADDRL-1) - i];
4126 } else {
4127 mmac_stats->mmac_avail_pool[slot-1].ether_addr_octet[i]
4128 = mmac_info->mac_pool[slot].addr[
4129 (ETHERADDRL - 1) - i];
4130 }
4131 }
4132 }
4133
4134 /*
4135 * nxge_altmac_set() -- Set an alternate MAC address
4136 */
4137 static int
nxge_altmac_set(p_nxge_t nxgep,uint8_t * maddr,int slot,int rdctbl,boolean_t usetbl)4138 nxge_altmac_set(p_nxge_t nxgep, uint8_t *maddr, int slot,
4139 int rdctbl, boolean_t usetbl)
4140 {
4141 uint8_t addrn;
4142 uint8_t portn;
4143 npi_mac_addr_t altmac;
4144 hostinfo_t mac_rdc;
4145 p_nxge_class_pt_cfg_t clscfgp;
4146
4147
4148 altmac.w2 = ((uint16_t)maddr[0] << 8) | ((uint16_t)maddr[1] & 0x0ff);
4149 altmac.w1 = ((uint16_t)maddr[2] << 8) | ((uint16_t)maddr[3] & 0x0ff);
4150 altmac.w0 = ((uint16_t)maddr[4] << 8) | ((uint16_t)maddr[5] & 0x0ff);
4151
4152 portn = nxgep->mac.portnum;
4153 addrn = (uint8_t)slot - 1;
4154
4155 if (npi_mac_altaddr_entry(nxgep->npi_handle, OP_SET,
4156 nxgep->function_num, addrn, &altmac) != NPI_SUCCESS)
4157 return (EIO);
4158
4159 /*
4160 * Set the rdc table number for the host info entry
4161 * for this mac address slot.
4162 */
4163 clscfgp = (p_nxge_class_pt_cfg_t)&nxgep->class_config;
4164 mac_rdc.value = 0;
4165 if (usetbl)
4166 mac_rdc.bits.w0.rdc_tbl_num = rdctbl;
4167 else
4168 mac_rdc.bits.w0.rdc_tbl_num =
4169 clscfgp->mac_host_info[addrn].rdctbl;
4170 mac_rdc.bits.w0.mac_pref = clscfgp->mac_host_info[addrn].mpr_npr;
4171
4172 if (npi_mac_hostinfo_entry(nxgep->npi_handle, OP_SET,
4173 nxgep->function_num, addrn, &mac_rdc) != NPI_SUCCESS) {
4174 return (EIO);
4175 }
4176
4177 /*
4178 * Enable comparison with the alternate MAC address.
4179 * While the first alternate addr is enabled by bit 1 of register
4180 * BMAC_ALTAD_CMPEN, it is enabled by bit 0 of register
4181 * XMAC_ADDR_CMPEN, so slot needs to be converted to addrn
4182 * accordingly before calling npi_mac_altaddr_entry.
4183 */
4184 if (portn == XMAC_PORT_0 || portn == XMAC_PORT_1)
4185 addrn = (uint8_t)slot - 1;
4186 else
4187 addrn = (uint8_t)slot;
4188
4189 if (npi_mac_altaddr_enable(nxgep->npi_handle,
4190 nxgep->function_num, addrn) != NPI_SUCCESS) {
4191 return (EIO);
4192 }
4193
4194 return (0);
4195 }
4196
4197 /*
4198 * nxeg_m_mmac_add_g() - find an unused address slot, set the address
4199 * value to the one specified, enable the port to start filtering on
4200 * the new MAC address. Returns 0 on success.
4201 */
4202 int
nxge_m_mmac_add_g(void * arg,const uint8_t * maddr,int rdctbl,boolean_t usetbl)4203 nxge_m_mmac_add_g(void *arg, const uint8_t *maddr, int rdctbl,
4204 boolean_t usetbl)
4205 {
4206 p_nxge_t nxgep = arg;
4207 int slot;
4208 nxge_mmac_t *mmac_info;
4209 int err;
4210 nxge_status_t status;
4211
4212 mutex_enter(nxgep->genlock);
4213
4214 /*
4215 * Make sure that nxge is initialized, if _start() has
4216 * not been called.
4217 */
4218 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
4219 status = nxge_init(nxgep);
4220 if (status != NXGE_OK) {
4221 mutex_exit(nxgep->genlock);
4222 return (ENXIO);
4223 }
4224 }
4225
4226 mmac_info = &nxgep->nxge_mmac_info;
4227 if (mmac_info->naddrfree == 0) {
4228 mutex_exit(nxgep->genlock);
4229 return (ENOSPC);
4230 }
4231
4232 /*
4233 * Search for the first available slot. Because naddrfree
4234 * is not zero, we are guaranteed to find one.
4235 * Each of the first two ports of Neptune has 16 alternate
4236 * MAC slots but only the first 7 (of 15) slots have assigned factory
4237 * MAC addresses. We first search among the slots without bundled
4238 * factory MACs. If we fail to find one in that range, then we
4239 * search the slots with bundled factory MACs. A factory MAC
4240 * will be wasted while the slot is used with a user MAC address.
4241 * But the slot could be used by factory MAC again after calling
4242 * nxge_m_mmac_remove and nxge_m_mmac_reserve.
4243 */
4244 for (slot = 0; slot <= mmac_info->num_mmac; slot++) {
4245 if (!(mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED))
4246 break;
4247 }
4248
4249 ASSERT(slot <= mmac_info->num_mmac);
4250
4251 if ((err = nxge_altmac_set(nxgep, (uint8_t *)maddr, slot, rdctbl,
4252 usetbl)) != 0) {
4253 mutex_exit(nxgep->genlock);
4254 return (err);
4255 }
4256
4257 bcopy(maddr, mmac_info->mac_pool[slot].addr, ETHERADDRL);
4258 mmac_info->mac_pool[slot].flags |= MMAC_SLOT_USED;
4259 mmac_info->mac_pool[slot].flags &= ~MMAC_VENDOR_ADDR;
4260 mmac_info->naddrfree--;
4261 nxge_mmac_kstat_update(nxgep, slot, B_FALSE);
4262
4263 mutex_exit(nxgep->genlock);
4264 return (0);
4265 }
4266
4267 /*
4268 * Remove the specified mac address and update the HW not to filter
4269 * the mac address anymore.
4270 */
4271 int
nxge_m_mmac_remove(void * arg,int slot)4272 nxge_m_mmac_remove(void *arg, int slot)
4273 {
4274 p_nxge_t nxgep = arg;
4275 nxge_mmac_t *mmac_info;
4276 uint8_t addrn;
4277 uint8_t portn;
4278 int err = 0;
4279 nxge_status_t status;
4280
4281 mutex_enter(nxgep->genlock);
4282
4283 /*
4284 * Make sure that nxge is initialized, if _start() has
4285 * not been called.
4286 */
4287 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
4288 status = nxge_init(nxgep);
4289 if (status != NXGE_OK) {
4290 mutex_exit(nxgep->genlock);
4291 return (ENXIO);
4292 }
4293 }
4294
4295 mmac_info = &nxgep->nxge_mmac_info;
4296 if (slot < 1 || slot > mmac_info->num_mmac) {
4297 mutex_exit(nxgep->genlock);
4298 return (EINVAL);
4299 }
4300
4301 portn = nxgep->mac.portnum;
4302 if (portn == XMAC_PORT_0 || portn == XMAC_PORT_1)
4303 addrn = (uint8_t)slot - 1;
4304 else
4305 addrn = (uint8_t)slot;
4306
4307 if (mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED) {
4308 if (npi_mac_altaddr_disable(nxgep->npi_handle, portn, addrn)
4309 == NPI_SUCCESS) {
4310 mmac_info->naddrfree++;
4311 mmac_info->mac_pool[slot].flags &= ~MMAC_SLOT_USED;
4312 /*
4313 * Regardless if the MAC we just stopped filtering
4314 * is a user addr or a facory addr, we must set
4315 * the MMAC_VENDOR_ADDR flag if this slot has an
4316 * associated factory MAC to indicate that a factory
4317 * MAC is available.
4318 */
4319 if (slot <= mmac_info->num_factory_mmac) {
4320 mmac_info->mac_pool[slot].flags
4321 |= MMAC_VENDOR_ADDR;
4322 }
4323 /*
4324 * Clear mac_pool[slot].addr so that kstat shows 0
4325 * alternate MAC address if the slot is not used.
4326 * (But nxge_m_mmac_get returns the factory MAC even
4327 * when the slot is not used!)
4328 */
4329 bzero(mmac_info->mac_pool[slot].addr, ETHERADDRL);
4330 nxge_mmac_kstat_update(nxgep, slot, B_FALSE);
4331 } else {
4332 err = EIO;
4333 }
4334 } else {
4335 err = EINVAL;
4336 }
4337
4338 mutex_exit(nxgep->genlock);
4339 return (err);
4340 }
4341
4342 /*
4343 * The callback to query all the factory addresses. naddr must be the same as
4344 * the number of factory addresses (returned by MAC_CAPAB_MULTIFACTADDR), and
4345 * mcm_addr is the space allocated for keep all the addresses, whose size is
4346 * naddr * MAXMACADDRLEN.
4347 */
4348 static void
nxge_m_getfactaddr(void * arg,uint_t naddr,uint8_t * addr)4349 nxge_m_getfactaddr(void *arg, uint_t naddr, uint8_t *addr)
4350 {
4351 nxge_t *nxgep = arg;
4352 nxge_mmac_t *mmac_info;
4353 int i;
4354
4355 mutex_enter(nxgep->genlock);
4356
4357 mmac_info = &nxgep->nxge_mmac_info;
4358 ASSERT(naddr == mmac_info->num_factory_mmac);
4359
4360 for (i = 0; i < naddr; i++) {
4361 bcopy(mmac_info->factory_mac_pool[i + 1],
4362 addr + i * MAXMACADDRLEN, ETHERADDRL);
4363 }
4364
4365 mutex_exit(nxgep->genlock);
4366 }
4367
4368
4369 static boolean_t
nxge_m_getcapab(void * arg,mac_capab_t cap,void * cap_data)4370 nxge_m_getcapab(void *arg, mac_capab_t cap, void *cap_data)
4371 {
4372 nxge_t *nxgep = arg;
4373 uint32_t *txflags = cap_data;
4374
4375 switch (cap) {
4376 case MAC_CAPAB_HCKSUM:
4377 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4378 "==> nxge_m_getcapab: checksum %d", nxge_cksum_offload));
4379 if (nxge_cksum_offload <= 1) {
4380 *txflags = HCKSUM_INET_PARTIAL;
4381 }
4382 break;
4383
4384 case MAC_CAPAB_MULTIFACTADDR: {
4385 mac_capab_multifactaddr_t *mfacp = cap_data;
4386
4387 if (!isLDOMguest(nxgep)) {
4388 mutex_enter(nxgep->genlock);
4389 mfacp->mcm_naddr =
4390 nxgep->nxge_mmac_info.num_factory_mmac;
4391 mfacp->mcm_getaddr = nxge_m_getfactaddr;
4392 mutex_exit(nxgep->genlock);
4393 }
4394 break;
4395 }
4396
4397 case MAC_CAPAB_LSO: {
4398 mac_capab_lso_t *cap_lso = cap_data;
4399
4400 if (nxgep->soft_lso_enable) {
4401 if (nxge_cksum_offload <= 1) {
4402 cap_lso->lso_flags = LSO_TX_BASIC_TCP_IPV4;
4403 if (nxge_lso_max > NXGE_LSO_MAXLEN) {
4404 nxge_lso_max = NXGE_LSO_MAXLEN;
4405 }
4406 cap_lso->lso_basic_tcp_ipv4.lso_max =
4407 nxge_lso_max;
4408 }
4409 break;
4410 } else {
4411 return (B_FALSE);
4412 }
4413 }
4414
4415 case MAC_CAPAB_RINGS: {
4416 mac_capab_rings_t *cap_rings = cap_data;
4417 p_nxge_hw_pt_cfg_t p_cfgp = &nxgep->pt_config.hw_config;
4418
4419 mutex_enter(nxgep->genlock);
4420 if (cap_rings->mr_type == MAC_RING_TYPE_RX) {
4421 if (isLDOMguest(nxgep)) {
4422 cap_rings->mr_group_type =
4423 MAC_GROUP_TYPE_STATIC;
4424 cap_rings->mr_rnum =
4425 NXGE_HIO_SHARE_MAX_CHANNELS;
4426 cap_rings->mr_rget = nxge_fill_ring;
4427 cap_rings->mr_gnum = 1;
4428 cap_rings->mr_gget = nxge_hio_group_get;
4429 cap_rings->mr_gaddring = NULL;
4430 cap_rings->mr_gremring = NULL;
4431 } else {
4432 /*
4433 * Service Domain.
4434 */
4435 cap_rings->mr_group_type =
4436 MAC_GROUP_TYPE_DYNAMIC;
4437 cap_rings->mr_rnum = p_cfgp->max_rdcs;
4438 cap_rings->mr_rget = nxge_fill_ring;
4439 cap_rings->mr_gnum = p_cfgp->max_rdc_grpids;
4440 cap_rings->mr_gget = nxge_hio_group_get;
4441 cap_rings->mr_gaddring = nxge_group_add_ring;
4442 cap_rings->mr_gremring = nxge_group_rem_ring;
4443 }
4444
4445 NXGE_DEBUG_MSG((nxgep, RX_CTL,
4446 "==> nxge_m_getcapab: rx nrings[%d] ngroups[%d]",
4447 p_cfgp->max_rdcs, p_cfgp->max_rdc_grpids));
4448 } else {
4449 /*
4450 * TX Rings.
4451 */
4452 if (isLDOMguest(nxgep)) {
4453 cap_rings->mr_group_type =
4454 MAC_GROUP_TYPE_STATIC;
4455 cap_rings->mr_rnum =
4456 NXGE_HIO_SHARE_MAX_CHANNELS;
4457 cap_rings->mr_rget = nxge_fill_ring;
4458 cap_rings->mr_gnum = 0;
4459 cap_rings->mr_gget = NULL;
4460 cap_rings->mr_gaddring = NULL;
4461 cap_rings->mr_gremring = NULL;
4462 } else {
4463 /*
4464 * Service Domain.
4465 */
4466 cap_rings->mr_group_type =
4467 MAC_GROUP_TYPE_DYNAMIC;
4468 cap_rings->mr_rnum = p_cfgp->tdc.count;
4469 cap_rings->mr_rget = nxge_fill_ring;
4470
4471 /*
4472 * Share capable.
4473 *
4474 * Do not report the default group: hence -1
4475 */
4476 cap_rings->mr_gnum =
4477 NXGE_MAX_TDC_GROUPS / nxgep->nports - 1;
4478 cap_rings->mr_gget = nxge_hio_group_get;
4479 cap_rings->mr_gaddring = nxge_group_add_ring;
4480 cap_rings->mr_gremring = nxge_group_rem_ring;
4481 }
4482
4483 NXGE_DEBUG_MSG((nxgep, TX_CTL,
4484 "==> nxge_m_getcapab: tx rings # of rings %d",
4485 p_cfgp->tdc.count));
4486 }
4487 mutex_exit(nxgep->genlock);
4488 break;
4489 }
4490
4491 #if defined(sun4v)
4492 case MAC_CAPAB_SHARES: {
4493 mac_capab_share_t *mshares = (mac_capab_share_t *)cap_data;
4494
4495 /*
4496 * Only the service domain driver responds to
4497 * this capability request.
4498 */
4499 mutex_enter(nxgep->genlock);
4500 if (isLDOMservice(nxgep)) {
4501 mshares->ms_snum = 3;
4502 mshares->ms_handle = (void *)nxgep;
4503 mshares->ms_salloc = nxge_hio_share_alloc;
4504 mshares->ms_sfree = nxge_hio_share_free;
4505 mshares->ms_sadd = nxge_hio_share_add_group;
4506 mshares->ms_sremove = nxge_hio_share_rem_group;
4507 mshares->ms_squery = nxge_hio_share_query;
4508 mshares->ms_sbind = nxge_hio_share_bind;
4509 mshares->ms_sunbind = nxge_hio_share_unbind;
4510 mutex_exit(nxgep->genlock);
4511 } else {
4512 mutex_exit(nxgep->genlock);
4513 return (B_FALSE);
4514 }
4515 break;
4516 }
4517 #endif
4518 default:
4519 return (B_FALSE);
4520 }
4521 return (B_TRUE);
4522 }
4523
4524 static boolean_t
nxge_param_locked(mac_prop_id_t pr_num)4525 nxge_param_locked(mac_prop_id_t pr_num)
4526 {
4527 /*
4528 * All adv_* parameters are locked (read-only) while
4529 * the device is in any sort of loopback mode ...
4530 */
4531 switch (pr_num) {
4532 case MAC_PROP_ADV_1000FDX_CAP:
4533 case MAC_PROP_EN_1000FDX_CAP:
4534 case MAC_PROP_ADV_1000HDX_CAP:
4535 case MAC_PROP_EN_1000HDX_CAP:
4536 case MAC_PROP_ADV_100FDX_CAP:
4537 case MAC_PROP_EN_100FDX_CAP:
4538 case MAC_PROP_ADV_100HDX_CAP:
4539 case MAC_PROP_EN_100HDX_CAP:
4540 case MAC_PROP_ADV_10FDX_CAP:
4541 case MAC_PROP_EN_10FDX_CAP:
4542 case MAC_PROP_ADV_10HDX_CAP:
4543 case MAC_PROP_EN_10HDX_CAP:
4544 case MAC_PROP_AUTONEG:
4545 case MAC_PROP_FLOWCTRL:
4546 return (B_TRUE);
4547 }
4548 return (B_FALSE);
4549 }
4550
4551 /*
4552 * callback functions for set/get of properties
4553 */
4554 static int
nxge_m_setprop(void * barg,const char * pr_name,mac_prop_id_t pr_num,uint_t pr_valsize,const void * pr_val)4555 nxge_m_setprop(void *barg, const char *pr_name, mac_prop_id_t pr_num,
4556 uint_t pr_valsize, const void *pr_val)
4557 {
4558 nxge_t *nxgep = barg;
4559 p_nxge_param_t param_arr = nxgep->param_arr;
4560 p_nxge_stats_t statsp = nxgep->statsp;
4561 int err = 0;
4562
4563 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_setprop"));
4564
4565 mutex_enter(nxgep->genlock);
4566 if (statsp->port_stats.lb_mode != nxge_lb_normal &&
4567 nxge_param_locked(pr_num)) {
4568 /*
4569 * All adv_* parameters are locked (read-only)
4570 * while the device is in any sort of loopback mode.
4571 */
4572 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4573 "==> nxge_m_setprop: loopback mode: read only"));
4574 mutex_exit(nxgep->genlock);
4575 return (EBUSY);
4576 }
4577
4578 switch (pr_num) {
4579 case MAC_PROP_EN_1000FDX_CAP:
4580 nxgep->param_en_1000fdx =
4581 param_arr[param_anar_1000fdx].value = *(uint8_t *)pr_val;
4582 goto reprogram;
4583
4584 case MAC_PROP_EN_100FDX_CAP:
4585 nxgep->param_en_100fdx =
4586 param_arr[param_anar_100fdx].value = *(uint8_t *)pr_val;
4587 goto reprogram;
4588
4589 case MAC_PROP_EN_10FDX_CAP:
4590 nxgep->param_en_10fdx =
4591 param_arr[param_anar_10fdx].value = *(uint8_t *)pr_val;
4592 goto reprogram;
4593
4594 case MAC_PROP_AUTONEG:
4595 param_arr[param_autoneg].value = *(uint8_t *)pr_val;
4596 goto reprogram;
4597
4598 case MAC_PROP_MTU: {
4599 uint32_t cur_mtu, new_mtu, old_framesize;
4600
4601 cur_mtu = nxgep->mac.default_mtu;
4602 ASSERT(pr_valsize >= sizeof (new_mtu));
4603 bcopy(pr_val, &new_mtu, sizeof (new_mtu));
4604
4605 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4606 "==> nxge_m_setprop: set MTU: %d is_jumbo %d",
4607 new_mtu, nxgep->mac.is_jumbo));
4608
4609 if (new_mtu == cur_mtu) {
4610 err = 0;
4611 break;
4612 }
4613
4614 if (nxgep->nxge_mac_state == NXGE_MAC_STARTED) {
4615 err = EBUSY;
4616 break;
4617 }
4618
4619 if ((new_mtu < NXGE_DEFAULT_MTU) ||
4620 (new_mtu > NXGE_MAXIMUM_MTU)) {
4621 err = EINVAL;
4622 break;
4623 }
4624
4625 old_framesize = (uint32_t)nxgep->mac.maxframesize;
4626 nxgep->mac.maxframesize = (uint16_t)
4627 (new_mtu + NXGE_EHEADER_VLAN_CRC);
4628 if (nxge_mac_set_framesize(nxgep)) {
4629 nxgep->mac.maxframesize =
4630 (uint16_t)old_framesize;
4631 err = EINVAL;
4632 break;
4633 }
4634
4635 nxgep->mac.default_mtu = new_mtu;
4636 nxgep->mac.is_jumbo = (new_mtu > NXGE_DEFAULT_MTU);
4637
4638 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4639 "==> nxge_m_setprop: set MTU: %d maxframe %d",
4640 new_mtu, nxgep->mac.maxframesize));
4641 break;
4642 }
4643
4644 case MAC_PROP_FLOWCTRL: {
4645 link_flowctrl_t fl;
4646
4647 ASSERT(pr_valsize >= sizeof (fl));
4648 bcopy(pr_val, &fl, sizeof (fl));
4649
4650 switch (fl) {
4651 case LINK_FLOWCTRL_NONE:
4652 param_arr[param_anar_pause].value = 0;
4653 break;
4654
4655 case LINK_FLOWCTRL_RX:
4656 param_arr[param_anar_pause].value = 1;
4657 break;
4658
4659 case LINK_FLOWCTRL_TX:
4660 case LINK_FLOWCTRL_BI:
4661 err = EINVAL;
4662 break;
4663 default:
4664 err = EINVAL;
4665 break;
4666 }
4667 reprogram:
4668 if ((err == 0) && !isLDOMguest(nxgep)) {
4669 if (!nxge_param_link_update(nxgep)) {
4670 err = EINVAL;
4671 }
4672 } else {
4673 err = EINVAL;
4674 }
4675 break;
4676 }
4677
4678 case MAC_PROP_PRIVATE:
4679 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4680 "==> nxge_m_setprop: private property"));
4681 err = nxge_set_priv_prop(nxgep, pr_name, pr_valsize, pr_val);
4682 break;
4683
4684 default:
4685 err = ENOTSUP;
4686 break;
4687 }
4688
4689 mutex_exit(nxgep->genlock);
4690
4691 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4692 "<== nxge_m_setprop (return %d)", err));
4693 return (err);
4694 }
4695
4696 static int
nxge_m_getprop(void * barg,const char * pr_name,mac_prop_id_t pr_num,uint_t pr_valsize,void * pr_val)4697 nxge_m_getprop(void *barg, const char *pr_name, mac_prop_id_t pr_num,
4698 uint_t pr_valsize, void *pr_val)
4699 {
4700 nxge_t *nxgep = barg;
4701 p_nxge_param_t param_arr = nxgep->param_arr;
4702 p_nxge_stats_t statsp = nxgep->statsp;
4703
4704 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4705 "==> nxge_m_getprop: pr_num %d", pr_num));
4706
4707 switch (pr_num) {
4708 case MAC_PROP_DUPLEX:
4709 *(uint8_t *)pr_val = statsp->mac_stats.link_duplex;
4710 break;
4711
4712 case MAC_PROP_SPEED: {
4713 uint64_t val = statsp->mac_stats.link_speed * 1000000ull;
4714
4715 ASSERT(pr_valsize >= sizeof (val));
4716 bcopy(&val, pr_val, sizeof (val));
4717 break;
4718 }
4719
4720 case MAC_PROP_STATUS: {
4721 link_state_t state = statsp->mac_stats.link_up ?
4722 LINK_STATE_UP : LINK_STATE_DOWN;
4723
4724 ASSERT(pr_valsize >= sizeof (state));
4725 bcopy(&state, pr_val, sizeof (state));
4726 break;
4727 }
4728
4729 case MAC_PROP_AUTONEG:
4730 *(uint8_t *)pr_val = param_arr[param_autoneg].value;
4731 break;
4732
4733 case MAC_PROP_FLOWCTRL: {
4734 link_flowctrl_t fl = param_arr[param_anar_pause].value != 0 ?
4735 LINK_FLOWCTRL_RX : LINK_FLOWCTRL_NONE;
4736
4737 ASSERT(pr_valsize >= sizeof (fl));
4738 bcopy(&fl, pr_val, sizeof (fl));
4739 break;
4740 }
4741
4742 case MAC_PROP_ADV_1000FDX_CAP:
4743 *(uint8_t *)pr_val = param_arr[param_anar_1000fdx].value;
4744 break;
4745
4746 case MAC_PROP_EN_1000FDX_CAP:
4747 *(uint8_t *)pr_val = nxgep->param_en_1000fdx;
4748 break;
4749
4750 case MAC_PROP_ADV_100FDX_CAP:
4751 *(uint8_t *)pr_val = param_arr[param_anar_100fdx].value;
4752 break;
4753
4754 case MAC_PROP_EN_100FDX_CAP:
4755 *(uint8_t *)pr_val = nxgep->param_en_100fdx;
4756 break;
4757
4758 case MAC_PROP_ADV_10FDX_CAP:
4759 *(uint8_t *)pr_val = param_arr[param_anar_10fdx].value;
4760 break;
4761
4762 case MAC_PROP_EN_10FDX_CAP:
4763 *(uint8_t *)pr_val = nxgep->param_en_10fdx;
4764 break;
4765
4766 case MAC_PROP_PRIVATE:
4767 return (nxge_get_priv_prop(nxgep, pr_name, pr_valsize,
4768 pr_val));
4769
4770 default:
4771 return (ENOTSUP);
4772 }
4773
4774 return (0);
4775 }
4776
4777 static void
nxge_m_propinfo(void * barg,const char * pr_name,mac_prop_id_t pr_num,mac_prop_info_handle_t prh)4778 nxge_m_propinfo(void *barg, const char *pr_name, mac_prop_id_t pr_num,
4779 mac_prop_info_handle_t prh)
4780 {
4781 nxge_t *nxgep = barg;
4782 p_nxge_stats_t statsp = nxgep->statsp;
4783
4784 /*
4785 * By default permissions are read/write unless specified
4786 * otherwise by the driver.
4787 */
4788
4789 switch (pr_num) {
4790 case MAC_PROP_DUPLEX:
4791 case MAC_PROP_SPEED:
4792 case MAC_PROP_STATUS:
4793 case MAC_PROP_EN_1000HDX_CAP:
4794 case MAC_PROP_EN_100HDX_CAP:
4795 case MAC_PROP_EN_10HDX_CAP:
4796 case MAC_PROP_ADV_1000FDX_CAP:
4797 case MAC_PROP_ADV_1000HDX_CAP:
4798 case MAC_PROP_ADV_100FDX_CAP:
4799 case MAC_PROP_ADV_100HDX_CAP:
4800 case MAC_PROP_ADV_10FDX_CAP:
4801 case MAC_PROP_ADV_10HDX_CAP:
4802 /*
4803 * Note that read-only properties don't need to
4804 * provide default values since they cannot be
4805 * changed by the administrator.
4806 */
4807 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
4808 break;
4809
4810 case MAC_PROP_EN_1000FDX_CAP:
4811 case MAC_PROP_EN_100FDX_CAP:
4812 case MAC_PROP_EN_10FDX_CAP:
4813 mac_prop_info_set_default_uint8(prh, 1);
4814 break;
4815
4816 case MAC_PROP_AUTONEG:
4817 mac_prop_info_set_default_uint8(prh, 1);
4818 break;
4819
4820 case MAC_PROP_FLOWCTRL:
4821 mac_prop_info_set_default_link_flowctrl(prh, LINK_FLOWCTRL_RX);
4822 break;
4823
4824 case MAC_PROP_MTU:
4825 mac_prop_info_set_range_uint32(prh,
4826 NXGE_DEFAULT_MTU, NXGE_MAXIMUM_MTU);
4827 break;
4828
4829 case MAC_PROP_PRIVATE:
4830 nxge_priv_propinfo(pr_name, prh);
4831 break;
4832 }
4833
4834 mutex_enter(nxgep->genlock);
4835 if (statsp->port_stats.lb_mode != nxge_lb_normal &&
4836 nxge_param_locked(pr_num)) {
4837 /*
4838 * Some properties are locked (read-only) while the
4839 * device is in any sort of loopback mode.
4840 */
4841 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
4842 }
4843 mutex_exit(nxgep->genlock);
4844 }
4845
4846 static void
nxge_priv_propinfo(const char * pr_name,mac_prop_info_handle_t prh)4847 nxge_priv_propinfo(const char *pr_name, mac_prop_info_handle_t prh)
4848 {
4849 char valstr[64];
4850
4851 bzero(valstr, sizeof (valstr));
4852
4853 if (strcmp(pr_name, "_function_number") == 0 ||
4854 strcmp(pr_name, "_fw_version") == 0 ||
4855 strcmp(pr_name, "_port_mode") == 0 ||
4856 strcmp(pr_name, "_hot_swap_phy") == 0) {
4857 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
4858
4859 } else if (strcmp(pr_name, "_rxdma_intr_time") == 0) {
4860 (void) snprintf(valstr, sizeof (valstr),
4861 "%d", RXDMA_RCR_TO_DEFAULT);
4862
4863 } else if (strcmp(pr_name, "_rxdma_intr_pkts") == 0) {
4864 (void) snprintf(valstr, sizeof (valstr),
4865 "%d", RXDMA_RCR_PTHRES_DEFAULT);
4866
4867 } else if (strcmp(pr_name, "_class_opt_ipv4_tcp") == 0 ||
4868 strcmp(pr_name, "_class_opt_ipv4_udp") == 0 ||
4869 strcmp(pr_name, "_class_opt_ipv4_ah") == 0 ||
4870 strcmp(pr_name, "_class_opt_ipv4_sctp") == 0 ||
4871 strcmp(pr_name, "_class_opt_ipv6_tcp") == 0 ||
4872 strcmp(pr_name, "_class_opt_ipv6_udp") == 0 ||
4873 strcmp(pr_name, "_class_opt_ipv6_ah") == 0 ||
4874 strcmp(pr_name, "_class_opt_ipv6_sctp") == 0) {
4875 (void) snprintf(valstr, sizeof (valstr), "%x",
4876 NXGE_CLASS_FLOW_GEN_SERVER);
4877
4878 } else if (strcmp(pr_name, "_soft_lso_enable") == 0) {
4879 (void) snprintf(valstr, sizeof (valstr), "%d", 0);
4880
4881 } else if (strcmp(pr_name, "_adv_10gfdx_cap") == 0) {
4882 (void) snprintf(valstr, sizeof (valstr), "%d", 1);
4883
4884 } else if (strcmp(pr_name, "_adv_pause_cap") == 0) {
4885 (void) snprintf(valstr, sizeof (valstr), "%d", 1);
4886 }
4887
4888 if (strlen(valstr) > 0)
4889 mac_prop_info_set_default_str(prh, valstr);
4890 }
4891
4892 /* ARGSUSED */
4893 static int
nxge_set_priv_prop(p_nxge_t nxgep,const char * pr_name,uint_t pr_valsize,const void * pr_val)4894 nxge_set_priv_prop(p_nxge_t nxgep, const char *pr_name, uint_t pr_valsize,
4895 const void *pr_val)
4896 {
4897 p_nxge_param_t param_arr = nxgep->param_arr;
4898 int err = 0;
4899 long result;
4900
4901 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4902 "==> nxge_set_priv_prop: name %s", pr_name));
4903
4904 /* Blanking */
4905 if (strcmp(pr_name, "_rxdma_intr_time") == 0) {
4906 err = nxge_param_rx_intr_time(nxgep, NULL, NULL,
4907 (char *)pr_val,
4908 (caddr_t)¶m_arr[param_rxdma_intr_time]);
4909 if (err) {
4910 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4911 "<== nxge_set_priv_prop: "
4912 "unable to set (%s)", pr_name));
4913 err = EINVAL;
4914 } else {
4915 err = 0;
4916 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4917 "<== nxge_set_priv_prop: "
4918 "set (%s)", pr_name));
4919 }
4920
4921 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4922 "<== nxge_set_priv_prop: name %s (value %d)",
4923 pr_name, result));
4924
4925 return (err);
4926 }
4927
4928 if (strcmp(pr_name, "_rxdma_intr_pkts") == 0) {
4929 err = nxge_param_rx_intr_pkts(nxgep, NULL, NULL,
4930 (char *)pr_val,
4931 (caddr_t)¶m_arr[param_rxdma_intr_pkts]);
4932 if (err) {
4933 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4934 "<== nxge_set_priv_prop: "
4935 "unable to set (%s)", pr_name));
4936 err = EINVAL;
4937 } else {
4938 err = 0;
4939 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4940 "<== nxge_set_priv_prop: "
4941 "set (%s)", pr_name));
4942 }
4943
4944 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4945 "<== nxge_set_priv_prop: name %s (value %d)",
4946 pr_name, result));
4947
4948 return (err);
4949 }
4950
4951 /* Classification */
4952 if (strcmp(pr_name, "_class_opt_ipv4_tcp") == 0) {
4953 if (pr_val == NULL) {
4954 err = EINVAL;
4955 return (err);
4956 }
4957 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
4958
4959 err = nxge_param_set_ip_opt(nxgep, NULL,
4960 NULL, (char *)pr_val,
4961 (caddr_t)¶m_arr[param_class_opt_ipv4_tcp]);
4962
4963 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4964 "<== nxge_set_priv_prop: name %s (value 0x%x)",
4965 pr_name, result));
4966
4967 return (err);
4968 }
4969
4970 if (strcmp(pr_name, "_class_opt_ipv4_udp") == 0) {
4971 if (pr_val == NULL) {
4972 err = EINVAL;
4973 return (err);
4974 }
4975 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
4976
4977 err = nxge_param_set_ip_opt(nxgep, NULL,
4978 NULL, (char *)pr_val,
4979 (caddr_t)¶m_arr[param_class_opt_ipv4_udp]);
4980
4981 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4982 "<== nxge_set_priv_prop: name %s (value 0x%x)",
4983 pr_name, result));
4984
4985 return (err);
4986 }
4987 if (strcmp(pr_name, "_class_opt_ipv4_ah") == 0) {
4988 if (pr_val == NULL) {
4989 err = EINVAL;
4990 return (err);
4991 }
4992 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
4993
4994 err = nxge_param_set_ip_opt(nxgep, NULL,
4995 NULL, (char *)pr_val,
4996 (caddr_t)¶m_arr[param_class_opt_ipv4_ah]);
4997
4998 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4999 "<== nxge_set_priv_prop: name %s (value 0x%x)",
5000 pr_name, result));
5001
5002 return (err);
5003 }
5004 if (strcmp(pr_name, "_class_opt_ipv4_sctp") == 0) {
5005 if (pr_val == NULL) {
5006 err = EINVAL;
5007 return (err);
5008 }
5009 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
5010
5011 err = nxge_param_set_ip_opt(nxgep, NULL,
5012 NULL, (char *)pr_val,
5013 (caddr_t)¶m_arr[param_class_opt_ipv4_sctp]);
5014
5015 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5016 "<== nxge_set_priv_prop: name %s (value 0x%x)",
5017 pr_name, result));
5018
5019 return (err);
5020 }
5021
5022 if (strcmp(pr_name, "_class_opt_ipv6_tcp") == 0) {
5023 if (pr_val == NULL) {
5024 err = EINVAL;
5025 return (err);
5026 }
5027 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
5028
5029 err = nxge_param_set_ip_opt(nxgep, NULL,
5030 NULL, (char *)pr_val,
5031 (caddr_t)¶m_arr[param_class_opt_ipv6_tcp]);
5032
5033 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5034 "<== nxge_set_priv_prop: name %s (value 0x%x)",
5035 pr_name, result));
5036
5037 return (err);
5038 }
5039
5040 if (strcmp(pr_name, "_class_opt_ipv6_udp") == 0) {
5041 if (pr_val == NULL) {
5042 err = EINVAL;
5043 return (err);
5044 }
5045 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
5046
5047 err = nxge_param_set_ip_opt(nxgep, NULL,
5048 NULL, (char *)pr_val,
5049 (caddr_t)¶m_arr[param_class_opt_ipv6_udp]);
5050
5051 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5052 "<== nxge_set_priv_prop: name %s (value 0x%x)",
5053 pr_name, result));
5054
5055 return (err);
5056 }
5057 if (strcmp(pr_name, "_class_opt_ipv6_ah") == 0) {
5058 if (pr_val == NULL) {
5059 err = EINVAL;
5060 return (err);
5061 }
5062 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
5063
5064 err = nxge_param_set_ip_opt(nxgep, NULL,
5065 NULL, (char *)pr_val,
5066 (caddr_t)¶m_arr[param_class_opt_ipv6_ah]);
5067
5068 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5069 "<== nxge_set_priv_prop: name %s (value 0x%x)",
5070 pr_name, result));
5071
5072 return (err);
5073 }
5074 if (strcmp(pr_name, "_class_opt_ipv6_sctp") == 0) {
5075 if (pr_val == NULL) {
5076 err = EINVAL;
5077 return (err);
5078 }
5079 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
5080
5081 err = nxge_param_set_ip_opt(nxgep, NULL,
5082 NULL, (char *)pr_val,
5083 (caddr_t)¶m_arr[param_class_opt_ipv6_sctp]);
5084
5085 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5086 "<== nxge_set_priv_prop: name %s (value 0x%x)",
5087 pr_name, result));
5088
5089 return (err);
5090 }
5091
5092 if (strcmp(pr_name, "_soft_lso_enable") == 0) {
5093 if (pr_val == NULL) {
5094 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5095 "==> nxge_set_priv_prop: name %s (null)", pr_name));
5096 err = EINVAL;
5097 return (err);
5098 }
5099
5100 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
5101 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5102 "<== nxge_set_priv_prop: name %s "
5103 "(lso %d pr_val %s value %d)",
5104 pr_name, nxgep->soft_lso_enable, pr_val, result));
5105
5106 if (result > 1 || result < 0) {
5107 err = EINVAL;
5108 } else {
5109 if (nxgep->soft_lso_enable == (uint32_t)result) {
5110 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5111 "no change (%d %d)",
5112 nxgep->soft_lso_enable, result));
5113 return (0);
5114 }
5115 }
5116
5117 nxgep->soft_lso_enable = (int)result;
5118
5119 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5120 "<== nxge_set_priv_prop: name %s (value %d)",
5121 pr_name, result));
5122
5123 return (err);
5124 }
5125 /*
5126 * Commands like "ndd -set /dev/nxge0 adv_10gfdx_cap 1" cause the
5127 * following code to be executed.
5128 */
5129 if (strcmp(pr_name, "_adv_10gfdx_cap") == 0) {
5130 err = nxge_param_set_mac(nxgep, NULL, NULL, (char *)pr_val,
5131 (caddr_t)¶m_arr[param_anar_10gfdx]);
5132 return (err);
5133 }
5134 if (strcmp(pr_name, "_adv_pause_cap") == 0) {
5135 err = nxge_param_set_mac(nxgep, NULL, NULL, (char *)pr_val,
5136 (caddr_t)¶m_arr[param_anar_pause]);
5137 return (err);
5138 }
5139
5140 return (ENOTSUP);
5141 }
5142
5143 static int
nxge_get_priv_prop(p_nxge_t nxgep,const char * pr_name,uint_t pr_valsize,void * pr_val)5144 nxge_get_priv_prop(p_nxge_t nxgep, const char *pr_name, uint_t pr_valsize,
5145 void *pr_val)
5146 {
5147 p_nxge_param_t param_arr = nxgep->param_arr;
5148 char valstr[MAXNAMELEN];
5149 int err = ENOTSUP;
5150 uint_t strsize;
5151
5152 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5153 "==> nxge_get_priv_prop: property %s", pr_name));
5154
5155 /* function number */
5156 if (strcmp(pr_name, "_function_number") == 0) {
5157 (void) snprintf(valstr, sizeof (valstr), "%d",
5158 nxgep->function_num);
5159 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5160 "==> nxge_get_priv_prop: name %s "
5161 "(value %d valstr %s)",
5162 pr_name, nxgep->function_num, valstr));
5163
5164 err = 0;
5165 goto done;
5166 }
5167
5168 /* Neptune firmware version */
5169 if (strcmp(pr_name, "_fw_version") == 0) {
5170 (void) snprintf(valstr, sizeof (valstr), "%s",
5171 nxgep->vpd_info.ver);
5172 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5173 "==> nxge_get_priv_prop: name %s "
5174 "(value %d valstr %s)",
5175 pr_name, nxgep->vpd_info.ver, valstr));
5176
5177 err = 0;
5178 goto done;
5179 }
5180
5181 /* port PHY mode */
5182 if (strcmp(pr_name, "_port_mode") == 0) {
5183 switch (nxgep->mac.portmode) {
5184 case PORT_1G_COPPER:
5185 (void) snprintf(valstr, sizeof (valstr), "1G copper %s",
5186 nxgep->hot_swappable_phy ?
5187 "[Hot Swappable]" : "");
5188 break;
5189 case PORT_1G_FIBER:
5190 (void) snprintf(valstr, sizeof (valstr), "1G fiber %s",
5191 nxgep->hot_swappable_phy ?
5192 "[hot swappable]" : "");
5193 break;
5194 case PORT_10G_COPPER:
5195 (void) snprintf(valstr, sizeof (valstr),
5196 "10G copper %s",
5197 nxgep->hot_swappable_phy ?
5198 "[hot swappable]" : "");
5199 break;
5200 case PORT_10G_FIBER:
5201 (void) snprintf(valstr, sizeof (valstr), "10G fiber %s",
5202 nxgep->hot_swappable_phy ?
5203 "[hot swappable]" : "");
5204 break;
5205 case PORT_10G_SERDES:
5206 (void) snprintf(valstr, sizeof (valstr),
5207 "10G serdes %s", nxgep->hot_swappable_phy ?
5208 "[hot swappable]" : "");
5209 break;
5210 case PORT_1G_SERDES:
5211 (void) snprintf(valstr, sizeof (valstr), "1G serdes %s",
5212 nxgep->hot_swappable_phy ?
5213 "[hot swappable]" : "");
5214 break;
5215 case PORT_1G_TN1010:
5216 (void) snprintf(valstr, sizeof (valstr),
5217 "1G TN1010 copper %s", nxgep->hot_swappable_phy ?
5218 "[hot swappable]" : "");
5219 break;
5220 case PORT_10G_TN1010:
5221 (void) snprintf(valstr, sizeof (valstr),
5222 "10G TN1010 copper %s", nxgep->hot_swappable_phy ?
5223 "[hot swappable]" : "");
5224 break;
5225 case PORT_1G_RGMII_FIBER:
5226 (void) snprintf(valstr, sizeof (valstr),
5227 "1G rgmii fiber %s", nxgep->hot_swappable_phy ?
5228 "[hot swappable]" : "");
5229 break;
5230 case PORT_HSP_MODE:
5231 (void) snprintf(valstr, sizeof (valstr),
5232 "phy not present[hot swappable]");
5233 break;
5234 default:
5235 (void) snprintf(valstr, sizeof (valstr), "unknown %s",
5236 nxgep->hot_swappable_phy ?
5237 "[hot swappable]" : "");
5238 break;
5239 }
5240
5241 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5242 "==> nxge_get_priv_prop: name %s (value %s)",
5243 pr_name, valstr));
5244
5245 err = 0;
5246 goto done;
5247 }
5248
5249 /* Hot swappable PHY */
5250 if (strcmp(pr_name, "_hot_swap_phy") == 0) {
5251 (void) snprintf(valstr, sizeof (valstr), "%s",
5252 nxgep->hot_swappable_phy ?
5253 "yes" : "no");
5254
5255 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5256 "==> nxge_get_priv_prop: name %s "
5257 "(value %d valstr %s)",
5258 pr_name, nxgep->hot_swappable_phy, valstr));
5259
5260 err = 0;
5261 goto done;
5262 }
5263
5264
5265 /* Receive Interrupt Blanking Parameters */
5266 if (strcmp(pr_name, "_rxdma_intr_time") == 0) {
5267 err = 0;
5268 (void) snprintf(valstr, sizeof (valstr), "%d",
5269 nxgep->intr_timeout);
5270 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5271 "==> nxge_get_priv_prop: name %s (value %d)",
5272 pr_name,
5273 (uint32_t)nxgep->intr_timeout));
5274 goto done;
5275 }
5276
5277 if (strcmp(pr_name, "_rxdma_intr_pkts") == 0) {
5278 err = 0;
5279 (void) snprintf(valstr, sizeof (valstr), "%d",
5280 nxgep->intr_threshold);
5281 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5282 "==> nxge_get_priv_prop: name %s (value %d)",
5283 pr_name, (uint32_t)nxgep->intr_threshold));
5284
5285 goto done;
5286 }
5287
5288 /* Classification and Load Distribution Configuration */
5289 if (strcmp(pr_name, "_class_opt_ipv4_tcp") == 0) {
5290 err = nxge_dld_get_ip_opt(nxgep,
5291 (caddr_t)¶m_arr[param_class_opt_ipv4_tcp]);
5292
5293 (void) snprintf(valstr, sizeof (valstr), "%x",
5294 (int)param_arr[param_class_opt_ipv4_tcp].value);
5295
5296 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5297 "==> nxge_get_priv_prop: %s", valstr));
5298 goto done;
5299 }
5300
5301 if (strcmp(pr_name, "_class_opt_ipv4_udp") == 0) {
5302 err = nxge_dld_get_ip_opt(nxgep,
5303 (caddr_t)¶m_arr[param_class_opt_ipv4_udp]);
5304
5305 (void) snprintf(valstr, sizeof (valstr), "%x",
5306 (int)param_arr[param_class_opt_ipv4_udp].value);
5307
5308 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5309 "==> nxge_get_priv_prop: %s", valstr));
5310 goto done;
5311 }
5312 if (strcmp(pr_name, "_class_opt_ipv4_ah") == 0) {
5313 err = nxge_dld_get_ip_opt(nxgep,
5314 (caddr_t)¶m_arr[param_class_opt_ipv4_ah]);
5315
5316 (void) snprintf(valstr, sizeof (valstr), "%x",
5317 (int)param_arr[param_class_opt_ipv4_ah].value);
5318
5319 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5320 "==> nxge_get_priv_prop: %s", valstr));
5321 goto done;
5322 }
5323
5324 if (strcmp(pr_name, "_class_opt_ipv4_sctp") == 0) {
5325 err = nxge_dld_get_ip_opt(nxgep,
5326 (caddr_t)¶m_arr[param_class_opt_ipv4_sctp]);
5327
5328 (void) snprintf(valstr, sizeof (valstr), "%x",
5329 (int)param_arr[param_class_opt_ipv4_sctp].value);
5330
5331 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5332 "==> nxge_get_priv_prop: %s", valstr));
5333 goto done;
5334 }
5335
5336 if (strcmp(pr_name, "_class_opt_ipv6_tcp") == 0) {
5337 err = nxge_dld_get_ip_opt(nxgep,
5338 (caddr_t)¶m_arr[param_class_opt_ipv6_tcp]);
5339
5340 (void) snprintf(valstr, sizeof (valstr), "%x",
5341 (int)param_arr[param_class_opt_ipv6_tcp].value);
5342
5343 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5344 "==> nxge_get_priv_prop: %s", valstr));
5345 goto done;
5346 }
5347
5348 if (strcmp(pr_name, "_class_opt_ipv6_udp") == 0) {
5349 err = nxge_dld_get_ip_opt(nxgep,
5350 (caddr_t)¶m_arr[param_class_opt_ipv6_udp]);
5351
5352 (void) snprintf(valstr, sizeof (valstr), "%x",
5353 (int)param_arr[param_class_opt_ipv6_udp].value);
5354
5355 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5356 "==> nxge_get_priv_prop: %s", valstr));
5357 goto done;
5358 }
5359
5360 if (strcmp(pr_name, "_class_opt_ipv6_ah") == 0) {
5361 err = nxge_dld_get_ip_opt(nxgep,
5362 (caddr_t)¶m_arr[param_class_opt_ipv6_ah]);
5363
5364 (void) snprintf(valstr, sizeof (valstr), "%x",
5365 (int)param_arr[param_class_opt_ipv6_ah].value);
5366
5367 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5368 "==> nxge_get_priv_prop: %s", valstr));
5369 goto done;
5370 }
5371
5372 if (strcmp(pr_name, "_class_opt_ipv6_sctp") == 0) {
5373 err = nxge_dld_get_ip_opt(nxgep,
5374 (caddr_t)¶m_arr[param_class_opt_ipv6_sctp]);
5375
5376 (void) snprintf(valstr, sizeof (valstr), "%x",
5377 (int)param_arr[param_class_opt_ipv6_sctp].value);
5378
5379 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5380 "==> nxge_get_priv_prop: %s", valstr));
5381 goto done;
5382 }
5383
5384 /* Software LSO */
5385 if (strcmp(pr_name, "_soft_lso_enable") == 0) {
5386 (void) snprintf(valstr, sizeof (valstr),
5387 "%d", nxgep->soft_lso_enable);
5388 err = 0;
5389 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5390 "==> nxge_get_priv_prop: name %s (value %d)",
5391 pr_name, nxgep->soft_lso_enable));
5392
5393 goto done;
5394 }
5395 if (strcmp(pr_name, "_adv_10gfdx_cap") == 0) {
5396 err = 0;
5397 if (nxgep->param_arr[param_anar_10gfdx].value != 0) {
5398 (void) snprintf(valstr, sizeof (valstr), "%d", 1);
5399 goto done;
5400 } else {
5401 (void) snprintf(valstr, sizeof (valstr), "%d", 0);
5402 goto done;
5403 }
5404 }
5405 if (strcmp(pr_name, "_adv_pause_cap") == 0) {
5406 err = 0;
5407 if (nxgep->param_arr[param_anar_pause].value != 0) {
5408 (void) snprintf(valstr, sizeof (valstr), "%d", 1);
5409 goto done;
5410 } else {
5411 (void) snprintf(valstr, sizeof (valstr), "%d", 0);
5412 goto done;
5413 }
5414 }
5415
5416 done:
5417 if (err == 0) {
5418 strsize = (uint_t)strlen(valstr);
5419 if (pr_valsize < strsize) {
5420 err = ENOBUFS;
5421 } else {
5422 (void) strlcpy(pr_val, valstr, pr_valsize);
5423 }
5424 }
5425
5426 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5427 "<== nxge_get_priv_prop: return %d", err));
5428 return (err);
5429 }
5430
5431 /*
5432 * Module loading and removing entry points.
5433 */
5434
5435 DDI_DEFINE_STREAM_OPS(nxge_dev_ops, nulldev, nulldev, nxge_attach, nxge_detach,
5436 nodev, NULL, D_MP, NULL, nxge_quiesce);
5437
5438 #define NXGE_DESC_VER "Sun NIU 10Gb Ethernet"
5439
5440 /*
5441 * Module linkage information for the kernel.
5442 */
5443 static struct modldrv nxge_modldrv = {
5444 &mod_driverops,
5445 NXGE_DESC_VER,
5446 &nxge_dev_ops
5447 };
5448
5449 static struct modlinkage modlinkage = {
5450 MODREV_1, (void *) &nxge_modldrv, NULL
5451 };
5452
5453 int
_init(void)5454 _init(void)
5455 {
5456 int status;
5457
5458 MUTEX_INIT(&nxgedebuglock, NULL, MUTEX_DRIVER, NULL);
5459
5460 NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _init"));
5461
5462 mac_init_ops(&nxge_dev_ops, "nxge");
5463
5464 status = ddi_soft_state_init(&nxge_list, sizeof (nxge_t), 0);
5465 if (status != 0) {
5466 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL,
5467 "failed to init device soft state"));
5468 goto _init_exit;
5469 }
5470
5471 status = mod_install(&modlinkage);
5472 if (status != 0) {
5473 ddi_soft_state_fini(&nxge_list);
5474 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, "Mod install failed"));
5475 goto _init_exit;
5476 }
5477
5478 MUTEX_INIT(&nxge_common_lock, NULL, MUTEX_DRIVER, NULL);
5479
5480 NXGE_DEBUG_MSG((NULL, MOD_CTL, "<== _init status = 0x%X", status));
5481 return (status);
5482
5483 _init_exit:
5484 NXGE_DEBUG_MSG((NULL, MOD_CTL, "<== _init status = 0x%X", status));
5485 MUTEX_DESTROY(&nxgedebuglock);
5486 return (status);
5487 }
5488
5489 int
_fini(void)5490 _fini(void)
5491 {
5492 int status;
5493
5494 NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _fini"));
5495 NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _fini: mod_remove"));
5496
5497 if (nxge_mblks_pending)
5498 return (EBUSY);
5499
5500 status = mod_remove(&modlinkage);
5501 if (status != DDI_SUCCESS) {
5502 NXGE_DEBUG_MSG((NULL, MOD_CTL,
5503 "Module removal failed 0x%08x",
5504 status));
5505 goto _fini_exit;
5506 }
5507
5508 mac_fini_ops(&nxge_dev_ops);
5509
5510 ddi_soft_state_fini(&nxge_list);
5511
5512 NXGE_DEBUG_MSG((NULL, MOD_CTL, "<== _fini status = 0x%08x", status));
5513
5514 MUTEX_DESTROY(&nxge_common_lock);
5515 MUTEX_DESTROY(&nxgedebuglock);
5516 return (status);
5517
5518 _fini_exit:
5519 NXGE_DEBUG_MSG((NULL, MOD_CTL, "<== _fini status = 0x%08x", status));
5520 return (status);
5521 }
5522
5523 int
_info(struct modinfo * modinfop)5524 _info(struct modinfo *modinfop)
5525 {
5526 int status;
5527
5528 NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _info"));
5529 status = mod_info(&modlinkage, modinfop);
5530 NXGE_DEBUG_MSG((NULL, MOD_CTL, " _info status = 0x%X", status));
5531
5532 return (status);
5533 }
5534
5535 /*ARGSUSED*/
5536 static int
nxge_tx_ring_start(mac_ring_driver_t rdriver,uint64_t mr_gen_num)5537 nxge_tx_ring_start(mac_ring_driver_t rdriver, uint64_t mr_gen_num)
5538 {
5539 p_nxge_ring_handle_t rhp = (p_nxge_ring_handle_t)rdriver;
5540 p_nxge_t nxgep = rhp->nxgep;
5541 uint32_t channel;
5542 p_tx_ring_t ring;
5543
5544 channel = nxgep->pt_config.hw_config.tdc.start + rhp->index;
5545 ring = nxgep->tx_rings->rings[channel];
5546
5547 MUTEX_ENTER(&ring->lock);
5548 ASSERT(ring->tx_ring_handle == NULL);
5549 ring->tx_ring_handle = rhp->ring_handle;
5550 MUTEX_EXIT(&ring->lock);
5551
5552 return (0);
5553 }
5554
5555 static void
nxge_tx_ring_stop(mac_ring_driver_t rdriver)5556 nxge_tx_ring_stop(mac_ring_driver_t rdriver)
5557 {
5558 p_nxge_ring_handle_t rhp = (p_nxge_ring_handle_t)rdriver;
5559 p_nxge_t nxgep = rhp->nxgep;
5560 uint32_t channel;
5561 p_tx_ring_t ring;
5562
5563 channel = nxgep->pt_config.hw_config.tdc.start + rhp->index;
5564 ring = nxgep->tx_rings->rings[channel];
5565
5566 MUTEX_ENTER(&ring->lock);
5567 ASSERT(ring->tx_ring_handle != NULL);
5568 ring->tx_ring_handle = (mac_ring_handle_t)NULL;
5569 MUTEX_EXIT(&ring->lock);
5570 }
5571
5572 int
nxge_rx_ring_start(mac_ring_driver_t rdriver,uint64_t mr_gen_num)5573 nxge_rx_ring_start(mac_ring_driver_t rdriver, uint64_t mr_gen_num)
5574 {
5575 p_nxge_ring_handle_t rhp = (p_nxge_ring_handle_t)rdriver;
5576 p_nxge_t nxgep = rhp->nxgep;
5577 uint32_t channel;
5578 p_rx_rcr_ring_t ring;
5579 int i;
5580
5581 channel = nxgep->pt_config.hw_config.start_rdc + rhp->index;
5582 ring = nxgep->rx_rcr_rings->rcr_rings[channel];
5583
5584 MUTEX_ENTER(&ring->lock);
5585
5586 if (ring->started) {
5587 ASSERT(ring->started == B_FALSE);
5588 MUTEX_EXIT(&ring->lock);
5589 return (0);
5590 }
5591
5592 /* set rcr_ring */
5593 for (i = 0; i < nxgep->ldgvp->maxldvs; i++) {
5594 if ((nxgep->ldgvp->ldvp[i].is_rxdma) &&
5595 (nxgep->ldgvp->ldvp[i].channel == channel)) {
5596 ring->ldvp = &nxgep->ldgvp->ldvp[i];
5597 ring->ldgp = nxgep->ldgvp->ldvp[i].ldgp;
5598 }
5599 }
5600
5601 ring->rcr_mac_handle = rhp->ring_handle;
5602 ring->rcr_gen_num = mr_gen_num;
5603 ring->started = B_TRUE;
5604 rhp->ring_gen_num = mr_gen_num;
5605 MUTEX_EXIT(&ring->lock);
5606
5607 return (0);
5608 }
5609
5610 static void
nxge_rx_ring_stop(mac_ring_driver_t rdriver)5611 nxge_rx_ring_stop(mac_ring_driver_t rdriver)
5612 {
5613 p_nxge_ring_handle_t rhp = (p_nxge_ring_handle_t)rdriver;
5614 p_nxge_t nxgep = rhp->nxgep;
5615 uint32_t channel;
5616 p_rx_rcr_ring_t ring;
5617
5618 channel = nxgep->pt_config.hw_config.start_rdc + rhp->index;
5619 ring = nxgep->rx_rcr_rings->rcr_rings[channel];
5620
5621 MUTEX_ENTER(&ring->lock);
5622 ASSERT(ring->started == B_TRUE);
5623 ring->rcr_mac_handle = NULL;
5624 ring->ldvp = NULL;
5625 ring->ldgp = NULL;
5626 ring->started = B_FALSE;
5627 MUTEX_EXIT(&ring->lock);
5628 }
5629
5630 static int
nxge_ring_get_htable_idx(p_nxge_t nxgep,mac_ring_type_t type,uint32_t channel)5631 nxge_ring_get_htable_idx(p_nxge_t nxgep, mac_ring_type_t type, uint32_t channel)
5632 {
5633 int i;
5634
5635 #if defined(sun4v)
5636 if (isLDOMguest(nxgep)) {
5637 return (nxge_hio_get_dc_htable_idx(nxgep,
5638 (type == MAC_RING_TYPE_TX) ? VP_BOUND_TX : VP_BOUND_RX,
5639 channel));
5640 }
5641 #endif
5642
5643 ASSERT(nxgep->ldgvp != NULL);
5644
5645 switch (type) {
5646 case MAC_RING_TYPE_TX:
5647 for (i = 0; i < nxgep->ldgvp->maxldvs; i++) {
5648 if ((nxgep->ldgvp->ldvp[i].is_txdma) &&
5649 (nxgep->ldgvp->ldvp[i].channel == channel)) {
5650 return ((int)
5651 nxgep->ldgvp->ldvp[i].ldgp->htable_idx);
5652 }
5653 }
5654 break;
5655
5656 case MAC_RING_TYPE_RX:
5657 for (i = 0; i < nxgep->ldgvp->maxldvs; i++) {
5658 if ((nxgep->ldgvp->ldvp[i].is_rxdma) &&
5659 (nxgep->ldgvp->ldvp[i].channel == channel)) {
5660 return ((int)
5661 nxgep->ldgvp->ldvp[i].ldgp->htable_idx);
5662 }
5663 }
5664 }
5665
5666 return (-1);
5667 }
5668
5669 /*
5670 * Callback funtion for MAC layer to register all rings.
5671 */
5672 static void
nxge_fill_ring(void * arg,mac_ring_type_t rtype,const int rg_index,const int index,mac_ring_info_t * infop,mac_ring_handle_t rh)5673 nxge_fill_ring(void *arg, mac_ring_type_t rtype, const int rg_index,
5674 const int index, mac_ring_info_t *infop, mac_ring_handle_t rh)
5675 {
5676 p_nxge_t nxgep = (p_nxge_t)arg;
5677 p_nxge_hw_pt_cfg_t p_cfgp = &nxgep->pt_config.hw_config;
5678 p_nxge_intr_t intrp;
5679 uint32_t channel;
5680 int htable_idx;
5681 p_nxge_ring_handle_t rhandlep;
5682
5683 ASSERT(nxgep != NULL);
5684 ASSERT(p_cfgp != NULL);
5685 ASSERT(infop != NULL);
5686
5687 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
5688 "==> nxge_fill_ring 0x%x index %d", rtype, index));
5689
5690
5691 switch (rtype) {
5692 case MAC_RING_TYPE_TX: {
5693 mac_intr_t *mintr = &infop->mri_intr;
5694
5695 NXGE_DEBUG_MSG((nxgep, TX_CTL,
5696 "==> nxge_fill_ring (TX) 0x%x index %d ntdcs %d",
5697 rtype, index, p_cfgp->tdc.count));
5698
5699 ASSERT((index >= 0) && (index < p_cfgp->tdc.count));
5700 rhandlep = &nxgep->tx_ring_handles[index];
5701 rhandlep->nxgep = nxgep;
5702 rhandlep->index = index;
5703 rhandlep->ring_handle = rh;
5704
5705 channel = nxgep->pt_config.hw_config.tdc.start + index;
5706 rhandlep->channel = channel;
5707 intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;
5708 htable_idx = nxge_ring_get_htable_idx(nxgep, rtype,
5709 channel);
5710 if (htable_idx >= 0)
5711 mintr->mi_ddi_handle = intrp->htable[htable_idx];
5712 else
5713 mintr->mi_ddi_handle = NULL;
5714
5715 infop->mri_driver = (mac_ring_driver_t)rhandlep;
5716 infop->mri_start = nxge_tx_ring_start;
5717 infop->mri_stop = nxge_tx_ring_stop;
5718 infop->mri_tx = nxge_tx_ring_send;
5719 infop->mri_stat = nxge_tx_ring_stat;
5720 infop->mri_flags = MAC_RING_TX_SERIALIZE;
5721 break;
5722 }
5723
5724 case MAC_RING_TYPE_RX: {
5725 mac_intr_t nxge_mac_intr;
5726 int nxge_rindex;
5727 p_nxge_intr_t intrp;
5728
5729 intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;
5730
5731 NXGE_DEBUG_MSG((nxgep, RX_CTL,
5732 "==> nxge_fill_ring (RX) 0x%x index %d nrdcs %d",
5733 rtype, index, p_cfgp->max_rdcs));
5734
5735 /*
5736 * 'index' is the ring index within the group.
5737 * Find the ring index in the nxge instance.
5738 */
5739 nxge_rindex = nxge_get_rxring_index(nxgep, rg_index, index);
5740 channel = nxgep->pt_config.hw_config.start_rdc + index;
5741 intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;
5742
5743 ASSERT((nxge_rindex >= 0) && (nxge_rindex < p_cfgp->max_rdcs));
5744 rhandlep = &nxgep->rx_ring_handles[nxge_rindex];
5745 rhandlep->nxgep = nxgep;
5746 rhandlep->index = nxge_rindex;
5747 rhandlep->ring_handle = rh;
5748 rhandlep->channel = channel;
5749
5750 /*
5751 * Entrypoint to enable interrupt (disable poll) and
5752 * disable interrupt (enable poll).
5753 */
5754 bzero(&nxge_mac_intr, sizeof (nxge_mac_intr));
5755 nxge_mac_intr.mi_handle = (mac_intr_handle_t)rhandlep;
5756 nxge_mac_intr.mi_enable = (mac_intr_enable_t)nxge_disable_poll;
5757 nxge_mac_intr.mi_disable = (mac_intr_disable_t)nxge_enable_poll;
5758
5759 htable_idx = nxge_ring_get_htable_idx(nxgep, rtype,
5760 channel);
5761 if (htable_idx >= 0)
5762 nxge_mac_intr.mi_ddi_handle = intrp->htable[htable_idx];
5763 else
5764 nxge_mac_intr.mi_ddi_handle = NULL;
5765
5766 infop->mri_driver = (mac_ring_driver_t)rhandlep;
5767 infop->mri_start = nxge_rx_ring_start;
5768 infop->mri_stop = nxge_rx_ring_stop;
5769 infop->mri_intr = nxge_mac_intr;
5770 infop->mri_poll = nxge_rx_poll;
5771 infop->mri_stat = nxge_rx_ring_stat;
5772 infop->mri_flags = MAC_RING_RX_ENQUEUE;
5773 break;
5774 }
5775
5776 default:
5777 break;
5778 }
5779
5780 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_fill_ring 0x%x", rtype));
5781 }
5782
5783 static void
nxge_group_add_ring(mac_group_driver_t gh,mac_ring_driver_t rh,mac_ring_type_t type)5784 nxge_group_add_ring(mac_group_driver_t gh, mac_ring_driver_t rh,
5785 mac_ring_type_t type)
5786 {
5787 nxge_ring_group_t *rgroup = (nxge_ring_group_t *)gh;
5788 nxge_ring_handle_t *rhandle = (nxge_ring_handle_t *)rh;
5789 nxge_t *nxge;
5790 nxge_grp_t *grp;
5791 nxge_rdc_grp_t *rdc_grp;
5792 uint16_t channel; /* device-wise ring id */
5793 int dev_gindex;
5794 int rv;
5795
5796 nxge = rgroup->nxgep;
5797
5798 switch (type) {
5799 case MAC_RING_TYPE_TX:
5800 /*
5801 * nxge_grp_dc_add takes a channel number which is a
5802 * "devise" ring ID.
5803 */
5804 channel = nxge->pt_config.hw_config.tdc.start + rhandle->index;
5805
5806 /*
5807 * Remove the ring from the default group
5808 */
5809 if (rgroup->gindex != 0) {
5810 (void) nxge_grp_dc_remove(nxge, VP_BOUND_TX, channel);
5811 }
5812
5813 /*
5814 * nxge->tx_set.group[] is an array of groups indexed by
5815 * a "port" group ID.
5816 */
5817 grp = nxge->tx_set.group[rgroup->gindex];
5818 rv = nxge_grp_dc_add(nxge, grp, VP_BOUND_TX, channel);
5819 if (rv != 0) {
5820 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
5821 "nxge_group_add_ring: nxge_grp_dc_add failed"));
5822 }
5823 break;
5824
5825 case MAC_RING_TYPE_RX:
5826 /*
5827 * nxge->rx_set.group[] is an array of groups indexed by
5828 * a "port" group ID.
5829 */
5830 grp = nxge->rx_set.group[rgroup->gindex];
5831
5832 dev_gindex = nxge->pt_config.hw_config.def_mac_rxdma_grpid +
5833 rgroup->gindex;
5834 rdc_grp = &nxge->pt_config.rdc_grps[dev_gindex];
5835
5836 /*
5837 * nxge_grp_dc_add takes a channel number which is a
5838 * "devise" ring ID.
5839 */
5840 channel = nxge->pt_config.hw_config.start_rdc + rhandle->index;
5841 rv = nxge_grp_dc_add(nxge, grp, VP_BOUND_RX, channel);
5842 if (rv != 0) {
5843 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
5844 "nxge_group_add_ring: nxge_grp_dc_add failed"));
5845 }
5846
5847 rdc_grp->map |= (1 << channel);
5848 rdc_grp->max_rdcs++;
5849
5850 (void) nxge_init_fzc_rdc_tbl(nxge, rdc_grp, rgroup->rdctbl);
5851 break;
5852 }
5853 }
5854
5855 static void
nxge_group_rem_ring(mac_group_driver_t gh,mac_ring_driver_t rh,mac_ring_type_t type)5856 nxge_group_rem_ring(mac_group_driver_t gh, mac_ring_driver_t rh,
5857 mac_ring_type_t type)
5858 {
5859 nxge_ring_group_t *rgroup = (nxge_ring_group_t *)gh;
5860 nxge_ring_handle_t *rhandle = (nxge_ring_handle_t *)rh;
5861 nxge_t *nxge;
5862 uint16_t channel; /* device-wise ring id */
5863 nxge_rdc_grp_t *rdc_grp;
5864 int dev_gindex;
5865
5866 nxge = rgroup->nxgep;
5867
5868 switch (type) {
5869 case MAC_RING_TYPE_TX:
5870 dev_gindex = nxge->pt_config.hw_config.def_mac_txdma_grpid +
5871 rgroup->gindex;
5872 channel = nxge->pt_config.hw_config.tdc.start + rhandle->index;
5873 nxge_grp_dc_remove(nxge, VP_BOUND_TX, channel);
5874
5875 /*
5876 * Add the ring back to the default group
5877 */
5878 if (rgroup->gindex != 0) {
5879 nxge_grp_t *grp;
5880 grp = nxge->tx_set.group[0];
5881 (void) nxge_grp_dc_add(nxge, grp, VP_BOUND_TX, channel);
5882 }
5883 break;
5884
5885 case MAC_RING_TYPE_RX:
5886 dev_gindex = nxge->pt_config.hw_config.def_mac_rxdma_grpid +
5887 rgroup->gindex;
5888 rdc_grp = &nxge->pt_config.rdc_grps[dev_gindex];
5889 channel = rdc_grp->start_rdc + rhandle->index;
5890 nxge_grp_dc_remove(nxge, VP_BOUND_RX, channel);
5891
5892 rdc_grp->map &= ~(1 << channel);
5893 rdc_grp->max_rdcs--;
5894
5895 (void) nxge_init_fzc_rdc_tbl(nxge, rdc_grp, rgroup->rdctbl);
5896 break;
5897 }
5898 }
5899
5900
5901 /*ARGSUSED*/
5902 static nxge_status_t
nxge_add_intrs(p_nxge_t nxgep)5903 nxge_add_intrs(p_nxge_t nxgep)
5904 {
5905
5906 int intr_types;
5907 int type = 0;
5908 int ddi_status = DDI_SUCCESS;
5909 nxge_status_t status = NXGE_OK;
5910
5911 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs"));
5912
5913 nxgep->nxge_intr_type.intr_registered = B_FALSE;
5914 nxgep->nxge_intr_type.intr_enabled = B_FALSE;
5915 nxgep->nxge_intr_type.msi_intx_cnt = 0;
5916 nxgep->nxge_intr_type.intr_added = 0;
5917 nxgep->nxge_intr_type.niu_msi_enable = B_FALSE;
5918 nxgep->nxge_intr_type.intr_type = 0;
5919
5920 if (nxgep->niu_type == N2_NIU) {
5921 nxgep->nxge_intr_type.niu_msi_enable = B_TRUE;
5922 } else if (nxge_msi_enable) {
5923 nxgep->nxge_intr_type.niu_msi_enable = B_TRUE;
5924 }
5925
5926 /* Get the supported interrupt types */
5927 if ((ddi_status = ddi_intr_get_supported_types(nxgep->dip, &intr_types))
5928 != DDI_SUCCESS) {
5929 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_add_intrs: "
5930 "ddi_intr_get_supported_types failed: status 0x%08x",
5931 ddi_status));
5932 return (NXGE_ERROR | NXGE_DDI_FAILED);
5933 }
5934 nxgep->nxge_intr_type.intr_types = intr_types;
5935
5936 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
5937 "ddi_intr_get_supported_types: 0x%08x", intr_types));
5938
5939 /*
5940 * Solaris MSIX is not supported yet. use MSI for now.
5941 * nxge_msi_enable (1):
5942 * 1 - MSI 2 - MSI-X others - FIXED
5943 */
5944 switch (nxge_msi_enable) {
5945 default:
5946 type = DDI_INTR_TYPE_FIXED;
5947 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: "
5948 "use fixed (intx emulation) type %08x",
5949 type));
5950 break;
5951
5952 case 2:
5953 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: "
5954 "ddi_intr_get_supported_types: 0x%08x", intr_types));
5955 if (intr_types & DDI_INTR_TYPE_MSIX) {
5956 type = DDI_INTR_TYPE_MSIX;
5957 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
5958 "ddi_intr_get_supported_types: MSIX 0x%08x",
5959 type));
5960 } else if (intr_types & DDI_INTR_TYPE_MSI) {
5961 type = DDI_INTR_TYPE_MSI;
5962 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
5963 "ddi_intr_get_supported_types: MSI 0x%08x",
5964 type));
5965 } else if (intr_types & DDI_INTR_TYPE_FIXED) {
5966 type = DDI_INTR_TYPE_FIXED;
5967 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: "
5968 "ddi_intr_get_supported_types: MSXED0x%08x",
5969 type));
5970 }
5971 break;
5972
5973 case 1:
5974 if (intr_types & DDI_INTR_TYPE_MSI) {
5975 type = DDI_INTR_TYPE_MSI;
5976 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: "
5977 "ddi_intr_get_supported_types: MSI 0x%08x",
5978 type));
5979 } else if (intr_types & DDI_INTR_TYPE_MSIX) {
5980 type = DDI_INTR_TYPE_MSIX;
5981 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
5982 "ddi_intr_get_supported_types: MSIX 0x%08x",
5983 type));
5984 } else if (intr_types & DDI_INTR_TYPE_FIXED) {
5985 type = DDI_INTR_TYPE_FIXED;
5986 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
5987 "ddi_intr_get_supported_types: MSXED0x%08x",
5988 type));
5989 }
5990 }
5991
5992 nxgep->nxge_intr_type.intr_type = type;
5993 if ((type == DDI_INTR_TYPE_MSIX || type == DDI_INTR_TYPE_MSI ||
5994 type == DDI_INTR_TYPE_FIXED) &&
5995 nxgep->nxge_intr_type.niu_msi_enable) {
5996 if ((status = nxge_add_intrs_adv(nxgep)) != DDI_SUCCESS) {
5997 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
5998 " nxge_add_intrs: "
5999 " nxge_add_intrs_adv failed: status 0x%08x",
6000 status));
6001 return (status);
6002 } else {
6003 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
6004 "interrupts registered : type %d", type));
6005 nxgep->nxge_intr_type.intr_registered = B_TRUE;
6006
6007 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
6008 "\nAdded advanced nxge add_intr_adv "
6009 "intr type 0x%x\n", type));
6010
6011 return (status);
6012 }
6013 }
6014
6015 if (!nxgep->nxge_intr_type.intr_registered) {
6016 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "==> nxge_add_intrs: "
6017 "failed to register interrupts"));
6018 return (NXGE_ERROR | NXGE_DDI_FAILED);
6019 }
6020
6021 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_add_intrs"));
6022 return (status);
6023 }
6024
6025 static nxge_status_t
nxge_add_intrs_adv(p_nxge_t nxgep)6026 nxge_add_intrs_adv(p_nxge_t nxgep)
6027 {
6028 int intr_type;
6029 p_nxge_intr_t intrp;
6030
6031 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs_adv"));
6032
6033 intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;
6034 intr_type = intrp->intr_type;
6035 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs_adv: type 0x%x",
6036 intr_type));
6037
6038 switch (intr_type) {
6039 case DDI_INTR_TYPE_MSI: /* 0x2 */
6040 case DDI_INTR_TYPE_MSIX: /* 0x4 */
6041 return (nxge_add_intrs_adv_type(nxgep, intr_type));
6042
6043 case DDI_INTR_TYPE_FIXED: /* 0x1 */
6044 return (nxge_add_intrs_adv_type_fix(nxgep, intr_type));
6045
6046 default:
6047 return (NXGE_ERROR);
6048 }
6049 }
6050
6051
6052 /*ARGSUSED*/
6053 static nxge_status_t
nxge_add_intrs_adv_type(p_nxge_t nxgep,uint32_t int_type)6054 nxge_add_intrs_adv_type(p_nxge_t nxgep, uint32_t int_type)
6055 {
6056 dev_info_t *dip = nxgep->dip;
6057 p_nxge_ldg_t ldgp;
6058 p_nxge_intr_t intrp;
6059 uint_t *inthandler;
6060 void *arg1, *arg2;
6061 int behavior;
6062 int nintrs, navail, nrequest;
6063 int nactual, nrequired;
6064 int inum = 0;
6065 int x, y;
6066 int ddi_status = DDI_SUCCESS;
6067 nxge_status_t status = NXGE_OK;
6068
6069 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs_adv_type"));
6070 intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;
6071 intrp->start_inum = 0;
6072
6073 ddi_status = ddi_intr_get_nintrs(dip, int_type, &nintrs);
6074 if ((ddi_status != DDI_SUCCESS) || (nintrs == 0)) {
6075 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
6076 "ddi_intr_get_nintrs() failed, status: 0x%x%, "
6077 "nintrs: %d", ddi_status, nintrs));
6078 return (NXGE_ERROR | NXGE_DDI_FAILED);
6079 }
6080
6081 ddi_status = ddi_intr_get_navail(dip, int_type, &navail);
6082 if ((ddi_status != DDI_SUCCESS) || (navail == 0)) {
6083 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
6084 "ddi_intr_get_navail() failed, status: 0x%x%, "
6085 "nintrs: %d", ddi_status, navail));
6086 return (NXGE_ERROR | NXGE_DDI_FAILED);
6087 }
6088
6089 NXGE_DEBUG_MSG((nxgep, INT_CTL,
6090 "ddi_intr_get_navail() returned: nintrs %d, navail %d",
6091 nintrs, navail));
6092
6093 /* PSARC/2007/453 MSI-X interrupt limit override */
6094 if (int_type == DDI_INTR_TYPE_MSIX) {
6095 nrequest = nxge_create_msi_property(nxgep);
6096 if (nrequest < navail) {
6097 navail = nrequest;
6098 NXGE_DEBUG_MSG((nxgep, INT_CTL,
6099 "nxge_add_intrs_adv_type: nintrs %d "
6100 "navail %d (nrequest %d)",
6101 nintrs, navail, nrequest));
6102 }
6103 }
6104
6105 if (int_type == DDI_INTR_TYPE_MSI && !ISP2(navail)) {
6106 /* MSI must be power of 2 */
6107 if ((navail & 16) == 16) {
6108 navail = 16;
6109 } else if ((navail & 8) == 8) {
6110 navail = 8;
6111 } else if ((navail & 4) == 4) {
6112 navail = 4;
6113 } else if ((navail & 2) == 2) {
6114 navail = 2;
6115 } else {
6116 navail = 1;
6117 }
6118 NXGE_DEBUG_MSG((nxgep, INT_CTL,
6119 "ddi_intr_get_navail(): (msi power of 2) nintrs %d, "
6120 "navail %d", nintrs, navail));
6121 }
6122
6123 behavior = ((int_type == DDI_INTR_TYPE_FIXED) ? DDI_INTR_ALLOC_STRICT :
6124 DDI_INTR_ALLOC_NORMAL);
6125 intrp->intr_size = navail * sizeof (ddi_intr_handle_t);
6126 intrp->htable = kmem_alloc(intrp->intr_size, KM_SLEEP);
6127 ddi_status = ddi_intr_alloc(dip, intrp->htable, int_type, inum,
6128 navail, &nactual, behavior);
6129 if (ddi_status != DDI_SUCCESS || nactual == 0) {
6130 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
6131 " ddi_intr_alloc() failed: %d",
6132 ddi_status));
6133 kmem_free(intrp->htable, intrp->intr_size);
6134 return (NXGE_ERROR | NXGE_DDI_FAILED);
6135 }
6136
6137 if ((ddi_status = ddi_intr_get_pri(intrp->htable[0],
6138 (uint_t *)&intrp->pri)) != DDI_SUCCESS) {
6139 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
6140 " ddi_intr_get_pri() failed: %d",
6141 ddi_status));
6142 /* Free already allocated interrupts */
6143 for (y = 0; y < nactual; y++) {
6144 (void) ddi_intr_free(intrp->htable[y]);
6145 }
6146
6147 kmem_free(intrp->htable, intrp->intr_size);
6148 return (NXGE_ERROR | NXGE_DDI_FAILED);
6149 }
6150
6151 nrequired = 0;
6152 switch (nxgep->niu_type) {
6153 default:
6154 status = nxge_ldgv_init(nxgep, &nactual, &nrequired);
6155 break;
6156
6157 case N2_NIU:
6158 status = nxge_ldgv_init_n2(nxgep, &nactual, &nrequired);
6159 break;
6160 }
6161
6162 if (status != NXGE_OK) {
6163 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
6164 "nxge_add_intrs_adv_typ:nxge_ldgv_init "
6165 "failed: 0x%x", status));
6166 /* Free already allocated interrupts */
6167 for (y = 0; y < nactual; y++) {
6168 (void) ddi_intr_free(intrp->htable[y]);
6169 }
6170
6171 kmem_free(intrp->htable, intrp->intr_size);
6172 return (status);
6173 }
6174
6175 ldgp = nxgep->ldgvp->ldgp;
6176 for (x = 0; x < nrequired; x++, ldgp++) {
6177 ldgp->vector = (uint8_t)x;
6178 ldgp->intdata = SID_DATA(ldgp->func, x);
6179 arg1 = ldgp->ldvp;
6180 arg2 = nxgep;
6181 if (ldgp->nldvs == 1) {
6182 inthandler = (uint_t *)ldgp->ldvp->ldv_intr_handler;
6183 NXGE_DEBUG_MSG((nxgep, INT_CTL,
6184 "nxge_add_intrs_adv_type: "
6185 "arg1 0x%x arg2 0x%x: "
6186 "1-1 int handler (entry %d intdata 0x%x)\n",
6187 arg1, arg2,
6188 x, ldgp->intdata));
6189 } else if (ldgp->nldvs > 1) {
6190 inthandler = (uint_t *)ldgp->sys_intr_handler;
6191 NXGE_DEBUG_MSG((nxgep, INT_CTL,
6192 "nxge_add_intrs_adv_type: "
6193 "arg1 0x%x arg2 0x%x: "
6194 "nldevs %d int handler "
6195 "(entry %d intdata 0x%x)\n",
6196 arg1, arg2,
6197 ldgp->nldvs, x, ldgp->intdata));
6198 }
6199
6200 NXGE_DEBUG_MSG((nxgep, INT_CTL,
6201 "==> nxge_add_intrs_adv_type: ddi_add_intr(inum) #%d "
6202 "htable 0x%llx", x, intrp->htable[x]));
6203
6204 if ((ddi_status = ddi_intr_add_handler(intrp->htable[x],
6205 (ddi_intr_handler_t *)inthandler, arg1, arg2))
6206 != DDI_SUCCESS) {
6207 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
6208 "==> nxge_add_intrs_adv_type: failed #%d "
6209 "status 0x%x", x, ddi_status));
6210 for (y = 0; y < intrp->intr_added; y++) {
6211 (void) ddi_intr_remove_handler(
6212 intrp->htable[y]);
6213 }
6214 /* Free already allocated intr */
6215 for (y = 0; y < nactual; y++) {
6216 (void) ddi_intr_free(intrp->htable[y]);
6217 }
6218 kmem_free(intrp->htable, intrp->intr_size);
6219
6220 (void) nxge_ldgv_uninit(nxgep);
6221
6222 return (NXGE_ERROR | NXGE_DDI_FAILED);
6223 }
6224
6225 ldgp->htable_idx = x;
6226 intrp->intr_added++;
6227 }
6228
6229 intrp->msi_intx_cnt = nactual;
6230
6231 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
6232 "Requested: %d, Allowed: %d msi_intx_cnt %d intr_added %d",
6233 navail, nactual,
6234 intrp->msi_intx_cnt,
6235 intrp->intr_added));
6236
6237 (void) ddi_intr_get_cap(intrp->htable[0], &intrp->intr_cap);
6238
6239 (void) nxge_intr_ldgv_init(nxgep);
6240
6241 NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_add_intrs_adv_type"));
6242
6243 return (status);
6244 }
6245
6246 /*ARGSUSED*/
6247 static nxge_status_t
nxge_add_intrs_adv_type_fix(p_nxge_t nxgep,uint32_t int_type)6248 nxge_add_intrs_adv_type_fix(p_nxge_t nxgep, uint32_t int_type)
6249 {
6250 dev_info_t *dip = nxgep->dip;
6251 p_nxge_ldg_t ldgp;
6252 p_nxge_intr_t intrp;
6253 uint_t *inthandler;
6254 void *arg1, *arg2;
6255 int behavior;
6256 int nintrs, navail;
6257 int nactual, nrequired;
6258 int inum = 0;
6259 int x, y;
6260 int ddi_status = DDI_SUCCESS;
6261 nxge_status_t status = NXGE_OK;
6262
6263 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs_adv_type_fix"));
6264 intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;
6265 intrp->start_inum = 0;
6266
6267 ddi_status = ddi_intr_get_nintrs(dip, int_type, &nintrs);
6268 if ((ddi_status != DDI_SUCCESS) || (nintrs == 0)) {
6269 NXGE_DEBUG_MSG((nxgep, INT_CTL,
6270 "ddi_intr_get_nintrs() failed, status: 0x%x%, "
6271 "nintrs: %d", status, nintrs));
6272 return (NXGE_ERROR | NXGE_DDI_FAILED);
6273 }
6274
6275 ddi_status = ddi_intr_get_navail(dip, int_type, &navail);
6276 if ((ddi_status != DDI_SUCCESS) || (navail == 0)) {
6277 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
6278 "ddi_intr_get_navail() failed, status: 0x%x%, "
6279 "nintrs: %d", ddi_status, navail));
6280 return (NXGE_ERROR | NXGE_DDI_FAILED);
6281 }
6282
6283 NXGE_DEBUG_MSG((nxgep, INT_CTL,
6284 "ddi_intr_get_navail() returned: nintrs %d, naavail %d",
6285 nintrs, navail));
6286
6287 behavior = ((int_type == DDI_INTR_TYPE_FIXED) ? DDI_INTR_ALLOC_STRICT :
6288 DDI_INTR_ALLOC_NORMAL);
6289 intrp->intr_size = navail * sizeof (ddi_intr_handle_t);
6290 intrp->htable = kmem_alloc(intrp->intr_size, KM_SLEEP);
6291 ddi_status = ddi_intr_alloc(dip, intrp->htable, int_type, inum,
6292 navail, &nactual, behavior);
6293 if (ddi_status != DDI_SUCCESS || nactual == 0) {
6294 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
6295 " ddi_intr_alloc() failed: %d",
6296 ddi_status));
6297 kmem_free(intrp->htable, intrp->intr_size);
6298 return (NXGE_ERROR | NXGE_DDI_FAILED);
6299 }
6300
6301 if ((ddi_status = ddi_intr_get_pri(intrp->htable[0],
6302 (uint_t *)&intrp->pri)) != DDI_SUCCESS) {
6303 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
6304 " ddi_intr_get_pri() failed: %d",
6305 ddi_status));
6306 /* Free already allocated interrupts */
6307 for (y = 0; y < nactual; y++) {
6308 (void) ddi_intr_free(intrp->htable[y]);
6309 }
6310
6311 kmem_free(intrp->htable, intrp->intr_size);
6312 return (NXGE_ERROR | NXGE_DDI_FAILED);
6313 }
6314
6315 nrequired = 0;
6316 switch (nxgep->niu_type) {
6317 default:
6318 status = nxge_ldgv_init(nxgep, &nactual, &nrequired);
6319 break;
6320
6321 case N2_NIU:
6322 status = nxge_ldgv_init_n2(nxgep, &nactual, &nrequired);
6323 break;
6324 }
6325
6326 if (status != NXGE_OK) {
6327 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
6328 "nxge_add_intrs_adv_type_fix:nxge_ldgv_init "
6329 "failed: 0x%x", status));
6330 /* Free already allocated interrupts */
6331 for (y = 0; y < nactual; y++) {
6332 (void) ddi_intr_free(intrp->htable[y]);
6333 }
6334
6335 kmem_free(intrp->htable, intrp->intr_size);
6336 return (status);
6337 }
6338
6339 ldgp = nxgep->ldgvp->ldgp;
6340 for (x = 0; x < nrequired; x++, ldgp++) {
6341 ldgp->vector = (uint8_t)x;
6342 if (nxgep->niu_type != N2_NIU) {
6343 ldgp->intdata = SID_DATA(ldgp->func, x);
6344 }
6345
6346 arg1 = ldgp->ldvp;
6347 arg2 = nxgep;
6348 if (ldgp->nldvs == 1) {
6349 inthandler = (uint_t *)ldgp->ldvp->ldv_intr_handler;
6350 NXGE_DEBUG_MSG((nxgep, INT_CTL,
6351 "nxge_add_intrs_adv_type_fix: "
6352 "1-1 int handler(%d) ldg %d ldv %d "
6353 "arg1 $%p arg2 $%p\n",
6354 x, ldgp->ldg, ldgp->ldvp->ldv,
6355 arg1, arg2));
6356 } else if (ldgp->nldvs > 1) {
6357 inthandler = (uint_t *)ldgp->sys_intr_handler;
6358 NXGE_DEBUG_MSG((nxgep, INT_CTL,
6359 "nxge_add_intrs_adv_type_fix: "
6360 "shared ldv %d int handler(%d) ldv %d ldg %d"
6361 "arg1 0x%016llx arg2 0x%016llx\n",
6362 x, ldgp->nldvs, ldgp->ldg, ldgp->ldvp->ldv,
6363 arg1, arg2));
6364 }
6365
6366 if ((ddi_status = ddi_intr_add_handler(intrp->htable[x],
6367 (ddi_intr_handler_t *)inthandler, arg1, arg2))
6368 != DDI_SUCCESS) {
6369 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
6370 "==> nxge_add_intrs_adv_type_fix: failed #%d "
6371 "status 0x%x", x, ddi_status));
6372 for (y = 0; y < intrp->intr_added; y++) {
6373 (void) ddi_intr_remove_handler(
6374 intrp->htable[y]);
6375 }
6376 for (y = 0; y < nactual; y++) {
6377 (void) ddi_intr_free(intrp->htable[y]);
6378 }
6379 /* Free already allocated intr */
6380 kmem_free(intrp->htable, intrp->intr_size);
6381
6382 (void) nxge_ldgv_uninit(nxgep);
6383
6384 return (NXGE_ERROR | NXGE_DDI_FAILED);
6385 }
6386
6387 ldgp->htable_idx = x;
6388 intrp->intr_added++;
6389 }
6390
6391 intrp->msi_intx_cnt = nactual;
6392
6393 (void) ddi_intr_get_cap(intrp->htable[0], &intrp->intr_cap);
6394
6395 status = nxge_intr_ldgv_init(nxgep);
6396 NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_add_intrs_adv_type_fix"));
6397
6398 return (status);
6399 }
6400
6401 static void
nxge_remove_intrs(p_nxge_t nxgep)6402 nxge_remove_intrs(p_nxge_t nxgep)
6403 {
6404 int i, inum;
6405 p_nxge_intr_t intrp;
6406
6407 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_remove_intrs"));
6408 intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;
6409 if (!intrp->intr_registered) {
6410 NXGE_DEBUG_MSG((nxgep, INT_CTL,
6411 "<== nxge_remove_intrs: interrupts not registered"));
6412 return;
6413 }
6414
6415 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_remove_intrs:advanced"));
6416
6417 if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) {
6418 (void) ddi_intr_block_disable(intrp->htable,
6419 intrp->intr_added);
6420 } else {
6421 for (i = 0; i < intrp->intr_added; i++) {
6422 (void) ddi_intr_disable(intrp->htable[i]);
6423 }
6424 }
6425
6426 for (inum = 0; inum < intrp->intr_added; inum++) {
6427 if (intrp->htable[inum]) {
6428 (void) ddi_intr_remove_handler(intrp->htable[inum]);
6429 }
6430 }
6431
6432 for (inum = 0; inum < intrp->msi_intx_cnt; inum++) {
6433 if (intrp->htable[inum]) {
6434 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
6435 "nxge_remove_intrs: ddi_intr_free inum %d "
6436 "msi_intx_cnt %d intr_added %d",
6437 inum,
6438 intrp->msi_intx_cnt,
6439 intrp->intr_added));
6440
6441 (void) ddi_intr_free(intrp->htable[inum]);
6442 }
6443 }
6444
6445 kmem_free(intrp->htable, intrp->intr_size);
6446 intrp->intr_registered = B_FALSE;
6447 intrp->intr_enabled = B_FALSE;
6448 intrp->msi_intx_cnt = 0;
6449 intrp->intr_added = 0;
6450
6451 (void) nxge_ldgv_uninit(nxgep);
6452
6453 (void) ddi_prop_remove(DDI_DEV_T_NONE, nxgep->dip,
6454 "#msix-request");
6455
6456 NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_remove_intrs"));
6457 }
6458
6459 /*ARGSUSED*/
6460 static void
nxge_intrs_enable(p_nxge_t nxgep)6461 nxge_intrs_enable(p_nxge_t nxgep)
6462 {
6463 p_nxge_intr_t intrp;
6464 int i;
6465 int status;
6466
6467 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_enable"));
6468
6469 intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;
6470
6471 if (!intrp->intr_registered) {
6472 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_intrs_enable: "
6473 "interrupts are not registered"));
6474 return;
6475 }
6476
6477 if (intrp->intr_enabled) {
6478 NXGE_DEBUG_MSG((nxgep, INT_CTL,
6479 "<== nxge_intrs_enable: already enabled"));
6480 return;
6481 }
6482
6483 if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) {
6484 status = ddi_intr_block_enable(intrp->htable,
6485 intrp->intr_added);
6486 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_enable "
6487 "block enable - status 0x%x total inums #%d\n",
6488 status, intrp->intr_added));
6489 } else {
6490 for (i = 0; i < intrp->intr_added; i++) {
6491 status = ddi_intr_enable(intrp->htable[i]);
6492 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_enable "
6493 "ddi_intr_enable:enable - status 0x%x "
6494 "total inums %d enable inum #%d\n",
6495 status, intrp->intr_added, i));
6496 if (status == DDI_SUCCESS) {
6497 intrp->intr_enabled = B_TRUE;
6498 }
6499 }
6500 }
6501
6502 NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intrs_enable"));
6503 }
6504
6505 /*ARGSUSED*/
6506 static void
nxge_intrs_disable(p_nxge_t nxgep)6507 nxge_intrs_disable(p_nxge_t nxgep)
6508 {
6509 p_nxge_intr_t intrp;
6510 int i;
6511
6512 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_disable"));
6513
6514 intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;
6515
6516 if (!intrp->intr_registered) {
6517 NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intrs_disable: "
6518 "interrupts are not registered"));
6519 return;
6520 }
6521
6522 if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) {
6523 (void) ddi_intr_block_disable(intrp->htable,
6524 intrp->intr_added);
6525 } else {
6526 for (i = 0; i < intrp->intr_added; i++) {
6527 (void) ddi_intr_disable(intrp->htable[i]);
6528 }
6529 }
6530
6531 intrp->intr_enabled = B_FALSE;
6532 NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intrs_disable"));
6533 }
6534
6535 nxge_status_t
nxge_mac_register(p_nxge_t nxgep)6536 nxge_mac_register(p_nxge_t nxgep)
6537 {
6538 mac_register_t *macp;
6539 int status;
6540
6541 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_mac_register"));
6542
6543 if ((macp = mac_alloc(MAC_VERSION)) == NULL)
6544 return (NXGE_ERROR);
6545
6546 macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
6547 macp->m_driver = nxgep;
6548 macp->m_dip = nxgep->dip;
6549 if (!isLDOMguest(nxgep)) {
6550 macp->m_src_addr = nxgep->ouraddr.ether_addr_octet;
6551 } else {
6552 macp->m_src_addr = KMEM_ZALLOC(MAXMACADDRLEN, KM_SLEEP);
6553 macp->m_dst_addr = KMEM_ZALLOC(MAXMACADDRLEN, KM_SLEEP);
6554 (void) memset(macp->m_src_addr, 0xff, sizeof (MAXMACADDRLEN));
6555 }
6556 macp->m_callbacks = &nxge_m_callbacks;
6557 macp->m_min_sdu = 0;
6558 nxgep->mac.default_mtu = nxgep->mac.maxframesize -
6559 NXGE_EHEADER_VLAN_CRC;
6560 macp->m_max_sdu = nxgep->mac.default_mtu;
6561 macp->m_margin = VLAN_TAGSZ;
6562 macp->m_priv_props = nxge_priv_props;
6563 if (isLDOMguest(nxgep))
6564 macp->m_v12n = MAC_VIRT_LEVEL1;
6565 else
6566 macp->m_v12n = MAC_VIRT_HIO | MAC_VIRT_LEVEL1;
6567
6568 NXGE_DEBUG_MSG((nxgep, MAC_CTL,
6569 "==> nxge_mac_register: instance %d "
6570 "max_sdu %d margin %d maxframe %d (header %d)",
6571 nxgep->instance,
6572 macp->m_max_sdu, macp->m_margin,
6573 nxgep->mac.maxframesize,
6574 NXGE_EHEADER_VLAN_CRC));
6575
6576 status = mac_register(macp, &nxgep->mach);
6577 if (isLDOMguest(nxgep)) {
6578 KMEM_FREE(macp->m_src_addr, MAXMACADDRLEN);
6579 KMEM_FREE(macp->m_dst_addr, MAXMACADDRLEN);
6580 }
6581 mac_free(macp);
6582
6583 if (status != 0) {
6584 cmn_err(CE_WARN,
6585 "!nxge_mac_register failed (status %d instance %d)",
6586 status, nxgep->instance);
6587 return (NXGE_ERROR);
6588 }
6589
6590 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_mac_register success "
6591 "(instance %d)", nxgep->instance));
6592
6593 return (NXGE_OK);
6594 }
6595
6596 void
nxge_err_inject(p_nxge_t nxgep,queue_t * wq,mblk_t * mp)6597 nxge_err_inject(p_nxge_t nxgep, queue_t *wq, mblk_t *mp)
6598 {
6599 ssize_t size;
6600 mblk_t *nmp;
6601 uint8_t blk_id;
6602 uint8_t chan;
6603 uint32_t err_id;
6604 err_inject_t *eip;
6605
6606 NXGE_DEBUG_MSG((nxgep, STR_CTL, "==> nxge_err_inject"));
6607
6608 size = 1024;
6609 nmp = mp->b_cont;
6610 eip = (err_inject_t *)nmp->b_rptr;
6611 blk_id = eip->blk_id;
6612 err_id = eip->err_id;
6613 chan = eip->chan;
6614 cmn_err(CE_NOTE, "!blk_id = 0x%x\n", blk_id);
6615 cmn_err(CE_NOTE, "!err_id = 0x%x\n", err_id);
6616 cmn_err(CE_NOTE, "!chan = 0x%x\n", chan);
6617 switch (blk_id) {
6618 case MAC_BLK_ID:
6619 break;
6620 case TXMAC_BLK_ID:
6621 break;
6622 case RXMAC_BLK_ID:
6623 break;
6624 case MIF_BLK_ID:
6625 break;
6626 case IPP_BLK_ID:
6627 nxge_ipp_inject_err(nxgep, err_id);
6628 break;
6629 case TXC_BLK_ID:
6630 nxge_txc_inject_err(nxgep, err_id);
6631 break;
6632 case TXDMA_BLK_ID:
6633 nxge_txdma_inject_err(nxgep, err_id, chan);
6634 break;
6635 case RXDMA_BLK_ID:
6636 nxge_rxdma_inject_err(nxgep, err_id, chan);
6637 break;
6638 case ZCP_BLK_ID:
6639 nxge_zcp_inject_err(nxgep, err_id);
6640 break;
6641 case ESPC_BLK_ID:
6642 break;
6643 case FFLP_BLK_ID:
6644 break;
6645 case PHY_BLK_ID:
6646 break;
6647 case ETHER_SERDES_BLK_ID:
6648 break;
6649 case PCIE_SERDES_BLK_ID:
6650 break;
6651 case VIR_BLK_ID:
6652 break;
6653 }
6654
6655 nmp->b_wptr = nmp->b_rptr + size;
6656 NXGE_DEBUG_MSG((nxgep, STR_CTL, "<== nxge_err_inject"));
6657
6658 miocack(wq, mp, (int)size, 0);
6659 }
6660
6661 static int
nxge_init_common_dev(p_nxge_t nxgep)6662 nxge_init_common_dev(p_nxge_t nxgep)
6663 {
6664 p_nxge_hw_list_t hw_p;
6665 dev_info_t *p_dip;
6666
6667 ASSERT(nxgep != NULL);
6668
6669 NXGE_DEBUG_MSG((nxgep, MOD_CTL, "==> nxge_init_common_device"));
6670
6671 p_dip = nxgep->p_dip;
6672 MUTEX_ENTER(&nxge_common_lock);
6673 NXGE_DEBUG_MSG((nxgep, MOD_CTL,
6674 "==> nxge_init_common_dev:func # %d",
6675 nxgep->function_num));
6676 /*
6677 * Loop through existing per neptune hardware list.
6678 */
6679 for (hw_p = nxge_hw_list; hw_p; hw_p = hw_p->next) {
6680 NXGE_DEBUG_MSG((nxgep, MOD_CTL,
6681 "==> nxge_init_common_device:func # %d "
6682 "hw_p $%p parent dip $%p",
6683 nxgep->function_num,
6684 hw_p,
6685 p_dip));
6686 if (hw_p->parent_devp == p_dip) {
6687 nxgep->nxge_hw_p = hw_p;
6688 hw_p->ndevs++;
6689 hw_p->nxge_p[nxgep->function_num] = nxgep;
6690 NXGE_DEBUG_MSG((nxgep, MOD_CTL,
6691 "==> nxge_init_common_device:func # %d "
6692 "hw_p $%p parent dip $%p "
6693 "ndevs %d (found)",
6694 nxgep->function_num,
6695 hw_p,
6696 p_dip,
6697 hw_p->ndevs));
6698 break;
6699 }
6700 }
6701
6702 if (hw_p == NULL) {
6703
6704 char **prop_val;
6705 uint_t prop_len;
6706 int i;
6707
6708 NXGE_DEBUG_MSG((nxgep, MOD_CTL,
6709 "==> nxge_init_common_device:func # %d "
6710 "parent dip $%p (new)",
6711 nxgep->function_num,
6712 p_dip));
6713 hw_p = kmem_zalloc(sizeof (nxge_hw_list_t), KM_SLEEP);
6714 hw_p->parent_devp = p_dip;
6715 hw_p->magic = NXGE_NEPTUNE_MAGIC;
6716 nxgep->nxge_hw_p = hw_p;
6717 hw_p->ndevs++;
6718 hw_p->nxge_p[nxgep->function_num] = nxgep;
6719 hw_p->next = nxge_hw_list;
6720 if (nxgep->niu_type == N2_NIU) {
6721 hw_p->niu_type = N2_NIU;
6722 hw_p->platform_type = P_NEPTUNE_NIU;
6723 hw_p->tcam_size = TCAM_NIU_TCAM_MAX_ENTRY;
6724 } else {
6725 hw_p->niu_type = NIU_TYPE_NONE;
6726 hw_p->platform_type = P_NEPTUNE_NONE;
6727 hw_p->tcam_size = TCAM_NXGE_TCAM_MAX_ENTRY;
6728 }
6729
6730 hw_p->tcam = KMEM_ZALLOC(sizeof (tcam_flow_spec_t) *
6731 hw_p->tcam_size, KM_SLEEP);
6732
6733 MUTEX_INIT(&hw_p->nxge_cfg_lock, NULL, MUTEX_DRIVER, NULL);
6734 MUTEX_INIT(&hw_p->nxge_tcam_lock, NULL, MUTEX_DRIVER, NULL);
6735 MUTEX_INIT(&hw_p->nxge_vlan_lock, NULL, MUTEX_DRIVER, NULL);
6736 MUTEX_INIT(&hw_p->nxge_mdio_lock, NULL, MUTEX_DRIVER, NULL);
6737
6738 nxge_hw_list = hw_p;
6739
6740 if (ddi_prop_lookup_string_array(DDI_DEV_T_ANY, nxgep->dip, 0,
6741 "compatible", &prop_val, &prop_len) == DDI_PROP_SUCCESS) {
6742 for (i = 0; i < prop_len; i++) {
6743 if ((strcmp((caddr_t)prop_val[i],
6744 NXGE_ROCK_COMPATIBLE) == 0)) {
6745 hw_p->platform_type = P_NEPTUNE_ROCK;
6746 NXGE_DEBUG_MSG((nxgep, MOD_CTL,
6747 "ROCK hw_p->platform_type %d",
6748 hw_p->platform_type));
6749 break;
6750 }
6751 NXGE_DEBUG_MSG((nxgep, MOD_CTL,
6752 "nxge_init_common_dev: read compatible"
6753 " property[%d] val[%s]",
6754 i, (caddr_t)prop_val[i]));
6755 }
6756 }
6757
6758 ddi_prop_free(prop_val);
6759
6760 (void) nxge_scan_ports_phy(nxgep, nxge_hw_list);
6761 }
6762
6763 MUTEX_EXIT(&nxge_common_lock);
6764
6765 nxgep->platform_type = hw_p->platform_type;
6766 NXGE_DEBUG_MSG((nxgep, MOD_CTL, "nxgep->platform_type %d",
6767 nxgep->platform_type));
6768 if (nxgep->niu_type != N2_NIU) {
6769 nxgep->niu_type = hw_p->niu_type;
6770 }
6771
6772 NXGE_DEBUG_MSG((nxgep, MOD_CTL,
6773 "==> nxge_init_common_device (nxge_hw_list) $%p",
6774 nxge_hw_list));
6775 NXGE_DEBUG_MSG((nxgep, MOD_CTL, "<== nxge_init_common_device"));
6776
6777 return (NXGE_OK);
6778 }
6779
6780 static void
nxge_uninit_common_dev(p_nxge_t nxgep)6781 nxge_uninit_common_dev(p_nxge_t nxgep)
6782 {
6783 p_nxge_hw_list_t hw_p, h_hw_p;
6784 p_nxge_dma_pt_cfg_t p_dma_cfgp;
6785 p_nxge_hw_pt_cfg_t p_cfgp;
6786 dev_info_t *p_dip;
6787
6788 ASSERT(nxgep != NULL);
6789
6790 NXGE_DEBUG_MSG((nxgep, MOD_CTL, "==> nxge_uninit_common_device"));
6791 if (nxgep->nxge_hw_p == NULL) {
6792 NXGE_DEBUG_MSG((nxgep, MOD_CTL,
6793 "<== nxge_uninit_common_device (no common)"));
6794 return;
6795 }
6796
6797 MUTEX_ENTER(&nxge_common_lock);
6798 h_hw_p = nxge_hw_list;
6799 for (hw_p = nxge_hw_list; hw_p; hw_p = hw_p->next) {
6800 p_dip = hw_p->parent_devp;
6801 if (nxgep->nxge_hw_p == hw_p &&
6802 p_dip == nxgep->p_dip &&
6803 nxgep->nxge_hw_p->magic == NXGE_NEPTUNE_MAGIC &&
6804 hw_p->magic == NXGE_NEPTUNE_MAGIC) {
6805
6806 NXGE_DEBUG_MSG((nxgep, MOD_CTL,
6807 "==> nxge_uninit_common_device:func # %d "
6808 "hw_p $%p parent dip $%p "
6809 "ndevs %d (found)",
6810 nxgep->function_num,
6811 hw_p,
6812 p_dip,
6813 hw_p->ndevs));
6814
6815 /*
6816 * Release the RDC table, a shared resoruce
6817 * of the nxge hardware. The RDC table was
6818 * assigned to this instance of nxge in
6819 * nxge_use_cfg_dma_config().
6820 */
6821 if (!isLDOMguest(nxgep)) {
6822 p_dma_cfgp =
6823 (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;
6824 p_cfgp =
6825 (p_nxge_hw_pt_cfg_t)&p_dma_cfgp->hw_config;
6826 (void) nxge_fzc_rdc_tbl_unbind(nxgep,
6827 p_cfgp->def_mac_rxdma_grpid);
6828
6829 /* Cleanup any outstanding groups. */
6830 nxge_grp_cleanup(nxgep);
6831 }
6832
6833 if (hw_p->ndevs) {
6834 hw_p->ndevs--;
6835 }
6836 hw_p->nxge_p[nxgep->function_num] = NULL;
6837 if (!hw_p->ndevs) {
6838 KMEM_FREE(hw_p->tcam,
6839 sizeof (tcam_flow_spec_t) *
6840 hw_p->tcam_size);
6841 MUTEX_DESTROY(&hw_p->nxge_vlan_lock);
6842 MUTEX_DESTROY(&hw_p->nxge_tcam_lock);
6843 MUTEX_DESTROY(&hw_p->nxge_cfg_lock);
6844 MUTEX_DESTROY(&hw_p->nxge_mdio_lock);
6845 NXGE_DEBUG_MSG((nxgep, MOD_CTL,
6846 "==> nxge_uninit_common_device: "
6847 "func # %d "
6848 "hw_p $%p parent dip $%p "
6849 "ndevs %d (last)",
6850 nxgep->function_num,
6851 hw_p,
6852 p_dip,
6853 hw_p->ndevs));
6854
6855 nxge_hio_uninit(nxgep);
6856
6857 if (hw_p == nxge_hw_list) {
6858 NXGE_DEBUG_MSG((nxgep, MOD_CTL,
6859 "==> nxge_uninit_common_device:"
6860 "remove head func # %d "
6861 "hw_p $%p parent dip $%p "
6862 "ndevs %d (head)",
6863 nxgep->function_num,
6864 hw_p,
6865 p_dip,
6866 hw_p->ndevs));
6867 nxge_hw_list = hw_p->next;
6868 } else {
6869 NXGE_DEBUG_MSG((nxgep, MOD_CTL,
6870 "==> nxge_uninit_common_device:"
6871 "remove middle func # %d "
6872 "hw_p $%p parent dip $%p "
6873 "ndevs %d (middle)",
6874 nxgep->function_num,
6875 hw_p,
6876 p_dip,
6877 hw_p->ndevs));
6878 h_hw_p->next = hw_p->next;
6879 }
6880
6881 nxgep->nxge_hw_p = NULL;
6882 KMEM_FREE(hw_p, sizeof (nxge_hw_list_t));
6883 }
6884 break;
6885 } else {
6886 h_hw_p = hw_p;
6887 }
6888 }
6889
6890 MUTEX_EXIT(&nxge_common_lock);
6891 NXGE_DEBUG_MSG((nxgep, MOD_CTL,
6892 "==> nxge_uninit_common_device (nxge_hw_list) $%p",
6893 nxge_hw_list));
6894
6895 NXGE_DEBUG_MSG((nxgep, MOD_CTL, "<= nxge_uninit_common_device"));
6896 }
6897
6898 /*
6899 * Determines the number of ports from the niu_type or the platform type.
6900 * Returns the number of ports, or returns zero on failure.
6901 */
6902
6903 int
nxge_get_nports(p_nxge_t nxgep)6904 nxge_get_nports(p_nxge_t nxgep)
6905 {
6906 int nports = 0;
6907
6908 switch (nxgep->niu_type) {
6909 case N2_NIU:
6910 case NEPTUNE_2_10GF:
6911 nports = 2;
6912 break;
6913 case NEPTUNE_4_1GC:
6914 case NEPTUNE_2_10GF_2_1GC:
6915 case NEPTUNE_1_10GF_3_1GC:
6916 case NEPTUNE_1_1GC_1_10GF_2_1GC:
6917 case NEPTUNE_2_10GF_2_1GRF:
6918 nports = 4;
6919 break;
6920 default:
6921 switch (nxgep->platform_type) {
6922 case P_NEPTUNE_NIU:
6923 case P_NEPTUNE_ATLAS_2PORT:
6924 nports = 2;
6925 break;
6926 case P_NEPTUNE_ATLAS_4PORT:
6927 case P_NEPTUNE_MARAMBA_P0:
6928 case P_NEPTUNE_MARAMBA_P1:
6929 case P_NEPTUNE_ROCK:
6930 case P_NEPTUNE_ALONSO:
6931 nports = 4;
6932 break;
6933 default:
6934 break;
6935 }
6936 break;
6937 }
6938
6939 return (nports);
6940 }
6941
6942 /*
6943 * The following two functions are to support
6944 * PSARC/2007/453 MSI-X interrupt limit override.
6945 */
6946 static int
nxge_create_msi_property(p_nxge_t nxgep)6947 nxge_create_msi_property(p_nxge_t nxgep)
6948 {
6949 int nmsi;
6950 extern int ncpus;
6951
6952 NXGE_DEBUG_MSG((nxgep, MOD_CTL, "==>nxge_create_msi_property"));
6953
6954 switch (nxgep->mac.portmode) {
6955 case PORT_10G_COPPER:
6956 case PORT_10G_FIBER:
6957 case PORT_10G_TN1010:
6958 (void) ddi_prop_create(DDI_DEV_T_NONE, nxgep->dip,
6959 DDI_PROP_CANSLEEP, "#msix-request", NULL, 0);
6960 /*
6961 * The maximum MSI-X requested will be 8.
6962 * If the # of CPUs is less than 8, we will request
6963 * # MSI-X based on the # of CPUs (default).
6964 */
6965 NXGE_DEBUG_MSG((nxgep, MOD_CTL,
6966 "==>nxge_create_msi_property (10G): nxge_msix_10g_intrs %d",
6967 nxge_msix_10g_intrs));
6968 if ((nxge_msix_10g_intrs == 0) ||
6969 (nxge_msix_10g_intrs > NXGE_MSIX_MAX_ALLOWED)) {
6970 nmsi = NXGE_MSIX_REQUEST_10G;
6971 NXGE_DEBUG_MSG((nxgep, MOD_CTL,
6972 "==>nxge_create_msi_property (10G): reset to 8"));
6973 } else {
6974 nmsi = nxge_msix_10g_intrs;
6975 }
6976
6977 /*
6978 * If # of interrupts requested is 8 (default),
6979 * the checking of the number of cpus will be
6980 * be maintained.
6981 */
6982 if ((nmsi == NXGE_MSIX_REQUEST_10G) &&
6983 (ncpus < nmsi)) {
6984 NXGE_DEBUG_MSG((nxgep, MOD_CTL,
6985 "==>nxge_create_msi_property (10G): reset to 8"));
6986 nmsi = ncpus;
6987 }
6988 NXGE_DEBUG_MSG((nxgep, MOD_CTL,
6989 "==>nxge_create_msi_property(10G): exists 0x%x (nmsi %d)",
6990 ddi_prop_exists(DDI_DEV_T_NONE, nxgep->dip,
6991 DDI_PROP_CANSLEEP, "#msix-request"), nmsi));
6992 break;
6993
6994 default:
6995 (void) ddi_prop_create(DDI_DEV_T_NONE, nxgep->dip,
6996 DDI_PROP_CANSLEEP, "#msix-request", NULL, 0);
6997 NXGE_DEBUG_MSG((nxgep, MOD_CTL,
6998 "==>nxge_create_msi_property (1G): nxge_msix_1g_intrs %d",
6999 nxge_msix_1g_intrs));
7000 if ((nxge_msix_1g_intrs == 0) ||
7001 (nxge_msix_1g_intrs > NXGE_MSIX_MAX_ALLOWED)) {
7002 nmsi = NXGE_MSIX_REQUEST_1G;
7003 NXGE_DEBUG_MSG((nxgep, MOD_CTL,
7004 "==>nxge_create_msi_property (1G): reset to 2"));
7005 } else {
7006 nmsi = nxge_msix_1g_intrs;
7007 }
7008 NXGE_DEBUG_MSG((nxgep, MOD_CTL,
7009 "==>nxge_create_msi_property(1G): exists 0x%x (nmsi %d)",
7010 ddi_prop_exists(DDI_DEV_T_NONE, nxgep->dip,
7011 DDI_PROP_CANSLEEP, "#msix-request"), nmsi));
7012 break;
7013 }
7014
7015 NXGE_DEBUG_MSG((nxgep, MOD_CTL, "<==nxge_create_msi_property"));
7016 return (nmsi);
7017 }
7018
7019 /*
7020 * The following is a software around for the Neptune hardware's
7021 * interrupt bugs; The Neptune hardware may generate spurious interrupts when
7022 * an interrupr handler is removed.
7023 */
7024 #define NXGE_PCI_PORT_LOGIC_OFFSET 0x98
7025 #define NXGE_PIM_RESET (1ULL << 29)
7026 #define NXGE_GLU_RESET (1ULL << 30)
7027 #define NXGE_NIU_RESET (1ULL << 31)
7028 #define NXGE_PCI_RESET_ALL (NXGE_PIM_RESET | \
7029 NXGE_GLU_RESET | \
7030 NXGE_NIU_RESET)
7031
7032 #define NXGE_WAIT_QUITE_TIME 200000
7033 #define NXGE_WAIT_QUITE_RETRY 40
7034 #define NXGE_PCI_RESET_WAIT 1000000 /* one second */
7035
7036 static void
nxge_niu_peu_reset(p_nxge_t nxgep)7037 nxge_niu_peu_reset(p_nxge_t nxgep)
7038 {
7039 uint32_t rvalue;
7040 p_nxge_hw_list_t hw_p;
7041 p_nxge_t fnxgep;
7042 int i, j;
7043
7044 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, "==> nxge_niu_peu_reset"));
7045 if ((hw_p = nxgep->nxge_hw_p) == NULL) {
7046 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
7047 "==> nxge_niu_peu_reset: NULL hardware pointer"));
7048 return;
7049 }
7050
7051 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
7052 "==> nxge_niu_peu_reset: flags 0x%x link timer id %d timer id %d",
7053 hw_p->flags, nxgep->nxge_link_poll_timerid,
7054 nxgep->nxge_timerid));
7055
7056 MUTEX_ENTER(&hw_p->nxge_cfg_lock);
7057 /*
7058 * Make sure other instances from the same hardware
7059 * stop sending PIO and in quiescent state.
7060 */
7061 for (i = 0; i < NXGE_MAX_PORTS; i++) {
7062 fnxgep = hw_p->nxge_p[i];
7063 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
7064 "==> nxge_niu_peu_reset: checking entry %d "
7065 "nxgep $%p", i, fnxgep));
7066 #ifdef NXGE_DEBUG
7067 if (fnxgep) {
7068 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
7069 "==> nxge_niu_peu_reset: entry %d (function %d) "
7070 "link timer id %d hw timer id %d",
7071 i, fnxgep->function_num,
7072 fnxgep->nxge_link_poll_timerid,
7073 fnxgep->nxge_timerid));
7074 }
7075 #endif
7076 if (fnxgep && fnxgep != nxgep &&
7077 (fnxgep->nxge_timerid || fnxgep->nxge_link_poll_timerid)) {
7078 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
7079 "==> nxge_niu_peu_reset: checking $%p "
7080 "(function %d) timer ids",
7081 fnxgep, fnxgep->function_num));
7082 for (j = 0; j < NXGE_WAIT_QUITE_RETRY; j++) {
7083 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
7084 "==> nxge_niu_peu_reset: waiting"));
7085 NXGE_DELAY(NXGE_WAIT_QUITE_TIME);
7086 if (!fnxgep->nxge_timerid &&
7087 !fnxgep->nxge_link_poll_timerid) {
7088 break;
7089 }
7090 }
7091 NXGE_DELAY(NXGE_WAIT_QUITE_TIME);
7092 if (fnxgep->nxge_timerid ||
7093 fnxgep->nxge_link_poll_timerid) {
7094 MUTEX_EXIT(&hw_p->nxge_cfg_lock);
7095 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
7096 "<== nxge_niu_peu_reset: cannot reset "
7097 "hardware (devices are still in use)"));
7098 return;
7099 }
7100 }
7101 }
7102
7103 if ((hw_p->flags & COMMON_RESET_NIU_PCI) != COMMON_RESET_NIU_PCI) {
7104 hw_p->flags |= COMMON_RESET_NIU_PCI;
7105 rvalue = pci_config_get32(nxgep->dev_regs->nxge_pciregh,
7106 NXGE_PCI_PORT_LOGIC_OFFSET);
7107 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
7108 "nxge_niu_peu_reset: read offset 0x%x (%d) "
7109 "(data 0x%x)",
7110 NXGE_PCI_PORT_LOGIC_OFFSET,
7111 NXGE_PCI_PORT_LOGIC_OFFSET,
7112 rvalue));
7113
7114 rvalue |= NXGE_PCI_RESET_ALL;
7115 pci_config_put32(nxgep->dev_regs->nxge_pciregh,
7116 NXGE_PCI_PORT_LOGIC_OFFSET, rvalue);
7117 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
7118 "nxge_niu_peu_reset: RESETTING NIU: write NIU reset 0x%x",
7119 rvalue));
7120
7121 NXGE_DELAY(NXGE_PCI_RESET_WAIT);
7122 }
7123
7124 MUTEX_EXIT(&hw_p->nxge_cfg_lock);
7125 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_niu_peu_reset"));
7126 }
7127
7128 static void
nxge_set_pci_replay_timeout(p_nxge_t nxgep)7129 nxge_set_pci_replay_timeout(p_nxge_t nxgep)
7130 {
7131 p_dev_regs_t dev_regs;
7132 uint32_t value;
7133
7134 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_set_pci_replay_timeout"));
7135
7136 if (!nxge_set_replay_timer) {
7137 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
7138 "==> nxge_set_pci_replay_timeout: will not change "
7139 "the timeout"));
7140 return;
7141 }
7142
7143 dev_regs = nxgep->dev_regs;
7144 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
7145 "==> nxge_set_pci_replay_timeout: dev_regs 0x%p pcireg 0x%p",
7146 dev_regs, dev_regs->nxge_pciregh));
7147
7148 if (dev_regs == NULL || (dev_regs->nxge_pciregh == NULL)) {
7149 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
7150 "==> nxge_set_pci_replay_timeout: NULL dev_regs $%p or "
7151 "no PCI handle",
7152 dev_regs));
7153 return;
7154 }
7155 value = (pci_config_get32(dev_regs->nxge_pciregh,
7156 PCI_REPLAY_TIMEOUT_CFG_OFFSET) |
7157 (nxge_replay_timeout << PCI_REPLAY_TIMEOUT_SHIFT));
7158
7159 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
7160 "nxge_set_pci_replay_timeout: replay timeout value before set 0x%x "
7161 "(timeout value to set 0x%x at offset 0x%x) value 0x%x",
7162 pci_config_get32(dev_regs->nxge_pciregh,
7163 PCI_REPLAY_TIMEOUT_CFG_OFFSET), nxge_replay_timeout,
7164 PCI_REPLAY_TIMEOUT_CFG_OFFSET, value));
7165
7166 pci_config_put32(dev_regs->nxge_pciregh, PCI_REPLAY_TIMEOUT_CFG_OFFSET,
7167 value);
7168
7169 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
7170 "nxge_set_pci_replay_timeout: replay timeout value after set 0x%x",
7171 pci_config_get32(dev_regs->nxge_pciregh,
7172 PCI_REPLAY_TIMEOUT_CFG_OFFSET)));
7173
7174 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_set_pci_replay_timeout"));
7175 }
7176
7177 /*
7178 * quiesce(9E) entry point.
7179 *
7180 * This function is called when the system is single-threaded at high
7181 * PIL with preemption disabled. Therefore, this function must not be
7182 * blocked.
7183 *
7184 * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
7185 * DDI_FAILURE indicates an error condition and should almost never happen.
7186 */
7187 static int
nxge_quiesce(dev_info_t * dip)7188 nxge_quiesce(dev_info_t *dip)
7189 {
7190 int instance = ddi_get_instance(dip);
7191 p_nxge_t nxgep = (p_nxge_t)ddi_get_soft_state(nxge_list, instance);
7192
7193 if (nxgep == NULL)
7194 return (DDI_FAILURE);
7195
7196 /* Turn off debugging */
7197 nxge_debug_level = NO_DEBUG;
7198 nxgep->nxge_debug_level = NO_DEBUG;
7199 npi_debug_level = NO_DEBUG;
7200
7201 /*
7202 * Stop link monitor only when linkchkmod is interrupt based
7203 */
7204 if (nxgep->mac.linkchkmode == LINKCHK_INTR) {
7205 (void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);
7206 }
7207
7208 (void) nxge_intr_hw_disable(nxgep);
7209
7210 /*
7211 * Reset the receive MAC side.
7212 */
7213 (void) nxge_rx_mac_disable(nxgep);
7214
7215 /* Disable and soft reset the IPP */
7216 if (!isLDOMguest(nxgep))
7217 (void) nxge_ipp_disable(nxgep);
7218
7219 /*
7220 * Reset the transmit/receive DMA side.
7221 */
7222 (void) nxge_txdma_hw_mode(nxgep, NXGE_DMA_STOP);
7223 (void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_STOP);
7224
7225 /*
7226 * Reset the transmit MAC side.
7227 */
7228 (void) nxge_tx_mac_disable(nxgep);
7229
7230 return (DDI_SUCCESS);
7231 }
7232