1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21 /*
22 * Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved.
23 */
24
25 /*
26  * SunOS MT STREAMS NIU/Neptune 10Gb Ethernet Device Driver.
27 */
28 #include <sys/nxge/nxge_impl.h>
29 #include <sys/nxge/nxge_hio.h>
30 #include <sys/nxge/nxge_rxdma.h>
31 #include <sys/pcie.h>
32
33 uint32_t nxge_use_partition = 0; /* debug partition flag */
34 uint32_t nxge_dma_obp_props_only = 1; /* use obp published props */
35 uint32_t nxge_use_rdc_intr = 1; /* debug to assign rdc intr */
36 /*
37 * PSARC/2007/453 MSI-X interrupt limit override
38 */
39 uint32_t nxge_msi_enable = 2;
40
41 /*
42  * Software workaround for a Neptune (PCI-E)
43  * hardware interrupt bug: the hardware
44  * may generate spurious interrupts after the
45  * device interrupt handler has been removed. If this flag
46  * is enabled, the driver will reset the
47  * hardware when the device is being detached.
48 */
49 uint32_t nxge_peu_reset_enable = 0;
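
/*
 * Illustrative usage (an assumption, following the /etc/system
 * convention noted for the tunables below; not in the original
 * source): the reset workaround could be enabled at boot with
 *
 *	set nxge:nxge_peu_reset_enable = 1
 */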
50
51 /*
52 * Software workaround for the hardware
53  * checksum bugs that affect packet transmission
54  * and reception:
55 *
56 * Usage of nxge_cksum_offload:
57 *
58 * (1) nxge_cksum_offload = 0 (default):
59  *	- transmit packets:
60  *	  TCP: uses the hardware checksum feature.
61  *	  UDP: driver will compute the software checksum
62  *	       based on the partial checksum computed
63  *	       by the IP layer.
64  *	- receive packets:
65  *	  TCP: marks packet checksum flags based on hardware result.
66 * UDP: will not mark checksum flags.
67 *
68 * (2) nxge_cksum_offload = 1:
69 * - transmit packets:
70 * TCP/UDP: uses the hardware checksum feature.
71  *	- receive packets:
72 * TCP/UDP: marks packet checksum flags based on hardware result.
73 *
74 * (3) nxge_cksum_offload = 2:
75 * - The driver will not register its checksum capability.
76 * Checksum for both TCP and UDP will be computed
77 * by the stack.
78 * - The software LSO is not allowed in this case.
79 *
80 * (4) nxge_cksum_offload > 2:
81  *	- Treated as if it were set to 2
82 * (stack will compute the checksum).
83 *
84 * (5) If the hardware bug is fixed, this workaround
85 * needs to be updated accordingly to reflect
86 * the new hardware revision.
87 */
88 uint32_t nxge_cksum_offload = 0;
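
/*
 * Hedged example (not from the original source): to select mode (3)
 * above and let the stack compute all TCP/UDP checksums, the tunable
 * can be set in /etc/system:
 *
 *	set nxge:nxge_cksum_offload = 2
 */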
89
90 /*
91 * Globals: tunable parameters (/etc/system or adb)
92 *
93 */
94 uint32_t nxge_rbr_size = NXGE_RBR_RBB_DEFAULT;
95 uint32_t nxge_rbr_spare_size = 0;
96 uint32_t nxge_rcr_size = NXGE_RCR_DEFAULT;
97 uint16_t nxge_rdc_buf_offset = SW_OFFSET_NO_OFFSET;
98 uint32_t nxge_tx_ring_size = NXGE_TX_RING_DEFAULT;
99 boolean_t nxge_no_msg = B_TRUE; /* control message display */
100 uint32_t nxge_no_link_notify = 0; /* control DL_NOTIFY */
101 uint32_t nxge_bcopy_thresh = TX_BCOPY_MAX;
102 uint32_t nxge_dvma_thresh = TX_FASTDVMA_MIN;
103 uint32_t nxge_dma_stream_thresh = TX_STREAM_MIN;
104 uint32_t nxge_jumbo_mtu = TX_JUMBO_MTU;
105 nxge_tx_mode_t nxge_tx_scheme = NXGE_USE_SERIAL;
106
107 /* MAX LSO size */
108 #define NXGE_LSO_MAXLEN 65535
109 uint32_t nxge_lso_max = NXGE_LSO_MAXLEN;
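
/*
 * Illustrative tuning sketch (assumption): since nxge_lso_max is a
 * tunable capped at NXGE_LSO_MAXLEN, a smaller software LSO limit
 * could be requested in /etc/system, e.g.:
 *
 *	set nxge:nxge_lso_max = 32768
 */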
110
111
112 /*
113  * Tunable to reduce the amount of time spent in the
114  * ISR doing Rx processing.
115 */
116 uint32_t nxge_max_rx_pkts = 1024;
117
118 /*
119 * Tunables to manage the receive buffer blocks.
120 *
121 * nxge_rx_threshold_hi: copy all buffers.
122 * nxge_rx_bcopy_size_type: receive buffer block size type.
123 * nxge_rx_threshold_lo: copy only up to tunable block size type.
124 */
125 nxge_rxbuf_threshold_t nxge_rx_threshold_hi = NXGE_RX_COPY_6;
126 nxge_rxbuf_type_t nxge_rx_buf_size_type = RCR_PKTBUFSZ_0;
127 nxge_rxbuf_threshold_t nxge_rx_threshold_lo = NXGE_RX_COPY_3;
128
129 /* Use kmem_alloc() to allocate data buffers. */
130 #if defined(__sparc)
131 uint32_t nxge_use_kmem_alloc = 1;
132 #elif defined(__i386)
133 uint32_t nxge_use_kmem_alloc = 0;
134 #else
135 uint32_t nxge_use_kmem_alloc = 1;
136 #endif
137
138 rtrace_t npi_rtracebuf;
139
140 /*
141  * The hardware sometimes fails to allow enough time for the link partner
142  * to send an acknowledgement for packets that the hardware sent to it. The
143  * hardware resends the packets earlier than it should in those instances.
144  * This behavior caused some switches to acknowledge the wrong packets,
145  * which triggered a fatal error.
146  * The software workaround is to set the replay timer to a value
147  * suggested by the hardware team.
148  *
149  * PCI config space replay timer register:
150  * The following replay timeout value is 0xc
151  * for bits 18:14.
152 */
153 #define PCI_REPLAY_TIMEOUT_CFG_OFFSET 0xb8
154 #define PCI_REPLAY_TIMEOUT_SHIFT 14
155
156 uint32_t nxge_set_replay_timer = 1;
157 uint32_t nxge_replay_timeout = 0xc;
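
/*
 * Minimal sketch of the config-space update performed by
 * nxge_set_pci_replay_timeout() later in this file; the 5-bit field
 * mask is an illustrative assumption based on the bits 18:14
 * description above:
 *
 *	uint32_t val;
 *
 *	val = pci_config_get32(handle, PCI_REPLAY_TIMEOUT_CFG_OFFSET);
 *	val &= ~(0x1f << PCI_REPLAY_TIMEOUT_SHIFT);
 *	val |= (nxge_replay_timeout << PCI_REPLAY_TIMEOUT_SHIFT);
 *	pci_config_put32(handle, PCI_REPLAY_TIMEOUT_CFG_OFFSET, val);
 */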
158
159 /*
160  * The transmit serialization sometimes sleeps
161  * longer than it should before calling the driver
162  * transmit function.
163  * The performance group suggested that a time wait tunable
164  * be used to set the maximum wait time when needed,
165  * with the default set to 1 tick.
166 */
167 uint32_t nxge_tx_serial_maxsleep = 1;
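
/*
 * For example (an assumption, using the same /etc/system convention
 * as the other tunables above), the maximum wait could be raised to
 * 2 ticks:
 *
 *	set nxge:nxge_tx_serial_maxsleep = 2
 */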
168
169 #if defined(sun4v)
170 /*
171 * Hypervisor N2/NIU services information.
172 */
173 /*
174 * The following is the default API supported:
175 * major 1 and minor 1.
176 *
177  * Please update MAX_NIU_MAJORS,
178  * MAX_NIU_MINORS, and the minor number supported
179  * when newer Hypervisor API interfaces
180 * are added. Also, please update nxge_hsvc_register()
181 * if needed.
182 */
183 static hsvc_info_t niu_hsvc = {
184 HSVC_REV_1, NULL, HSVC_GROUP_NIU, NIU_MAJOR_VER,
185 NIU_MINOR_VER, "nxge"
186 };
187
188 static int nxge_hsvc_register(p_nxge_t);
189 #endif
190
191 /*
192 * Function Prototypes
193 */
194 static int nxge_attach(dev_info_t *, ddi_attach_cmd_t);
195 static int nxge_detach(dev_info_t *, ddi_detach_cmd_t);
196 static void nxge_unattach(p_nxge_t);
197 static int nxge_quiesce(dev_info_t *);
198
199 #if NXGE_PROPERTY
200 static void nxge_remove_hard_properties(p_nxge_t);
201 #endif
202
203 /*
204 * These two functions are required by nxge_hio.c
205 */
206 extern int nxge_m_mmac_remove(void *arg, int slot);
207 extern void nxge_grp_cleanup(p_nxge_t nxge);
208
209 static nxge_status_t nxge_setup_system_dma_pages(p_nxge_t);
210
211 static nxge_status_t nxge_setup_mutexes(p_nxge_t);
212 static void nxge_destroy_mutexes(p_nxge_t);
213
214 static nxge_status_t nxge_map_regs(p_nxge_t nxgep);
215 static void nxge_unmap_regs(p_nxge_t nxgep);
216 #ifdef NXGE_DEBUG
217 static void nxge_test_map_regs(p_nxge_t nxgep);
218 #endif
219
220 static nxge_status_t nxge_add_intrs(p_nxge_t nxgep);
221 static void nxge_remove_intrs(p_nxge_t nxgep);
222
223 static nxge_status_t nxge_add_intrs_adv(p_nxge_t nxgep);
224 static nxge_status_t nxge_add_intrs_adv_type(p_nxge_t, uint32_t);
225 static nxge_status_t nxge_add_intrs_adv_type_fix(p_nxge_t, uint32_t);
226 static void nxge_intrs_enable(p_nxge_t nxgep);
227 static void nxge_intrs_disable(p_nxge_t nxgep);
228
229 static void nxge_suspend(p_nxge_t);
230 static nxge_status_t nxge_resume(p_nxge_t);
231
232 static nxge_status_t nxge_setup_dev(p_nxge_t);
233 static void nxge_destroy_dev(p_nxge_t);
234
235 static nxge_status_t nxge_alloc_mem_pool(p_nxge_t);
236 static void nxge_free_mem_pool(p_nxge_t);
237
238 nxge_status_t nxge_alloc_rx_mem_pool(p_nxge_t);
239 static void nxge_free_rx_mem_pool(p_nxge_t);
240
241 nxge_status_t nxge_alloc_tx_mem_pool(p_nxge_t);
242 static void nxge_free_tx_mem_pool(p_nxge_t);
243
244 static nxge_status_t nxge_dma_mem_alloc(p_nxge_t, dma_method_t,
245 struct ddi_dma_attr *,
246 size_t, ddi_device_acc_attr_t *, uint_t,
247 p_nxge_dma_common_t);
248
249 static void nxge_dma_mem_free(p_nxge_dma_common_t);
250 static void nxge_dma_free_rx_data_buf(p_nxge_dma_common_t);
251
252 static nxge_status_t nxge_alloc_rx_buf_dma(p_nxge_t, uint16_t,
253 p_nxge_dma_common_t *, size_t, size_t, uint32_t *);
254 static void nxge_free_rx_buf_dma(p_nxge_t, p_nxge_dma_common_t, uint32_t);
255
256 static nxge_status_t nxge_alloc_rx_cntl_dma(p_nxge_t, uint16_t,
257 p_nxge_dma_common_t *, size_t);
258 static void nxge_free_rx_cntl_dma(p_nxge_t, p_nxge_dma_common_t);
259
260 extern nxge_status_t nxge_alloc_tx_buf_dma(p_nxge_t, uint16_t,
261 p_nxge_dma_common_t *, size_t, size_t, uint32_t *);
262 static void nxge_free_tx_buf_dma(p_nxge_t, p_nxge_dma_common_t, uint32_t);
263
264 extern nxge_status_t nxge_alloc_tx_cntl_dma(p_nxge_t, uint16_t,
265 p_nxge_dma_common_t *,
266 size_t);
267 static void nxge_free_tx_cntl_dma(p_nxge_t, p_nxge_dma_common_t);
268
269 static int nxge_init_common_dev(p_nxge_t);
270 static void nxge_uninit_common_dev(p_nxge_t);
271 extern int nxge_param_set_mac(p_nxge_t, queue_t *, mblk_t *,
272 char *, caddr_t);
273 #if defined(sun4v)
274 extern nxge_status_t nxge_hio_rdc_enable(p_nxge_t nxgep);
275 extern nxge_status_t nxge_hio_rdc_intr_arm(p_nxge_t nxge, boolean_t arm);
276 #endif
277
278 /*
279 * The next declarations are for the GLDv3 interface.
280 */
281 static int nxge_m_start(void *);
282 static void nxge_m_stop(void *);
283 static int nxge_m_multicst(void *, boolean_t, const uint8_t *);
284 static int nxge_m_promisc(void *, boolean_t);
285 static void nxge_m_ioctl(void *, queue_t *, mblk_t *);
286 nxge_status_t nxge_mac_register(p_nxge_t);
287 static int nxge_altmac_set(p_nxge_t nxgep, uint8_t *mac_addr,
288 int slot, int rdctbl, boolean_t usetbl);
289 void nxge_mmac_kstat_update(p_nxge_t nxgep, int slot,
290 boolean_t factory);
291
292 static void nxge_m_getfactaddr(void *, uint_t, uint8_t *);
293 static boolean_t nxge_m_getcapab(void *, mac_capab_t, void *);
294 static int nxge_m_setprop(void *, const char *, mac_prop_id_t,
295 uint_t, const void *);
296 static int nxge_m_getprop(void *, const char *, mac_prop_id_t,
297 uint_t, void *);
298 static void nxge_m_propinfo(void *, const char *, mac_prop_id_t,
299 mac_prop_info_handle_t);
300 static void nxge_priv_propinfo(const char *, mac_prop_info_handle_t);
301 static int nxge_set_priv_prop(nxge_t *, const char *, uint_t,
302 const void *);
303 static int nxge_get_priv_prop(nxge_t *, const char *, uint_t, void *);
304 static void nxge_fill_ring(void *, mac_ring_type_t, const int, const int,
305 mac_ring_info_t *, mac_ring_handle_t);
306 static void nxge_group_add_ring(mac_group_driver_t, mac_ring_driver_t,
307 mac_ring_type_t);
308 static void nxge_group_rem_ring(mac_group_driver_t, mac_ring_driver_t,
309 mac_ring_type_t);
310
311 static void nxge_niu_peu_reset(p_nxge_t nxgep);
312 static void nxge_set_pci_replay_timeout(nxge_t *);
313
314 char *nxge_priv_props[] = {
315 "_adv_10gfdx_cap",
316 "_adv_pause_cap",
317 "_function_number",
318 "_fw_version",
319 "_port_mode",
320 "_hot_swap_phy",
321 "_rxdma_intr_time",
322 "_rxdma_intr_pkts",
323 "_class_opt_ipv4_tcp",
324 "_class_opt_ipv4_udp",
325 "_class_opt_ipv4_ah",
326 "_class_opt_ipv4_sctp",
327 "_class_opt_ipv6_tcp",
328 "_class_opt_ipv6_udp",
329 "_class_opt_ipv6_ah",
330 "_class_opt_ipv6_sctp",
331 "_soft_lso_enable",
332 NULL
333 };
334
335 #define NXGE_NEPTUNE_MAGIC 0x4E584745UL
336 #define MAX_DUMP_SZ 256
337
338 #define NXGE_M_CALLBACK_FLAGS \
339 (MC_IOCTL | MC_GETCAPAB | MC_SETPROP | MC_GETPROP | MC_PROPINFO)
340
341 mac_callbacks_t nxge_m_callbacks = {
342 NXGE_M_CALLBACK_FLAGS,
343 nxge_m_stat,
344 nxge_m_start,
345 nxge_m_stop,
346 nxge_m_promisc,
347 nxge_m_multicst,
348 NULL,
349 NULL,
350 NULL,
351 nxge_m_ioctl,
352 nxge_m_getcapab,
353 NULL,
354 NULL,
355 nxge_m_setprop,
356 nxge_m_getprop,
357 nxge_m_propinfo
358 };
359
360 void
361 nxge_err_inject(p_nxge_t, queue_t *, mblk_t *);
362
363 /* PSARC/2007/453 MSI-X interrupt limit override. */
364 #define NXGE_MSIX_REQUEST_10G 8
365 #define NXGE_MSIX_REQUEST_1G 2
366 static int nxge_create_msi_property(p_nxge_t);
367 /*
368  * For applications that care about
369  * latency, PAE and customers requested
370  * that the driver provide tunables that
371  * allow the user to request a higher number of
372  * interrupts to spread the interrupt load among
373  * multiple channels. The DDI framework limits
374  * the maximum number of MSI-X resources to allocate
375  * to 8 (ddi_msix_alloc_limit). If more than 8
376  * is requested, ddi_msix_alloc_limit must be raised accordingly.
377  * The default number of MSI interrupts is set to
378  * 8 for a 10G link and 2 for a 1G link.
379 */
380 #define NXGE_MSIX_MAX_ALLOWED 32
381 uint32_t nxge_msix_10g_intrs = NXGE_MSIX_REQUEST_10G;
382 uint32_t nxge_msix_1g_intrs = NXGE_MSIX_REQUEST_1G;
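
/*
 * Hypothetical tuning example (not from the original source): to
 * request 16 MSI-X vectors on a 10G port, both the driver tunable
 * and the DDI allocation limit would be raised in /etc/system:
 *
 *	set ddi_msix_alloc_limit = 16
 *	set nxge:nxge_msix_10g_intrs = 16
 *
 * NXGE_MSIX_MAX_ALLOWED (32) bounds what the driver will accept.
 */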
383
384 /*
385 * These global variables control the message
386 * output.
387 */
388 out_dbgmsg_t nxge_dbgmsg_out = DBG_CONSOLE | STR_LOG;
389 uint64_t nxge_debug_level;
390
391 /*
392 * This list contains the instance structures for the Neptune
393 * devices present in the system. The lock exists to guarantee
394 * mutually exclusive access to the list.
395 */
396 void *nxge_list = NULL;
397 void *nxge_hw_list = NULL;
398 nxge_os_mutex_t nxge_common_lock;
399 nxge_os_mutex_t nxgedebuglock;
400
401 extern uint64_t npi_debug_level;
402
403 extern nxge_status_t nxge_ldgv_init(p_nxge_t, int *, int *);
404 extern nxge_status_t nxge_ldgv_init_n2(p_nxge_t, int *, int *);
405 extern nxge_status_t nxge_ldgv_uninit(p_nxge_t);
406 extern nxge_status_t nxge_intr_ldgv_init(p_nxge_t);
407 extern void nxge_fm_init(p_nxge_t,
408 ddi_device_acc_attr_t *,
409 ddi_dma_attr_t *);
410 extern void nxge_fm_fini(p_nxge_t);
411 extern npi_status_t npi_mac_altaddr_disable(npi_handle_t, uint8_t, uint8_t);
412
413 /*
414 * Count used to maintain the number of buffers being used
415 * by Neptune instances and loaned up to the upper layers.
416 */
417 uint32_t nxge_mblks_pending = 0;
418
419 /*
420 * Device register access attributes for PIO.
421 */
422 static ddi_device_acc_attr_t nxge_dev_reg_acc_attr = {
423 DDI_DEVICE_ATTR_V1,
424 DDI_STRUCTURE_LE_ACC,
425 DDI_STRICTORDER_ACC,
426 DDI_DEFAULT_ACC
427 };
428
429 /*
430 * Device descriptor access attributes for DMA.
431 */
432 static ddi_device_acc_attr_t nxge_dev_desc_dma_acc_attr = {
433 DDI_DEVICE_ATTR_V0,
434 DDI_STRUCTURE_LE_ACC,
435 DDI_STRICTORDER_ACC
436 };
437
438 /*
439 * Device buffer access attributes for DMA.
440 */
441 static ddi_device_acc_attr_t nxge_dev_buf_dma_acc_attr = {
442 DDI_DEVICE_ATTR_V0,
443 DDI_STRUCTURE_BE_ACC,
444 DDI_STRICTORDER_ACC
445 };
446
447 ddi_dma_attr_t nxge_desc_dma_attr = {
448 DMA_ATTR_V0, /* version number. */
449 0, /* low address */
450 0xffffffffffffffff, /* high address */
451 0xffffffffffffffff, /* address counter max */
452 #ifndef NIU_PA_WORKAROUND
453 0x100000, /* alignment */
454 #else
455 0x2000,
456 #endif
457 0xfc00fc, /* dlim_burstsizes */
458 0x1, /* minimum transfer size */
459 0xffffffffffffffff, /* maximum transfer size */
460 0xffffffffffffffff, /* maximum segment size */
461 1, /* scatter/gather list length */
462 (unsigned int) 1, /* granularity */
463 0 /* attribute flags */
464 };
465
466 ddi_dma_attr_t nxge_tx_dma_attr = {
467 DMA_ATTR_V0, /* version number. */
468 0, /* low address */
469 0xffffffffffffffff, /* high address */
470 0xffffffffffffffff, /* address counter max */
471 #if defined(_BIG_ENDIAN)
472 0x2000, /* alignment */
473 #else
474 0x1000, /* alignment */
475 #endif
476 0xfc00fc, /* dlim_burstsizes */
477 0x1, /* minimum transfer size */
478 0xffffffffffffffff, /* maximum transfer size */
479 0xffffffffffffffff, /* maximum segment size */
480 5, /* scatter/gather list length */
481 (unsigned int) 1, /* granularity */
482 0 /* attribute flags */
483 };
484
485 ddi_dma_attr_t nxge_rx_dma_attr = {
486 DMA_ATTR_V0, /* version number. */
487 0, /* low address */
488 0xffffffffffffffff, /* high address */
489 0xffffffffffffffff, /* address counter max */
490 0x2000, /* alignment */
491 0xfc00fc, /* dlim_burstsizes */
492 0x1, /* minimum transfer size */
493 0xffffffffffffffff, /* maximum transfer size */
494 0xffffffffffffffff, /* maximum segment size */
495 1, /* scatter/gather list length */
496 (unsigned int) 1, /* granularity */
497 DDI_DMA_RELAXED_ORDERING /* attribute flags */
498 };
499
500 ddi_dma_lim_t nxge_dma_limits = {
501 (uint_t)0, /* dlim_addr_lo */
502 (uint_t)0xffffffff, /* dlim_addr_hi */
503 (uint_t)0xffffffff, /* dlim_cntr_max */
504 (uint_t)0xfc00fc, /* dlim_burstsizes for 32 and 64 bit xfers */
505 0x1, /* dlim_minxfer */
506 1024 /* dlim_speed */
507 };
508
509 dma_method_t nxge_force_dma = DVMA;
510
511 /*
512 * dma chunk sizes.
513 *
514 * Try to allocate the largest possible size
515  * so that fewer dma chunks need to be managed.
516 */
517 #ifdef NIU_PA_WORKAROUND
518 size_t alloc_sizes [] = {0x2000};
519 #else
520 size_t alloc_sizes [] = {0x1000, 0x2000, 0x4000, 0x8000,
521 0x10000, 0x20000, 0x40000, 0x80000,
522 0x100000, 0x200000, 0x400000, 0x800000,
523 0x1000000, 0x2000000, 0x4000000};
524 #endif
525
526 /*
527 * Translate "dev_t" to a pointer to the associated "dev_info_t".
528 */
529
530 extern void nxge_get_environs(nxge_t *);
531
532 static int
533 nxge_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
534 {
535 p_nxge_t nxgep = NULL;
536 int instance;
537 int status = DDI_SUCCESS;
538 uint8_t portn;
539 nxge_mmac_t *mmac_info;
540
541 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_attach"));
542
543 /*
544 * Get the device instance since we'll need to setup
545 * or retrieve a soft state for this instance.
546 */
547 instance = ddi_get_instance(dip);
548
549 switch (cmd) {
550 case DDI_ATTACH:
551 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_ATTACH"));
552 break;
553
554 case DDI_RESUME:
555 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_RESUME"));
556 nxgep = (p_nxge_t)ddi_get_soft_state(nxge_list, instance);
557 if (nxgep == NULL) {
558 status = DDI_FAILURE;
559 break;
560 }
561 if (nxgep->dip != dip) {
562 status = DDI_FAILURE;
563 break;
564 }
565 if (nxgep->suspended == DDI_PM_SUSPEND) {
566 status = ddi_dev_is_needed(nxgep->dip, 0, 1);
567 } else {
568 status = nxge_resume(nxgep);
569 }
570 goto nxge_attach_exit;
571
572 case DDI_PM_RESUME:
573 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_PM_RESUME"));
574 nxgep = (p_nxge_t)ddi_get_soft_state(nxge_list, instance);
575 if (nxgep == NULL) {
576 status = DDI_FAILURE;
577 break;
578 }
579 if (nxgep->dip != dip) {
580 status = DDI_FAILURE;
581 break;
582 }
583 status = nxge_resume(nxgep);
584 goto nxge_attach_exit;
585
586 default:
587 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing unknown"));
588 status = DDI_FAILURE;
589 goto nxge_attach_exit;
590 }
591
592
593 if (ddi_soft_state_zalloc(nxge_list, instance) == DDI_FAILURE) {
594 status = DDI_FAILURE;
595 goto nxge_attach_exit;
596 }
597
598 nxgep = ddi_get_soft_state(nxge_list, instance);
599 if (nxgep == NULL) {
600 status = NXGE_ERROR;
601 goto nxge_attach_fail2;
602 }
603
604 nxgep->nxge_magic = NXGE_MAGIC;
605
606 nxgep->drv_state = 0;
607 nxgep->dip = dip;
608 nxgep->instance = instance;
609 nxgep->p_dip = ddi_get_parent(dip);
610 nxgep->nxge_debug_level = nxge_debug_level;
611 npi_debug_level = nxge_debug_level;
612
613 /* Are we a guest running in a Hybrid I/O environment? */
614 nxge_get_environs(nxgep);
615
616 status = nxge_map_regs(nxgep);
617
618 if (status != NXGE_OK) {
619 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "nxge_map_regs failed"));
620 goto nxge_attach_fail3;
621 }
622
623 nxge_fm_init(nxgep, &nxge_dev_reg_acc_attr, &nxge_rx_dma_attr);
624
625 /* Create & initialize the per-Neptune data structure */
626 /* (even if we're a guest). */
627 status = nxge_init_common_dev(nxgep);
628 if (status != NXGE_OK) {
629 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
630 "nxge_init_common_dev failed"));
631 goto nxge_attach_fail4;
632 }
633
634 /*
635 * Software workaround: set the replay timer.
636 */
637 if (nxgep->niu_type != N2_NIU) {
638 nxge_set_pci_replay_timeout(nxgep);
639 }
640
641 #if defined(sun4v)
642 /* This is required by nxge_hio_init(), which follows. */
643 if ((status = nxge_hsvc_register(nxgep)) != DDI_SUCCESS)
644 goto nxge_attach_fail4;
645 #endif
646
647 if ((status = nxge_hio_init(nxgep)) != NXGE_OK) {
648 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
649 "nxge_hio_init failed"));
650 goto nxge_attach_fail4;
651 }
652
653 if (nxgep->niu_type == NEPTUNE_2_10GF) {
654 if (nxgep->function_num > 1) {
655 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "Unsupported"
656 " function %d. Only functions 0 and 1 are "
657 "supported for this card.", nxgep->function_num));
658 status = NXGE_ERROR;
659 goto nxge_attach_fail4;
660 }
661 }
662
663 if (isLDOMguest(nxgep)) {
664 /*
665 * Use the function number here.
666 */
667 nxgep->mac.portnum = nxgep->function_num;
668 nxgep->mac.porttype = PORT_TYPE_LOGICAL;
669
670 /* XXX We'll set the MAC address counts to 1 for now. */
671 mmac_info = &nxgep->nxge_mmac_info;
672 mmac_info->num_mmac = 1;
673 mmac_info->naddrfree = 1;
674 } else {
675 portn = NXGE_GET_PORT_NUM(nxgep->function_num);
676 nxgep->mac.portnum = portn;
677 if ((portn == 0) || (portn == 1))
678 nxgep->mac.porttype = PORT_TYPE_XMAC;
679 else
680 nxgep->mac.porttype = PORT_TYPE_BMAC;
681 /*
682 		 * Neptune has 4 ports: the first 2 ports use XMAC (10G MAC)
683 		 * internally, and the other 2 ports use BMAC (1G "Big" MAC).
684 		 * The two types of MACs have different characteristics.
685 */
686 mmac_info = &nxgep->nxge_mmac_info;
687 if (nxgep->function_num < 2) {
688 mmac_info->num_mmac = XMAC_MAX_ALT_ADDR_ENTRY;
689 mmac_info->naddrfree = XMAC_MAX_ALT_ADDR_ENTRY;
690 } else {
691 mmac_info->num_mmac = BMAC_MAX_ALT_ADDR_ENTRY;
692 mmac_info->naddrfree = BMAC_MAX_ALT_ADDR_ENTRY;
693 }
694 }
695 /*
696  * Set up the ndd parameters for this instance.
697 */
698 nxge_init_param(nxgep);
699
700 /*
701 * Setup Register Tracing Buffer.
702 */
703 npi_rtrace_buf_init((rtrace_t *)&npi_rtracebuf);
704
705 /* init stats ptr */
706 nxge_init_statsp(nxgep);
707
708 /*
709 * Copy the vpd info from eeprom to a local data
710 * structure, and then check its validity.
711 */
712 if (!isLDOMguest(nxgep)) {
713 int *regp;
714 uint_t reglen;
715 int rv;
716
717 nxge_vpd_info_get(nxgep);
718
719 /* Find the NIU config handle. */
720 rv = ddi_prop_lookup_int_array(DDI_DEV_T_ANY,
721 ddi_get_parent(nxgep->dip), DDI_PROP_DONTPASS,
722 		    "reg", &regp, &reglen);
723
724 if (rv != DDI_PROP_SUCCESS) {
725 goto nxge_attach_fail5;
726 }
727 /*
728 		 * The address_hi, that is, the first int in the reg
729 		 * property, contains the config handle; we need to mask off
730 		 * bits 28-31, which are OBP-specific info.
731 */
732 nxgep->niu_cfg_hdl = (*regp) & 0xFFFFFFF;
733 ddi_prop_free(regp);
734 }
735
736 /*
737 * Set the defaults for the MTU size.
738 */
739 nxge_hw_id_init(nxgep);
740
741 if (isLDOMguest(nxgep)) {
742 uchar_t *prop_val;
743 uint_t prop_len;
744 uint32_t max_frame_size;
745
746 extern void nxge_get_logical_props(p_nxge_t);
747
748 nxgep->statsp->mac_stats.xcvr_inuse = LOGICAL_XCVR;
749 nxgep->mac.portmode = PORT_LOGICAL;
750 (void) ddi_prop_update_string(DDI_DEV_T_NONE, nxgep->dip,
751 "phy-type", "virtual transceiver");
752
753 nxgep->nports = 1;
754 nxgep->board_ver = 0; /* XXX What? */
755
756 /*
757 * local-mac-address property gives us info on which
758 * specific MAC address the Hybrid resource is associated
759 * with.
760 */
761 if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, nxgep->dip, 0,
762 "local-mac-address", &prop_val,
763 &prop_len) != DDI_PROP_SUCCESS) {
764 goto nxge_attach_fail5;
765 }
766 if (prop_len != ETHERADDRL) {
767 ddi_prop_free(prop_val);
768 goto nxge_attach_fail5;
769 }
770 ether_copy(prop_val, nxgep->hio_mac_addr);
771 ddi_prop_free(prop_val);
772 nxge_get_logical_props(nxgep);
773
774 /*
775 * Enable Jumbo property based on the "max-frame-size"
776 * property value.
777 */
778 max_frame_size = ddi_prop_get_int(DDI_DEV_T_ANY,
779 nxgep->dip, DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
780 "max-frame-size", NXGE_MTU_DEFAULT_MAX);
781 if ((max_frame_size > NXGE_MTU_DEFAULT_MAX) &&
782 (max_frame_size <= TX_JUMBO_MTU)) {
783 nxgep->mac.is_jumbo = B_TRUE;
784 nxgep->mac.maxframesize = (uint16_t)max_frame_size;
785 nxgep->mac.default_mtu = nxgep->mac.maxframesize -
786 NXGE_EHEADER_VLAN_CRC;
787 }
788 } else {
789 status = nxge_xcvr_find(nxgep);
790
791 if (status != NXGE_OK) {
792 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "nxge_attach: "
793 " Couldn't determine card type"
794 " .... exit "));
795 goto nxge_attach_fail5;
796 }
797
798 status = nxge_get_config_properties(nxgep);
799
800 if (status != NXGE_OK) {
801 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
802 "get_hw create failed"));
803 goto nxge_attach_fail;
804 }
805 }
806
807 /*
808 * Setup the Kstats for the driver.
809 */
810 nxge_setup_kstats(nxgep);
811
812 if (!isLDOMguest(nxgep))
813 nxge_setup_param(nxgep);
814
815 status = nxge_setup_system_dma_pages(nxgep);
816 if (status != NXGE_OK) {
817 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "set dma page failed"));
818 goto nxge_attach_fail;
819 }
820
821
822 if (!isLDOMguest(nxgep))
823 nxge_hw_init_niu_common(nxgep);
824
825 status = nxge_setup_mutexes(nxgep);
826 if (status != NXGE_OK) {
827 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "set mutex failed"));
828 goto nxge_attach_fail;
829 }
830
831 #if defined(sun4v)
832 if (isLDOMguest(nxgep)) {
833 /* Find our VR & channel sets. */
834 status = nxge_hio_vr_add(nxgep);
835 if (status != DDI_SUCCESS) {
836 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
837 "nxge_hio_vr_add failed"));
838 (void) hsvc_unregister(&nxgep->niu_hsvc);
839 nxgep->niu_hsvc_available = B_FALSE;
840 goto nxge_attach_fail;
841 }
842 goto nxge_attach_exit;
843 }
844 #endif
845
846 status = nxge_setup_dev(nxgep);
847 if (status != DDI_SUCCESS) {
848 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "set dev failed"));
849 goto nxge_attach_fail;
850 }
851
852 status = nxge_add_intrs(nxgep);
853 if (status != DDI_SUCCESS) {
854 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "add_intr failed"));
855 goto nxge_attach_fail;
856 }
857
858 /* If a guest, register with vio_net instead. */
859 if ((status = nxge_mac_register(nxgep)) != NXGE_OK) {
860 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
861 "unable to register to mac layer (%d)", status));
862 goto nxge_attach_fail;
863 }
864
865 mac_link_update(nxgep->mach, LINK_STATE_UNKNOWN);
866
867 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
868 "registered to mac (instance %d)", instance));
869
870 /* nxge_link_monitor calls xcvr.check_link recursively */
871 (void) nxge_link_monitor(nxgep, LINK_MONITOR_START);
872
873 goto nxge_attach_exit;
874
875 nxge_attach_fail:
876 nxge_unattach(nxgep);
877 goto nxge_attach_fail1;
878
879 nxge_attach_fail5:
880 /*
881 * Tear down the ndd parameters setup.
882 */
883 nxge_destroy_param(nxgep);
884
885 /*
886 * Tear down the kstat setup.
887 */
888 nxge_destroy_kstats(nxgep);
889
890 nxge_attach_fail4:
891 if (nxgep->nxge_hw_p) {
892 nxge_uninit_common_dev(nxgep);
893 nxgep->nxge_hw_p = NULL;
894 }
895
896 nxge_attach_fail3:
897 /*
898 * Unmap the register setup.
899 */
900 nxge_unmap_regs(nxgep);
901
902 nxge_fm_fini(nxgep);
903
904 nxge_attach_fail2:
905 ddi_soft_state_free(nxge_list, nxgep->instance);
906
907 nxge_attach_fail1:
908 if (status != NXGE_OK)
909 status = (NXGE_ERROR | NXGE_DDI_FAILED);
910 nxgep = NULL;
911
912 nxge_attach_exit:
913 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_attach status = 0x%08x",
914 status));
915
916 return (status);
917 }
918
919 static int
920 nxge_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
921 {
922 int status = DDI_SUCCESS;
923 int instance;
924 p_nxge_t nxgep = NULL;
925
926 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_detach"));
927 instance = ddi_get_instance(dip);
928 nxgep = ddi_get_soft_state(nxge_list, instance);
929 if (nxgep == NULL) {
930 status = DDI_FAILURE;
931 goto nxge_detach_exit;
932 }
933
934 switch (cmd) {
935 case DDI_DETACH:
936 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_DETACH"));
937 break;
938
939 case DDI_PM_SUSPEND:
940 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_PM_SUSPEND"));
941 nxgep->suspended = DDI_PM_SUSPEND;
942 nxge_suspend(nxgep);
943 break;
944
945 case DDI_SUSPEND:
946 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_SUSPEND"));
947 if (nxgep->suspended != DDI_PM_SUSPEND) {
948 nxgep->suspended = DDI_SUSPEND;
949 nxge_suspend(nxgep);
950 }
951 break;
952
953 default:
954 status = DDI_FAILURE;
955 }
956
957 if (cmd != DDI_DETACH)
958 goto nxge_detach_exit;
959
960 /*
961 * Stop the xcvr polling.
962 */
963 nxgep->suspended = cmd;
964
965 (void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);
966
967 if (nxgep->mach && (status = mac_unregister(nxgep->mach)) != 0) {
968 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
969 "<== nxge_detach status = 0x%08X", status));
970 return (DDI_FAILURE);
971 }
972
973 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
974 "<== nxge_detach (mac_unregister) status = 0x%08X", status));
975
976 nxge_unattach(nxgep);
977 nxgep = NULL;
978
979 nxge_detach_exit:
980 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_detach status = 0x%08X",
981 status));
982
983 return (status);
984 }
985
986 static void
987 nxge_unattach(p_nxge_t nxgep)
988 {
989 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_unattach"));
990
991 if (nxgep == NULL || nxgep->dev_regs == NULL) {
992 return;
993 }
994
995 nxgep->nxge_magic = 0;
996
997 if (nxgep->nxge_timerid) {
998 nxge_stop_timer(nxgep, nxgep->nxge_timerid);
999 nxgep->nxge_timerid = 0;
1000 }
1001
1002 /*
1003 	 * If this flag is set, it affects the Neptune
1004 	 * hardware only.
1005 */
1006 if ((nxgep->niu_type != N2_NIU) && nxge_peu_reset_enable) {
1007 nxge_niu_peu_reset(nxgep);
1008 }
1009
1010 #if defined(sun4v)
1011 if (isLDOMguest(nxgep)) {
1012 (void) nxge_hio_vr_release(nxgep);
1013 }
1014 #endif
1015
1016 if (nxgep->nxge_hw_p) {
1017 nxge_uninit_common_dev(nxgep);
1018 nxgep->nxge_hw_p = NULL;
1019 }
1020
1021 #if defined(sun4v)
1022 if (nxgep->niu_type == N2_NIU && nxgep->niu_hsvc_available == B_TRUE) {
1023 (void) hsvc_unregister(&nxgep->niu_hsvc);
1024 nxgep->niu_hsvc_available = B_FALSE;
1025 }
1026 #endif
1027 /*
1028 * Stop any further interrupts.
1029 */
1030 nxge_remove_intrs(nxgep);
1031
1032 /*
1033 * Stop the device and free resources.
1034 */
1035 if (!isLDOMguest(nxgep)) {
1036 nxge_destroy_dev(nxgep);
1037 }
1038
1039 /*
1040 * Tear down the ndd parameters setup.
1041 */
1042 nxge_destroy_param(nxgep);
1043
1044 /*
1045 * Tear down the kstat setup.
1046 */
1047 nxge_destroy_kstats(nxgep);
1048
1049 /*
1050 * Free any memory allocated for PHY properties
1051 */
1052 if (nxgep->phy_prop.cnt > 0) {
1053 KMEM_FREE(nxgep->phy_prop.arr,
1054 sizeof (nxge_phy_mdio_val_t) * nxgep->phy_prop.cnt);
1055 nxgep->phy_prop.cnt = 0;
1056 }
1057
1058 /*
1059 * Destroy all mutexes.
1060 */
1061 nxge_destroy_mutexes(nxgep);
1062
1063 /*
1064 * Remove the list of ndd parameters which
1065 * were setup during attach.
1066 */
1067 if (nxgep->dip) {
1068 NXGE_DEBUG_MSG((nxgep, OBP_CTL,
1069 " nxge_unattach: remove all properties"));
1070
1071 (void) ddi_prop_remove_all(nxgep->dip);
1072 }
1073
1074 #if NXGE_PROPERTY
1075 nxge_remove_hard_properties(nxgep);
1076 #endif
1077
1078 /*
1079 * Unmap the register setup.
1080 */
1081 nxge_unmap_regs(nxgep);
1082
1083 nxge_fm_fini(nxgep);
1084
1085 ddi_soft_state_free(nxge_list, nxgep->instance);
1086
1087 NXGE_DEBUG_MSG((NULL, DDI_CTL, "<== nxge_unattach"));
1088 }
1089
1090 #if defined(sun4v)
1091 int
1092 nxge_hsvc_register(nxge_t *nxgep)
1093 {
1094 nxge_status_t status;
1095 int i, j;
1096
1097 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_hsvc_register"));
1098 if (nxgep->niu_type != N2_NIU) {
1099 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_hsvc_register"));
1100 return (DDI_SUCCESS);
1101 }
1102
1103 /*
1104 	 * Currently, the NIU Hypervisor API supports two major versions:
1105 	 * versions 1 and 2.
1106 	 * If the Hypervisor introduces a higher major or minor version,
1107 * please update NIU_MAJOR_HI and NIU_MINOR_HI accordingly.
1108 */
1109 nxgep->niu_hsvc_available = B_FALSE;
1110 bcopy(&niu_hsvc, &nxgep->niu_hsvc,
1111 sizeof (hsvc_info_t));
1112
1113 for (i = NIU_MAJOR_HI; i > 0; i--) {
1114 nxgep->niu_hsvc.hsvc_major = i;
1115 for (j = NIU_MINOR_HI; j >= 0; j--) {
1116 nxgep->niu_hsvc.hsvc_minor = j;
1117 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1118 "nxge_hsvc_register: %s: negotiating "
1119 "hypervisor services revision %d "
1120 "group: 0x%lx major: 0x%lx "
1121 			    "minor: 0x%lx (niu_min_ver 0x%lx)",
1122 nxgep->niu_hsvc.hsvc_modname,
1123 nxgep->niu_hsvc.hsvc_rev,
1124 nxgep->niu_hsvc.hsvc_group,
1125 nxgep->niu_hsvc.hsvc_major,
1126 nxgep->niu_hsvc.hsvc_minor,
1127 nxgep->niu_min_ver));
1128
1129 if ((status = hsvc_register(&nxgep->niu_hsvc,
1130 &nxgep->niu_min_ver)) == 0) {
1131 /* Use the supported minor */
1132 nxgep->niu_hsvc.hsvc_minor = nxgep->niu_min_ver;
1133 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1134 "nxge_hsvc_register: %s: negotiated "
1135 "hypervisor services revision %d "
1136 "group: 0x%lx major: 0x%lx "
1137 "minor: 0x%lx (niu_min_ver 0x%lx)",
1138 nxgep->niu_hsvc.hsvc_modname,
1139 nxgep->niu_hsvc.hsvc_rev,
1140 nxgep->niu_hsvc.hsvc_group,
1141 nxgep->niu_hsvc.hsvc_major,
1142 nxgep->niu_hsvc.hsvc_minor,
1143 nxgep->niu_min_ver));
1144
1145 nxgep->niu_hsvc_available = B_TRUE;
1146 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1147 "<== nxge_hsvc_register: "
1148 "NIU Hypervisor service enabled"));
1149 return (DDI_SUCCESS);
1150 }
1151
1152 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1153 			    "nxge_hsvc_register: %s: negotiation failed - "
1154 			    "will try a lower version number: "
1155 "hypervisor services revision %d "
1156 "group: 0x%lx major: 0x%lx minor: 0x%lx "
1157 "errno: %d",
1158 nxgep->niu_hsvc.hsvc_modname,
1159 nxgep->niu_hsvc.hsvc_rev,
1160 nxgep->niu_hsvc.hsvc_group,
1161 nxgep->niu_hsvc.hsvc_major,
1162 nxgep->niu_hsvc.hsvc_minor, status));
1163 }
1164 }
1165
1166 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1167 "nxge_hsvc_register: %s: cannot negotiate "
1168 "hypervisor services revision %d group: 0x%lx "
1169 "major: 0x%lx minor: 0x%lx errno: %d",
1170 niu_hsvc.hsvc_modname, niu_hsvc.hsvc_rev,
1171 niu_hsvc.hsvc_group, niu_hsvc.hsvc_major,
1172 niu_hsvc.hsvc_minor, status));
1173
1174 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1175 "<== nxge_hsvc_register: Register to NIU Hypervisor failed"));
1176
1177 return (DDI_FAILURE);
1178 }
1179 #endif
1180
1181 static char n2_siu_name[] = "niu";
1182
1183 static nxge_status_t
1184 nxge_map_regs(p_nxge_t nxgep)
1185 {
1186 int ddi_status = DDI_SUCCESS;
1187 p_dev_regs_t dev_regs;
1188 char buf[MAXPATHLEN + 1];
1189 char *devname;
1190 #ifdef NXGE_DEBUG
1191 char *sysname;
1192 #endif
1193 off_t regsize;
1194 nxge_status_t status = NXGE_OK;
1195 #if !defined(_BIG_ENDIAN)
1196 off_t pci_offset;
1197 uint16_t pcie_devctl;
1198 #endif
1199
1200 if (isLDOMguest(nxgep)) {
1201 return (nxge_guest_regs_map(nxgep));
1202 }
1203
1204 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_map_regs"));
1205 nxgep->dev_regs = NULL;
1206 dev_regs = KMEM_ZALLOC(sizeof (dev_regs_t), KM_SLEEP);
1207 dev_regs->nxge_regh = NULL;
1208 dev_regs->nxge_pciregh = NULL;
1209 dev_regs->nxge_msix_regh = NULL;
1210 dev_regs->nxge_vir_regh = NULL;
1211 dev_regs->nxge_vir2_regh = NULL;
1212 nxgep->niu_type = NIU_TYPE_NONE;
1213
1214 devname = ddi_pathname(nxgep->dip, buf);
1215 ASSERT(strlen(devname) > 0);
1216 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1217 "nxge_map_regs: pathname devname %s", devname));
1218
1219 /*
1220 	 * The driver is running on an N2-NIU system if devname is something
1221 	 * like "/niu@80/network@0".
1222 */
1223 if (strstr(devname, n2_siu_name)) {
1224 /* N2/NIU */
1225 nxgep->niu_type = N2_NIU;
1226 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1227 "nxge_map_regs: N2/NIU devname %s", devname));
1228 /*
1229 * Get function number:
1230 * - N2/NIU: "/niu@80/network@0" and "/niu@80/network@1"
1231 */
1232 nxgep->function_num =
1233 (devname[strlen(devname) -1] == '1' ? 1 : 0);
1234 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1235 "nxge_map_regs: N2/NIU function number %d",
1236 nxgep->function_num));
1237 } else {
1238 int *prop_val;
1239 uint_t prop_len;
1240 uint8_t func_num;
1241
1242 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, nxgep->dip,
1243 0, "reg",
1244 &prop_val, &prop_len) != DDI_PROP_SUCCESS) {
1245 NXGE_DEBUG_MSG((nxgep, VPD_CTL,
1246 "Reg property not found"));
1247 ddi_status = DDI_FAILURE;
1248 goto nxge_map_regs_fail0;
1249
1250 } else {
1251 func_num = (prop_val[0] >> 8) & 0x7;
1252 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1253 "Reg property found: fun # %d",
1254 func_num));
1255 nxgep->function_num = func_num;
1256 if (isLDOMguest(nxgep)) {
1257 nxgep->function_num /= 2;
1258 return (NXGE_OK);
1259 }
1260 ddi_prop_free(prop_val);
1261 }
1262 }
1263
1264 switch (nxgep->niu_type) {
1265 default:
1266 		(void) ddi_dev_regsize(nxgep->dip, 0, &regsize);
1267 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1268 "nxge_map_regs: pci config size 0x%x", regsize));
1269
1270 ddi_status = ddi_regs_map_setup(nxgep->dip, 0,
1271 (caddr_t *)&(dev_regs->nxge_pciregp), 0, 0,
1272 &nxge_dev_reg_acc_attr, &dev_regs->nxge_pciregh);
1273 if (ddi_status != DDI_SUCCESS) {
1274 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1275 "ddi_map_regs, nxge bus config regs failed"));
1276 goto nxge_map_regs_fail0;
1277 }
1278 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1279 "nxge_map_reg: PCI config addr 0x%0llx "
1280 " handle 0x%0llx", dev_regs->nxge_pciregp,
1281 dev_regs->nxge_pciregh));
1282 /*
1283 		 * IMPORTANT:
1284 		 * workaround for a bit-swapping bug in HW
1285 		 * which ends up with no-snoop = yes,
1286 		 * resulting in DMA not being synched properly
1287 */
1288 #if !defined(_BIG_ENDIAN)
1289 /* workarounds for x86 systems */
1290 pci_offset = 0x80 + PCIE_DEVCTL;
1291 pcie_devctl = pci_config_get16(dev_regs->nxge_pciregh,
1292 pci_offset);
1293 pcie_devctl &= ~PCIE_DEVCTL_ENABLE_NO_SNOOP;
1294 pcie_devctl |= PCIE_DEVCTL_RO_EN;
1295 pci_config_put16(dev_regs->nxge_pciregh, pci_offset,
1296 pcie_devctl);
1297 #endif
1298
1299 		(void) ddi_dev_regsize(nxgep->dip, 1, &regsize);
1300 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1301 "nxge_map_regs: pio size 0x%x", regsize));
1302 /* set up the device mapped register */
1303 ddi_status = ddi_regs_map_setup(nxgep->dip, 1,
1304 (caddr_t *)&(dev_regs->nxge_regp), 0, 0,
1305 &nxge_dev_reg_acc_attr, &dev_regs->nxge_regh);
1306 if (ddi_status != DDI_SUCCESS) {
1307 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1308 "ddi_map_regs for Neptune global reg failed"));
1309 goto nxge_map_regs_fail1;
1310 }
1311
1312 /* set up the msi/msi-x mapped register */
1313 		(void) ddi_dev_regsize(nxgep->dip, 2, &regsize);
1314 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1315 "nxge_map_regs: msix size 0x%x", regsize));
1316 ddi_status = ddi_regs_map_setup(nxgep->dip, 2,
1317 (caddr_t *)&(dev_regs->nxge_msix_regp), 0, 0,
1318 &nxge_dev_reg_acc_attr, &dev_regs->nxge_msix_regh);
1319 if (ddi_status != DDI_SUCCESS) {
1320 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1321 "ddi_map_regs for msi reg failed"));
1322 goto nxge_map_regs_fail2;
1323 }
1324
1325 /* set up the vio region mapped register */
1326 		(void) ddi_dev_regsize(nxgep->dip, 3, &regsize);
1327 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1328 "nxge_map_regs: vio size 0x%x", regsize));
1329 ddi_status = ddi_regs_map_setup(nxgep->dip, 3,
1330 (caddr_t *)&(dev_regs->nxge_vir_regp), 0, 0,
1331 &nxge_dev_reg_acc_attr, &dev_regs->nxge_vir_regh);
1332
1333 if (ddi_status != DDI_SUCCESS) {
1334 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1335 "ddi_map_regs for nxge vio reg failed"));
1336 goto nxge_map_regs_fail3;
1337 }
1338 nxgep->dev_regs = dev_regs;
1339
1340 NPI_PCI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_pciregh);
1341 NPI_PCI_ADD_HANDLE_SET(nxgep,
1342 (npi_reg_ptr_t)dev_regs->nxge_pciregp);
1343 NPI_MSI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_msix_regh);
1344 NPI_MSI_ADD_HANDLE_SET(nxgep,
1345 (npi_reg_ptr_t)dev_regs->nxge_msix_regp);
1346
1347 NPI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh);
1348 NPI_ADD_HANDLE_SET(nxgep, (npi_reg_ptr_t)dev_regs->nxge_regp);
1349
1350 NPI_REG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh);
1351 NPI_REG_ADD_HANDLE_SET(nxgep,
1352 (npi_reg_ptr_t)dev_regs->nxge_regp);
1353
1354 NPI_VREG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_vir_regh);
1355 NPI_VREG_ADD_HANDLE_SET(nxgep,
1356 (npi_reg_ptr_t)dev_regs->nxge_vir_regp);
1357
1358 break;
1359
1360 case N2_NIU:
1361 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "ddi_map_regs, NIU"));
1362 /*
1363 * Set up the device mapped register (FWARC 2006/556)
1364 * (changed back to 1: reg starts at 1!)
1365 */
1366 		(void) ddi_dev_regsize(nxgep->dip, 1, &regsize);
1367 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1368 "nxge_map_regs: dev size 0x%x", regsize));
1369 ddi_status = ddi_regs_map_setup(nxgep->dip, 1,
1370 (caddr_t *)&(dev_regs->nxge_regp), 0, 0,
1371 &nxge_dev_reg_acc_attr, &dev_regs->nxge_regh);
1372
1373 if (ddi_status != DDI_SUCCESS) {
1374 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1375 "ddi_map_regs for N2/NIU, global reg failed "));
1376 goto nxge_map_regs_fail1;
1377 }
1378
1379 /* set up the first vio region mapped register */
1380 		(void) ddi_dev_regsize(nxgep->dip, 2, &regsize);
1381 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1382 "nxge_map_regs: vio (1) size 0x%x", regsize));
1383 ddi_status = ddi_regs_map_setup(nxgep->dip, 2,
1384 (caddr_t *)&(dev_regs->nxge_vir_regp), 0, 0,
1385 &nxge_dev_reg_acc_attr, &dev_regs->nxge_vir_regh);
1386
1387 if (ddi_status != DDI_SUCCESS) {
1388 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1389 "ddi_map_regs for nxge vio reg failed"));
1390 goto nxge_map_regs_fail2;
1391 }
1392 /* set up the second vio region mapped register */
1393 		(void) ddi_dev_regsize(nxgep->dip, 3, &regsize);
1394 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1395 "nxge_map_regs: vio (3) size 0x%x", regsize));
1396 ddi_status = ddi_regs_map_setup(nxgep->dip, 3,
1397 (caddr_t *)&(dev_regs->nxge_vir2_regp), 0, 0,
1398 &nxge_dev_reg_acc_attr, &dev_regs->nxge_vir2_regh);
1399
1400 if (ddi_status != DDI_SUCCESS) {
1401 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1402 "ddi_map_regs for nxge vio2 reg failed"));
1403 goto nxge_map_regs_fail3;
1404 }
1405 nxgep->dev_regs = dev_regs;
1406
1407 NPI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh);
1408 NPI_ADD_HANDLE_SET(nxgep, (npi_reg_ptr_t)dev_regs->nxge_regp);
1409
1410 NPI_REG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh);
1411 NPI_REG_ADD_HANDLE_SET(nxgep,
1412 (npi_reg_ptr_t)dev_regs->nxge_regp);
1413
1414 NPI_VREG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_vir_regh);
1415 NPI_VREG_ADD_HANDLE_SET(nxgep,
1416 (npi_reg_ptr_t)dev_regs->nxge_vir_regp);
1417
1418 NPI_V2REG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_vir2_regh);
1419 NPI_V2REG_ADD_HANDLE_SET(nxgep,
1420 (npi_reg_ptr_t)dev_regs->nxge_vir2_regp);
1421
1422 break;
1423 }
1424
1425 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "nxge_map_reg: hardware addr 0x%0llx "
1426 " handle 0x%0llx", dev_regs->nxge_regp, dev_regs->nxge_regh));
1427
1428 goto nxge_map_regs_exit;
1429 nxge_map_regs_fail3:
1430 if (dev_regs->nxge_msix_regh) {
1431 ddi_regs_map_free(&dev_regs->nxge_msix_regh);
1432 }
1433 if (dev_regs->nxge_vir_regh) {
1434 		ddi_regs_map_free(&dev_regs->nxge_vir_regh);
1435 }
1436 nxge_map_regs_fail2:
1437 if (dev_regs->nxge_regh) {
1438 ddi_regs_map_free(&dev_regs->nxge_regh);
1439 }
1440 nxge_map_regs_fail1:
1441 if (dev_regs->nxge_pciregh) {
1442 ddi_regs_map_free(&dev_regs->nxge_pciregh);
1443 }
1444 nxge_map_regs_fail0:
1445 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "Freeing register set memory"));
1446 kmem_free(dev_regs, sizeof (dev_regs_t));
1447
1448 nxge_map_regs_exit:
1449 if (ddi_status != DDI_SUCCESS)
1450 status |= (NXGE_ERROR | NXGE_DDI_FAILED);
1451 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_map_regs"));
1452 return (status);
1453 }
1454
1455 static void
1456 nxge_unmap_regs(p_nxge_t nxgep)
1457 {
1458 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_unmap_regs"));
1459
1460 if (isLDOMguest(nxgep)) {
1461 nxge_guest_regs_map_free(nxgep);
1462 return;
1463 }
1464
1465 if (nxgep->dev_regs) {
1466 if (nxgep->dev_regs->nxge_pciregh) {
1467 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1468 "==> nxge_unmap_regs: bus"));
1469 ddi_regs_map_free(&nxgep->dev_regs->nxge_pciregh);
1470 nxgep->dev_regs->nxge_pciregh = NULL;
1471 }
1472 if (nxgep->dev_regs->nxge_regh) {
1473 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1474 "==> nxge_unmap_regs: device registers"));
1475 ddi_regs_map_free(&nxgep->dev_regs->nxge_regh);
1476 nxgep->dev_regs->nxge_regh = NULL;
1477 }
1478 if (nxgep->dev_regs->nxge_msix_regh) {
1479 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1480 "==> nxge_unmap_regs: device interrupts"));
1481 ddi_regs_map_free(&nxgep->dev_regs->nxge_msix_regh);
1482 nxgep->dev_regs->nxge_msix_regh = NULL;
1483 }
1484 if (nxgep->dev_regs->nxge_vir_regh) {
1485 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1486 "==> nxge_unmap_regs: vio region"));
1487 ddi_regs_map_free(&nxgep->dev_regs->nxge_vir_regh);
1488 nxgep->dev_regs->nxge_vir_regh = NULL;
1489 }
1490 if (nxgep->dev_regs->nxge_vir2_regh) {
1491 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1492 "==> nxge_unmap_regs: vio2 region"));
1493 ddi_regs_map_free(&nxgep->dev_regs->nxge_vir2_regh);
1494 nxgep->dev_regs->nxge_vir2_regh = NULL;
1495 }
1496
1497 kmem_free(nxgep->dev_regs, sizeof (dev_regs_t));
1498 nxgep->dev_regs = NULL;
1499 }
1500
1501 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_unmap_regs"));
1502 }
1503
1504 static nxge_status_t
1505 nxge_setup_mutexes(p_nxge_t nxgep)
1506 {
1507 int ddi_status = DDI_SUCCESS;
1508 nxge_status_t status = NXGE_OK;
1509 nxge_classify_t *classify_ptr;
1510 int partition;
1511
1512 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_setup_mutexes"));
1513
1514 /*
1515 	 * Get the interrupt cookie so the mutexes can be
1516 	 * initialized.
1517 */
1518 if (isLDOMguest(nxgep)) {
1519 nxgep->interrupt_cookie = 0;
1520 } else {
1521 ddi_status = ddi_get_iblock_cookie(nxgep->dip, 0,
1522 &nxgep->interrupt_cookie);
1523
1524 if (ddi_status != DDI_SUCCESS) {
1525 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1526 "<== nxge_setup_mutexes: failed 0x%x",
1527 ddi_status));
1528 goto nxge_setup_mutexes_exit;
1529 }
1530 }
1531
1532 cv_init(&nxgep->poll_cv, NULL, CV_DRIVER, NULL);
1533 MUTEX_INIT(&nxgep->poll_lock, NULL,
1534 MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
1535
1536 /*
1537 * Initialize mutexes for this device.
1538 */
1539 MUTEX_INIT(nxgep->genlock, NULL,
1540 MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
1541 MUTEX_INIT(&nxgep->ouraddr_lock, NULL,
1542 MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
1543 MUTEX_INIT(&nxgep->mif_lock, NULL,
1544 MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
1545 MUTEX_INIT(&nxgep->group_lock, NULL,
1546 MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
1547 RW_INIT(&nxgep->filter_lock, NULL,
1548 RW_DRIVER, (void *)nxgep->interrupt_cookie);
1549
1550 classify_ptr = &nxgep->classifier;
1551 /*
1552 	 * FFLP mutexes are never used in interrupt context,
1553 	 * as an fflp operation can take a very long time to
1554 	 * complete and hence is not suitable to invoke from
1555 	 * interrupt handlers.
1556 */
1557 MUTEX_INIT(&classify_ptr->tcam_lock, NULL,
1558 NXGE_MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
1559 if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep)) {
1560 MUTEX_INIT(&classify_ptr->fcram_lock, NULL,
1561 NXGE_MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
1562 for (partition = 0; partition < MAX_PARTITION; partition++) {
1563 MUTEX_INIT(&classify_ptr->hash_lock[partition], NULL,
1564 NXGE_MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
1565 }
1566 }
1567
1568 nxge_setup_mutexes_exit:
1569 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1570 "<== nxge_setup_mutexes status = %x", status));
1571
1572 if (ddi_status != DDI_SUCCESS)
1573 status |= (NXGE_ERROR | NXGE_DDI_FAILED);
1574
1575 return (status);
1576 }
1577
1578 static void
1579 nxge_destroy_mutexes(p_nxge_t nxgep)
1580 {
1581 int partition;
1582 nxge_classify_t *classify_ptr;
1583
1584 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_destroy_mutexes"));
1585 RW_DESTROY(&nxgep->filter_lock);
1586 MUTEX_DESTROY(&nxgep->group_lock);
1587 MUTEX_DESTROY(&nxgep->mif_lock);
1588 MUTEX_DESTROY(&nxgep->ouraddr_lock);
1589 MUTEX_DESTROY(nxgep->genlock);
1590
1591 classify_ptr = &nxgep->classifier;
1592 MUTEX_DESTROY(&classify_ptr->tcam_lock);
1593
1594 /* Destroy all polling resources. */
1595 MUTEX_DESTROY(&nxgep->poll_lock);
1596 cv_destroy(&nxgep->poll_cv);
1597
1598 /* free data structures, based on HW type */
1599 if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep)) {
1600 MUTEX_DESTROY(&classify_ptr->fcram_lock);
1601 for (partition = 0; partition < MAX_PARTITION; partition++) {
1602 MUTEX_DESTROY(&classify_ptr->hash_lock[partition]);
1603 }
1604 }
1605
1606 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_destroy_mutexes"));
1607 }
1608
1609 nxge_status_t
1610 nxge_init(p_nxge_t nxgep)
1611 {
1612 nxge_status_t status = NXGE_OK;
1613
1614 NXGE_DEBUG_MSG((nxgep, STR_CTL, "==> nxge_init"));
1615
1616 if (nxgep->drv_state & STATE_HW_INITIALIZED) {
1617 return (status);
1618 }
1619
1620 /*
1621 * Allocate system memory for the receive/transmit buffer blocks
1622 * and receive/transmit descriptor rings.
1623 */
1624 status = nxge_alloc_mem_pool(nxgep);
1625 if (status != NXGE_OK) {
1626 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "alloc mem failed\n"));
1627 goto nxge_init_fail1;
1628 }
1629
1630 if (!isLDOMguest(nxgep)) {
1631 /*
1632 * Initialize and enable the TXC registers.
1633 * (Globally enable the Tx controller,
1634 * enable the port, configure the dma channel bitmap,
1635 * configure the max burst size).
1636 */
1637 status = nxge_txc_init(nxgep);
1638 if (status != NXGE_OK) {
1639 NXGE_ERROR_MSG((nxgep,
1640 NXGE_ERR_CTL, "init txc failed\n"));
1641 goto nxge_init_fail2;
1642 }
1643 }
1644
1645 /*
1646 * Initialize and enable TXDMA channels.
1647 */
1648 status = nxge_init_txdma_channels(nxgep);
1649 if (status != NXGE_OK) {
1650 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init txdma failed\n"));
1651 goto nxge_init_fail3;
1652 }
1653
1654 /*
1655 * Initialize and enable RXDMA channels.
1656 */
1657 status = nxge_init_rxdma_channels(nxgep);
1658 if (status != NXGE_OK) {
1659 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init rxdma failed\n"));
1660 goto nxge_init_fail4;
1661 }
1662
1663 /*
1664 * The guest domain is now done.
1665 */
1666 if (isLDOMguest(nxgep)) {
1667 nxgep->drv_state |= STATE_HW_INITIALIZED;
1668 goto nxge_init_exit;
1669 }
1670
1671 /*
1672 * Initialize TCAM and FCRAM (Neptune).
1673 */
1674 status = nxge_classify_init(nxgep);
1675 if (status != NXGE_OK) {
1676 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init classify failed\n"));
1677 goto nxge_init_fail5;
1678 }
1679
1680 /*
1681 * Initialize ZCP
1682 */
1683 status = nxge_zcp_init(nxgep);
1684 if (status != NXGE_OK) {
1685 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init ZCP failed\n"));
1686 goto nxge_init_fail5;
1687 }
1688
1689 /*
1690 * Initialize IPP.
1691 */
1692 status = nxge_ipp_init(nxgep);
1693 if (status != NXGE_OK) {
1694 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init IPP failed\n"));
1695 goto nxge_init_fail5;
1696 }
1697
1698 /*
1699 * Initialize the MAC block.
1700 */
1701 status = nxge_mac_init(nxgep);
1702 if (status != NXGE_OK) {
1703 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init MAC failed\n"));
1704 goto nxge_init_fail5;
1705 }
1706
1707 /*
1708 	 * Enable the interrupts for DDI.
1709 */
1710 nxge_intrs_enable(nxgep);
1711
1712 nxgep->drv_state |= STATE_HW_INITIALIZED;
1713
1714 goto nxge_init_exit;
1715
1716 nxge_init_fail5:
1717 nxge_uninit_rxdma_channels(nxgep);
1718 nxge_init_fail4:
1719 nxge_uninit_txdma_channels(nxgep);
1720 nxge_init_fail3:
1721 if (!isLDOMguest(nxgep)) {
1722 (void) nxge_txc_uninit(nxgep);
1723 }
1724 nxge_init_fail2:
1725 nxge_free_mem_pool(nxgep);
1726 nxge_init_fail1:
1727 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1728 "<== nxge_init status (failed) = 0x%08x", status));
1729 return (status);
1730
1731 nxge_init_exit:
1732 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_init status = 0x%08x",
1733 status));
1734 return (status);
1735 }
1736
1737
1738 timeout_id_t
1739 nxge_start_timer(p_nxge_t nxgep, fptrv_t func, int msec)
1740 {
1741 if ((nxgep->suspended == 0) || (nxgep->suspended == DDI_RESUME)) {
1742 return (timeout(func, (caddr_t)nxgep,
1743 drv_usectohz(1000 * msec)));
1744 }
1745 return (NULL);
1746 }
1747
1748 /*ARGSUSED*/
1749 void
1750 nxge_stop_timer(p_nxge_t nxgep, timeout_id_t timerid)
1751 {
1752 if (timerid) {
1753 (void) untimeout(timerid);
1754 }
1755 }
1756
1757 void
1758 nxge_uninit(p_nxge_t nxgep)
1759 {
1760 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_uninit"));
1761
1762 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
1763 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1764 "==> nxge_uninit: not initialized"));
1765 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1766 "<== nxge_uninit"));
1767 return;
1768 }
1769
1770 if (!isLDOMguest(nxgep)) {
1771 /*
1772 * Reset the receive MAC side.
1773 */
1774 (void) nxge_rx_mac_disable(nxgep);
1775
1776 /*
1777 * Drain the IPP.
1778 */
1779 (void) nxge_ipp_drain(nxgep);
1780 }
1781
1782 /* stop timer */
1783 if (nxgep->nxge_timerid) {
1784 nxge_stop_timer(nxgep, nxgep->nxge_timerid);
1785 nxgep->nxge_timerid = 0;
1786 }
1787
1788 (void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);
1789 (void) nxge_intr_hw_disable(nxgep);
1790
1791
1792 /* Disable and soft reset the IPP */
1793 if (!isLDOMguest(nxgep))
1794 (void) nxge_ipp_disable(nxgep);
1795
1796 /* Free classification resources */
1797 (void) nxge_classify_uninit(nxgep);
1798
1799 /*
1800 * Reset the transmit/receive DMA side.
1801 */
1802 (void) nxge_txdma_hw_mode(nxgep, NXGE_DMA_STOP);
1803 (void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_STOP);
1804
1805 nxge_uninit_txdma_channels(nxgep);
1806 nxge_uninit_rxdma_channels(nxgep);
1807
1808 /*
1809 * Reset the transmit MAC side.
1810 */
1811 (void) nxge_tx_mac_disable(nxgep);
1812
1813 nxge_free_mem_pool(nxgep);
1814
1815 /*
1816 * Start the timer if the reset flag is not set.
1817 * If this reset flag is set, the link monitor
1818 	 * will not be started in order to stop further bus
1819 	 * activities coming from this interface.
1820 	 * The driver will start the monitor function
1821 	 * if the interface is initialized again later.
1822 */
1823 if (!nxge_peu_reset_enable) {
1824 (void) nxge_link_monitor(nxgep, LINK_MONITOR_START);
1825 }
1826
1827 nxgep->drv_state &= ~STATE_HW_INITIALIZED;
1828
1829 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_uninit: "
1830 "nxge_mblks_pending %d", nxge_mblks_pending));
1831 }
1832
1833 void
1834 nxge_get64(p_nxge_t nxgep, p_mblk_t mp)
1835 {
1836 uint64_t reg;
1837 uint64_t regdata;
1838 int i, retry;
1839
1840 	bcopy((char *)mp->b_rptr, (char *)&reg, sizeof (uint64_t));
1841 	regdata = 0;
1842 	retry = 1;
1843 
1844 	for (i = 0; i < retry; i++) {
1845 		NXGE_REG_RD64(nxgep->npi_handle, reg, &regdata);
1846 	}
1847 	bcopy((char *)&regdata, (char *)mp->b_rptr, sizeof (uint64_t));
1848 }
1849
1850 void
1851 nxge_put64(p_nxge_t nxgep, p_mblk_t mp)
1852 {
1853 uint64_t reg;
1854 uint64_t buf[2];
1855
1856 bcopy((char *)mp->b_rptr, (char *)&buf[0], 2 * sizeof (uint64_t));
1857 reg = buf[0];
1858
1859 NXGE_NPI_PIO_WRITE64(nxgep->npi_handle, reg, buf[1]);
1860 }
1861
1862 /*ARGSUSED*/
1863 /*VARARGS*/
1864 void
1865 nxge_debug_msg(p_nxge_t nxgep, uint64_t level, char *fmt, ...)
1866 {
1867 char msg_buffer[1048];
1868 char prefix_buffer[32];
1869 int instance;
1870 uint64_t debug_level;
1871 int cmn_level = CE_CONT;
1872 va_list ap;
1873
1874 	if (nxgep && nxgep->nxge_debug_level != nxge_debug_level) {
1875 		/* In case a developer has changed nxge_debug_level, */
1876 		/* pick up the new value for this instance. */
1877 		nxgep->nxge_debug_level = nxge_debug_level;
1878 	}
1879
1880 debug_level = (nxgep == NULL) ? nxge_debug_level :
1881 nxgep->nxge_debug_level;
1882
1883 if ((level & debug_level) ||
1884 (level == NXGE_NOTE) ||
1885 (level == NXGE_ERR_CTL)) {
1886 /* do the msg processing */
1887 MUTEX_ENTER(&nxgedebuglock);
1888
1889 if ((level & NXGE_NOTE)) {
1890 cmn_level = CE_NOTE;
1891 }
1892
1893 if (level & NXGE_ERR_CTL) {
1894 cmn_level = CE_WARN;
1895 }
1896
1897 va_start(ap, fmt);
1898 (void) vsprintf(msg_buffer, fmt, ap);
1899 va_end(ap);
1900 if (nxgep == NULL) {
1901 instance = -1;
1902 (void) sprintf(prefix_buffer, "%s :", "nxge");
1903 } else {
1904 instance = nxgep->instance;
1905 (void) sprintf(prefix_buffer,
1906 "%s%d :", "nxge", instance);
1907 }
1908
1909 MUTEX_EXIT(&nxgedebuglock);
1910 cmn_err(cmn_level, "!%s %s\n",
1911 prefix_buffer, msg_buffer);
1912
1913 }
1914 }
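
/*
 * Example of how the level bits above resolve (a sketch based only on
 * the logic in nxge_debug_msg): a call such as
 *
 *	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rxb"));
 *
 * is printed only when the DMA_CTL bit is set in nxge_debug_level, and
 * goes out via cmn_err(CE_CONT, "!...") -- the leading '!' keeps the
 * message out of the console and in the system log only. NXGE_NOTE and
 * NXGE_ERR_CTL messages bypass the level check and are promoted to
 * CE_NOTE and CE_WARN respectively.
 */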

char *
nxge_dump_packet(char *addr, int size)
{
	uchar_t *ap = (uchar_t *)addr;
	int i;
	static char etherbuf[1024];
	char *cp = etherbuf;
	char digits[] = "0123456789abcdef";

	if (!size)
		size = 60;

	if (size > MAX_DUMP_SZ) {
		/* Dump the leading bytes */
		for (i = 0; i < MAX_DUMP_SZ/2; i++) {
			if (*ap > 0x0f)
				*cp++ = digits[*ap >> 4];
			*cp++ = digits[*ap++ & 0xf];
			*cp++ = ':';
		}
		for (i = 0; i < 20; i++)
			*cp++ = '.';
		/* Dump the last MAX_DUMP_SZ/2 bytes */
		ap = (uchar_t *)(addr + (size - MAX_DUMP_SZ/2));
		for (i = 0; i < MAX_DUMP_SZ/2; i++) {
			if (*ap > 0x0f)
				*cp++ = digits[*ap >> 4];
			*cp++ = digits[*ap++ & 0xf];
			*cp++ = ':';
		}
	} else {
		for (i = 0; i < size; i++) {
			if (*ap > 0x0f)
				*cp++ = digits[*ap >> 4];
			*cp++ = digits[*ap++ & 0xf];
			*cp++ = ':';
		}
	}
	*--cp = 0;
	return (etherbuf);
}
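
/*
 * Example output (derived from the loop above): dumping the three bytes
 * { 0x00, 0x1a, 0xb2 } yields the string "0:1a:b2" -- a zero high
 * nibble is suppressed. Buffers longer than MAX_DUMP_SZ are rendered as
 * the leading MAX_DUMP_SZ/2 bytes, a run of 20 dots, and the trailing
 * MAX_DUMP_SZ/2 bytes.
 */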

#ifdef NXGE_DEBUG
static void
nxge_test_map_regs(p_nxge_t nxgep)
{
	ddi_acc_handle_t cfg_handle;
	p_pci_cfg_t	cfg_ptr;
	ddi_acc_handle_t dev_handle;
	char		*dev_ptr;
	ddi_acc_handle_t pci_config_handle;
	uint32_t	regval;
	int		i;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_test_map_regs"));

	dev_handle = nxgep->dev_regs->nxge_regh;
	dev_ptr = (char *)nxgep->dev_regs->nxge_regp;

	if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep)) {
		cfg_handle = nxgep->dev_regs->nxge_pciregh;
		cfg_ptr = (void *)nxgep->dev_regs->nxge_pciregp;

		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "Neptune PCI regp cfg_ptr 0x%llx", (char *)cfg_ptr));
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "Neptune PCI cfg_ptr vendor id ptr 0x%llx",
		    &cfg_ptr->vendorid));
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "\tvendorid 0x%x devid 0x%x",
		    NXGE_PIO_READ16(cfg_handle, &cfg_ptr->vendorid, 0),
		    NXGE_PIO_READ16(cfg_handle, &cfg_ptr->devid, 0)));
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "PCI BAR: base 0x%x base14 0x%x base18 0x%x "
		    "base1c 0x%x",
		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base, 0),
		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base14, 0),
		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base18, 0),
		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base1c, 0)));
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "\nNeptune PCI BAR: base20 0x%x base24 0x%x "
		    "base28 0x%x base2c 0x%x\n",
		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base20, 0),
		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base24, 0),
		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base28, 0),
		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base2c, 0)));
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "\nNeptune PCI BAR: base30 0x%x\n",
		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base30, 0)));

		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "first 0x%llx second 0x%llx third 0x%llx "
		    "last 0x%llx ",
		    NXGE_PIO_READ64(dev_handle,
		    (uint64_t *)(dev_ptr + 0), 0),
		    NXGE_PIO_READ64(dev_handle,
		    (uint64_t *)(dev_ptr + 8), 0),
		    NXGE_PIO_READ64(dev_handle,
		    (uint64_t *)(dev_ptr + 16), 0),
		    NXGE_PIO_READ64(dev_handle,
		    (uint64_t *)(dev_ptr + 24), 0)));
	}
}

#endif

static void
nxge_suspend(p_nxge_t nxgep)
{
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_suspend"));

	nxge_intrs_disable(nxgep);
	nxge_destroy_dev(nxgep);

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_suspend"));
}

static nxge_status_t
nxge_resume(p_nxge_t nxgep)
{
	nxge_status_t status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_resume"));

	nxgep->suspended = DDI_RESUME;
	(void) nxge_link_monitor(nxgep, LINK_MONITOR_START);
	(void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_START);
	(void) nxge_txdma_hw_mode(nxgep, NXGE_DMA_START);
	(void) nxge_rx_mac_enable(nxgep);
	(void) nxge_tx_mac_enable(nxgep);
	nxge_intrs_enable(nxgep);
	nxgep->suspended = 0;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "<== nxge_resume status = 0x%x", status));
	return (status);
}

static nxge_status_t
nxge_setup_dev(p_nxge_t nxgep)
{
	nxge_status_t status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_setup_dev port %d",
	    nxgep->mac.portnum));

	status = nxge_link_init(nxgep);

	if (fm_check_acc_handle(nxgep->dev_regs->nxge_regh) != DDI_FM_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "port%d Bad register acc handle", nxgep->mac.portnum));
		status = NXGE_ERROR;
	}

	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    " nxge_setup_dev status "
		    "(xcvr init 0x%08x)", status));
		goto nxge_setup_dev_exit;
	}

nxge_setup_dev_exit:
	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "<== nxge_setup_dev port %d status = 0x%08x",
	    nxgep->mac.portnum, status));

	return (status);
}

static void
nxge_destroy_dev(p_nxge_t nxgep)
{
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_destroy_dev"));

	(void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);

	(void) nxge_hw_stop(nxgep);

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_destroy_dev"));
}

static nxge_status_t
nxge_setup_system_dma_pages(p_nxge_t nxgep)
{
	int ddi_status = DDI_SUCCESS;
	uint_t count;
	ddi_dma_cookie_t cookie;
	uint_t iommu_pagesize;
	nxge_status_t status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_setup_system_dma_pages"));
	nxgep->sys_page_sz = ddi_ptob(nxgep->dip, (ulong_t)1);
	if (nxgep->niu_type != N2_NIU) {
		iommu_pagesize = dvma_pagesize(nxgep->dip);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    " nxge_setup_system_dma_pages: page %d (ddi_ptob %d) "
		    " default_block_size %d iommu_pagesize %d",
		    nxgep->sys_page_sz,
		    ddi_ptob(nxgep->dip, (ulong_t)1),
		    nxgep->rx_default_block_size,
		    iommu_pagesize));

		if (iommu_pagesize != 0) {
			if (nxgep->sys_page_sz == iommu_pagesize) {
				if (iommu_pagesize > 0x4000)
					nxgep->sys_page_sz = 0x4000;
			} else {
				if (nxgep->sys_page_sz > iommu_pagesize)
					nxgep->sys_page_sz = iommu_pagesize;
			}
		}
	}
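
	/*
	 * Worked example of the clamping above (illustrative values):
	 * with a system page size of 0x2000 (8K) and an IOMMU page size
	 * of 0x1000 (4K), sys_page_sz is lowered to 0x1000 so that a
	 * receive block never spans IOMMU pages. If both sizes were
	 * 0x10000 (64K), sys_page_sz would instead be capped at 0x4000
	 * (16K).
	 */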
	nxgep->sys_page_mask = ~(nxgep->sys_page_sz - 1);
	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "==> nxge_setup_system_dma_pages: page %d (ddi_ptob %d) "
	    "default_block_size %d page mask %d",
	    nxgep->sys_page_sz,
	    ddi_ptob(nxgep->dip, (ulong_t)1),
	    nxgep->rx_default_block_size,
	    nxgep->sys_page_mask));

	switch (nxgep->sys_page_sz) {
	default:
		nxgep->sys_page_sz = 0x1000;
		nxgep->sys_page_mask = ~(nxgep->sys_page_sz - 1);
		nxgep->rx_default_block_size = 0x1000;
		nxgep->rx_bksize_code = RBR_BKSIZE_4K;
		break;
	case 0x1000:
		nxgep->rx_default_block_size = 0x1000;
		nxgep->rx_bksize_code = RBR_BKSIZE_4K;
		break;
	case 0x2000:
		nxgep->rx_default_block_size = 0x2000;
		nxgep->rx_bksize_code = RBR_BKSIZE_8K;
		break;
	case 0x4000:
		nxgep->rx_default_block_size = 0x4000;
		nxgep->rx_bksize_code = RBR_BKSIZE_16K;
		break;
	case 0x8000:
		nxgep->rx_default_block_size = 0x8000;
		nxgep->rx_bksize_code = RBR_BKSIZE_32K;
		break;
	}

#ifndef USE_RX_BIG_BUF
	nxge_rx_dma_attr.dma_attr_align = nxgep->sys_page_sz;
#else
	nxgep->rx_default_block_size = 0x2000;
	nxgep->rx_bksize_code = RBR_BKSIZE_8K;
#endif
	/*
	 * Get the system DMA burst size.
	 */
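	/*
	 * (A sketch of the trick below, added for clarity: a spare DMA
	 * handle is bound to its own storage purely so that
	 * ddi_dma_burstsizes(9F) can report the burst sizes the platform
	 * grants this device; the binding is torn down as soon as the
	 * value has been read. This is a query, not a data-path mapping.)
	 */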
	ddi_status = ddi_dma_alloc_handle(nxgep->dip, &nxge_tx_dma_attr,
	    DDI_DMA_DONTWAIT, 0,
	    &nxgep->dmasparehandle);
	if (ddi_status != DDI_SUCCESS) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "ddi_dma_alloc_handle: failed "
		    " status 0x%x", ddi_status));
		goto nxge_get_soft_properties_exit;
	}

	ddi_status = ddi_dma_addr_bind_handle(nxgep->dmasparehandle, NULL,
	    (caddr_t)nxgep->dmasparehandle,
	    sizeof (nxgep->dmasparehandle),
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    DDI_DMA_DONTWAIT, 0,
	    &cookie, &count);
	if (ddi_status != DDI_DMA_MAPPED) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "Binding spare handle to find system"
		    " burstsize failed."));
		ddi_status = DDI_FAILURE;
		goto nxge_get_soft_properties_fail1;
	}

	nxgep->sys_burst_sz = ddi_dma_burstsizes(nxgep->dmasparehandle);
	(void) ddi_dma_unbind_handle(nxgep->dmasparehandle);

nxge_get_soft_properties_fail1:
	ddi_dma_free_handle(&nxgep->dmasparehandle);

nxge_get_soft_properties_exit:

	if (ddi_status != DDI_SUCCESS)
		status |= (NXGE_ERROR | NXGE_DDI_FAILED);

	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "<== nxge_setup_system_dma_pages status = 0x%08x", status));
	return (status);
}

static nxge_status_t
nxge_alloc_mem_pool(p_nxge_t nxgep)
{
	nxge_status_t status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_alloc_mem_pool"));

	status = nxge_alloc_rx_mem_pool(nxgep);
	if (status != NXGE_OK) {
		return (NXGE_ERROR);
	}

	status = nxge_alloc_tx_mem_pool(nxgep);
	if (status != NXGE_OK) {
		nxge_free_rx_mem_pool(nxgep);
		return (NXGE_ERROR);
	}

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_alloc_mem_pool"));
	return (NXGE_OK);
}

static void
nxge_free_mem_pool(p_nxge_t nxgep)
{
	NXGE_DEBUG_MSG((nxgep, MEM_CTL, "==> nxge_free_mem_pool"));

	nxge_free_rx_mem_pool(nxgep);
	nxge_free_tx_mem_pool(nxgep);

	NXGE_DEBUG_MSG((nxgep, MEM_CTL, "<== nxge_free_mem_pool"));
}

nxge_status_t
nxge_alloc_rx_mem_pool(p_nxge_t nxgep)
{
	uint32_t rdc_max;
	p_nxge_dma_pt_cfg_t p_all_cfgp;
	p_nxge_hw_pt_cfg_t p_cfgp;
	p_nxge_dma_pool_t dma_poolp;
	p_nxge_dma_common_t *dma_buf_p;
	p_nxge_dma_pool_t dma_cntl_poolp;
	p_nxge_dma_common_t *dma_cntl_p;
	uint32_t *num_chunks; /* per dma */
	nxge_status_t status = NXGE_OK;

	uint32_t nxge_port_rbr_size;
	uint32_t nxge_port_rbr_spare_size;
	uint32_t nxge_port_rcr_size;
	uint32_t rx_cntl_alloc_size;

	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rx_mem_pool"));

	p_all_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;
	p_cfgp = (p_nxge_hw_pt_cfg_t)&p_all_cfgp->hw_config;
	rdc_max = NXGE_MAX_RDCS;

	/*
	 * Allocate memory for the common DMA data structures.
	 */
	dma_poolp = (p_nxge_dma_pool_t)KMEM_ZALLOC(sizeof (nxge_dma_pool_t),
	    KM_SLEEP);
	dma_buf_p = (p_nxge_dma_common_t *)KMEM_ZALLOC(
	    sizeof (p_nxge_dma_common_t) * rdc_max, KM_SLEEP);

	dma_cntl_poolp = (p_nxge_dma_pool_t)
	    KMEM_ZALLOC(sizeof (nxge_dma_pool_t), KM_SLEEP);
	dma_cntl_p = (p_nxge_dma_common_t *)KMEM_ZALLOC(
	    sizeof (p_nxge_dma_common_t) * rdc_max, KM_SLEEP);

	num_chunks = (uint32_t *)KMEM_ZALLOC(
	    sizeof (uint32_t) * rdc_max, KM_SLEEP);

	/*
	 * Assume that each DMA channel will be configured with
	 * the default block size.
	 * RBR block counts are rounded up to a multiple of the
	 * batch count (16).
	 */
	nxge_port_rbr_size = p_all_cfgp->rbr_size;
	nxge_port_rcr_size = p_all_cfgp->rcr_size;

	if (!nxge_port_rbr_size) {
		nxge_port_rbr_size = NXGE_RBR_RBB_DEFAULT;
	}
	if (nxge_port_rbr_size % NXGE_RXDMA_POST_BATCH) {
		nxge_port_rbr_size = (NXGE_RXDMA_POST_BATCH *
		    (nxge_port_rbr_size / NXGE_RXDMA_POST_BATCH + 1));
	}
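
	/*
	 * Worked example of the rounding above: a requested RBR size of
	 * 1000 is not a multiple of NXGE_RXDMA_POST_BATCH (16), so it is
	 * rounded up to 16 * (1000 / 16 + 1) = 16 * 63 = 1008. A value
	 * that is already a multiple of 16 is left untouched.
	 */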

	p_all_cfgp->rbr_size = nxge_port_rbr_size;
	nxge_port_rbr_spare_size = nxge_rbr_spare_size;

	if (nxge_port_rbr_spare_size % NXGE_RXDMA_POST_BATCH) {
		nxge_port_rbr_spare_size = (NXGE_RXDMA_POST_BATCH *
		    (nxge_port_rbr_spare_size / NXGE_RXDMA_POST_BATCH + 1));
	}
	if (nxge_port_rbr_size > RBR_DEFAULT_MAX_BLKS) {
		NXGE_DEBUG_MSG((nxgep, MEM_CTL,
		    "nxge_alloc_rx_mem_pool: RBR size too high %d, "
		    "set to default %d",
		    nxge_port_rbr_size, RBR_DEFAULT_MAX_BLKS));
		nxge_port_rbr_size = RBR_DEFAULT_MAX_BLKS;
	}
	if (nxge_port_rcr_size > RCR_DEFAULT_MAX) {
		NXGE_DEBUG_MSG((nxgep, MEM_CTL,
		    "nxge_alloc_rx_mem_pool: RCR size too high %d, "
		    "set to default %d",
		    nxge_port_rcr_size, RCR_DEFAULT_MAX));
		nxge_port_rcr_size = RCR_DEFAULT_MAX;
	}

	/*
	 * N2/NIU has limitations on the descriptor sizes: contiguous
	 * memory allocation for data buffers is limited to 4M
	 * (contig_mem_alloc), and control buffers must be allocated
	 * with the DDI/DKI memory allocation functions.
	 */
#if defined(sun4v) && defined(NIU_LP_WORKAROUND)
	if (nxgep->niu_type == N2_NIU) {
		nxge_port_rbr_spare_size = 0;
		if ((nxge_port_rbr_size > NXGE_NIU_CONTIG_RBR_MAX) ||
		    (!ISP2(nxge_port_rbr_size))) {
			nxge_port_rbr_size = NXGE_NIU_CONTIG_RBR_MAX;
		}
		if ((nxge_port_rcr_size > NXGE_NIU_CONTIG_RCR_MAX) ||
		    (!ISP2(nxge_port_rcr_size))) {
			nxge_port_rcr_size = NXGE_NIU_CONTIG_RCR_MAX;
		}
	}
#endif

	/*
	 * Addresses of the receive block ring, the receive completion
	 * ring and the mailbox must all be cache-aligned (64 bytes).
	 */
	rx_cntl_alloc_size = nxge_port_rbr_size + nxge_port_rbr_spare_size;
	rx_cntl_alloc_size *= (sizeof (rx_desc_t));
	rx_cntl_alloc_size += (sizeof (rcr_entry_t) * nxge_port_rcr_size);
	rx_cntl_alloc_size += sizeof (rxdma_mailbox_t);
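
	/*
	 * Worked example (illustrative sizes only; the real descriptor
	 * sizes come from the headers): assuming sizeof (rx_desc_t) == 4,
	 * sizeof (rcr_entry_t) == 8 and sizeof (rxdma_mailbox_t) == 64,
	 * an RBR of 4096 entries (no spares) and an RCR of 8192 entries
	 * would need 4096*4 + 8192*8 + 64 = 81984 bytes of control
	 * memory.
	 */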

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_alloc_rx_mem_pool: "
	    "nxge_port_rbr_size = %d nxge_port_rbr_spare_size = %d "
	    "nxge_port_rcr_size = %d "
	    "rx_cntl_alloc_size = %d",
	    nxge_port_rbr_size, nxge_port_rbr_spare_size,
	    nxge_port_rcr_size,
	    rx_cntl_alloc_size));

#if defined(sun4v) && defined(NIU_LP_WORKAROUND)
	if (nxgep->niu_type == N2_NIU) {
		uint32_t rx_buf_alloc_size = (nxgep->rx_default_block_size *
		    (nxge_port_rbr_size + nxge_port_rbr_spare_size));

		if (!ISP2(rx_buf_alloc_size)) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "==> nxge_alloc_rx_mem_pool: "
			    " must be power of 2"));
			status |= (NXGE_ERROR | NXGE_DDI_FAILED);
			goto nxge_alloc_rx_mem_pool_exit;
		}

		if (rx_buf_alloc_size > (1 << 22)) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "==> nxge_alloc_rx_mem_pool: "
			    " limit size to 4M"));
			status |= (NXGE_ERROR | NXGE_DDI_FAILED);
			goto nxge_alloc_rx_mem_pool_exit;
		}

		if (rx_cntl_alloc_size < 0x2000) {
			rx_cntl_alloc_size = 0x2000;
		}
	}
#endif
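
	/*
	 * Example of the N2/NIU check above (hypothetical numbers): an
	 * 8K default block size with a 512-entry RBR gives a buffer
	 * allocation of 8192 * 512 = 4M, which is exactly (1 << 22) and
	 * therefore still acceptable; doubling either factor would
	 * exceed the contig_mem_alloc() limit and fail here.
	 */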
	nxgep->nxge_port_rbr_size = nxge_port_rbr_size;
	nxgep->nxge_port_rcr_size = nxge_port_rcr_size;
	nxgep->nxge_port_rbr_spare_size = nxge_port_rbr_spare_size;
	nxgep->nxge_port_rx_cntl_alloc_size = rx_cntl_alloc_size;

	dma_poolp->ndmas = p_cfgp->max_rdcs;
	dma_poolp->num_chunks = num_chunks;
	dma_poolp->buf_allocated = B_TRUE;
	nxgep->rx_buf_pool_p = dma_poolp;
	dma_poolp->dma_buf_pool_p = dma_buf_p;

	dma_cntl_poolp->ndmas = p_cfgp->max_rdcs;
	dma_cntl_poolp->buf_allocated = B_TRUE;
	nxgep->rx_cntl_pool_p = dma_cntl_poolp;
	dma_cntl_poolp->dma_buf_pool_p = dma_cntl_p;

	/* Allocate the receive rings, too. */
	nxgep->rx_rbr_rings =
	    KMEM_ZALLOC(sizeof (rx_rbr_rings_t), KM_SLEEP);
	nxgep->rx_rbr_rings->rbr_rings =
	    KMEM_ZALLOC(sizeof (p_rx_rbr_ring_t) * rdc_max, KM_SLEEP);
	nxgep->rx_rcr_rings =
	    KMEM_ZALLOC(sizeof (rx_rcr_rings_t), KM_SLEEP);
	nxgep->rx_rcr_rings->rcr_rings =
	    KMEM_ZALLOC(sizeof (p_rx_rcr_ring_t) * rdc_max, KM_SLEEP);
	nxgep->rx_mbox_areas_p =
	    KMEM_ZALLOC(sizeof (rx_mbox_areas_t), KM_SLEEP);
	nxgep->rx_mbox_areas_p->rxmbox_areas =
	    KMEM_ZALLOC(sizeof (p_rx_mbox_t) * rdc_max, KM_SLEEP);

	nxgep->rx_rbr_rings->ndmas = nxgep->rx_rcr_rings->ndmas =
	    p_cfgp->max_rdcs;

	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
	    "<== nxge_alloc_rx_mem_pool:status 0x%08x", status));

nxge_alloc_rx_mem_pool_exit:
	return (status);
}

/*
 * nxge_alloc_rxb
 *
 *	Allocate buffers for an RDC.
 *
 * Arguments:
 * 	nxgep
 * 	channel		The channel to map into our kernel space.
 *
 * Notes:
 *
 * NPI function calls:
 *
 * NXGE function calls:
 *
 * Registers accessed:
 *
 * Context:
 *
 * Taking apart:
 *
 * Open questions:
 *
 */
nxge_status_t
nxge_alloc_rxb(
	p_nxge_t nxgep,
	int channel)
{
	size_t rx_buf_alloc_size;
	nxge_status_t status = NXGE_OK;

	nxge_dma_common_t **data;
	nxge_dma_common_t **control;
	uint32_t *num_chunks;

	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rxb"));

	/*
	 * Allocate memory for the receive buffers and descriptor rings.
	 * Replace these allocation functions with the interface functions
	 * provided by the partition manager if/when they are available.
	 */

	/*
	 * Allocate memory for the receive buffer blocks.
	 */
	rx_buf_alloc_size = (nxgep->rx_default_block_size *
	    (nxgep->nxge_port_rbr_size + nxgep->nxge_port_rbr_spare_size));

	data = &nxgep->rx_buf_pool_p->dma_buf_pool_p[channel];
	num_chunks = &nxgep->rx_buf_pool_p->num_chunks[channel];

	if ((status = nxge_alloc_rx_buf_dma(
	    nxgep, channel, data, rx_buf_alloc_size,
	    nxgep->rx_default_block_size, num_chunks)) != NXGE_OK) {
		return (status);
	}

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_alloc_rxb(): "
	    "dma %d dma_buf_p %llx &dma_buf_p %llx", channel, *data, data));

	/*
	 * Allocate memory for descriptor rings and mailbox.
	 */
	control = &nxgep->rx_cntl_pool_p->dma_buf_pool_p[channel];

	if ((status = nxge_alloc_rx_cntl_dma(
	    nxgep, channel, control, nxgep->nxge_port_rx_cntl_alloc_size))
	    != NXGE_OK) {
		nxge_free_rx_cntl_dma(nxgep, *control);
		(*data)->buf_alloc_state |= BUF_ALLOCATED_WAIT_FREE;
		nxge_free_rx_buf_dma(nxgep, *data, *num_chunks);
		return (status);
	}

	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
	    "<== nxge_alloc_rxb: status 0x%08x", status));

	return (status);
}

void
nxge_free_rxb(
	p_nxge_t nxgep,
	int channel)
{
	nxge_dma_common_t *data;
	nxge_dma_common_t *control;
	uint32_t num_chunks;

	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_free_rxb"));

	data = nxgep->rx_buf_pool_p->dma_buf_pool_p[channel];
	num_chunks = nxgep->rx_buf_pool_p->num_chunks[channel];
	nxge_free_rx_buf_dma(nxgep, data, num_chunks);

	nxgep->rx_buf_pool_p->dma_buf_pool_p[channel] = 0;
	nxgep->rx_buf_pool_p->num_chunks[channel] = 0;

	control = nxgep->rx_cntl_pool_p->dma_buf_pool_p[channel];
	nxge_free_rx_cntl_dma(nxgep, control);

	nxgep->rx_cntl_pool_p->dma_buf_pool_p[channel] = 0;

	KMEM_FREE(data, sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK);
	KMEM_FREE(control, sizeof (nxge_dma_common_t));

	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_free_rxb"));
}

static void
nxge_free_rx_mem_pool(p_nxge_t nxgep)
{
	int rdc_max = NXGE_MAX_RDCS;

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_free_rx_mem_pool"));

	if (!nxgep->rx_buf_pool_p || !nxgep->rx_buf_pool_p->buf_allocated) {
		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
		    "<== nxge_free_rx_mem_pool "
		    "(null rx buf pool or buf not allocated)"));
		return;
	}
	if (!nxgep->rx_cntl_pool_p || !nxgep->rx_cntl_pool_p->buf_allocated) {
		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
		    "<== nxge_free_rx_mem_pool "
		    "(null rx cntl buf pool or cntl buf not allocated)"));
		return;
	}

	KMEM_FREE(nxgep->rx_cntl_pool_p->dma_buf_pool_p,
	    sizeof (p_nxge_dma_common_t) * rdc_max);
	KMEM_FREE(nxgep->rx_cntl_pool_p, sizeof (nxge_dma_pool_t));

	KMEM_FREE(nxgep->rx_buf_pool_p->num_chunks,
	    sizeof (uint32_t) * rdc_max);
	KMEM_FREE(nxgep->rx_buf_pool_p->dma_buf_pool_p,
	    sizeof (p_nxge_dma_common_t) * rdc_max);
	KMEM_FREE(nxgep->rx_buf_pool_p, sizeof (nxge_dma_pool_t));

	nxgep->rx_buf_pool_p = 0;
	nxgep->rx_cntl_pool_p = 0;

	KMEM_FREE(nxgep->rx_rbr_rings->rbr_rings,
	    sizeof (p_rx_rbr_ring_t) * rdc_max);
	KMEM_FREE(nxgep->rx_rbr_rings, sizeof (rx_rbr_rings_t));
	KMEM_FREE(nxgep->rx_rcr_rings->rcr_rings,
	    sizeof (p_rx_rcr_ring_t) * rdc_max);
	KMEM_FREE(nxgep->rx_rcr_rings, sizeof (rx_rcr_rings_t));
	KMEM_FREE(nxgep->rx_mbox_areas_p->rxmbox_areas,
	    sizeof (p_rx_mbox_t) * rdc_max);
	KMEM_FREE(nxgep->rx_mbox_areas_p, sizeof (rx_mbox_areas_t));

	nxgep->rx_rbr_rings = 0;
	nxgep->rx_rcr_rings = 0;
	nxgep->rx_mbox_areas_p = 0;

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_free_rx_mem_pool"));
}

static nxge_status_t
nxge_alloc_rx_buf_dma(p_nxge_t nxgep, uint16_t dma_channel,
    p_nxge_dma_common_t *dmap,
    size_t alloc_size, size_t block_size, uint32_t *num_chunks)
{
	p_nxge_dma_common_t rx_dmap;
	nxge_status_t status = NXGE_OK;
	size_t total_alloc_size;
	size_t allocated = 0;
	int i, size_index, array_size;
	boolean_t use_kmem_alloc = B_FALSE;

	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rx_buf_dma"));

	rx_dmap = (p_nxge_dma_common_t)
	    KMEM_ZALLOC(sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK,
	    KM_SLEEP);

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
	    " alloc_rx_buf_dma rdc %d asize %x bsize %x bbuf %llx ",
	    dma_channel, alloc_size, block_size, dmap));

	total_alloc_size = alloc_size;

#if defined(RX_USE_RECLAIM_POST)
	total_alloc_size = alloc_size + alloc_size/4;
#endif

	i = 0;
	size_index = 0;
	array_size = sizeof (alloc_sizes)/sizeof (size_t);
	while ((size_index < array_size) &&
	    (alloc_sizes[size_index] < alloc_size))
		size_index++;
	if (size_index >= array_size) {
		size_index = array_size - 1;
	}
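
	/*
	 * Sketch of the chunking strategy below, with hypothetical
	 * alloc_sizes[] = { 1M, 2M, 4M, 8M } (the real table is defined
	 * elsewhere in this file): for a 3M request, size_index settles
	 * on the 4M entry. If that allocation fails, size_index is
	 * decremented and the loop retries with two 2M chunks instead,
	 * until either enough memory is gathered, size_index runs out,
	 * or NXGE_DMA_BLOCK chunks have been used.
	 */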

	/* For Neptune, use kmem_alloc() if the kmem flag is set. */
	if (nxgep->niu_type != N2_NIU && nxge_use_kmem_alloc) {
		use_kmem_alloc = B_TRUE;
#if defined(__i386) || defined(__amd64)
		size_index = 0;
#endif
		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
		    "==> nxge_alloc_rx_buf_dma: "
		    "Neptune use kmem_alloc() - size_index %d",
		    size_index));
	}

	while ((allocated < total_alloc_size) &&
	    (size_index >= 0) && (i < NXGE_DMA_BLOCK)) {
		rx_dmap[i].dma_chunk_index = i;
		rx_dmap[i].block_size = block_size;
		rx_dmap[i].alength = alloc_sizes[size_index];
		rx_dmap[i].orig_alength = rx_dmap[i].alength;
		rx_dmap[i].nblocks = alloc_sizes[size_index] / block_size;
		rx_dmap[i].dma_channel = dma_channel;
		rx_dmap[i].contig_alloc_type = B_FALSE;
		rx_dmap[i].kmem_alloc_type = B_FALSE;
		rx_dmap[i].buf_alloc_type = DDI_MEM_ALLOC;

		/*
		 * N2/NIU: data buffers must be contiguous as the driver
		 * needs to call the Hypervisor API to set up
		 * logical pages.
		 */
		if ((nxgep->niu_type == N2_NIU) && (NXGE_DMA_BLOCK == 1)) {
			rx_dmap[i].contig_alloc_type = B_TRUE;
			rx_dmap[i].buf_alloc_type = CONTIG_MEM_ALLOC;
		} else if (use_kmem_alloc) {
			/* For Neptune, use kmem_alloc */
			NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
			    "==> nxge_alloc_rx_buf_dma: "
			    "Neptune use kmem_alloc()"));
			rx_dmap[i].kmem_alloc_type = B_TRUE;
			rx_dmap[i].buf_alloc_type = KMEM_ALLOC;
		}

		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
		    "alloc_rx_buf_dma rdc %d chunk %d bufp %llx size %x "
		    "i %d nblocks %d alength %d",
		    dma_channel, i, &rx_dmap[i], block_size,
		    i, rx_dmap[i].nblocks,
		    rx_dmap[i].alength));
		status = nxge_dma_mem_alloc(nxgep, nxge_force_dma,
		    &nxge_rx_dma_attr,
		    rx_dmap[i].alength,
		    &nxge_dev_buf_dma_acc_attr,
		    DDI_DMA_READ | DDI_DMA_STREAMING,
		    (p_nxge_dma_common_t)(&rx_dmap[i]));
		if (status != NXGE_OK) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "nxge_alloc_rx_buf_dma: Alloc Failed: "
			    "dma %d size_index %d size requested %d",
			    dma_channel,
			    size_index,
			    rx_dmap[i].alength));
			size_index--;
		} else {
			rx_dmap[i].buf_alloc_state = BUF_ALLOCATED;
			NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
			    " nxge_alloc_rx_buf_dma DONE alloc mem: "
			    "dma %d dma_buf_p $%p kaddrp $%p alength %d "
			    "buf_alloc_state %d alloc_type %d",
			    dma_channel,
			    &rx_dmap[i],
			    rx_dmap[i].kaddrp,
			    rx_dmap[i].alength,
			    rx_dmap[i].buf_alloc_state,
			    rx_dmap[i].buf_alloc_type));
			NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
			    " alloc_rx_buf_dma allocated rdc %d "
			    "chunk %d size %x dvma %x bufp %llx kaddrp $%p",
			    dma_channel, i, rx_dmap[i].alength,
			    rx_dmap[i].ioaddr_pp, &rx_dmap[i],
			    rx_dmap[i].kaddrp));
			i++;
			allocated += alloc_sizes[size_index];
		}
	}

	if (allocated < total_alloc_size) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_alloc_rx_buf_dma: not enough for channel %d "
		    "allocated 0x%x requested 0x%x",
		    dma_channel,
		    allocated, total_alloc_size));
		status = NXGE_ERROR;
		goto nxge_alloc_rx_mem_fail1;
	}

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
	    "==> nxge_alloc_rx_buf_dma: Allocated for channel %d "
	    "allocated 0x%x requested 0x%x",
	    dma_channel,
	    allocated, total_alloc_size));

	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
	    " alloc_rx_buf_dma rdc %d allocated %d chunks",
	    dma_channel, i));
	*num_chunks = i;
	*dmap = rx_dmap;

	goto nxge_alloc_rx_mem_exit;

nxge_alloc_rx_mem_fail1:
	KMEM_FREE(rx_dmap, sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK);

nxge_alloc_rx_mem_exit:
	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
	    "<== nxge_alloc_rx_buf_dma status 0x%08x", status));

	return (status);
}

/*ARGSUSED*/
static void
nxge_free_rx_buf_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap,
    uint32_t num_chunks)
{
	int i;

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
	    "==> nxge_free_rx_buf_dma: # of chunks %d", num_chunks));

	if (dmap == 0)
		return;

	for (i = 0; i < num_chunks; i++) {
		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
		    "==> nxge_free_rx_buf_dma: chunk %d dmap 0x%llx",
		    i, dmap));
		nxge_dma_free_rx_data_buf(dmap++);
	}

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_free_rx_buf_dma"));
}

/*ARGSUSED*/
static nxge_status_t
nxge_alloc_rx_cntl_dma(p_nxge_t nxgep, uint16_t dma_channel,
    p_nxge_dma_common_t *dmap, size_t size)
{
	p_nxge_dma_common_t rx_dmap;
	nxge_status_t status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rx_cntl_dma"));

	rx_dmap = (p_nxge_dma_common_t)
	    KMEM_ZALLOC(sizeof (nxge_dma_common_t), KM_SLEEP);

	rx_dmap->contig_alloc_type = B_FALSE;
	rx_dmap->kmem_alloc_type = B_FALSE;

	status = nxge_dma_mem_alloc(nxgep, nxge_force_dma,
	    &nxge_desc_dma_attr,
	    size,
	    &nxge_dev_desc_dma_acc_attr,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    rx_dmap);
	if (status != NXGE_OK) {
		goto nxge_alloc_rx_cntl_dma_fail1;
	}

	*dmap = rx_dmap;
	goto nxge_alloc_rx_cntl_dma_exit;

nxge_alloc_rx_cntl_dma_fail1:
	KMEM_FREE(rx_dmap, sizeof (nxge_dma_common_t));

nxge_alloc_rx_cntl_dma_exit:
	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
	    "<== nxge_alloc_rx_cntl_dma status 0x%08x", status));

	return (status);
}

/*ARGSUSED*/
static void
nxge_free_rx_cntl_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap)
{
	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_free_rx_cntl_dma"));

	if (dmap == 0)
		return;

	nxge_dma_mem_free(dmap);

	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_free_rx_cntl_dma"));
}

typedef struct {
	size_t	tx_size;
	size_t	cr_size;
	size_t	threshhold;
} nxge_tdc_sizes_t;

static
nxge_status_t
nxge_tdc_sizes(
	nxge_t *nxgep,
	nxge_tdc_sizes_t *sizes)
{
	uint32_t threshhold;	/* The bcopy() threshold */
	size_t tx_size;		/* Transmit buffer size */
	size_t cr_size;		/* Completion ring size */

	/*
	 * Assume that each DMA channel will be configured with the
	 * default transmit buffer size for copying transmit data.
	 * (If a packet is bigger than this, it will not be copied.)
	 */
	if (nxgep->niu_type == N2_NIU) {
		threshhold = TX_BCOPY_SIZE;
	} else {
		threshhold = nxge_bcopy_thresh;
	}
	tx_size = nxge_tx_ring_size * threshhold;

	cr_size = nxge_tx_ring_size * sizeof (tx_desc_t);
	cr_size += sizeof (txdma_mailbox_t);

#if defined(sun4v) && defined(NIU_LP_WORKAROUND)
	if (nxgep->niu_type == N2_NIU) {
		if (!ISP2(tx_size)) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "==> nxge_tdc_sizes: Tx size"
			    " must be power of 2"));
			return (NXGE_ERROR);
		}

		if (tx_size > (1 << 22)) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "==> nxge_tdc_sizes: Tx size"
			    " limited to 4M"));
			return (NXGE_ERROR);
		}

		if (cr_size < 0x2000)
			cr_size = 0x2000;
	}
#endif

	sizes->threshhold = threshhold;
	sizes->tx_size = tx_size;
	sizes->cr_size = cr_size;

	return (NXGE_OK);
}
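
/*
 * Worked example for nxge_tdc_sizes() (illustrative values only): with
 * a 1024-entry transmit ring and a 2K bcopy threshold, the copy-buffer
 * pool is 1024 * 2048 = 2M (a power of two, within the 4M N2/NIU
 * limit), and, assuming sizeof (tx_desc_t) == 8, the completion ring
 * needs 1024 * 8 bytes plus the mailbox, which already clears the
 * 0x2000 minimum enforced on N2/NIU.
 */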

/*
 * nxge_alloc_txb
 *
 *	Allocate buffers for a TDC.
 *
 * Arguments:
 * 	nxgep
 * 	channel		The channel to map into our kernel space.
 *
 * Notes:
 *
 * NPI function calls:
 *
 * NXGE function calls:
 *
 * Registers accessed:
 *
 * Context:
 *
 * Taking apart:
 *
 * Open questions:
 *
 */
nxge_status_t
nxge_alloc_txb(
	p_nxge_t nxgep,
	int channel)
{
	nxge_dma_common_t **dma_buf_p;
	nxge_dma_common_t **dma_cntl_p;
	uint32_t *num_chunks;
	nxge_status_t status = NXGE_OK;

	nxge_tdc_sizes_t sizes;

	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_txb"));

	if (nxge_tdc_sizes(nxgep, &sizes) != NXGE_OK)
		return (NXGE_ERROR);

	/*
	 * Allocate memory for transmit buffers and descriptor rings.
	 * Replace these allocation functions with the interface functions
	 * provided by the partition manager Real Soon Now.
	 */
	dma_buf_p = &nxgep->tx_buf_pool_p->dma_buf_pool_p[channel];
	num_chunks = &nxgep->tx_buf_pool_p->num_chunks[channel];

	dma_cntl_p = &nxgep->tx_cntl_pool_p->dma_buf_pool_p[channel];

	/*
	 * Allocate memory for the transmit buffer pool.
	 */
	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
	    "sizes: tx: %ld, cr:%ld, th:%ld",
	    sizes.tx_size, sizes.cr_size, sizes.threshhold));

	*num_chunks = 0;
	status = nxge_alloc_tx_buf_dma(nxgep, channel, dma_buf_p,
	    sizes.tx_size, sizes.threshhold, num_chunks);
	if (status != NXGE_OK) {
		cmn_err(CE_NOTE, "nxge_alloc_tx_buf_dma failed!");
		return (status);
	}

	/*
	 * Allocate memory for descriptor rings and mailbox.
	 */
	status = nxge_alloc_tx_cntl_dma(nxgep, channel, dma_cntl_p,
	    sizes.cr_size);
	if (status != NXGE_OK) {
		nxge_free_tx_buf_dma(nxgep, *dma_buf_p, *num_chunks);
		cmn_err(CE_NOTE, "nxge_alloc_tx_cntl_dma failed!");
		return (status);
	}

	return (NXGE_OK);
}

void
nxge_free_txb(
	p_nxge_t nxgep,
	int channel)
{
	nxge_dma_common_t *data;
	nxge_dma_common_t *control;
	uint32_t num_chunks;

	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_free_txb"));

	data = nxgep->tx_buf_pool_p->dma_buf_pool_p[channel];
	num_chunks = nxgep->tx_buf_pool_p->num_chunks[channel];
	nxge_free_tx_buf_dma(nxgep, data, num_chunks);

	nxgep->tx_buf_pool_p->dma_buf_pool_p[channel] = 0;
	nxgep->tx_buf_pool_p->num_chunks[channel] = 0;

	control = nxgep->tx_cntl_pool_p->dma_buf_pool_p[channel];
	nxge_free_tx_cntl_dma(nxgep, control);

	nxgep->tx_cntl_pool_p->dma_buf_pool_p[channel] = 0;

	KMEM_FREE(data, sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK);
	KMEM_FREE(control, sizeof (nxge_dma_common_t));

	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_free_txb"));
}

/*
 * nxge_alloc_tx_mem_pool
 *
 *	This function allocates all of the per-port TDC control data
 *	structures. The per-channel (TDC) data structures are allocated
 *	when needed.
 *
 * Arguments:
 * 	nxgep
 *
 * Notes:
 *
 * Context:
 *	Any domain
 */
nxge_status_t
nxge_alloc_tx_mem_pool(p_nxge_t nxgep)
{
	nxge_hw_pt_cfg_t *p_cfgp;
	nxge_dma_pool_t *dma_poolp;
	nxge_dma_common_t **dma_buf_p;
	nxge_dma_pool_t *dma_cntl_poolp;
	nxge_dma_common_t **dma_cntl_p;
	uint32_t *num_chunks; /* per dma */
	int tdc_max;

	NXGE_DEBUG_MSG((nxgep, MEM_CTL, "==> nxge_alloc_tx_mem_pool"));

	p_cfgp = &nxgep->pt_config.hw_config;
	tdc_max = NXGE_MAX_TDCS;

	/*
	 * Allocate memory for each transmit DMA channel.
	 */
	dma_poolp = (p_nxge_dma_pool_t)KMEM_ZALLOC(sizeof (nxge_dma_pool_t),
	    KM_SLEEP);
	dma_buf_p = (p_nxge_dma_common_t *)KMEM_ZALLOC(
	    sizeof (p_nxge_dma_common_t) * tdc_max, KM_SLEEP);

	dma_cntl_poolp = (p_nxge_dma_pool_t)
	    KMEM_ZALLOC(sizeof (nxge_dma_pool_t), KM_SLEEP);
	dma_cntl_p = (p_nxge_dma_common_t *)KMEM_ZALLOC(
	    sizeof (p_nxge_dma_common_t) * tdc_max, KM_SLEEP);

	if (nxge_tx_ring_size > TDC_DEFAULT_MAX) {
		NXGE_DEBUG_MSG((nxgep, MEM_CTL,
		    "nxge_alloc_tx_mem_pool: TDC ring size too high %d, "
		    "set to default %d",
		    nxge_tx_ring_size, TDC_DEFAULT_MAX));
		nxge_tx_ring_size = TDC_DEFAULT_MAX;
	}

#if defined(sun4v) && defined(NIU_LP_WORKAROUND)
	/*
	 * N2/NIU has limitations on the descriptor sizes: contiguous
	 * memory allocation for data buffers is limited to 4M
	 * (contig_mem_alloc), and control buffers must be allocated
	 * with the DDI/DKI memory allocation functions. The transmit
	 * ring is limited to 8K (which includes the mailbox).
	 */
	if (nxgep->niu_type == N2_NIU) {
		if ((nxge_tx_ring_size > NXGE_NIU_CONTIG_TX_MAX) ||
		    (!ISP2(nxge_tx_ring_size))) {
			nxge_tx_ring_size = NXGE_NIU_CONTIG_TX_MAX;
		}
	}
#endif

	nxgep->nxge_port_tx_ring_size = nxge_tx_ring_size;

	num_chunks = (uint32_t *)KMEM_ZALLOC(
	    sizeof (uint32_t) * tdc_max, KM_SLEEP);

	dma_poolp->ndmas = p_cfgp->tdc.owned;
	dma_poolp->num_chunks = num_chunks;
	dma_poolp->dma_buf_pool_p = dma_buf_p;
	nxgep->tx_buf_pool_p = dma_poolp;

	dma_poolp->buf_allocated = B_TRUE;

	dma_cntl_poolp->ndmas = p_cfgp->tdc.owned;
	dma_cntl_poolp->dma_buf_pool_p = dma_cntl_p;
	nxgep->tx_cntl_pool_p = dma_cntl_poolp;

	dma_cntl_poolp->buf_allocated = B_TRUE;

	nxgep->tx_rings =
	    KMEM_ZALLOC(sizeof (tx_rings_t), KM_SLEEP);
	nxgep->tx_rings->rings =
	    KMEM_ZALLOC(sizeof (p_tx_ring_t) * tdc_max, KM_SLEEP);
	nxgep->tx_mbox_areas_p =
	    KMEM_ZALLOC(sizeof (tx_mbox_areas_t), KM_SLEEP);
	nxgep->tx_mbox_areas_p->txmbox_areas_p =
	    KMEM_ZALLOC(sizeof (p_tx_mbox_t) * tdc_max, KM_SLEEP);

	nxgep->tx_rings->ndmas = p_cfgp->tdc.owned;

	NXGE_DEBUG_MSG((nxgep, MEM_CTL,
	    "==> nxge_alloc_tx_mem_pool: ndmas %d poolp->ndmas %d",
	    tdc_max, dma_poolp->ndmas));

	return (NXGE_OK);
}

nxge_status_t
nxge_alloc_tx_buf_dma(p_nxge_t nxgep, uint16_t dma_channel,
    p_nxge_dma_common_t *dmap, size_t alloc_size,
    size_t block_size, uint32_t *num_chunks)
{
	p_nxge_dma_common_t tx_dmap;
	nxge_status_t status = NXGE_OK;
	size_t total_alloc_size;
	size_t allocated = 0;
	int i, size_index, array_size;

	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_tx_buf_dma"));

	tx_dmap = (p_nxge_dma_common_t)
	    KMEM_ZALLOC(sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK,
	    KM_SLEEP);

	total_alloc_size = alloc_size;
	i = 0;
	size_index = 0;
	array_size = sizeof (alloc_sizes) / sizeof (size_t);
	while ((size_index < array_size) &&
	    (alloc_sizes[size_index] < alloc_size))
		size_index++;
	if (size_index >= array_size) {
		size_index = array_size - 1;
	}

	while ((allocated < total_alloc_size) &&
	    (size_index >= 0) && (i < NXGE_DMA_BLOCK)) {

		tx_dmap[i].dma_chunk_index = i;
		tx_dmap[i].block_size = block_size;
		tx_dmap[i].alength = alloc_sizes[size_index];
		tx_dmap[i].orig_alength = tx_dmap[i].alength;
		tx_dmap[i].nblocks = alloc_sizes[size_index] / block_size;
		tx_dmap[i].dma_channel = dma_channel;
		tx_dmap[i].contig_alloc_type = B_FALSE;
		tx_dmap[i].kmem_alloc_type = B_FALSE;

		/*
		 * N2/NIU: data buffers must be contiguous as the driver
		 * needs to call the Hypervisor API to set up
		 * logical pages.
		 */
		if ((nxgep->niu_type == N2_NIU) && (NXGE_DMA_BLOCK == 1)) {
			tx_dmap[i].contig_alloc_type = B_TRUE;
		}

		status = nxge_dma_mem_alloc(nxgep, nxge_force_dma,
		    &nxge_tx_dma_attr,
		    tx_dmap[i].alength,
		    &nxge_dev_buf_dma_acc_attr,
		    DDI_DMA_WRITE | DDI_DMA_STREAMING,
		    (p_nxge_dma_common_t)(&tx_dmap[i]));
		if (status != NXGE_OK) {
			size_index--;
		} else {
			i++;
			allocated += alloc_sizes[size_index];
		}
	}

	if (allocated < total_alloc_size) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_alloc_tx_buf_dma: not enough for channel %d: "
		    "allocated 0x%x requested 0x%x",
		    dma_channel,
		    allocated, total_alloc_size));
		status = NXGE_ERROR;
		goto nxge_alloc_tx_mem_fail1;
	}

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
	    "==> nxge_alloc_tx_buf_dma: Allocated for channel %d: "
	    "allocated 0x%x requested 0x%x",
	    dma_channel,
	    allocated, total_alloc_size));

	*num_chunks = i;
	*dmap = tx_dmap;
	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
	    "==> nxge_alloc_tx_buf_dma dmap 0x%016llx num chunks %d",
	    *dmap, i));
	goto nxge_alloc_tx_mem_exit;

nxge_alloc_tx_mem_fail1:
	KMEM_FREE(tx_dmap, sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK);

nxge_alloc_tx_mem_exit:
	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
	    "<== nxge_alloc_tx_buf_dma status 0x%08x", status));

	return (status);
}

/*ARGSUSED*/
static void
nxge_free_tx_buf_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap,
    uint32_t num_chunks)
{
	int i;

	NXGE_DEBUG_MSG((nxgep, MEM_CTL, "==> nxge_free_tx_buf_dma"));

	if (dmap == 0)
		return;

	for (i = 0; i < num_chunks; i++) {
		nxge_dma_mem_free(dmap++);
	}

	NXGE_DEBUG_MSG((nxgep, MEM_CTL, "<== nxge_free_tx_buf_dma"));
}

/*ARGSUSED*/
nxge_status_t
nxge_alloc_tx_cntl_dma(p_nxge_t nxgep, uint16_t dma_channel,
    p_nxge_dma_common_t *dmap, size_t size)
{
	p_nxge_dma_common_t tx_dmap;
	nxge_status_t status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_tx_cntl_dma"));
	tx_dmap = (p_nxge_dma_common_t)
	    KMEM_ZALLOC(sizeof (nxge_dma_common_t), KM_SLEEP);

	tx_dmap->contig_alloc_type = B_FALSE;
	tx_dmap->kmem_alloc_type = B_FALSE;

	status = nxge_dma_mem_alloc(nxgep, nxge_force_dma,
	    &nxge_desc_dma_attr,
	    size,
	    &nxge_dev_desc_dma_acc_attr,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    tx_dmap);
	if (status != NXGE_OK) {
		goto nxge_alloc_tx_cntl_dma_fail1;
	}

	*dmap = tx_dmap;
	goto nxge_alloc_tx_cntl_dma_exit;

nxge_alloc_tx_cntl_dma_fail1:
	KMEM_FREE(tx_dmap, sizeof (nxge_dma_common_t));

nxge_alloc_tx_cntl_dma_exit:
	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
	    "<== nxge_alloc_tx_cntl_dma status 0x%08x", status));

	return (status);
}

/*ARGSUSED*/
static void
nxge_free_tx_cntl_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap)
{
	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_free_tx_cntl_dma"));

	if (dmap == 0)
		return;

	nxge_dma_mem_free(dmap);

	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_free_tx_cntl_dma"));
}

/*
 * nxge_free_tx_mem_pool
 *
 *	This function frees all of the per-port TDC control data
 *	structures. The per-channel (TDC) data structures are freed
 *	when the channel is stopped.
 *
 * Arguments:
 * 	nxgep
 *
 * Notes:
 *
 * Context:
 *	Any domain
 */
static void
nxge_free_tx_mem_pool(p_nxge_t nxgep)
{
	int tdc_max = NXGE_MAX_TDCS;

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_free_tx_mem_pool"));

	if (!nxgep->tx_buf_pool_p || !nxgep->tx_buf_pool_p->buf_allocated) {
		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
		    "<== nxge_free_tx_mem_pool "
		    "(null tx buf pool or buf not allocated)"));
		return;
	}
	if (!nxgep->tx_cntl_pool_p || !nxgep->tx_cntl_pool_p->buf_allocated) {
		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
		    "<== nxge_free_tx_mem_pool "
		    "(null tx cntl buf pool or cntl buf not allocated)"));
		return;
	}

	/* 1. Free the mailboxes. */
	KMEM_FREE(nxgep->tx_mbox_areas_p->txmbox_areas_p,
	    sizeof (p_tx_mbox_t) * tdc_max);
	KMEM_FREE(nxgep->tx_mbox_areas_p, sizeof (tx_mbox_areas_t));

	nxgep->tx_mbox_areas_p = 0;

	/* 2. Free the transmit ring arrays. */
	KMEM_FREE(nxgep->tx_rings->rings,
	    sizeof (p_tx_ring_t) * tdc_max);
	KMEM_FREE(nxgep->tx_rings, sizeof (tx_rings_t));

	nxgep->tx_rings = 0;

	/* 3. Free the completion ring data structures. */
	KMEM_FREE(nxgep->tx_cntl_pool_p->dma_buf_pool_p,
	    sizeof (p_nxge_dma_common_t) * tdc_max);
	KMEM_FREE(nxgep->tx_cntl_pool_p, sizeof (nxge_dma_pool_t));

	nxgep->tx_cntl_pool_p = 0;

	/* 4. Free the data ring data structures. */
	KMEM_FREE(nxgep->tx_buf_pool_p->num_chunks,
	    sizeof (uint32_t) * tdc_max);
	KMEM_FREE(nxgep->tx_buf_pool_p->dma_buf_pool_p,
	    sizeof (p_nxge_dma_common_t) * tdc_max);
	KMEM_FREE(nxgep->tx_buf_pool_p, sizeof (nxge_dma_pool_t));

	nxgep->tx_buf_pool_p = 0;

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_free_tx_mem_pool"));
}

/*ARGSUSED*/
static nxge_status_t
nxge_dma_mem_alloc(p_nxge_t nxgep, dma_method_t method,
    struct ddi_dma_attr *dma_attrp,
    size_t length, ddi_device_acc_attr_t *acc_attr_p, uint_t xfer_flags,
    p_nxge_dma_common_t dma_p)
{
	caddr_t kaddrp;
	int ddi_status = DDI_SUCCESS;
	boolean_t contig_alloc_type;
	boolean_t kmem_alloc_type;

	contig_alloc_type = dma_p->contig_alloc_type;

	if (contig_alloc_type && (nxgep->niu_type != N2_NIU)) {
		/*
		 * contig_alloc_type for contiguous memory is only
		 * allowed for N2/NIU.
		 */
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_dma_mem_alloc: alloc type not allowed (%d)",
		    dma_p->contig_alloc_type));
		return (NXGE_ERROR | NXGE_DDI_FAILED);
	}

	dma_p->dma_handle = NULL;
	dma_p->acc_handle = NULL;
	dma_p->kaddrp = dma_p->last_kaddrp = NULL;
	dma_p->first_ioaddr_pp = dma_p->last_ioaddr_pp = NULL;
	ddi_status = ddi_dma_alloc_handle(nxgep->dip, dma_attrp,
	    DDI_DMA_DONTWAIT, NULL, &dma_p->dma_handle);
	if (ddi_status != DDI_SUCCESS) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_dma_mem_alloc:ddi_dma_alloc_handle failed."));
		return (NXGE_ERROR | NXGE_DDI_FAILED);
	}

	kmem_alloc_type = dma_p->kmem_alloc_type;

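	/*
	 * The switch below selects one of three backing stores (a
	 * summary added for clarity, derived from the cases themselves):
	 *
	 *	contig_alloc_type  kmem_alloc_type  allocator
	 *	B_FALSE            B_FALSE          ddi_dma_mem_alloc()
	 *	B_FALSE            B_TRUE           kmem_alloc()
	 *	B_TRUE             (ignored)        contig_mem_alloc()
	 *	                                    (sun4v N2/NIU only)
	 *
	 * In every case the memory is then bound to the DMA handle and
	 * must resolve to a single cookie.
	 */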
	switch (contig_alloc_type) {
	case B_FALSE:
		switch (kmem_alloc_type) {
		case B_FALSE:
			ddi_status = ddi_dma_mem_alloc(dma_p->dma_handle,
			    length,
			    acc_attr_p,
			    xfer_flags,
			    DDI_DMA_DONTWAIT, 0, &kaddrp, &dma_p->alength,
			    &dma_p->acc_handle);
			if (ddi_status != DDI_SUCCESS) {
				NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
				    "nxge_dma_mem_alloc: "
				    "ddi_dma_mem_alloc failed"));
				ddi_dma_free_handle(&dma_p->dma_handle);
				dma_p->dma_handle = NULL;
				return (NXGE_ERROR | NXGE_DDI_FAILED);
			}
			if (dma_p->alength < length) {
				NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
				    "nxge_dma_mem_alloc:ddi_dma_mem_alloc "
				    "< length."));
				ddi_dma_mem_free(&dma_p->acc_handle);
				ddi_dma_free_handle(&dma_p->dma_handle);
				dma_p->acc_handle = NULL;
				dma_p->dma_handle = NULL;
				return (NXGE_ERROR);
			}

			ddi_status = ddi_dma_addr_bind_handle(dma_p->dma_handle,
			    NULL,
			    kaddrp, dma_p->alength, xfer_flags,
			    DDI_DMA_DONTWAIT,
			    0, &dma_p->dma_cookie, &dma_p->ncookies);
			if (ddi_status != DDI_DMA_MAPPED) {
				NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
				    "nxge_dma_mem_alloc: ddi_dma_addr_bind "
				    "failed "
				    "(status 0x%x ncookies %d.)", ddi_status,
				    dma_p->ncookies));
				if (dma_p->acc_handle) {
					ddi_dma_mem_free(&dma_p->acc_handle);
					dma_p->acc_handle = NULL;
				}
				ddi_dma_free_handle(&dma_p->dma_handle);
				dma_p->dma_handle = NULL;
				return (NXGE_ERROR | NXGE_DDI_FAILED);
			}

			if (dma_p->ncookies != 1) {
				NXGE_DEBUG_MSG((nxgep, DMA_CTL,
				    "nxge_dma_mem_alloc:ddi_dma_addr_bind "
				    "> 1 cookie"
				    "(status 0x%x ncookies %d.)", ddi_status,
				    dma_p->ncookies));
				(void) ddi_dma_unbind_handle(dma_p->dma_handle);
				if (dma_p->acc_handle) {
					ddi_dma_mem_free(&dma_p->acc_handle);
					dma_p->acc_handle = NULL;
				}
				ddi_dma_free_handle(&dma_p->dma_handle);
				dma_p->dma_handle = NULL;
				dma_p->acc_handle = NULL;
				return (NXGE_ERROR);
			}
			break;

		case B_TRUE:
			kaddrp = KMEM_ALLOC(length, KM_NOSLEEP);
			if (kaddrp == NULL) {
				NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
				    "nxge_dma_mem_alloc:ddi_dma_mem_alloc "
				    "kmem alloc failed"));
				return (NXGE_ERROR);
			}

			dma_p->alength = length;
			ddi_status = ddi_dma_addr_bind_handle(dma_p->dma_handle,
			    NULL, kaddrp, dma_p->alength, xfer_flags,
			    DDI_DMA_DONTWAIT, 0,
			    &dma_p->dma_cookie, &dma_p->ncookies);
			if (ddi_status != DDI_DMA_MAPPED) {
				NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
				    "nxge_dma_mem_alloc:ddi_dma_addr_bind: "
				    "(kmem_alloc) failed kaddrp $%p length %d "
				    "(status 0x%x (%d) ncookies %d.)",
				    kaddrp, length,
				    ddi_status, ddi_status, dma_p->ncookies));
				KMEM_FREE(kaddrp, length);
				dma_p->acc_handle = NULL;
				ddi_dma_free_handle(&dma_p->dma_handle);
				dma_p->dma_handle = NULL;
				dma_p->kaddrp = NULL;
				return (NXGE_ERROR | NXGE_DDI_FAILED);
			}

			if (dma_p->ncookies != 1) {
				NXGE_DEBUG_MSG((nxgep, DMA_CTL,
				    "nxge_dma_mem_alloc:ddi_dma_addr_bind "
				    "(kmem_alloc) > 1 cookie"
				    "(status 0x%x ncookies %d.)", ddi_status,
				    dma_p->ncookies));
				(void) ddi_dma_unbind_handle(dma_p->dma_handle);
				KMEM_FREE(kaddrp, length);
				ddi_dma_free_handle(&dma_p->dma_handle);
				dma_p->dma_handle = NULL;
				dma_p->acc_handle = NULL;
				dma_p->kaddrp = NULL;
				return (NXGE_ERROR);
			}

			dma_p->kaddrp = kaddrp;

			NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
			    "nxge_dma_mem_alloc: kmem_alloc dmap $%p "
			    "kaddr $%p alength %d",
			    dma_p,
			    kaddrp,
			    dma_p->alength));
			break;
		}
		break;

#if defined(sun4v) && defined(NIU_LP_WORKAROUND)
	case B_TRUE:
		kaddrp = (caddr_t)contig_mem_alloc(length);
		if (kaddrp == NULL) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "nxge_dma_mem_alloc:contig_mem_alloc failed."));
			ddi_dma_free_handle(&dma_p->dma_handle);
			return (NXGE_ERROR | NXGE_DDI_FAILED);
		}

		dma_p->alength = length;
		ddi_status = ddi_dma_addr_bind_handle(dma_p->dma_handle, NULL,
		    kaddrp, dma_p->alength, xfer_flags, DDI_DMA_DONTWAIT, 0,
		    &dma_p->dma_cookie, &dma_p->ncookies);
		if (ddi_status != DDI_DMA_MAPPED) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "nxge_dma_mem_alloc:ddi_dma_addr_bind failed "
			    "(status 0x%x ncookies %d.)", ddi_status,
			    dma_p->ncookies));

			NXGE_DEBUG_MSG((nxgep, DMA_CTL,
			    "==> nxge_dma_mem_alloc: (not mapped)"
			    "length %lu (0x%x) "
			    "free contig kaddrp $%p "
			    "va_to_pa $%p",
			    length, length,
			    kaddrp,
			    va_to_pa(kaddrp)));

			contig_mem_free((void *)kaddrp, length);
			ddi_dma_free_handle(&dma_p->dma_handle);

			dma_p->dma_handle = NULL;
			dma_p->acc_handle = NULL;
			dma_p->alength = 0;
			dma_p->kaddrp = NULL;

			return (NXGE_ERROR | NXGE_DDI_FAILED);
		}

		if (dma_p->ncookies != 1 ||
		    (dma_p->dma_cookie.dmac_laddress == 0)) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "nxge_dma_mem_alloc:ddi_dma_addr_bind > 1 "
			    "cookie or "
			    "dmac_laddress is NULL $%p size %d "
			    "(status 0x%x ncookies %d.)",
			    dma_p->dma_cookie.dmac_laddress,
			    dma_p->dma_cookie.dmac_size,
			    ddi_status,
			    dma_p->ncookies));

			contig_mem_free((void *)kaddrp, length);
			(void) ddi_dma_unbind_handle(dma_p->dma_handle);
			ddi_dma_free_handle(&dma_p->dma_handle);

			dma_p->alength = 0;
			dma_p->dma_handle = NULL;
			dma_p->acc_handle = NULL;
			dma_p->kaddrp = NULL;

			return (NXGE_ERROR | NXGE_DDI_FAILED);
		}
		break;

#else
	case B_TRUE:
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_dma_mem_alloc: invalid alloc type for !sun4v"));
		return (NXGE_ERROR | NXGE_DDI_FAILED);
#endif
	}

	dma_p->kaddrp = kaddrp;
	dma_p->last_kaddrp = (unsigned char *)kaddrp +
	    dma_p->alength - RXBUF_64B_ALIGNED;
#if defined(__i386)
	dma_p->ioaddr_pp =
	    (unsigned char *)(uint32_t)dma_p->dma_cookie.dmac_laddress;
#else
	dma_p->ioaddr_pp = (unsigned char *)dma_p->dma_cookie.dmac_laddress;
#endif
	dma_p->last_ioaddr_pp =
#if defined(__i386)
	    (unsigned char *)(uint32_t)dma_p->dma_cookie.dmac_laddress +
#else
	    (unsigned char *)dma_p->dma_cookie.dmac_laddress +
#endif
	    dma_p->alength - RXBUF_64B_ALIGNED;

	NPI_DMA_ACC_HANDLE_SET(dma_p, dma_p->acc_handle);

#if defined(sun4v) && defined(NIU_LP_WORKAROUND)
	dma_p->orig_ioaddr_pp =
	    (unsigned char *)dma_p->dma_cookie.dmac_laddress;
	dma_p->orig_alength = length;
	dma_p->orig_kaddrp = kaddrp;
	dma_p->orig_vatopa = (uint64_t)va_to_pa(kaddrp);
#endif

	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_dma_mem_alloc: "
	    "dma buffer allocated: dma_p $%p "
	    "return dmac_laddress from cookie $%p cookie dmac_size %d "
	    "dma_p->ioaddr_p $%p "
	    "dma_p->orig_ioaddr_p $%p "
	    "orig_vatopa $%p "
	    "alength %d (0x%x) "
	    "kaddrp $%p "
	    "length %d (0x%x)",
	    dma_p,
	    dma_p->dma_cookie.dmac_laddress, dma_p->dma_cookie.dmac_size,
	    dma_p->ioaddr_pp,
	    dma_p->orig_ioaddr_pp,
	    dma_p->orig_vatopa,
	    dma_p->alength, dma_p->alength,
	    kaddrp,
	    length, length));

	return (NXGE_OK);
}

static void
nxge_dma_mem_free(p_nxge_dma_common_t dma_p)
{
	if (dma_p->dma_handle != NULL) {
		if (dma_p->ncookies) {
			(void) ddi_dma_unbind_handle(dma_p->dma_handle);
			dma_p->ncookies = 0;
		}
		ddi_dma_free_handle(&dma_p->dma_handle);
		dma_p->dma_handle = NULL;
	}

	if (dma_p->acc_handle != NULL) {
		ddi_dma_mem_free(&dma_p->acc_handle);
		dma_p->acc_handle = NULL;
		NPI_DMA_ACC_HANDLE_SET(dma_p, NULL);
	}

#if defined(sun4v) && defined(NIU_LP_WORKAROUND)
	if (dma_p->contig_alloc_type &&
	    dma_p->orig_kaddrp && dma_p->orig_alength) {
		NXGE_DEBUG_MSG((NULL, DMA_CTL, "nxge_dma_mem_free: "
		    "kaddrp $%p (orig_kaddrp $%p) "
		    "mem type %d "
		    "orig_alength %d "
		    "alength 0x%x (%d)",
		    dma_p->kaddrp,
		    dma_p->orig_kaddrp,
		    dma_p->contig_alloc_type,
		    dma_p->orig_alength,
		    dma_p->alength, dma_p->alength));

		contig_mem_free(dma_p->orig_kaddrp, dma_p->orig_alength);
		dma_p->orig_alength = 0;
		dma_p->orig_kaddrp = NULL;
		dma_p->contig_alloc_type = B_FALSE;
	}
#endif
	dma_p->kaddrp = NULL;
	dma_p->alength = 0;
}

static void
nxge_dma_free_rx_data_buf(p_nxge_dma_common_t dma_p)
{
	uint64_t kaddr;
	uint32_t buf_size;

	NXGE_DEBUG_MSG((NULL, DMA_CTL, "==> nxge_dma_free_rx_data_buf"));

	if (dma_p->dma_handle != NULL) {
		if (dma_p->ncookies) {
			(void) ddi_dma_unbind_handle(dma_p->dma_handle);
			dma_p->ncookies = 0;
		}
		ddi_dma_free_handle(&dma_p->dma_handle);
		dma_p->dma_handle = NULL;
	}

	if (dma_p->acc_handle != NULL) {
		ddi_dma_mem_free(&dma_p->acc_handle);
		dma_p->acc_handle = NULL;
		NPI_DMA_ACC_HANDLE_SET(dma_p, NULL);
	}

	NXGE_DEBUG_MSG((NULL, DMA_CTL,
	    "==> nxge_dma_free_rx_data_buf: dmap $%p buf_alloc_state %d",
	    dma_p,
	    dma_p->buf_alloc_state));

	if (!(dma_p->buf_alloc_state & BUF_ALLOCATED_WAIT_FREE)) {
		NXGE_DEBUG_MSG((NULL, DMA_CTL,
		    "<== nxge_dma_free_rx_data_buf: "
		    "outstanding data buffers"));
		return;
	}
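
	/*
	 * (Added note: receive data buffers can still be loaned upstream
	 * when this routine runs; presumably BUF_ALLOCATED_WAIT_FREE
	 * marks buffers whose final free must wait until the last
	 * outstanding mblk has been returned, which is why we bail out
	 * above when the flag is not set.)
	 */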
3691
3692 #if defined(sun4v) && defined(NIU_LP_WORKAROUND)
3693 if (dma_p->contig_alloc_type &&
3694 dma_p->orig_kaddrp && dma_p->orig_alength) {
3695 NXGE_DEBUG_MSG((NULL, DMA_CTL, "nxge_dma_free_rx_data_buf: "
3696 "kaddrp $%p (orig_kaddrp $%p)"
3697 "mem type %d ",
3698 "orig_alength %d "
3699 "alength 0x%x (%d)",
3700 dma_p->kaddrp,
3701 dma_p->orig_kaddrp,
3702 dma_p->contig_alloc_type,
3703 dma_p->orig_alength,
3704 dma_p->alength, dma_p->alength));
3705
3706 kaddr = (uint64_t)dma_p->orig_kaddrp;
3707 buf_size = dma_p->orig_alength;
3708 nxge_free_buf(CONTIG_MEM_ALLOC, kaddr, buf_size);
3709 dma_p->orig_alength = NULL;
3710 dma_p->orig_kaddrp = NULL;
3711 dma_p->contig_alloc_type = B_FALSE;
3712 dma_p->kaddrp = NULL;
3713 dma_p->alength = NULL;
3714 return;
3715 }
3716 #endif
3717
3718 if (dma_p->kmem_alloc_type) {
3719 NXGE_DEBUG_MSG((NULL, DMA_CTL,
3720 "nxge_dma_free_rx_data_buf: free kmem "
3721 "kaddrp $%p (orig_kaddrp $%p)"
3722 "alloc type %d "
3723 "orig_alength %d "
3724 "alength 0x%x (%d)",
3725 dma_p->kaddrp,
3726 dma_p->orig_kaddrp,
3727 dma_p->kmem_alloc_type,
3728 dma_p->orig_alength,
3729 dma_p->alength, dma_p->alength));
3730 #if defined(__i386)
3731 kaddr = (uint64_t)(uint32_t)dma_p->kaddrp;
3732 #else
3733 kaddr = (uint64_t)dma_p->kaddrp;
3734 #endif
3735 buf_size = dma_p->orig_alength;
3736 NXGE_DEBUG_MSG((NULL, DMA_CTL,
3737 "nxge_dma_free_rx_data_buf: free dmap $%p "
3738 "kaddr $%p buf_size %d",
3739 dma_p,
3740 kaddr, buf_size));
3741 nxge_free_buf(KMEM_ALLOC, kaddr, buf_size);
3742 dma_p->alength = 0;
3743 dma_p->orig_alength = 0;
3744 dma_p->kaddrp = NULL;
3745 dma_p->kmem_alloc_type = B_FALSE;
3746 }
3747
3748 NXGE_DEBUG_MSG((NULL, DMA_CTL, "<== nxge_dma_free_rx_data_buf"));
3749 }
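/*
 * Design note: unlike nxge_dma_mem_free() above, this routine tears down
 * only the DMA and access handles unconditionally. The data buffer itself
 * is released just when BUF_ALLOCATED_WAIT_FREE is set, i.e. when no
 * receive buffers remain loaned upstream to the stack; until then the
 * backing memory is left intact so that in-flight mblks stay valid, and
 * the free completes later once the last outstanding buffer is returned.
 */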
3750
3751 /*
3752 * nxge_m_start() -- start transmitting and receiving.
3753 *
3754 * This function is called by the MAC layer when the first
3755 * stream is opened, to prepare the hardware for sending
3756 * and receiving packets.
3757 */
3758 static int
3759 nxge_m_start(void *arg)
3760 {
3761 p_nxge_t nxgep = (p_nxge_t)arg;
3762
3763 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_start"));
3764
3765 /*
3766 * Are we already started?
3767 */
3768 if (nxgep->nxge_mac_state == NXGE_MAC_STARTED) {
3769 return (0);
3770 }
3771
3772 if (nxge_peu_reset_enable && !nxgep->nxge_link_poll_timerid) {
3773 (void) nxge_link_monitor(nxgep, LINK_MONITOR_START);
3774 }
3775
3776 /*
3777 * Make sure RX MAC is disabled while we initialize.
3778 */
3779 if (!isLDOMguest(nxgep)) {
3780 (void) nxge_rx_mac_disable(nxgep);
3781 }
3782
3783 /*
3784 * Grab the global lock.
3785 */
3786 MUTEX_ENTER(nxgep->genlock);
3787
3788 /*
3789 * Initialize the driver and hardware.
3790 */
3791 if (nxge_init(nxgep) != NXGE_OK) {
3792 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3793 "<== nxge_m_start: initialization failed"));
3794 MUTEX_EXIT(nxgep->genlock);
3795 return (EIO);
3796 }
3797
3798 /*
3799 * Start the timer that checks for system errors and tx hangs.
3800 */
3801 if (!isLDOMguest(nxgep))
3802 nxgep->nxge_timerid = nxge_start_timer(nxgep,
3803 nxge_check_hw_state, NXGE_CHECK_TIMER);
3804 #if defined(sun4v)
3805 else
3806 nxge_hio_start_timer(nxgep);
3807 #endif
3808
3809 nxgep->link_notify = B_TRUE;
3810 nxgep->link_check_count = 0;
3811 nxgep->nxge_mac_state = NXGE_MAC_STARTED;
3812
3813 /*
3814 * Let the global lock go, since we are initialized.
3815 */
3816 MUTEX_EXIT(nxgep->genlock);
3817
3818 /*
3819 * Let the MAC start receiving packets, now that
3820 * we are initialized.
3821 */
3822 if (!isLDOMguest(nxgep)) {
3823 if (nxge_rx_mac_enable(nxgep) != NXGE_OK) {
3824 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3825 "<== nxge_m_start: enable of RX mac failed"));
3826 return (EIO);
3827 }
3828
3829 /*
3830 * Enable hardware interrupts.
3831 */
3832 nxge_intr_hw_enable(nxgep);
3833 }
3834 #if defined(sun4v)
3835 else {
3836 /*
3837 * In guest domain we enable RDCs and their interrupts as
3838 * the last step.
3839 */
3840 if (nxge_hio_rdc_enable(nxgep) != NXGE_OK) {
3841 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3842 "<== nxge_m_start: enable of RDCs failed"));
3843 return (EIO);
3844 }
3845
3846 if (nxge_hio_rdc_intr_arm(nxgep, B_TRUE) != NXGE_OK) {
3847 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3848 "<== nxge_m_start: intrs enable for RDCs failed"));
3849 return (EIO);
3850 }
3851 }
3852 #endif
3853 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_start"));
3854 return (0);
3855 }
3856
3857 static boolean_t
3858 nxge_check_groups_stopped(p_nxge_t nxgep)
3859 {
3860 int i;
3861
3862 for (i = 0; i < NXGE_MAX_RDC_GROUPS; i++) {
3863 if (nxgep->rx_hio_groups[i].started)
3864 return (B_FALSE);
3865 }
3866
3867 return (B_TRUE);
3868 }
3869
3870 /*
3871 * nxge_m_stop(): stop transmitting and receiving.
3872 */
3873 static void
3874 nxge_m_stop(void *arg)
3875 {
3876 p_nxge_t nxgep = (p_nxge_t)arg;
3877 boolean_t groups_stopped;
3878
3879 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_stop"));
3880
3881 /*
3882 * Are the groups stopped?
3883 */
3884 groups_stopped = nxge_check_groups_stopped(nxgep);
3885 ASSERT(groups_stopped == B_TRUE);
3886 if (!groups_stopped) {
3887 cmn_err(CE_WARN, "nxge(%d): groups are not stopped!\n",
3888 nxgep->instance);
3889 return;
3890 }
3891
3892 if (!isLDOMguest(nxgep)) {
3893 /*
3894 * Disable the RX mac.
3895 */
3896 (void) nxge_rx_mac_disable(nxgep);
3897
3898 /*
3899 * Wait for the IPP to drain.
3900 */
3901 (void) nxge_ipp_drain(nxgep);
3902
3903 /*
3904 * Disable hardware interrupts.
3905 */
3906 nxge_intr_hw_disable(nxgep);
3907 }
3908 #if defined(sun4v)
3909 else {
3910 (void) nxge_hio_rdc_intr_arm(nxgep, B_FALSE);
3911 }
3912 #endif
3913
3914 /*
3915 * Grab the global lock.
3916 */
3917 MUTEX_ENTER(nxgep->genlock);
3918
3919 nxgep->nxge_mac_state = NXGE_MAC_STOPPING;
3920 if (nxgep->nxge_timerid) {
3921 nxge_stop_timer(nxgep, nxgep->nxge_timerid);
3922 nxgep->nxge_timerid = 0;
3923 }
3924
3925 /*
3926 * Clean up.
3927 */
3928 nxge_uninit(nxgep);
3929
3930 nxgep->nxge_mac_state = NXGE_MAC_STOPPED;
3931
3932 /*
3933 * Let go of the global lock.
3934 */
3935 MUTEX_EXIT(nxgep->genlock);
3936 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_stop"));
3937 }
3938
3939 static int
3940 nxge_m_multicst(void *arg, boolean_t add, const uint8_t *mca)
3941 {
3942 p_nxge_t nxgep = (p_nxge_t)arg;
3943 struct ether_addr addrp;
3944
3945 NXGE_DEBUG_MSG((nxgep, MAC_CTL,
3946 "==> nxge_m_multicst: add %d", add));
3947
3948 bcopy(mca, (uint8_t *)&addrp, ETHERADDRL);
3949 if (add) {
3950 if (nxge_add_mcast_addr(nxgep, &addrp)) {
3951 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3952 "<== nxge_m_multicst: add multicast failed"));
3953 return (EINVAL);
3954 }
3955 } else {
3956 if (nxge_del_mcast_addr(nxgep, &addrp)) {
3957 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3958 "<== nxge_m_multicst: del multicast failed"));
3959 return (EINVAL);
3960 }
3961 }
3962
3963 NXGE_DEBUG_MSG((nxgep, MAC_CTL, "<== nxge_m_multicst"));
3964
3965 return (0);
3966 }
3967
3968 static int
3969 nxge_m_promisc(void *arg, boolean_t on)
3970 {
3971 p_nxge_t nxgep = (p_nxge_t)arg;
3972
3973 NXGE_DEBUG_MSG((nxgep, MAC_CTL,
3974 "==> nxge_m_promisc: on %d", on));
3975
3976 if (nxge_set_promisc(nxgep, on)) {
3977 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3978 "<== nxge_m_promisc: set promisc failed"));
3979 return (EINVAL);
3980 }
3981
3982 NXGE_DEBUG_MSG((nxgep, MAC_CTL,
3983 "<== nxge_m_promisc: on %d", on));
3984
3985 return (0);
3986 }
3987
3988 static void
3989 nxge_m_ioctl(void *arg, queue_t *wq, mblk_t *mp)
3990 {
3991 p_nxge_t nxgep = (p_nxge_t)arg;
3992 struct iocblk *iocp;
3993 boolean_t need_privilege;
3994 int err;
3995 int cmd;
3996
3997 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_ioctl"));
3998
3999 iocp = (struct iocblk *)mp->b_rptr;
4000 iocp->ioc_error = 0;
4001 need_privilege = B_TRUE;
4002 cmd = iocp->ioc_cmd;
4003 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_ioctl: cmd 0x%08x", cmd));
4004 switch (cmd) {
4005 default:
4006 miocnak(wq, mp, 0, EINVAL);
4007 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_ioctl: invalid"));
4008 return;
4009
4010 case LB_GET_INFO_SIZE:
4011 case LB_GET_INFO:
4012 case LB_GET_MODE:
4013 need_privilege = B_FALSE;
4014 break;
4015 case LB_SET_MODE:
4016 break;
4017
4019 case NXGE_GET_MII:
4020 case NXGE_PUT_MII:
4021 case NXGE_GET64:
4022 case NXGE_PUT64:
4023 case NXGE_GET_TX_RING_SZ:
4024 case NXGE_GET_TX_DESC:
4025 case NXGE_TX_SIDE_RESET:
4026 case NXGE_RX_SIDE_RESET:
4027 case NXGE_GLOBAL_RESET:
4028 case NXGE_RESET_MAC:
4029 case NXGE_TX_REGS_DUMP:
4030 case NXGE_RX_REGS_DUMP:
4031 case NXGE_INT_REGS_DUMP:
4032 case NXGE_VIR_INT_REGS_DUMP:
4033 case NXGE_PUT_TCAM:
4034 case NXGE_GET_TCAM:
4035 case NXGE_RTRACE:
4036 case NXGE_RDUMP:
4037 case NXGE_RX_CLASS:
4038 case NXGE_RX_HASH:
4040 need_privilege = B_FALSE;
4041 break;
4042 case NXGE_INJECT_ERR:
4043 cmn_err(CE_NOTE, "!nxge_m_ioctl: Inject error\n");
4044 nxge_err_inject(nxgep, wq, mp);
4045 break;
4046 }
4047
4048 if (need_privilege) {
4049 err = secpolicy_net_config(iocp->ioc_cr, B_FALSE);
4050 if (err != 0) {
4051 miocnak(wq, mp, 0, err);
4052 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4053 "<== nxge_m_ioctl: no priv"));
4054 return;
4055 }
4056 }
4057
4058 switch (cmd) {
4059
4060 case LB_GET_MODE:
4061 case LB_SET_MODE:
4062 case LB_GET_INFO_SIZE:
4063 case LB_GET_INFO:
4064 nxge_loopback_ioctl(nxgep, wq, mp, iocp);
4065 break;
4066
4067 case NXGE_GET_MII:
4068 case NXGE_PUT_MII:
4069 case NXGE_PUT_TCAM:
4070 case NXGE_GET_TCAM:
4071 case NXGE_GET64:
4072 case NXGE_PUT64:
4073 case NXGE_GET_TX_RING_SZ:
4074 case NXGE_GET_TX_DESC:
4075 case NXGE_TX_SIDE_RESET:
4076 case NXGE_RX_SIDE_RESET:
4077 case NXGE_GLOBAL_RESET:
4078 case NXGE_RESET_MAC:
4079 case NXGE_TX_REGS_DUMP:
4080 case NXGE_RX_REGS_DUMP:
4081 case NXGE_INT_REGS_DUMP:
4082 case NXGE_VIR_INT_REGS_DUMP:
4083 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4084 "==> nxge_m_ioctl: cmd 0x%x", cmd));
4085 nxge_hw_ioctl(nxgep, wq, mp, iocp);
4086 break;
4087 case NXGE_RX_CLASS:
4088 if (nxge_rxclass_ioctl(nxgep, wq, mp->b_cont) < 0)
4089 miocnak(wq, mp, 0, EINVAL);
4090 else
4091 miocack(wq, mp, sizeof (rx_class_cfg_t), 0);
4092 break;
4093 case NXGE_RX_HASH:
4095 if (nxge_rxhash_ioctl(nxgep, wq, mp->b_cont) < 0)
4096 miocnak(wq, mp, 0, EINVAL);
4097 else
4098 miocack(wq, mp, sizeof (cfg_cmd_t), 0);
4099 break;
4100 }
4101
4102 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_ioctl"));
4103 }
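/*
 * For reference, userland reaches nxge_m_ioctl() through a STREAMS I_STR
 * ioctl on the device node. A hedged sketch of a loopback query (the
 * device path and the absence of error handling are illustrative only):
 *
 *	#include <stropts.h>
 *	#include <fcntl.h>
 *	#include <sys/netlb.h>
 *
 *	lb_info_sz_t sz;
 *	struct strioctl sioc;
 *	int fd = open("/dev/nxge0", O_RDWR);
 *
 *	sioc.ic_cmd = LB_GET_INFO_SIZE;	\* no privilege required *\
 *	sioc.ic_timout = 0;
 *	sioc.ic_len = sizeof (sz);
 *	sioc.ic_dp = (char *)&sz;
 *	(void) ioctl(fd, I_STR, &sioc);
 */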
4104
4105 extern void nxge_rx_hw_blank(void *arg, time_t ticks, uint_t count);
4106
4107 void
4108 nxge_mmac_kstat_update(p_nxge_t nxgep, int slot, boolean_t factory)
4109 {
4110 p_nxge_mmac_stats_t mmac_stats;
4111 int i;
4112 nxge_mmac_t *mmac_info;
4113
4114 mmac_info = &nxgep->nxge_mmac_info;
4115
4116 mmac_stats = &nxgep->statsp->mmac_stats;
4117 mmac_stats->mmac_max_cnt = mmac_info->num_mmac;
4118 mmac_stats->mmac_avail_cnt = mmac_info->naddrfree;
4119
4120 for (i = 0; i < ETHERADDRL; i++) {
4121 if (factory) {
4122 mmac_stats->mmac_avail_pool[slot-1].ether_addr_octet[i]
4123 = mmac_info->factory_mac_pool[slot][
4124 (ETHERADDRL-1) - i];
4125 } else {
4126 mmac_stats->mmac_avail_pool[slot-1].ether_addr_octet[i]
4127 = mmac_info->mac_pool[slot].addr[
4128 (ETHERADDRL - 1) - i];
4129 }
4130 }
4131 }
4132
4133 /*
4134 * nxge_altmac_set() -- Set an alternate MAC address
4135 */
4136 static int
4137 nxge_altmac_set(p_nxge_t nxgep, uint8_t *maddr, int slot,
4138 int rdctbl, boolean_t usetbl)
4139 {
4140 uint8_t addrn;
4141 uint8_t portn;
4142 npi_mac_addr_t altmac;
4143 hostinfo_t mac_rdc;
4144 p_nxge_class_pt_cfg_t clscfgp;
4145
4146
4147 altmac.w2 = ((uint16_t)maddr[0] << 8) | ((uint16_t)maddr[1] & 0x0ff);
4148 altmac.w1 = ((uint16_t)maddr[2] << 8) | ((uint16_t)maddr[3] & 0x0ff);
4149 altmac.w0 = ((uint16_t)maddr[4] << 8) | ((uint16_t)maddr[5] & 0x0ff);
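/*
 * For example, a (hypothetical) address 00:14:4f:a8:12:34 is loaded
 * as w2 = 0x0014, w1 = 0x4fa8, w0 = 0x1234.
 */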
4150
4151 portn = nxgep->mac.portnum;
4152 addrn = (uint8_t)slot - 1;
4153
4154 if (npi_mac_altaddr_entry(nxgep->npi_handle, OP_SET,
4155 nxgep->function_num, addrn, &altmac) != NPI_SUCCESS)
4156 return (EIO);
4157
4158 /*
4159 * Set the rdc table number for the host info entry
4160 * for this mac address slot.
4161 */
4162 clscfgp = (p_nxge_class_pt_cfg_t)&nxgep->class_config;
4163 mac_rdc.value = 0;
4164 if (usetbl)
4165 mac_rdc.bits.w0.rdc_tbl_num = rdctbl;
4166 else
4167 mac_rdc.bits.w0.rdc_tbl_num =
4168 clscfgp->mac_host_info[addrn].rdctbl;
4169 mac_rdc.bits.w0.mac_pref = clscfgp->mac_host_info[addrn].mpr_npr;
4170
4171 if (npi_mac_hostinfo_entry(nxgep->npi_handle, OP_SET,
4172 nxgep->function_num, addrn, &mac_rdc) != NPI_SUCCESS) {
4173 return (EIO);
4174 }
4175
4176 /*
4177 * Enable comparison with the alternate MAC address.
4178 * While the first alternate addr is enabled by bit 1 of register
4179 * BMAC_ALTAD_CMPEN, it is enabled by bit 0 of register
4180 * XMAC_ADDR_CMPEN, so slot needs to be converted to addrn
4181 * accordingly before calling npi_mac_altaddr_enable.
4182 */
4183 if (portn == XMAC_PORT_0 || portn == XMAC_PORT_1)
4184 addrn = (uint8_t)slot - 1;
4185 else
4186 addrn = (uint8_t)slot;
4187
4188 if (npi_mac_altaddr_enable(nxgep->npi_handle,
4189 nxgep->function_num, addrn) != NPI_SUCCESS) {
4190 return (EIO);
4191 }
4192
4193 return (0);
4194 }
4195
4196 /*
4197 * nxge_m_mmac_add_g() - find an unused address slot, set the address
4198 * value to the one specified, enable the port to start filtering on
4199 * the new MAC address. Returns 0 on success.
4200 */
4201 int
4202 nxge_m_mmac_add_g(void *arg, const uint8_t *maddr, int rdctbl,
4203 boolean_t usetbl)
4204 {
4205 p_nxge_t nxgep = arg;
4206 int slot;
4207 nxge_mmac_t *mmac_info;
4208 int err;
4209 nxge_status_t status;
4210
4211 mutex_enter(nxgep->genlock);
4212
4213 /*
4214 * Make sure that nxge is initialized, in case _start() has
4215 * not been called.
4216 */
4217 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
4218 status = nxge_init(nxgep);
4219 if (status != NXGE_OK) {
4220 mutex_exit(nxgep->genlock);
4221 return (ENXIO);
4222 }
4223 }
4224
4225 mmac_info = &nxgep->nxge_mmac_info;
4226 if (mmac_info->naddrfree == 0) {
4227 mutex_exit(nxgep->genlock);
4228 return (ENOSPC);
4229 }
4230
4231 /*
4232 * Search for the first available slot. Because naddrfree
4233 * is not zero, we are guaranteed to find one.
4234 * Each of the first two ports of Neptune has 16 alternate
4235 * MAC slots but only the first 7 (of 15) slots have assigned factory
4236 * MAC addresses. We first search among the slots without bundled
4237 * factory MACs. If we fail to find one in that range, then we
4238 * search the slots with bundled factory MACs. A factory MAC
4239 * goes unused while its slot holds a user MAC address, but the
4240 * slot can revert to its factory MAC after nxge_m_mmac_remove
4241 * and nxge_m_mmac_reserve are called.
4242 */
4243 for (slot = 0; slot <= mmac_info->num_mmac; slot++) {
4244 if (!(mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED))
4245 break;
4246 }
4247
4248 ASSERT(slot <= mmac_info->num_mmac);
4249
4250 if ((err = nxge_altmac_set(nxgep, (uint8_t *)maddr, slot, rdctbl,
4251 usetbl)) != 0) {
4252 mutex_exit(nxgep->genlock);
4253 return (err);
4254 }
4255
4256 bcopy(maddr, mmac_info->mac_pool[slot].addr, ETHERADDRL);
4257 mmac_info->mac_pool[slot].flags |= MMAC_SLOT_USED;
4258 mmac_info->mac_pool[slot].flags &= ~MMAC_VENDOR_ADDR;
4259 mmac_info->naddrfree--;
4260 nxge_mmac_kstat_update(nxgep, slot, B_FALSE);
4261
4262 mutex_exit(nxgep->genlock);
4263 return (0);
4264 }
4265
4266 /*
4267 * Remove the specified mac address and update the HW not to filter
4268 * the mac address anymore.
4269 */
4270 int
4271 nxge_m_mmac_remove(void *arg, int slot)
4272 {
4273 p_nxge_t nxgep = arg;
4274 nxge_mmac_t *mmac_info;
4275 uint8_t addrn;
4276 uint8_t portn;
4277 int err = 0;
4278 nxge_status_t status;
4279
4280 mutex_enter(nxgep->genlock);
4281
4282 /*
4283 * Make sure that nxge is initialized, in case _start() has
4284 * not been called.
4285 */
4286 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
4287 status = nxge_init(nxgep);
4288 if (status != NXGE_OK) {
4289 mutex_exit(nxgep->genlock);
4290 return (ENXIO);
4291 }
4292 }
4293
4294 mmac_info = &nxgep->nxge_mmac_info;
4295 if (slot < 1 || slot > mmac_info->num_mmac) {
4296 mutex_exit(nxgep->genlock);
4297 return (EINVAL);
4298 }
4299
4300 portn = nxgep->mac.portnum;
4301 if (portn == XMAC_PORT_0 || portn == XMAC_PORT_1)
4302 addrn = (uint8_t)slot - 1;
4303 else
4304 addrn = (uint8_t)slot;
4305
4306 if (mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED) {
4307 if (npi_mac_altaddr_disable(nxgep->npi_handle, portn, addrn)
4308 == NPI_SUCCESS) {
4309 mmac_info->naddrfree++;
4310 mmac_info->mac_pool[slot].flags &= ~MMAC_SLOT_USED;
4311 /*
4312 * Regardless of whether the MAC we just stopped filtering
4313 * is a user addr or a factory addr, we must set
4314 * the MMAC_VENDOR_ADDR flag if this slot has an
4315 * associated factory MAC to indicate that a factory
4316 * MAC is available.
4317 */
4318 if (slot <= mmac_info->num_factory_mmac) {
4319 mmac_info->mac_pool[slot].flags
4320 |= MMAC_VENDOR_ADDR;
4321 }
4322 /*
4323 * Clear mac_pool[slot].addr so that kstat shows a zeroed
4324 * alternate MAC address when the slot is not in use.
4325 * (But nxge_m_mmac_get returns the factory MAC even
4326 * when the slot is not used!)
4327 */
4328 bzero(mmac_info->mac_pool[slot].addr, ETHERADDRL);
4329 nxge_mmac_kstat_update(nxgep, slot, B_FALSE);
4330 } else {
4331 err = EIO;
4332 }
4333 } else {
4334 err = EINVAL;
4335 }
4336
4337 mutex_exit(nxgep->genlock);
4338 return (err);
4339 }
4340
4341 /*
4342 * The callback to query all the factory addresses. naddr must be the same as
4343 * the number of factory addresses (returned by MAC_CAPAB_MULTIFACTADDR), and
4344 * mcm_addr is the space allocated to hold all the addresses, whose size is
4345 * naddr * MAXMACADDRLEN.
4346 */
4347 static void
4348 nxge_m_getfactaddr(void *arg, uint_t naddr, uint8_t *addr)
4349 {
4350 nxge_t *nxgep = arg;
4351 nxge_mmac_t *mmac_info;
4352 int i;
4353
4354 mutex_enter(nxgep->genlock);
4355
4356 mmac_info = &nxgep->nxge_mmac_info;
4357 ASSERT(naddr == mmac_info->num_factory_mmac);
4358
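/*
 * The factory pool is indexed by 1-based slot number (slot 0 appears
 * to be reserved for the primary address), hence the i + 1 below.
 */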
4359 for (i = 0; i < naddr; i++) {
4360 bcopy(mmac_info->factory_mac_pool[i + 1],
4361 addr + i * MAXMACADDRLEN, ETHERADDRL);
4362 }
4363
4364 mutex_exit(nxgep->genlock);
4365 }
4366
4368 static boolean_t
4369 nxge_m_getcapab(void *arg, mac_capab_t cap, void *cap_data)
4370 {
4371 nxge_t *nxgep = arg;
4372 uint32_t *txflags = cap_data;
4373
4374 switch (cap) {
4375 case MAC_CAPAB_HCKSUM:
4376 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4377 "==> nxge_m_getcapab: checksum %d", nxge_cksum_offload));
4378 if (nxge_cksum_offload <= 1) {
4379 *txflags = HCKSUM_INET_PARTIAL;
4380 }
4381 break;
4382
4383 case MAC_CAPAB_MULTIFACTADDR: {
4384 mac_capab_multifactaddr_t *mfacp = cap_data;
4385
4386 if (!isLDOMguest(nxgep)) {
4387 mutex_enter(nxgep->genlock);
4388 mfacp->mcm_naddr =
4389 nxgep->nxge_mmac_info.num_factory_mmac;
4390 mfacp->mcm_getaddr = nxge_m_getfactaddr;
4391 mutex_exit(nxgep->genlock);
4392 }
4393 break;
4394 }
4395
4396 case MAC_CAPAB_LSO: {
4397 mac_capab_lso_t *cap_lso = cap_data;
4398
4399 if (nxgep->soft_lso_enable) {
4400 if (nxge_cksum_offload <= 1) {
4401 cap_lso->lso_flags = LSO_TX_BASIC_TCP_IPV4;
4402 if (nxge_lso_max > NXGE_LSO_MAXLEN) {
4403 nxge_lso_max = NXGE_LSO_MAXLEN;
4404 }
4405 cap_lso->lso_basic_tcp_ipv4.lso_max =
4406 nxge_lso_max;
4407 }
4408 break;
4409 } else {
4410 return (B_FALSE);
4411 }
4412 }
4413
4414 case MAC_CAPAB_RINGS: {
4415 mac_capab_rings_t *cap_rings = cap_data;
4416 p_nxge_hw_pt_cfg_t p_cfgp = &nxgep->pt_config.hw_config;
4417
4418 mutex_enter(nxgep->genlock);
4419 if (cap_rings->mr_type == MAC_RING_TYPE_RX) {
4420 if (isLDOMguest(nxgep)) {
4421 cap_rings->mr_group_type =
4422 MAC_GROUP_TYPE_STATIC;
4423 cap_rings->mr_rnum =
4424 NXGE_HIO_SHARE_MAX_CHANNELS;
4425 cap_rings->mr_rget = nxge_fill_ring;
4426 cap_rings->mr_gnum = 1;
4427 cap_rings->mr_gget = nxge_hio_group_get;
4428 cap_rings->mr_gaddring = NULL;
4429 cap_rings->mr_gremring = NULL;
4430 } else {
4431 /*
4432 * Service Domain.
4433 */
4434 cap_rings->mr_group_type =
4435 MAC_GROUP_TYPE_DYNAMIC;
4436 cap_rings->mr_rnum = p_cfgp->max_rdcs;
4437 cap_rings->mr_rget = nxge_fill_ring;
4438 cap_rings->mr_gnum = p_cfgp->max_rdc_grpids;
4439 cap_rings->mr_gget = nxge_hio_group_get;
4440 cap_rings->mr_gaddring = nxge_group_add_ring;
4441 cap_rings->mr_gremring = nxge_group_rem_ring;
4442 }
4443
4444 NXGE_DEBUG_MSG((nxgep, RX_CTL,
4445 "==> nxge_m_getcapab: rx nrings[%d] ngroups[%d]",
4446 p_cfgp->max_rdcs, p_cfgp->max_rdc_grpids));
4447 } else {
4448 /*
4449 * TX Rings.
4450 */
4451 if (isLDOMguest(nxgep)) {
4452 cap_rings->mr_group_type =
4453 MAC_GROUP_TYPE_STATIC;
4454 cap_rings->mr_rnum =
4455 NXGE_HIO_SHARE_MAX_CHANNELS;
4456 cap_rings->mr_rget = nxge_fill_ring;
4457 cap_rings->mr_gnum = 0;
4458 cap_rings->mr_gget = NULL;
4459 cap_rings->mr_gaddring = NULL;
4460 cap_rings->mr_gremring = NULL;
4461 } else {
4462 /*
4463 * Service Domain.
4464 */
4465 cap_rings->mr_group_type =
4466 MAC_GROUP_TYPE_DYNAMIC;
4467 cap_rings->mr_rnum = p_cfgp->tdc.count;
4468 cap_rings->mr_rget = nxge_fill_ring;
4469
4470 /*
4471 * Share capable.
4472 *
4473 * Do not report the default group: hence -1
4474 */
4475 cap_rings->mr_gnum =
4476 NXGE_MAX_TDC_GROUPS / nxgep->nports - 1;
4477 cap_rings->mr_gget = nxge_hio_group_get;
4478 cap_rings->mr_gaddring = nxge_group_add_ring;
4479 cap_rings->mr_gremring = nxge_group_rem_ring;
4480 }
4481
4482 NXGE_DEBUG_MSG((nxgep, TX_CTL,
4483 "==> nxge_m_getcapab: tx rings # of rings %d",
4484 p_cfgp->tdc.count));
4485 }
4486 mutex_exit(nxgep->genlock);
4487 break;
4488 }
4489
4490 #if defined(sun4v)
4491 case MAC_CAPAB_SHARES: {
4492 mac_capab_share_t *mshares = (mac_capab_share_t *)cap_data;
4493
4494 /*
4495 * Only the service domain driver responds to
4496 * this capability request.
4497 */
4498 mutex_enter(nxgep->genlock);
4499 if (isLDOMservice(nxgep)) {
4500 mshares->ms_snum = 3;
4501 mshares->ms_handle = (void *)nxgep;
4502 mshares->ms_salloc = nxge_hio_share_alloc;
4503 mshares->ms_sfree = nxge_hio_share_free;
4504 mshares->ms_sadd = nxge_hio_share_add_group;
4505 mshares->ms_sremove = nxge_hio_share_rem_group;
4506 mshares->ms_squery = nxge_hio_share_query;
4507 mshares->ms_sbind = nxge_hio_share_bind;
4508 mshares->ms_sunbind = nxge_hio_share_unbind;
4509 mutex_exit(nxgep->genlock);
4510 } else {
4511 mutex_exit(nxgep->genlock);
4512 return (B_FALSE);
4513 }
4514 break;
4515 }
4516 #endif
4517 default:
4518 return (B_FALSE);
4519 }
4520 return (B_TRUE);
4521 }
4522
4523 static boolean_t
4524 nxge_param_locked(mac_prop_id_t pr_num)
4525 {
4526 /*
4527 * All adv_* parameters are locked (read-only) while
4528 * the device is in any sort of loopback mode ...
4529 */
4530 switch (pr_num) {
4531 case MAC_PROP_ADV_1000FDX_CAP:
4532 case MAC_PROP_EN_1000FDX_CAP:
4533 case MAC_PROP_ADV_1000HDX_CAP:
4534 case MAC_PROP_EN_1000HDX_CAP:
4535 case MAC_PROP_ADV_100FDX_CAP:
4536 case MAC_PROP_EN_100FDX_CAP:
4537 case MAC_PROP_ADV_100HDX_CAP:
4538 case MAC_PROP_EN_100HDX_CAP:
4539 case MAC_PROP_ADV_10FDX_CAP:
4540 case MAC_PROP_EN_10FDX_CAP:
4541 case MAC_PROP_ADV_10HDX_CAP:
4542 case MAC_PROP_EN_10HDX_CAP:
4543 case MAC_PROP_AUTONEG:
4544 case MAC_PROP_FLOWCTRL:
4545 return (B_TRUE);
4546 }
4547 return (B_FALSE);
4548 }
4549
4550 /*
4551 * callback functions for set/get of properties
4552 */
4553 static int
4554 nxge_m_setprop(void *barg, const char *pr_name, mac_prop_id_t pr_num,
4555 uint_t pr_valsize, const void *pr_val)
4556 {
4557 nxge_t *nxgep = barg;
4558 p_nxge_param_t param_arr = nxgep->param_arr;
4559 p_nxge_stats_t statsp = nxgep->statsp;
4560 int err = 0;
4561
4562 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_setprop"));
4563
4564 mutex_enter(nxgep->genlock);
4565 if (statsp->port_stats.lb_mode != nxge_lb_normal &&
4566 nxge_param_locked(pr_num)) {
4567 /*
4568 * All adv_* parameters are locked (read-only)
4569 * while the device is in any sort of loopback mode.
4570 */
4571 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4572 "==> nxge_m_setprop: loopback mode: read only"));
4573 mutex_exit(nxgep->genlock);
4574 return (EBUSY);
4575 }
4576
4577 switch (pr_num) {
4578 case MAC_PROP_EN_1000FDX_CAP:
4579 nxgep->param_en_1000fdx =
4580 param_arr[param_anar_1000fdx].value = *(uint8_t *)pr_val;
4581 goto reprogram;
4582
4583 case MAC_PROP_EN_100FDX_CAP:
4584 nxgep->param_en_100fdx =
4585 param_arr[param_anar_100fdx].value = *(uint8_t *)pr_val;
4586 goto reprogram;
4587
4588 case MAC_PROP_EN_10FDX_CAP:
4589 nxgep->param_en_10fdx =
4590 param_arr[param_anar_10fdx].value = *(uint8_t *)pr_val;
4591 goto reprogram;
4592
4593 case MAC_PROP_AUTONEG:
4594 param_arr[param_autoneg].value = *(uint8_t *)pr_val;
4595 goto reprogram;
4596
4597 case MAC_PROP_MTU: {
4598 uint32_t cur_mtu, new_mtu, old_framesize;
4599
4600 cur_mtu = nxgep->mac.default_mtu;
4601 ASSERT(pr_valsize >= sizeof (new_mtu));
4602 bcopy(pr_val, &new_mtu, sizeof (new_mtu));
4603
4604 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4605 "==> nxge_m_setprop: set MTU: %d is_jumbo %d",
4606 new_mtu, nxgep->mac.is_jumbo));
4607
4608 if (new_mtu == cur_mtu) {
4609 err = 0;
4610 break;
4611 }
4612
4613 if (nxgep->nxge_mac_state == NXGE_MAC_STARTED) {
4614 err = EBUSY;
4615 break;
4616 }
4617
4618 if ((new_mtu < NXGE_DEFAULT_MTU) ||
4619 (new_mtu > NXGE_MAXIMUM_MTU)) {
4620 err = EINVAL;
4621 break;
4622 }
4623
4624 old_framesize = (uint32_t)nxgep->mac.maxframesize;
4625 nxgep->mac.maxframesize = (uint16_t)
4626 (new_mtu + NXGE_EHEADER_VLAN_CRC);
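/*
 * NXGE_EHEADER_VLAN_CRC accounts for the Ethernet header, VLAN tag
 * and CRC (22 bytes), so, for example, an MTU of 1500 yields a max
 * frame size of 1522.
 */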
4627 if (nxge_mac_set_framesize(nxgep)) {
4628 nxgep->mac.maxframesize =
4629 (uint16_t)old_framesize;
4630 err = EINVAL;
4631 break;
4632 }
4633
4634 nxgep->mac.default_mtu = new_mtu;
4635 nxgep->mac.is_jumbo = (new_mtu > NXGE_DEFAULT_MTU);
4636
4637 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4638 "==> nxge_m_setprop: set MTU: %d maxframe %d",
4639 new_mtu, nxgep->mac.maxframesize));
4640 break;
4641 }
4642
4643 case MAC_PROP_FLOWCTRL: {
4644 link_flowctrl_t fl;
4645
4646 ASSERT(pr_valsize >= sizeof (fl));
4647 bcopy(pr_val, &fl, sizeof (fl));
4648
4649 switch (fl) {
4650 case LINK_FLOWCTRL_NONE:
4651 param_arr[param_anar_pause].value = 0;
4652 break;
4653
4654 case LINK_FLOWCTRL_RX:
4655 param_arr[param_anar_pause].value = 1;
4656 break;
4657
4658 case LINK_FLOWCTRL_TX:
4659 case LINK_FLOWCTRL_BI:
4660 err = EINVAL;
4661 break;
4662 default:
4663 err = EINVAL;
4664 break;
4665 }
4666 reprogram:
4667 if ((err == 0) && !isLDOMguest(nxgep)) {
4668 if (!nxge_param_link_update(nxgep)) {
4669 err = EINVAL;
4670 }
4671 } else {
4672 err = EINVAL;
4673 }
4674 break;
4675 }
4676
4677 case MAC_PROP_PRIVATE:
4678 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4679 "==> nxge_m_setprop: private property"));
4680 err = nxge_set_priv_prop(nxgep, pr_name, pr_valsize, pr_val);
4681 break;
4682
4683 default:
4684 err = ENOTSUP;
4685 break;
4686 }
4687
4688 mutex_exit(nxgep->genlock);
4689
4690 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4691 "<== nxge_m_setprop (return %d)", err));
4692 return (err);
4693 }
4694
4695 static int
4696 nxge_m_getprop(void *barg, const char *pr_name, mac_prop_id_t pr_num,
4697 uint_t pr_valsize, void *pr_val)
4698 {
4699 nxge_t *nxgep = barg;
4700 p_nxge_param_t param_arr = nxgep->param_arr;
4701 p_nxge_stats_t statsp = nxgep->statsp;
4702
4703 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4704 "==> nxge_m_getprop: pr_num %d", pr_num));
4705
4706 switch (pr_num) {
4707 case MAC_PROP_DUPLEX:
4708 *(uint8_t *)pr_val = statsp->mac_stats.link_duplex;
4709 break;
4710
4711 case MAC_PROP_SPEED: {
4712 uint64_t val = statsp->mac_stats.link_speed * 1000000ull;
4713
4714 ASSERT(pr_valsize >= sizeof (val));
4715 bcopy(&val, pr_val, sizeof (val));
4716 break;
4717 }
4718
4719 case MAC_PROP_STATUS: {
4720 link_state_t state = statsp->mac_stats.link_up ?
4721 LINK_STATE_UP : LINK_STATE_DOWN;
4722
4723 ASSERT(pr_valsize >= sizeof (state));
4724 bcopy(&state, pr_val, sizeof (state));
4725 break;
4726 }
4727
4728 case MAC_PROP_AUTONEG:
4729 *(uint8_t *)pr_val = param_arr[param_autoneg].value;
4730 break;
4731
4732 case MAC_PROP_FLOWCTRL: {
4733 link_flowctrl_t fl = param_arr[param_anar_pause].value != 0 ?
4734 LINK_FLOWCTRL_RX : LINK_FLOWCTRL_NONE;
4735
4736 ASSERT(pr_valsize >= sizeof (fl));
4737 bcopy(&fl, pr_val, sizeof (fl));
4738 break;
4739 }
4740
4741 case MAC_PROP_ADV_1000FDX_CAP:
4742 *(uint8_t *)pr_val = param_arr[param_anar_1000fdx].value;
4743 break;
4744
4745 case MAC_PROP_EN_1000FDX_CAP:
4746 *(uint8_t *)pr_val = nxgep->param_en_1000fdx;
4747 break;
4748
4749 case MAC_PROP_ADV_100FDX_CAP:
4750 *(uint8_t *)pr_val = param_arr[param_anar_100fdx].value;
4751 break;
4752
4753 case MAC_PROP_EN_100FDX_CAP:
4754 *(uint8_t *)pr_val = nxgep->param_en_100fdx;
4755 break;
4756
4757 case MAC_PROP_ADV_10FDX_CAP:
4758 *(uint8_t *)pr_val = param_arr[param_anar_10fdx].value;
4759 break;
4760
4761 case MAC_PROP_EN_10FDX_CAP:
4762 *(uint8_t *)pr_val = nxgep->param_en_10fdx;
4763 break;
4764
4765 case MAC_PROP_PRIVATE:
4766 return (nxge_get_priv_prop(nxgep, pr_name, pr_valsize,
4767 pr_val));
4768
4769 default:
4770 return (ENOTSUP);
4771 }
4772
4773 return (0);
4774 }
4775
4776 static void
4777 nxge_m_propinfo(void *barg, const char *pr_name, mac_prop_id_t pr_num,
4778 mac_prop_info_handle_t prh)
4779 {
4780 nxge_t *nxgep = barg;
4781 p_nxge_stats_t statsp = nxgep->statsp;
4782
4783 /*
4784 * By default permissions are read/write unless specified
4785 * otherwise by the driver.
4786 */
4787
4788 switch (pr_num) {
4789 case MAC_PROP_DUPLEX:
4790 case MAC_PROP_SPEED:
4791 case MAC_PROP_STATUS:
4792 case MAC_PROP_EN_1000HDX_CAP:
4793 case MAC_PROP_EN_100HDX_CAP:
4794 case MAC_PROP_EN_10HDX_CAP:
4795 case MAC_PROP_ADV_1000FDX_CAP:
4796 case MAC_PROP_ADV_1000HDX_CAP:
4797 case MAC_PROP_ADV_100FDX_CAP:
4798 case MAC_PROP_ADV_100HDX_CAP:
4799 case MAC_PROP_ADV_10FDX_CAP:
4800 case MAC_PROP_ADV_10HDX_CAP:
4801 /*
4802 * Note that read-only properties don't need to
4803 * provide default values since they cannot be
4804 * changed by the administrator.
4805 */
4806 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
4807 break;
4808
4809 case MAC_PROP_EN_1000FDX_CAP:
4810 case MAC_PROP_EN_100FDX_CAP:
4811 case MAC_PROP_EN_10FDX_CAP:
4812 mac_prop_info_set_default_uint8(prh, 1);
4813 break;
4814
4815 case MAC_PROP_AUTONEG:
4816 mac_prop_info_set_default_uint8(prh, 1);
4817 break;
4818
4819 case MAC_PROP_FLOWCTRL:
4820 mac_prop_info_set_default_link_flowctrl(prh, LINK_FLOWCTRL_RX);
4821 break;
4822
4823 case MAC_PROP_MTU:
4824 mac_prop_info_set_range_uint32(prh,
4825 NXGE_DEFAULT_MTU, NXGE_MAXIMUM_MTU);
4826 break;
4827
4828 case MAC_PROP_PRIVATE:
4829 nxge_priv_propinfo(pr_name, prh);
4830 break;
4831 }
4832
4833 mutex_enter(nxgep->genlock);
4834 if (statsp->port_stats.lb_mode != nxge_lb_normal &&
4835 nxge_param_locked(pr_num)) {
4836 /*
4837 * Some properties are locked (read-only) while the
4838 * device is in any sort of loopback mode.
4839 */
4840 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
4841 }
4842 mutex_exit(nxgep->genlock);
4843 }
4844
4845 static void
4846 nxge_priv_propinfo(const char *pr_name, mac_prop_info_handle_t prh)
4847 {
4848 char valstr[64];
4849
4850 bzero(valstr, sizeof (valstr));
4851
4852 if (strcmp(pr_name, "_function_number") == 0 ||
4853 strcmp(pr_name, "_fw_version") == 0 ||
4854 strcmp(pr_name, "_port_mode") == 0 ||
4855 strcmp(pr_name, "_hot_swap_phy") == 0) {
4856 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
4857
4858 } else if (strcmp(pr_name, "_rxdma_intr_time") == 0) {
4859 (void) snprintf(valstr, sizeof (valstr),
4860 "%d", RXDMA_RCR_TO_DEFAULT);
4861
4862 } else if (strcmp(pr_name, "_rxdma_intr_pkts") == 0) {
4863 (void) snprintf(valstr, sizeof (valstr),
4864 "%d", RXDMA_RCR_PTHRES_DEFAULT);
4865
4866 } else if (strcmp(pr_name, "_class_opt_ipv4_tcp") == 0 ||
4867 strcmp(pr_name, "_class_opt_ipv4_udp") == 0 ||
4868 strcmp(pr_name, "_class_opt_ipv4_ah") == 0 ||
4869 strcmp(pr_name, "_class_opt_ipv4_sctp") == 0 ||
4870 strcmp(pr_name, "_class_opt_ipv6_tcp") == 0 ||
4871 strcmp(pr_name, "_class_opt_ipv6_udp") == 0 ||
4872 strcmp(pr_name, "_class_opt_ipv6_ah") == 0 ||
4873 strcmp(pr_name, "_class_opt_ipv6_sctp") == 0) {
4874 (void) snprintf(valstr, sizeof (valstr), "%x",
4875 NXGE_CLASS_FLOW_GEN_SERVER);
4876
4877 } else if (strcmp(pr_name, "_soft_lso_enable") == 0) {
4878 (void) snprintf(valstr, sizeof (valstr), "%d", 0);
4879
4880 } else if (strcmp(pr_name, "_adv_10gfdx_cap") == 0) {
4881 (void) snprintf(valstr, sizeof (valstr), "%d", 1);
4882
4883 } else if (strcmp(pr_name, "_adv_pause_cap") == 0) {
4884 (void) snprintf(valstr, sizeof (valstr), "%d", 1);
4885 }
4886
4887 if (strlen(valstr) > 0)
4888 mac_prop_info_set_default_str(prh, valstr);
4889 }
4890
4891 /* ARGSUSED */
4892 static int
4893 nxge_set_priv_prop(p_nxge_t nxgep, const char *pr_name, uint_t pr_valsize,
4894 const void *pr_val)
4895 {
4896 p_nxge_param_t param_arr = nxgep->param_arr;
4897 int err = 0;
4898 long result;
4899
4900 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4901 "==> nxge_set_priv_prop: name %s", pr_name));
4902
4903 /* Blanking */
4904 if (strcmp(pr_name, "_rxdma_intr_time") == 0) {
4905 err = nxge_param_rx_intr_time(nxgep, NULL, NULL,
4906 (char *)pr_val,
4907 (caddr_t)&param_arr[param_rxdma_intr_time]);
4908 if (err) {
4909 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4910 "<== nxge_set_priv_prop: "
4911 "unable to set (%s)", pr_name));
4912 err = EINVAL;
4913 } else {
4914 err = 0;
4915 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4916 "<== nxge_set_priv_prop: "
4917 "set (%s)", pr_name));
4918 }
4919
4920 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4921 "<== nxge_set_priv_prop: name %s (value %d)",
4922 pr_name, result));
4923
4924 return (err);
4925 }
4926
4927 if (strcmp(pr_name, "_rxdma_intr_pkts") == 0) {
4928 err = nxge_param_rx_intr_pkts(nxgep, NULL, NULL,
4929 (char *)pr_val,
4930 (caddr_t)&param_arr[param_rxdma_intr_pkts]);
4931 if (err) {
4932 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4933 "<== nxge_set_priv_prop: "
4934 "unable to set (%s)", pr_name));
4935 err = EINVAL;
4936 } else {
4937 err = 0;
4938 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4939 "<== nxge_set_priv_prop: "
4940 "set (%s)", pr_name));
4941 }
4942
4943 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4944 "<== nxge_set_priv_prop: name %s (value %d)",
4945 pr_name, result));
4946
4947 return (err);
4948 }
4949
4950 /* Classification */
4951 if (strcmp(pr_name, "_class_opt_ipv4_tcp") == 0) {
4952 if (pr_val == NULL) {
4953 err = EINVAL;
4954 return (err);
4955 }
4956 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
4957
4958 err = nxge_param_set_ip_opt(nxgep, NULL,
4959 NULL, (char *)pr_val,
4960 (caddr_t)&param_arr[param_class_opt_ipv4_tcp]);
4961
4962 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4963 "<== nxge_set_priv_prop: name %s (value 0x%x)",
4964 pr_name, result));
4965
4966 return (err);
4967 }
4968
4969 if (strcmp(pr_name, "_class_opt_ipv4_udp") == 0) {
4970 if (pr_val == NULL) {
4971 err = EINVAL;
4972 return (err);
4973 }
4974 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
4975
4976 err = nxge_param_set_ip_opt(nxgep, NULL,
4977 NULL, (char *)pr_val,
4978 (caddr_t)&param_arr[param_class_opt_ipv4_udp]);
4979
4980 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4981 "<== nxge_set_priv_prop: name %s (value 0x%x)",
4982 pr_name, result));
4983
4984 return (err);
4985 }
4986 if (strcmp(pr_name, "_class_opt_ipv4_ah") == 0) {
4987 if (pr_val == NULL) {
4988 err = EINVAL;
4989 return (err);
4990 }
4991 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
4992
4993 err = nxge_param_set_ip_opt(nxgep, NULL,
4994 NULL, (char *)pr_val,
4995 (caddr_t)&param_arr[param_class_opt_ipv4_ah]);
4996
4997 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
4998 "<== nxge_set_priv_prop: name %s (value 0x%x)",
4999 pr_name, result));
5000
5001 return (err);
5002 }
5003 if (strcmp(pr_name, "_class_opt_ipv4_sctp") == 0) {
5004 if (pr_val == NULL) {
5005 err = EINVAL;
5006 return (err);
5007 }
5008 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
5009
5010 err = nxge_param_set_ip_opt(nxgep, NULL,
5011 NULL, (char *)pr_val,
5012 (caddr_t)&param_arr[param_class_opt_ipv4_sctp]);
5013
5014 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5015 "<== nxge_set_priv_prop: name %s (value 0x%x)",
5016 pr_name, result));
5017
5018 return (err);
5019 }
5020
5021 if (strcmp(pr_name, "_class_opt_ipv6_tcp") == 0) {
5022 if (pr_val == NULL) {
5023 err = EINVAL;
5024 return (err);
5025 }
5026 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
5027
5028 err = nxge_param_set_ip_opt(nxgep, NULL,
5029 NULL, (char *)pr_val,
5030 (caddr_t)&param_arr[param_class_opt_ipv6_tcp]);
5031
5032 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5033 "<== nxge_set_priv_prop: name %s (value 0x%x)",
5034 pr_name, result));
5035
5036 return (err);
5037 }
5038
5039 if (strcmp(pr_name, "_class_opt_ipv6_udp") == 0) {
5040 if (pr_val == NULL) {
5041 err = EINVAL;
5042 return (err);
5043 }
5044 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
5045
5046 err = nxge_param_set_ip_opt(nxgep, NULL,
5047 NULL, (char *)pr_val,
5048 (caddr_t)&param_arr[param_class_opt_ipv6_udp]);
5049
5050 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5051 "<== nxge_set_priv_prop: name %s (value 0x%x)",
5052 pr_name, result));
5053
5054 return (err);
5055 }
5056 if (strcmp(pr_name, "_class_opt_ipv6_ah") == 0) {
5057 if (pr_val == NULL) {
5058 err = EINVAL;
5059 return (err);
5060 }
5061 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
5062
5063 err = nxge_param_set_ip_opt(nxgep, NULL,
5064 NULL, (char *)pr_val,
5065 (caddr_t)&param_arr[param_class_opt_ipv6_ah]);
5066
5067 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5068 "<== nxge_set_priv_prop: name %s (value 0x%x)",
5069 pr_name, result));
5070
5071 return (err);
5072 }
5073 if (strcmp(pr_name, "_class_opt_ipv6_sctp") == 0) {
5074 if (pr_val == NULL) {
5075 err = EINVAL;
5076 return (err);
5077 }
5078 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
5079
5080 err = nxge_param_set_ip_opt(nxgep, NULL,
5081 NULL, (char *)pr_val,
5082 (caddr_t)&param_arr[param_class_opt_ipv6_sctp]);
5083
5084 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5085 "<== nxge_set_priv_prop: name %s (value 0x%x)",
5086 pr_name, result));
5087
5088 return (err);
5089 }
5090
5091 if (strcmp(pr_name, "_soft_lso_enable") == 0) {
5092 if (pr_val == NULL) {
5093 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5094 "==> nxge_set_priv_prop: name %s (null)", pr_name));
5095 err = EINVAL;
5096 return (err);
5097 }
5098
5099 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
5100 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5101 "<== nxge_set_priv_prop: name %s "
5102 "(lso %d pr_val %s value %d)",
5103 pr_name, nxgep->soft_lso_enable, pr_val, result));
5104
5105 if (result > 1 || result < 0) {
5106 err = EINVAL;
5107 } else {
5108 if (nxgep->soft_lso_enable == (uint32_t)result) {
5109 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5110 "no change (%d %d)",
5111 nxgep->soft_lso_enable, result));
5112 return (0);
5113 }
5114 /* Only apply the new value when it passed validation. */
5115 nxgep->soft_lso_enable = (int)result;
5116 }
5117
5118 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5119 "<== nxge_set_priv_prop: name %s (value %d)",
5120 pr_name, result));
5121
5122 return (err);
5123 }
5124 /*
5125 * Commands like "ndd -set /dev/nxge0 adv_10gfdx_cap 1" cause the
5126 * following code to be executed.
5127 */
5128 if (strcmp(pr_name, "_adv_10gfdx_cap") == 0) {
5129 err = nxge_param_set_mac(nxgep, NULL, NULL, (char *)pr_val,
5130 (caddr_t)&param_arr[param_anar_10gfdx]);
5131 return (err);
5132 }
5133 if (strcmp(pr_name, "_adv_pause_cap") == 0) {
5134 err = nxge_param_set_mac(nxgep, NULL, NULL, (char *)pr_val,
5135 (caddr_t)&param_arr[param_anar_pause]);
5136 return (err);
5137 }
5138
5139 return (EINVAL);
5140 }
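/*
 * These private properties are normally driven through dladm(1M); for
 * example (the link name is illustrative):
 *
 *	# dladm set-linkprop -p _soft_lso_enable=1 nxge0
 *	# dladm show-linkprop -p _rxdma_intr_time nxge0
 *
 * The MAC framework routes such requests to nxge_m_setprop() and
 * nxge_m_getprop(), which dispatch here for MAC_PROP_PRIVATE.
 */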
5141
5142 static int
5143 nxge_get_priv_prop(p_nxge_t nxgep, const char *pr_name, uint_t pr_valsize,
5144 void *pr_val)
5145 {
5146 p_nxge_param_t param_arr = nxgep->param_arr;
5147 char valstr[MAXNAMELEN];
5148 int err = EINVAL;
5149 uint_t strsize;
5150
5151 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5152 "==> nxge_get_priv_prop: property %s", pr_name));
5153
5154 /* function number */
5155 if (strcmp(pr_name, "_function_number") == 0) {
5156 (void) snprintf(valstr, sizeof (valstr), "%d",
5157 nxgep->function_num);
5158 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5159 "==> nxge_get_priv_prop: name %s "
5160 "(value %d valstr %s)",
5161 pr_name, nxgep->function_num, valstr));
5162
5163 err = 0;
5164 goto done;
5165 }
5166
5167 /* Neptune firmware version */
5168 if (strcmp(pr_name, "_fw_version") == 0) {
5169 (void) snprintf(valstr, sizeof (valstr), "%s",
5170 nxgep->vpd_info.ver);
5171 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5172 "==> nxge_get_priv_prop: name %s "
5173 "(value %d valstr %s)",
5174 pr_name, nxgep->vpd_info.ver, valstr));
5175
5176 err = 0;
5177 goto done;
5178 }
5179
5180 /* port PHY mode */
5181 if (strcmp(pr_name, "_port_mode") == 0) {
5182 switch (nxgep->mac.portmode) {
5183 case PORT_1G_COPPER:
5184 (void) snprintf(valstr, sizeof (valstr), "1G copper %s",
5185 nxgep->hot_swappable_phy ?
5186 "[Hot Swappable]" : "");
5187 break;
5188 case PORT_1G_FIBER:
5189 (void) snprintf(valstr, sizeof (valstr), "1G fiber %s",
5190 nxgep->hot_swappable_phy ?
5191 "[hot swappable]" : "");
5192 break;
5193 case PORT_10G_COPPER:
5194 (void) snprintf(valstr, sizeof (valstr),
5195 "10G copper %s",
5196 nxgep->hot_swappable_phy ?
5197 "[hot swappable]" : "");
5198 break;
5199 case PORT_10G_FIBER:
5200 (void) snprintf(valstr, sizeof (valstr), "10G fiber %s",
5201 nxgep->hot_swappable_phy ?
5202 "[hot swappable]" : "");
5203 break;
5204 case PORT_10G_SERDES:
5205 (void) snprintf(valstr, sizeof (valstr),
5206 "10G serdes %s", nxgep->hot_swappable_phy ?
5207 "[hot swappable]" : "");
5208 break;
5209 case PORT_1G_SERDES:
5210 (void) snprintf(valstr, sizeof (valstr), "1G serdes %s",
5211 nxgep->hot_swappable_phy ?
5212 "[hot swappable]" : "");
5213 break;
5214 case PORT_1G_TN1010:
5215 (void) snprintf(valstr, sizeof (valstr),
5216 "1G TN1010 copper %s", nxgep->hot_swappable_phy ?
5217 "[hot swappable]" : "");
5218 break;
5219 case PORT_10G_TN1010:
5220 (void) snprintf(valstr, sizeof (valstr),
5221 "10G TN1010 copper %s", nxgep->hot_swappable_phy ?
5222 "[hot swappable]" : "");
5223 break;
5224 case PORT_1G_RGMII_FIBER:
5225 (void) snprintf(valstr, sizeof (valstr),
5226 "1G rgmii fiber %s", nxgep->hot_swappable_phy ?
5227 "[hot swappable]" : "");
5228 break;
5229 case PORT_HSP_MODE:
5230 (void) snprintf(valstr, sizeof (valstr),
5231 "phy not present[hot swappable]");
5232 break;
5233 default:
5234 (void) snprintf(valstr, sizeof (valstr), "unknown %s",
5235 nxgep->hot_swappable_phy ?
5236 "[hot swappable]" : "");
5237 break;
5238 }
5239
5240 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5241 "==> nxge_get_priv_prop: name %s (value %s)",
5242 pr_name, valstr));
5243
5244 err = 0;
5245 goto done;
5246 }
5247
5248 /* Hot swappable PHY */
5249 if (strcmp(pr_name, "_hot_swap_phy") == 0) {
5250 (void) snprintf(valstr, sizeof (valstr), "%s",
5251 nxgep->hot_swappable_phy ?
5252 "yes" : "no");
5253
5254 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5255 "==> nxge_get_priv_prop: name %s "
5256 "(value %d valstr %s)",
5257 pr_name, nxgep->hot_swappable_phy, valstr));
5258
5259 err = 0;
5260 goto done;
5261 }
5262
5264 /* Receive Interrupt Blanking Parameters */
5265 if (strcmp(pr_name, "_rxdma_intr_time") == 0) {
5266 err = 0;
5267 (void) snprintf(valstr, sizeof (valstr), "%d",
5268 nxgep->intr_timeout);
5269 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5270 "==> nxge_get_priv_prop: name %s (value %d)",
5271 pr_name,
5272 (uint32_t)nxgep->intr_timeout));
5273 goto done;
5274 }
5275
5276 if (strcmp(pr_name, "_rxdma_intr_pkts") == 0) {
5277 err = 0;
5278 (void) snprintf(valstr, sizeof (valstr), "%d",
5279 nxgep->intr_threshold);
5280 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5281 "==> nxge_get_priv_prop: name %s (value %d)",
5282 pr_name, (uint32_t)nxgep->intr_threshold));
5283
5284 goto done;
5285 }
5286
5287 /* Classification and Load Distribution Configuration */
5288 if (strcmp(pr_name, "_class_opt_ipv4_tcp") == 0) {
5289 err = nxge_dld_get_ip_opt(nxgep,
5290 (caddr_t)&param_arr[param_class_opt_ipv4_tcp]);
5291
5292 (void) snprintf(valstr, sizeof (valstr), "%x",
5293 (int)param_arr[param_class_opt_ipv4_tcp].value);
5294
5295 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5296 "==> nxge_get_priv_prop: %s", valstr));
5297 goto done;
5298 }
5299
5300 if (strcmp(pr_name, "_class_opt_ipv4_udp") == 0) {
5301 err = nxge_dld_get_ip_opt(nxgep,
5302 (caddr_t)&param_arr[param_class_opt_ipv4_udp]);
5303
5304 (void) snprintf(valstr, sizeof (valstr), "%x",
5305 (int)param_arr[param_class_opt_ipv4_udp].value);
5306
5307 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5308 "==> nxge_get_priv_prop: %s", valstr));
5309 goto done;
5310 }
5311 if (strcmp(pr_name, "_class_opt_ipv4_ah") == 0) {
5312 err = nxge_dld_get_ip_opt(nxgep,
5313 (caddr_t)&param_arr[param_class_opt_ipv4_ah]);
5314
5315 (void) snprintf(valstr, sizeof (valstr), "%x",
5316 (int)param_arr[param_class_opt_ipv4_ah].value);
5317
5318 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5319 "==> nxge_get_priv_prop: %s", valstr));
5320 goto done;
5321 }
5322
5323 if (strcmp(pr_name, "_class_opt_ipv4_sctp") == 0) {
5324 err = nxge_dld_get_ip_opt(nxgep,
5325 (caddr_t)&param_arr[param_class_opt_ipv4_sctp]);
5326
5327 (void) snprintf(valstr, sizeof (valstr), "%x",
5328 (int)param_arr[param_class_opt_ipv4_sctp].value);
5329
5330 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5331 "==> nxge_get_priv_prop: %s", valstr));
5332 goto done;
5333 }
5334
5335 if (strcmp(pr_name, "_class_opt_ipv6_tcp") == 0) {
5336 err = nxge_dld_get_ip_opt(nxgep,
5337 (caddr_t)&param_arr[param_class_opt_ipv6_tcp]);
5338
5339 (void) snprintf(valstr, sizeof (valstr), "%x",
5340 (int)param_arr[param_class_opt_ipv6_tcp].value);
5341
5342 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5343 "==> nxge_get_priv_prop: %s", valstr));
5344 goto done;
5345 }
5346
5347 if (strcmp(pr_name, "_class_opt_ipv6_udp") == 0) {
5348 err = nxge_dld_get_ip_opt(nxgep,
5349 (caddr_t)&param_arr[param_class_opt_ipv6_udp]);
5350
5351 (void) snprintf(valstr, sizeof (valstr), "%x",
5352 (int)param_arr[param_class_opt_ipv6_udp].value);
5353
5354 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5355 "==> nxge_get_priv_prop: %s", valstr));
5356 goto done;
5357 }
5358
5359 if (strcmp(pr_name, "_class_opt_ipv6_ah") == 0) {
5360 err = nxge_dld_get_ip_opt(nxgep,
5361 (caddr_t)&param_arr[param_class_opt_ipv6_ah]);
5362
5363 (void) snprintf(valstr, sizeof (valstr), "%x",
5364 (int)param_arr[param_class_opt_ipv6_ah].value);
5365
5366 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5367 "==> nxge_get_priv_prop: %s", valstr));
5368 goto done;
5369 }
5370
5371 if (strcmp(pr_name, "_class_opt_ipv6_sctp") == 0) {
5372 err = nxge_dld_get_ip_opt(nxgep,
5373 (caddr_t)&param_arr[param_class_opt_ipv6_sctp]);
5374
5375 (void) snprintf(valstr, sizeof (valstr), "%x",
5376 (int)param_arr[param_class_opt_ipv6_sctp].value);
5377
5378 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5379 "==> nxge_get_priv_prop: %s", valstr));
5380 goto done;
5381 }
5382
5383 /* Software LSO */
5384 if (strcmp(pr_name, "_soft_lso_enable") == 0) {
5385 (void) snprintf(valstr, sizeof (valstr),
5386 "%d", nxgep->soft_lso_enable);
5387 err = 0;
5388 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5389 "==> nxge_get_priv_prop: name %s (value %d)",
5390 pr_name, nxgep->soft_lso_enable));
5391
5392 goto done;
5393 }
5394 if (strcmp(pr_name, "_adv_10gfdx_cap") == 0) {
5395 err = 0;
5396 if (nxgep->param_arr[param_anar_10gfdx].value != 0) {
5397 (void) snprintf(valstr, sizeof (valstr), "%d", 1);
5398 goto done;
5399 } else {
5400 (void) snprintf(valstr, sizeof (valstr), "%d", 0);
5401 goto done;
5402 }
5403 }
5404 if (strcmp(pr_name, "_adv_pause_cap") == 0) {
5405 err = 0;
5406 if (nxgep->param_arr[param_anar_pause].value != 0) {
5407 (void) snprintf(valstr, sizeof (valstr), "%d", 1);
5408 goto done;
5409 } else {
5410 (void) snprintf(valstr, sizeof (valstr), "%d", 0);
5411 goto done;
5412 }
5413 }
5414
5415 done:
5416 if (err == 0) {
5417 strsize = (uint_t)strlen(valstr);
5418 if (pr_valsize < strsize) {
5419 err = ENOBUFS;
5420 } else {
5421 (void) strlcpy(pr_val, valstr, pr_valsize);
5422 }
5423 }
5424
5425 NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
5426 "<== nxge_get_priv_prop: return %d", err));
5427 return (err);
5428 }
5429
5430 /*
5431 * Module loading and removing entry points.
5432 */
5433
5434 DDI_DEFINE_STREAM_OPS(nxge_dev_ops, nulldev, nulldev, nxge_attach, nxge_detach,
5435 nodev, NULL, D_MP, NULL, nxge_quiesce);
5436
5437 #define NXGE_DESC_VER "Sun NIU 10Gb Ethernet"
5438
5439 /*
5440 * Module linkage information for the kernel.
5441 */
5442 static struct modldrv nxge_modldrv = {
5443 &mod_driverops,
5444 NXGE_DESC_VER,
5445 &nxge_dev_ops
5446 };
5447
5448 static struct modlinkage modlinkage = {
5449 MODREV_1, (void *) &nxge_modldrv, NULL
5450 };
5451
5452 int
5453 _init(void)
5454 {
5455 int status;
5456
5457 MUTEX_INIT(&nxgedebuglock, NULL, MUTEX_DRIVER, NULL);
5458
5459 NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _init"));
5460
5461 mac_init_ops(&nxge_dev_ops, "nxge");
5462
5463 status = ddi_soft_state_init(&nxge_list, sizeof (nxge_t), 0);
5464 if (status != 0) {
5465 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL,
5466 "failed to init device soft state"));
5467 goto _init_exit;
5468 }
5469
5470 status = mod_install(&modlinkage);
5471 if (status != 0) {
5472 ddi_soft_state_fini(&nxge_list);
5473 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, "Mod install failed"));
5474 goto _init_exit;
5475 }
5476
5477 MUTEX_INIT(&nxge_common_lock, NULL, MUTEX_DRIVER, NULL);
5478
5479 NXGE_DEBUG_MSG((NULL, MOD_CTL, "<== _init status = 0x%X", status));
5480 return (status);
5481
5482 _init_exit:
5483 NXGE_DEBUG_MSG((NULL, MOD_CTL, "<== _init status = 0x%X", status));
5484 MUTEX_DESTROY(&nxgedebuglock);
5485 return (status);
5486 }
5487
5488 int
5489 _fini(void)
5490 {
5491 int status;
5492
5493 NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _fini"));
5494 NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _fini: mod_remove"));
5495
5496 if (nxge_mblks_pending)
5497 return (EBUSY);
5498
5499 status = mod_remove(&modlinkage);
5500 if (status != DDI_SUCCESS) {
5501 NXGE_DEBUG_MSG((NULL, MOD_CTL,
5502 "Module removal failed 0x%08x",
5503 status));
5504 goto _fini_exit;
5505 }
5506
5507 mac_fini_ops(&nxge_dev_ops);
5508
5509 ddi_soft_state_fini(&nxge_list);
5510
5511 NXGE_DEBUG_MSG((NULL, MOD_CTL, "<== _fini status = 0x%08x", status));
5512
5513 MUTEX_DESTROY(&nxge_common_lock);
5514 MUTEX_DESTROY(&nxgedebuglock);
5515 return (status);
5516
5517 _fini_exit:
5518 NXGE_DEBUG_MSG((NULL, MOD_CTL, "<== _fini status = 0x%08x", status));
5519 return (status);
5520 }
5521
5522 int
5523 _info(struct modinfo *modinfop)
5524 {
5525 int status;
5526
5527 NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _info"));
5528 status = mod_info(&modlinkage, modinfop);
5529 NXGE_DEBUG_MSG((NULL, MOD_CTL, " _info status = 0x%X", status));
5530
5531 return (status);
5532 }
5533
5534 /*ARGSUSED*/
5535 static int
5536 nxge_tx_ring_start(mac_ring_driver_t rdriver, uint64_t mr_gen_num)
5537 {
5538 p_nxge_ring_handle_t rhp = (p_nxge_ring_handle_t)rdriver;
5539 p_nxge_t nxgep = rhp->nxgep;
5540 uint32_t channel;
5541 p_tx_ring_t ring;
5542
5543 channel = nxgep->pt_config.hw_config.tdc.start + rhp->index;
5544 ring = nxgep->tx_rings->rings[channel];
5545
5546 MUTEX_ENTER(&ring->lock);
5547 ASSERT(ring->tx_ring_handle == NULL);
5548 ring->tx_ring_handle = rhp->ring_handle;
5549 MUTEX_EXIT(&ring->lock);
5550
5551 return (0);
5552 }
5553
5554 static void
5555 nxge_tx_ring_stop(mac_ring_driver_t rdriver)
5556 {
5557 p_nxge_ring_handle_t rhp = (p_nxge_ring_handle_t)rdriver;
5558 p_nxge_t nxgep = rhp->nxgep;
5559 uint32_t channel;
5560 p_tx_ring_t ring;
5561
5562 channel = nxgep->pt_config.hw_config.tdc.start + rhp->index;
5563 ring = nxgep->tx_rings->rings[channel];
5564
5565 MUTEX_ENTER(&ring->lock);
5566 ASSERT(ring->tx_ring_handle != NULL);
5567 ring->tx_ring_handle = (mac_ring_handle_t)NULL;
5568 MUTEX_EXIT(&ring->lock);
5569 }
5570
5571 int
5572 nxge_rx_ring_start(mac_ring_driver_t rdriver, uint64_t mr_gen_num)
5573 {
5574 p_nxge_ring_handle_t rhp = (p_nxge_ring_handle_t)rdriver;
5575 p_nxge_t nxgep = rhp->nxgep;
5576 uint32_t channel;
5577 p_rx_rcr_ring_t ring;
5578 int i;
5579
5580 channel = nxgep->pt_config.hw_config.start_rdc + rhp->index;
5581 ring = nxgep->rx_rcr_rings->rcr_rings[channel];
5582
5583 MUTEX_ENTER(&ring->lock);
5584
5585 if (ring->started) {
5586 ASSERT(ring->started == B_FALSE);
5587 MUTEX_EXIT(&ring->lock);
5588 return (0);
5589 }
5590
5591 /* set rcr_ring */
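/*
 * Bind this RCR ring to the logical device (ldv) and logical device
 * group that service its RX DMA channel, so that interrupt and poll
 * mode switching can find the right state.
 */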
5592 for (i = 0; i < nxgep->ldgvp->maxldvs; i++) {
5593 if ((nxgep->ldgvp->ldvp[i].is_rxdma) &&
5594 (nxgep->ldgvp->ldvp[i].channel == channel)) {
5595 ring->ldvp = &nxgep->ldgvp->ldvp[i];
5596 ring->ldgp = nxgep->ldgvp->ldvp[i].ldgp;
5597 }
5598 }
5599
5600 ring->rcr_mac_handle = rhp->ring_handle;
5601 ring->rcr_gen_num = mr_gen_num;
5602 ring->started = B_TRUE;
5603 rhp->ring_gen_num = mr_gen_num;
5604 MUTEX_EXIT(&ring->lock);
5605
5606 return (0);
5607 }
5608
5609 static void
5610 nxge_rx_ring_stop(mac_ring_driver_t rdriver)
5611 {
5612 p_nxge_ring_handle_t rhp = (p_nxge_ring_handle_t)rdriver;
5613 p_nxge_t nxgep = rhp->nxgep;
5614 uint32_t channel;
5615 p_rx_rcr_ring_t ring;
5616
5617 channel = nxgep->pt_config.hw_config.start_rdc + rhp->index;
5618 ring = nxgep->rx_rcr_rings->rcr_rings[channel];
5619
5620 MUTEX_ENTER(&ring->lock);
5621 ASSERT(ring->started == B_TRUE);
5622 ring->rcr_mac_handle = NULL;
5623 ring->ldvp = NULL;
5624 ring->ldgp = NULL;
5625 ring->started = B_FALSE;
5626 MUTEX_EXIT(&ring->lock);
5627 }
5628
5629 static int
5630 nxge_ring_get_htable_idx(p_nxge_t nxgep, mac_ring_type_t type, uint32_t channel)
5631 {
5632 int i;
5633
5634 #if defined(sun4v)
5635 if (isLDOMguest(nxgep)) {
5636 return (nxge_hio_get_dc_htable_idx(nxgep,
5637 (type == MAC_RING_TYPE_TX) ? VP_BOUND_TX : VP_BOUND_RX,
5638 channel));
5639 }
5640 #endif
5641
5642 ASSERT(nxgep->ldgvp != NULL);
5643
5644 switch (type) {
5645 case MAC_RING_TYPE_TX:
5646 for (i = 0; i < nxgep->ldgvp->maxldvs; i++) {
5647 if ((nxgep->ldgvp->ldvp[i].is_txdma) &&
5648 (nxgep->ldgvp->ldvp[i].channel == channel)) {
5649 return ((int)
5650 nxgep->ldgvp->ldvp[i].ldgp->htable_idx);
5651 }
5652 }
5653 break;
5654
5655 case MAC_RING_TYPE_RX:
5656 for (i = 0; i < nxgep->ldgvp->maxldvs; i++) {
5657 if ((nxgep->ldgvp->ldvp[i].is_rxdma) &&
5658 (nxgep->ldgvp->ldvp[i].channel == channel)) {
5659 return ((int)
5660 nxgep->ldgvp->ldvp[i].ldgp->htable_idx);
5661 }
5662 }
5663 }
5664
5665 return (-1);
5666 }
5667
5668 /*
5669 * Callback function used by the MAC layer to register all rings.
5670 */
5671 static void
5672 nxge_fill_ring(void *arg, mac_ring_type_t rtype, const int rg_index,
5673 const int index, mac_ring_info_t *infop, mac_ring_handle_t rh)
5674 {
5675 p_nxge_t nxgep = (p_nxge_t)arg;
5676 p_nxge_hw_pt_cfg_t p_cfgp = &nxgep->pt_config.hw_config;
5677 p_nxge_intr_t intrp;
5678 uint32_t channel;
5679 int htable_idx;
5680 p_nxge_ring_handle_t rhandlep;
5681
5682 ASSERT(nxgep != NULL);
5683 ASSERT(p_cfgp != NULL);
5684 ASSERT(infop != NULL);
5685
5686 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
5687 "==> nxge_fill_ring 0x%x index %d", rtype, index));
5688
5690 switch (rtype) {
5691 case MAC_RING_TYPE_TX: {
5692 mac_intr_t *mintr = &infop->mri_intr;
5693
5694 NXGE_DEBUG_MSG((nxgep, TX_CTL,
5695 "==> nxge_fill_ring (TX) 0x%x index %d ntdcs %d",
5696 rtype, index, p_cfgp->tdc.count));
5697
5698 ASSERT((index >= 0) && (index < p_cfgp->tdc.count));
5699 rhandlep = &nxgep->tx_ring_handles[index];
5700 rhandlep->nxgep = nxgep;
5701 rhandlep->index = index;
5702 rhandlep->ring_handle = rh;
5703
5704 channel = nxgep->pt_config.hw_config.tdc.start + index;
5705 rhandlep->channel = channel;
5706 intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;
5707 htable_idx = nxge_ring_get_htable_idx(nxgep, rtype,
5708 channel);
5709 if (htable_idx >= 0)
5710 mintr->mi_ddi_handle = intrp->htable[htable_idx];
5711 else
5712 mintr->mi_ddi_handle = NULL;
5713
5714 infop->mri_driver = (mac_ring_driver_t)rhandlep;
5715 infop->mri_start = nxge_tx_ring_start;
5716 infop->mri_stop = nxge_tx_ring_stop;
5717 infop->mri_tx = nxge_tx_ring_send;
5718 infop->mri_stat = nxge_tx_ring_stat;
5719 infop->mri_flags = MAC_RING_TX_SERIALIZE;
5720 break;
5721 }
5722
5723 case MAC_RING_TYPE_RX: {
5724 mac_intr_t nxge_mac_intr;
5725 int nxge_rindex;
5726 p_nxge_intr_t intrp;
5727
5728 intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;
5729
5730 NXGE_DEBUG_MSG((nxgep, RX_CTL,
5731 "==> nxge_fill_ring (RX) 0x%x index %d nrdcs %d",
5732 rtype, index, p_cfgp->max_rdcs));
5733
5734 /*
5735 * 'index' is the ring index within the group.
5736 * Find the ring index in the nxge instance.
5737 */
5738 nxge_rindex = nxge_get_rxring_index(nxgep, rg_index, index);
5739 channel = nxgep->pt_config.hw_config.start_rdc + nxge_rindex;
5741
5742 ASSERT((nxge_rindex >= 0) && (nxge_rindex < p_cfgp->max_rdcs));
5743 rhandlep = &nxgep->rx_ring_handles[nxge_rindex];
5744 rhandlep->nxgep = nxgep;
5745 rhandlep->index = nxge_rindex;
5746 rhandlep->ring_handle = rh;
5747 rhandlep->channel = channel;
5748
5749 /*
5750 * Entrypoint to enable interrupt (disable poll) and
5751 * disable interrupt (enable poll).
5752 */
5753 bzero(&nxge_mac_intr, sizeof (nxge_mac_intr));
5754 nxge_mac_intr.mi_handle = (mac_intr_handle_t)rhandlep;
5755 nxge_mac_intr.mi_enable = (mac_intr_enable_t)nxge_disable_poll;
5756 nxge_mac_intr.mi_disable = (mac_intr_disable_t)nxge_enable_poll;
5757
5758 htable_idx = nxge_ring_get_htable_idx(nxgep, rtype,
5759 channel);
5760 if (htable_idx >= 0)
5761 nxge_mac_intr.mi_ddi_handle = intrp->htable[htable_idx];
5762 else
5763 nxge_mac_intr.mi_ddi_handle = NULL;
5764
5765 infop->mri_driver = (mac_ring_driver_t)rhandlep;
5766 infop->mri_start = nxge_rx_ring_start;
5767 infop->mri_stop = nxge_rx_ring_stop;
5768 infop->mri_intr = nxge_mac_intr;
5769 infop->mri_poll = nxge_rx_poll;
5770 infop->mri_stat = nxge_rx_ring_stat;
5771 infop->mri_flags = MAC_RING_RX_ENQUEUE;
5772 break;
5773 }
5774
5775 default:
5776 break;
5777 }
5778
5779 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_fill_ring 0x%x", rtype));
5780 }
5781
5782 static void
nxge_group_add_ring(mac_group_driver_t gh,mac_ring_driver_t rh,mac_ring_type_t type)5783 nxge_group_add_ring(mac_group_driver_t gh, mac_ring_driver_t rh,
5784 mac_ring_type_t type)
5785 {
5786 nxge_ring_group_t *rgroup = (nxge_ring_group_t *)gh;
5787 nxge_ring_handle_t *rhandle = (nxge_ring_handle_t *)rh;
5788 nxge_t *nxge;
5789 nxge_grp_t *grp;
5790 nxge_rdc_grp_t *rdc_grp;
5791 uint16_t channel; /* device-wise ring id */
5792 int dev_gindex;
5793 int rv;
5794
5795 nxge = rgroup->nxgep;
5796
5797 switch (type) {
5798 case MAC_RING_TYPE_TX:
5799 /*
5800 * nxge_grp_dc_add takes a channel number which is a
5801 * "devise" ring ID.
5802 */
5803 channel = nxge->pt_config.hw_config.tdc.start + rhandle->index;
5804
5805 /*
5806 * Remove the ring from the default group
5807 */
5808 if (rgroup->gindex != 0) {
5809 (void) nxge_grp_dc_remove(nxge, VP_BOUND_TX, channel);
5810 }
5811
5812 /*
5813 * nxge->tx_set.group[] is an array of groups indexed by
5814 * a "port" group ID.
5815 */
5816 grp = nxge->tx_set.group[rgroup->gindex];
5817 rv = nxge_grp_dc_add(nxge, grp, VP_BOUND_TX, channel);
5818 if (rv != 0) {
5819 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
5820 "nxge_group_add_ring: nxge_grp_dc_add failed"));
5821 }
5822 break;
5823
5824 case MAC_RING_TYPE_RX:
5825 /*
5826 * nxge->rx_set.group[] is an array of groups indexed by
5827 * a "port" group ID.
5828 */
5829 grp = nxge->rx_set.group[rgroup->gindex];
5830
5831 dev_gindex = nxge->pt_config.hw_config.def_mac_rxdma_grpid +
5832 rgroup->gindex;
5833 rdc_grp = &nxge->pt_config.rdc_grps[dev_gindex];
5834
5835 /*
5836 * nxge_grp_dc_add takes a channel number which is a
5837 * "devise" ring ID.
5838 */
5839 channel = nxge->pt_config.hw_config.start_rdc + rhandle->index;
5840 rv = nxge_grp_dc_add(nxge, grp, VP_BOUND_RX, channel);
5841 if (rv != 0) {
5842 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
5843 "nxge_group_add_ring: nxge_grp_dc_add failed"));
5844 }
5845
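		/*
		 * Record the new channel in the group's RDC bitmap and
		 * bump its channel count before reloading the RDC table.
		 */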
		rdc_grp->map |= (1 << channel);
		rdc_grp->max_rdcs++;

		(void) nxge_init_fzc_rdc_tbl(nxge, rdc_grp, rgroup->rdctbl);
		break;
	}
}

static void
nxge_group_rem_ring(mac_group_driver_t gh, mac_ring_driver_t rh,
    mac_ring_type_t type)
{
	nxge_ring_group_t	*rgroup = (nxge_ring_group_t *)gh;
	nxge_ring_handle_t	*rhandle = (nxge_ring_handle_t *)rh;
	nxge_t			*nxge;
	uint16_t		channel;	/* device-wide ring id */
	nxge_rdc_grp_t		*rdc_grp;
	int			dev_gindex;

	nxge = rgroup->nxgep;

	switch (type) {
	case MAC_RING_TYPE_TX:
		dev_gindex = nxge->pt_config.hw_config.def_mac_txdma_grpid +
		    rgroup->gindex;
		channel = nxge->pt_config.hw_config.tdc.start + rhandle->index;
		nxge_grp_dc_remove(nxge, VP_BOUND_TX, channel);

		/*
		 * Add the ring back to the default group
		 */
		if (rgroup->gindex != 0) {
			nxge_grp_t *grp;
			grp = nxge->tx_set.group[0];
			(void) nxge_grp_dc_add(nxge, grp, VP_BOUND_TX, channel);
		}
		break;

	case MAC_RING_TYPE_RX:
		dev_gindex = nxge->pt_config.hw_config.def_mac_rxdma_grpid +
		    rgroup->gindex;
		rdc_grp = &nxge->pt_config.rdc_grps[dev_gindex];
		channel = rdc_grp->start_rdc + rhandle->index;
		nxge_grp_dc_remove(nxge, VP_BOUND_RX, channel);

		rdc_grp->map &= ~(1 << channel);
		rdc_grp->max_rdcs--;

		(void) nxge_init_fzc_rdc_tbl(nxge, rdc_grp, rgroup->rdctbl);
		break;
	}
}

/*ARGSUSED*/
static nxge_status_t
nxge_add_intrs(p_nxge_t nxgep)
{
	int		intr_types;
	int		type = 0;
	int		ddi_status = DDI_SUCCESS;
	nxge_status_t	status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs"));

	nxgep->nxge_intr_type.intr_registered = B_FALSE;
	nxgep->nxge_intr_type.intr_enabled = B_FALSE;
	nxgep->nxge_intr_type.msi_intx_cnt = 0;
	nxgep->nxge_intr_type.intr_added = 0;
	nxgep->nxge_intr_type.niu_msi_enable = B_FALSE;
	nxgep->nxge_intr_type.intr_type = 0;

	if (nxgep->niu_type == N2_NIU) {
		nxgep->nxge_intr_type.niu_msi_enable = B_TRUE;
	} else if (nxge_msi_enable) {
		nxgep->nxge_intr_type.niu_msi_enable = B_TRUE;
	}

	/* Get the supported interrupt types */
	if ((ddi_status = ddi_intr_get_supported_types(nxgep->dip, &intr_types))
	    != DDI_SUCCESS) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_add_intrs: "
		    "ddi_intr_get_supported_types failed: status 0x%08x",
		    ddi_status));
		return (NXGE_ERROR | NXGE_DDI_FAILED);
	}
	nxgep->nxge_intr_type.intr_types = intr_types;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
	    "ddi_intr_get_supported_types: 0x%08x", intr_types));

	/*
	 * Pick the interrupt type to use, controlled by nxge_msi_enable:
	 *	1 - prefer MSI, 2 - prefer MSI-X (the default),
	 *	anything else - FIXED (INTx emulation).
	 * Each case falls back to whatever the DDI reports as supported.
	 */
	switch (nxge_msi_enable) {
	default:
		type = DDI_INTR_TYPE_FIXED;
		NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: "
		    "use fixed (intx emulation) type %08x",
		    type));
		break;

	case 2:
		NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: "
		    "ddi_intr_get_supported_types: 0x%08x", intr_types));
		if (intr_types & DDI_INTR_TYPE_MSIX) {
			type = DDI_INTR_TYPE_MSIX;
			NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
			    "ddi_intr_get_supported_types: MSIX 0x%08x",
			    type));
		} else if (intr_types & DDI_INTR_TYPE_MSI) {
			type = DDI_INTR_TYPE_MSI;
			NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
			    "ddi_intr_get_supported_types: MSI 0x%08x",
			    type));
		} else if (intr_types & DDI_INTR_TYPE_FIXED) {
			type = DDI_INTR_TYPE_FIXED;
			NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: "
			    "ddi_intr_get_supported_types: FIXED 0x%08x",
			    type));
		}
		break;

	case 1:
		if (intr_types & DDI_INTR_TYPE_MSI) {
			type = DDI_INTR_TYPE_MSI;
			NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: "
			    "ddi_intr_get_supported_types: MSI 0x%08x",
			    type));
		} else if (intr_types & DDI_INTR_TYPE_MSIX) {
			type = DDI_INTR_TYPE_MSIX;
			NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
			    "ddi_intr_get_supported_types: MSIX 0x%08x",
			    type));
		} else if (intr_types & DDI_INTR_TYPE_FIXED) {
			type = DDI_INTR_TYPE_FIXED;
			NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
			    "ddi_intr_get_supported_types: FIXED 0x%08x",
			    type));
		}
	}

	nxgep->nxge_intr_type.intr_type = type;
	if ((type == DDI_INTR_TYPE_MSIX || type == DDI_INTR_TYPE_MSI ||
	    type == DDI_INTR_TYPE_FIXED) &&
	    nxgep->nxge_intr_type.niu_msi_enable) {
		if ((status = nxge_add_intrs_adv(nxgep)) != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    " nxge_add_intrs: "
			    " nxge_add_intrs_adv failed: status 0x%08x",
			    status));
			return (status);
		} else {
			NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
			    "interrupts registered : type %d", type));
			nxgep->nxge_intr_type.intr_registered = B_TRUE;

			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
			    "\nAdded advanced nxge add_intr_adv "
			    "intr type 0x%x\n", type));

			return (status);
		}
	}

	if (!nxgep->nxge_intr_type.intr_registered) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "==> nxge_add_intrs: "
		    "failed to register interrupts"));
		return (NXGE_ERROR | NXGE_DDI_FAILED);
	}

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_add_intrs"));
	return (status);
}

static nxge_status_t
nxge_add_intrs_adv(p_nxge_t nxgep)
{
	int		intr_type;
	p_nxge_intr_t	intrp;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs_adv"));

	intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;
	intr_type = intrp->intr_type;
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs_adv: type 0x%x",
	    intr_type));

	switch (intr_type) {
	case DDI_INTR_TYPE_MSI:		/* 0x2 */
	case DDI_INTR_TYPE_MSIX:	/* 0x4 */
		return (nxge_add_intrs_adv_type(nxgep, intr_type));

	case DDI_INTR_TYPE_FIXED:	/* 0x1 */
		return (nxge_add_intrs_adv_type_fix(nxgep, intr_type));

	default:
		return (NXGE_ERROR);
	}
}

/*ARGSUSED*/
static nxge_status_t
nxge_add_intrs_adv_type(p_nxge_t nxgep, uint32_t int_type)
{
	dev_info_t	*dip = nxgep->dip;
	p_nxge_ldg_t	ldgp;
	p_nxge_intr_t	intrp;
	uint_t		*inthandler;
	void		*arg1, *arg2;
	int		behavior;
	int		nintrs, navail, nrequest;
	int		nactual, nrequired;
	int		inum = 0;
	int		x, y;
	int		ddi_status = DDI_SUCCESS;
	nxge_status_t	status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs_adv_type"));
	intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;
	intrp->start_inum = 0;

	ddi_status = ddi_intr_get_nintrs(dip, int_type, &nintrs);
	if ((ddi_status != DDI_SUCCESS) || (nintrs == 0)) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "ddi_intr_get_nintrs() failed, status: 0x%x, "
		    "nintrs: %d", ddi_status, nintrs));
		return (NXGE_ERROR | NXGE_DDI_FAILED);
	}

	ddi_status = ddi_intr_get_navail(dip, int_type, &navail);
	if ((ddi_status != DDI_SUCCESS) || (navail == 0)) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "ddi_intr_get_navail() failed, status: 0x%x, "
		    "navail: %d", ddi_status, navail));
		return (NXGE_ERROR | NXGE_DDI_FAILED);
	}

	NXGE_DEBUG_MSG((nxgep, INT_CTL,
	    "ddi_intr_get_navail() returned: nintrs %d, navail %d",
	    nintrs, navail));

	/* PSARC/2007/453 MSI-X interrupt limit override */
	if (int_type == DDI_INTR_TYPE_MSIX) {
		nrequest = nxge_create_msi_property(nxgep);
		if (nrequest < navail) {
			navail = nrequest;
			NXGE_DEBUG_MSG((nxgep, INT_CTL,
			    "nxge_add_intrs_adv_type: nintrs %d "
			    "navail %d (nrequest %d)",
			    nintrs, navail, nrequest));
		}
	}

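	/*
	 * MSI allocations must be a power of two, so round navail
	 * down to the highest power of two it contains; the checks
	 * below cover counts up to 31.  For example, navail 12
	 * becomes 8, and navail 25 becomes 16.
	 */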
	if (int_type == DDI_INTR_TYPE_MSI && !ISP2(navail)) {
		/* MSI must be power of 2 */
		if ((navail & 16) == 16) {
			navail = 16;
		} else if ((navail & 8) == 8) {
			navail = 8;
		} else if ((navail & 4) == 4) {
			navail = 4;
		} else if ((navail & 2) == 2) {
			navail = 2;
		} else {
			navail = 1;
		}
		NXGE_DEBUG_MSG((nxgep, INT_CTL,
		    "ddi_intr_get_navail(): (msi power of 2) nintrs %d, "
		    "navail %d", nintrs, navail));
	}

	behavior = ((int_type == DDI_INTR_TYPE_FIXED) ? DDI_INTR_ALLOC_STRICT :
	    DDI_INTR_ALLOC_NORMAL);
	intrp->intr_size = navail * sizeof (ddi_intr_handle_t);
	intrp->htable = kmem_alloc(intrp->intr_size, KM_SLEEP);
	ddi_status = ddi_intr_alloc(dip, intrp->htable, int_type, inum,
	    navail, &nactual, behavior);
	if (ddi_status != DDI_SUCCESS || nactual == 0) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    " ddi_intr_alloc() failed: %d",
		    ddi_status));
		kmem_free(intrp->htable, intrp->intr_size);
		return (NXGE_ERROR | NXGE_DDI_FAILED);
	}

	if ((ddi_status = ddi_intr_get_pri(intrp->htable[0],
	    (uint_t *)&intrp->pri)) != DDI_SUCCESS) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    " ddi_intr_get_pri() failed: %d",
		    ddi_status));
		/* Free already allocated interrupts */
		for (y = 0; y < nactual; y++) {
			(void) ddi_intr_free(intrp->htable[y]);
		}

		kmem_free(intrp->htable, intrp->intr_size);
		return (NXGE_ERROR | NXGE_DDI_FAILED);
	}

	nrequired = 0;
	switch (nxgep->niu_type) {
	default:
		status = nxge_ldgv_init(nxgep, &nactual, &nrequired);
		break;

	case N2_NIU:
		status = nxge_ldgv_init_n2(nxgep, &nactual, &nrequired);
		break;
	}

	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_add_intrs_adv_type: nxge_ldgv_init "
		    "failed: 0x%x", status));
		/* Free already allocated interrupts */
		for (y = 0; y < nactual; y++) {
			(void) ddi_intr_free(intrp->htable[y]);
		}

		kmem_free(intrp->htable, intrp->intr_size);
		return (status);
	}

	ldgp = nxgep->ldgvp->ldgp;
	for (x = 0; x < nrequired; x++, ldgp++) {
		ldgp->vector = (uint8_t)x;
		ldgp->intdata = SID_DATA(ldgp->func, x);
		arg1 = ldgp->ldvp;
		arg2 = nxgep;
		if (ldgp->nldvs == 1) {
			inthandler = (uint_t *)ldgp->ldvp->ldv_intr_handler;
			NXGE_DEBUG_MSG((nxgep, INT_CTL,
			    "nxge_add_intrs_adv_type: "
			    "arg1 0x%x arg2 0x%x: "
			    "1-1 int handler (entry %d intdata 0x%x)\n",
			    arg1, arg2,
			    x, ldgp->intdata));
		} else if (ldgp->nldvs > 1) {
			inthandler = (uint_t *)ldgp->sys_intr_handler;
			NXGE_DEBUG_MSG((nxgep, INT_CTL,
			    "nxge_add_intrs_adv_type: "
			    "arg1 0x%x arg2 0x%x: "
			    "nldevs %d int handler "
			    "(entry %d intdata 0x%x)\n",
			    arg1, arg2,
			    ldgp->nldvs, x, ldgp->intdata));
		}

		NXGE_DEBUG_MSG((nxgep, INT_CTL,
		    "==> nxge_add_intrs_adv_type: ddi_add_intr(inum) #%d "
		    "htable 0x%llx", x, intrp->htable[x]));

		if ((ddi_status = ddi_intr_add_handler(intrp->htable[x],
		    (ddi_intr_handler_t *)inthandler, arg1, arg2))
		    != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "==> nxge_add_intrs_adv_type: failed #%d "
			    "status 0x%x", x, ddi_status));
			for (y = 0; y < intrp->intr_added; y++) {
				(void) ddi_intr_remove_handler(
				    intrp->htable[y]);
			}
			/* Free already allocated intr */
			for (y = 0; y < nactual; y++) {
				(void) ddi_intr_free(intrp->htable[y]);
			}
			kmem_free(intrp->htable, intrp->intr_size);

			(void) nxge_ldgv_uninit(nxgep);

			return (NXGE_ERROR | NXGE_DDI_FAILED);
		}

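		/*
		 * Remember which htable slot serves this LDG;
		 * nxge_ring_get_htable_idx() uses it to hand the MAC
		 * layer the ddi handle for a ring's interrupt.
		 */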
		ldgp->htable_idx = x;
		intrp->intr_added++;
	}

	intrp->msi_intx_cnt = nactual;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "Requested: %d, Allowed: %d msi_intx_cnt %d intr_added %d",
	    navail, nactual,
	    intrp->msi_intx_cnt,
	    intrp->intr_added));

	(void) ddi_intr_get_cap(intrp->htable[0], &intrp->intr_cap);

	(void) nxge_intr_ldgv_init(nxgep);

	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_add_intrs_adv_type"));

	return (status);
}

/*ARGSUSED*/
static nxge_status_t
nxge_add_intrs_adv_type_fix(p_nxge_t nxgep, uint32_t int_type)
{
	dev_info_t	*dip = nxgep->dip;
	p_nxge_ldg_t	ldgp;
	p_nxge_intr_t	intrp;
	uint_t		*inthandler;
	void		*arg1, *arg2;
	int		behavior;
	int		nintrs, navail;
	int		nactual, nrequired;
	int		inum = 0;
	int		x, y;
	int		ddi_status = DDI_SUCCESS;
	nxge_status_t	status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs_adv_type_fix"));
	intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;
	intrp->start_inum = 0;

	ddi_status = ddi_intr_get_nintrs(dip, int_type, &nintrs);
	if ((ddi_status != DDI_SUCCESS) || (nintrs == 0)) {
		NXGE_DEBUG_MSG((nxgep, INT_CTL,
		    "ddi_intr_get_nintrs() failed, status: 0x%x, "
		    "nintrs: %d", ddi_status, nintrs));
		return (NXGE_ERROR | NXGE_DDI_FAILED);
	}

	ddi_status = ddi_intr_get_navail(dip, int_type, &navail);
	if ((ddi_status != DDI_SUCCESS) || (navail == 0)) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "ddi_intr_get_navail() failed, status: 0x%x, "
		    "navail: %d", ddi_status, navail));
		return (NXGE_ERROR | NXGE_DDI_FAILED);
	}

	NXGE_DEBUG_MSG((nxgep, INT_CTL,
	    "ddi_intr_get_navail() returned: nintrs %d, navail %d",
	    nintrs, navail));

	behavior = ((int_type == DDI_INTR_TYPE_FIXED) ? DDI_INTR_ALLOC_STRICT :
	    DDI_INTR_ALLOC_NORMAL);
	intrp->intr_size = navail * sizeof (ddi_intr_handle_t);
	intrp->htable = kmem_alloc(intrp->intr_size, KM_SLEEP);
	ddi_status = ddi_intr_alloc(dip, intrp->htable, int_type, inum,
	    navail, &nactual, behavior);
	if (ddi_status != DDI_SUCCESS || nactual == 0) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    " ddi_intr_alloc() failed: %d",
		    ddi_status));
		kmem_free(intrp->htable, intrp->intr_size);
		return (NXGE_ERROR | NXGE_DDI_FAILED);
	}

	if ((ddi_status = ddi_intr_get_pri(intrp->htable[0],
	    (uint_t *)&intrp->pri)) != DDI_SUCCESS) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    " ddi_intr_get_pri() failed: %d",
		    ddi_status));
		/* Free already allocated interrupts */
		for (y = 0; y < nactual; y++) {
			(void) ddi_intr_free(intrp->htable[y]);
		}

		kmem_free(intrp->htable, intrp->intr_size);
		return (NXGE_ERROR | NXGE_DDI_FAILED);
	}

	nrequired = 0;
	switch (nxgep->niu_type) {
	default:
		status = nxge_ldgv_init(nxgep, &nactual, &nrequired);
		break;

	case N2_NIU:
		status = nxge_ldgv_init_n2(nxgep, &nactual, &nrequired);
		break;
	}

	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_add_intrs_adv_type_fix: nxge_ldgv_init "
		    "failed: 0x%x", status));
		/* Free already allocated interrupts */
		for (y = 0; y < nactual; y++) {
			(void) ddi_intr_free(intrp->htable[y]);
		}

		kmem_free(intrp->htable, intrp->intr_size);
		return (status);
	}

	ldgp = nxgep->ldgvp->ldgp;
	for (x = 0; x < nrequired; x++, ldgp++) {
		ldgp->vector = (uint8_t)x;
		if (nxgep->niu_type != N2_NIU) {
			ldgp->intdata = SID_DATA(ldgp->func, x);
		}

		arg1 = ldgp->ldvp;
		arg2 = nxgep;
		if (ldgp->nldvs == 1) {
			inthandler = (uint_t *)ldgp->ldvp->ldv_intr_handler;
			NXGE_DEBUG_MSG((nxgep, INT_CTL,
			    "nxge_add_intrs_adv_type_fix: "
			    "1-1 int handler(%d) ldg %d ldv %d "
			    "arg1 $%p arg2 $%p\n",
			    x, ldgp->ldg, ldgp->ldvp->ldv,
			    arg1, arg2));
		} else if (ldgp->nldvs > 1) {
			inthandler = (uint_t *)ldgp->sys_intr_handler;
			NXGE_DEBUG_MSG((nxgep, INT_CTL,
			    "nxge_add_intrs_adv_type_fix: "
			    "shared ldv %d int handler(%d) ldv %d ldg %d "
			    "arg1 0x%016llx arg2 0x%016llx\n",
			    x, ldgp->nldvs, ldgp->ldg, ldgp->ldvp->ldv,
			    arg1, arg2));
		}

		if ((ddi_status = ddi_intr_add_handler(intrp->htable[x],
		    (ddi_intr_handler_t *)inthandler, arg1, arg2))
		    != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "==> nxge_add_intrs_adv_type_fix: failed #%d "
			    "status 0x%x", x, ddi_status));
			for (y = 0; y < intrp->intr_added; y++) {
				(void) ddi_intr_remove_handler(
				    intrp->htable[y]);
			}
			/* Free already allocated intr */
			for (y = 0; y < nactual; y++) {
				(void) ddi_intr_free(intrp->htable[y]);
			}
			kmem_free(intrp->htable, intrp->intr_size);

			(void) nxge_ldgv_uninit(nxgep);

			return (NXGE_ERROR | NXGE_DDI_FAILED);
		}

		ldgp->htable_idx = x;
		intrp->intr_added++;
	}

	intrp->msi_intx_cnt = nactual;

	(void) ddi_intr_get_cap(intrp->htable[0], &intrp->intr_cap);

	status = nxge_intr_ldgv_init(nxgep);
	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_add_intrs_adv_type_fix"));

	return (status);
}

static void
nxge_remove_intrs(p_nxge_t nxgep)
{
	int		i, inum;
	p_nxge_intr_t	intrp;

	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_remove_intrs"));
	intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;
	if (!intrp->intr_registered) {
		NXGE_DEBUG_MSG((nxgep, INT_CTL,
		    "<== nxge_remove_intrs: interrupts not registered"));
		return;
	}

	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_remove_intrs:advanced"));

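	/*
	 * Tear down in the reverse order of nxge_add_intrs_adv_type():
	 * disable the vectors, detach the handlers, free the
	 * interrupts, and finally release the handle table.
	 */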
	if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) {
		(void) ddi_intr_block_disable(intrp->htable,
		    intrp->intr_added);
	} else {
		for (i = 0; i < intrp->intr_added; i++) {
			(void) ddi_intr_disable(intrp->htable[i]);
		}
	}

	for (inum = 0; inum < intrp->intr_added; inum++) {
		if (intrp->htable[inum]) {
			(void) ddi_intr_remove_handler(intrp->htable[inum]);
		}
	}

	for (inum = 0; inum < intrp->msi_intx_cnt; inum++) {
		if (intrp->htable[inum]) {
			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
			    "nxge_remove_intrs: ddi_intr_free inum %d "
			    "msi_intx_cnt %d intr_added %d",
			    inum,
			    intrp->msi_intx_cnt,
			    intrp->intr_added));

			(void) ddi_intr_free(intrp->htable[inum]);
		}
	}

	kmem_free(intrp->htable, intrp->intr_size);
	intrp->intr_registered = B_FALSE;
	intrp->intr_enabled = B_FALSE;
	intrp->msi_intx_cnt = 0;
	intrp->intr_added = 0;

	(void) nxge_ldgv_uninit(nxgep);

	(void) ddi_prop_remove(DDI_DEV_T_NONE, nxgep->dip,
	    "#msix-request");

	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_remove_intrs"));
}

/*ARGSUSED*/
static void
nxge_intrs_enable(p_nxge_t nxgep)
{
	p_nxge_intr_t	intrp;
	int		i;
	int		status;

	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_enable"));

	intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;

	if (!intrp->intr_registered) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_intrs_enable: "
		    "interrupts are not registered"));
		return;
	}

	if (intrp->intr_enabled) {
		NXGE_DEBUG_MSG((nxgep, INT_CTL,
		    "<== nxge_intrs_enable: already enabled"));
		return;
	}

	if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) {
		status = ddi_intr_block_enable(intrp->htable,
		    intrp->intr_added);
		NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_enable "
		    "block enable - status 0x%x total inums #%d\n",
		    status, intrp->intr_added));
	} else {
		for (i = 0; i < intrp->intr_added; i++) {
			status = ddi_intr_enable(intrp->htable[i]);
			NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_enable "
			    "ddi_intr_enable:enable - status 0x%x "
			    "total inums %d enable inum #%d\n",
			    status, intrp->intr_added, i));
			if (status == DDI_SUCCESS) {
				intrp->intr_enabled = B_TRUE;
			}
		}
	}

	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intrs_enable"));
}

/*ARGSUSED*/
static void
nxge_intrs_disable(p_nxge_t nxgep)
{
	p_nxge_intr_t	intrp;
	int		i;

	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_disable"));

	intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;

	if (!intrp->intr_registered) {
		NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intrs_disable: "
		    "interrupts are not registered"));
		return;
	}

	if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) {
		(void) ddi_intr_block_disable(intrp->htable,
		    intrp->intr_added);
	} else {
		for (i = 0; i < intrp->intr_added; i++) {
			(void) ddi_intr_disable(intrp->htable[i]);
		}
	}

	intrp->intr_enabled = B_FALSE;
	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intrs_disable"));
}

nxge_status_t
nxge_mac_register(p_nxge_t nxgep)
{
	mac_register_t	*macp;
	int		status;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_mac_register"));

	if ((macp = mac_alloc(MAC_VERSION)) == NULL)
		return (NXGE_ERROR);

	macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
	macp->m_driver = nxgep;
	macp->m_dip = nxgep->dip;
	if (!isLDOMguest(nxgep)) {
		macp->m_src_addr = nxgep->ouraddr.ether_addr_octet;
	} else {
		macp->m_src_addr = KMEM_ZALLOC(MAXMACADDRLEN, KM_SLEEP);
		macp->m_dst_addr = KMEM_ZALLOC(MAXMACADDRLEN, KM_SLEEP);
		(void) memset(macp->m_src_addr, 0xff, MAXMACADDRLEN);
	}
	macp->m_callbacks = &nxge_m_callbacks;
	macp->m_min_sdu = 0;
	nxgep->mac.default_mtu = nxgep->mac.maxframesize -
	    NXGE_EHEADER_VLAN_CRC;
	macp->m_max_sdu = nxgep->mac.default_mtu;
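	/*
	 * Illustrative numbers (not taken from this file): with a
	 * maxframesize of 1522 bytes and NXGE_EHEADER_VLAN_CRC
	 * covering the 22 bytes of Ethernet header, VLAN tag and
	 * CRC, the SDU advertised to the MAC layer works out to 1500.
	 */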
	macp->m_margin = VLAN_TAGSZ;
	macp->m_priv_props = nxge_priv_props;
	if (isLDOMguest(nxgep))
		macp->m_v12n = MAC_VIRT_LEVEL1;
	else
		macp->m_v12n = MAC_VIRT_HIO | MAC_VIRT_LEVEL1;

	NXGE_DEBUG_MSG((nxgep, MAC_CTL,
	    "==> nxge_mac_register: instance %d "
	    "max_sdu %d margin %d maxframe %d (header %d)",
	    nxgep->instance,
	    macp->m_max_sdu, macp->m_margin,
	    nxgep->mac.maxframesize,
	    NXGE_EHEADER_VLAN_CRC));

	status = mac_register(macp, &nxgep->mach);
	if (isLDOMguest(nxgep)) {
		KMEM_FREE(macp->m_src_addr, MAXMACADDRLEN);
		KMEM_FREE(macp->m_dst_addr, MAXMACADDRLEN);
	}
	mac_free(macp);

	if (status != 0) {
		cmn_err(CE_WARN,
		    "!nxge_mac_register failed (status %d instance %d)",
		    status, nxgep->instance);
		return (NXGE_ERROR);
	}

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_mac_register success "
	    "(instance %d)", nxgep->instance));

	return (NXGE_OK);
}

void
nxge_err_inject(p_nxge_t nxgep, queue_t *wq, mblk_t *mp)
{
	ssize_t		size;
	mblk_t		*nmp;
	uint8_t		blk_id;
	uint8_t		chan;
	uint32_t	err_id;
	err_inject_t	*eip;

	NXGE_DEBUG_MSG((nxgep, STR_CTL, "==> nxge_err_inject"));

	size = 1024;
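	/*
	 * The M_IOCTL payload carries an err_inject_t describing the
	 * hardware block, the error to inject and (for the DMA blocks)
	 * the channel; a fixed 1024-byte payload is acked back below.
	 */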
	nmp = mp->b_cont;
	eip = (err_inject_t *)nmp->b_rptr;
	blk_id = eip->blk_id;
	err_id = eip->err_id;
	chan = eip->chan;
	cmn_err(CE_NOTE, "!blk_id = 0x%x\n", blk_id);
	cmn_err(CE_NOTE, "!err_id = 0x%x\n", err_id);
	cmn_err(CE_NOTE, "!chan = 0x%x\n", chan);
	switch (blk_id) {
	case MAC_BLK_ID:
		break;
	case TXMAC_BLK_ID:
		break;
	case RXMAC_BLK_ID:
		break;
	case MIF_BLK_ID:
		break;
	case IPP_BLK_ID:
		nxge_ipp_inject_err(nxgep, err_id);
		break;
	case TXC_BLK_ID:
		nxge_txc_inject_err(nxgep, err_id);
		break;
	case TXDMA_BLK_ID:
		nxge_txdma_inject_err(nxgep, err_id, chan);
		break;
	case RXDMA_BLK_ID:
		nxge_rxdma_inject_err(nxgep, err_id, chan);
		break;
	case ZCP_BLK_ID:
		nxge_zcp_inject_err(nxgep, err_id);
		break;
	case ESPC_BLK_ID:
		break;
	case FFLP_BLK_ID:
		break;
	case PHY_BLK_ID:
		break;
	case ETHER_SERDES_BLK_ID:
		break;
	case PCIE_SERDES_BLK_ID:
		break;
	case VIR_BLK_ID:
		break;
	}

	nmp->b_wptr = nmp->b_rptr + size;
	NXGE_DEBUG_MSG((nxgep, STR_CTL, "<== nxge_err_inject"));

	miocack(wq, mp, (int)size, 0);
}

static int
nxge_init_common_dev(p_nxge_t nxgep)
{
	p_nxge_hw_list_t	hw_p;
	dev_info_t		*p_dip;

	ASSERT(nxgep != NULL);

	NXGE_DEBUG_MSG((nxgep, MOD_CTL, "==> nxge_init_common_device"));

	p_dip = nxgep->p_dip;
	MUTEX_ENTER(&nxge_common_lock);
	NXGE_DEBUG_MSG((nxgep, MOD_CTL,
	    "==> nxge_init_common_dev:func # %d",
	    nxgep->function_num));
	/*
	 * Loop through the existing per-Neptune hardware list; all
	 * functions (ports) of one chip share a single entry, keyed
	 * by the parent dip.
	 */
	for (hw_p = nxge_hw_list; hw_p; hw_p = hw_p->next) {
		NXGE_DEBUG_MSG((nxgep, MOD_CTL,
		    "==> nxge_init_common_device:func # %d "
		    "hw_p $%p parent dip $%p",
		    nxgep->function_num,
		    hw_p,
		    p_dip));
		if (hw_p->parent_devp == p_dip) {
			nxgep->nxge_hw_p = hw_p;
			hw_p->ndevs++;
			hw_p->nxge_p[nxgep->function_num] = nxgep;
			NXGE_DEBUG_MSG((nxgep, MOD_CTL,
			    "==> nxge_init_common_device:func # %d "
			    "hw_p $%p parent dip $%p "
			    "ndevs %d (found)",
			    nxgep->function_num,
			    hw_p,
			    p_dip,
			    hw_p->ndevs));
			break;
		}
	}

	if (hw_p == NULL) {
		char **prop_val;
		uint_t prop_len;
		int i;

		NXGE_DEBUG_MSG((nxgep, MOD_CTL,
		    "==> nxge_init_common_device:func # %d "
		    "parent dip $%p (new)",
		    nxgep->function_num,
		    p_dip));
		hw_p = kmem_zalloc(sizeof (nxge_hw_list_t), KM_SLEEP);
		hw_p->parent_devp = p_dip;
		hw_p->magic = NXGE_NEPTUNE_MAGIC;
		nxgep->nxge_hw_p = hw_p;
		hw_p->ndevs++;
		hw_p->nxge_p[nxgep->function_num] = nxgep;
		hw_p->next = nxge_hw_list;
		if (nxgep->niu_type == N2_NIU) {
			hw_p->niu_type = N2_NIU;
			hw_p->platform_type = P_NEPTUNE_NIU;
			hw_p->tcam_size = TCAM_NIU_TCAM_MAX_ENTRY;
		} else {
			hw_p->niu_type = NIU_TYPE_NONE;
			hw_p->platform_type = P_NEPTUNE_NONE;
			hw_p->tcam_size = TCAM_NXGE_TCAM_MAX_ENTRY;
		}

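		/*
		 * The TCAM shadow is likewise shared by every function
		 * on the chip; its size was chosen per platform above.
		 */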
		hw_p->tcam = KMEM_ZALLOC(sizeof (tcam_flow_spec_t) *
		    hw_p->tcam_size, KM_SLEEP);

		MUTEX_INIT(&hw_p->nxge_cfg_lock, NULL, MUTEX_DRIVER, NULL);
		MUTEX_INIT(&hw_p->nxge_tcam_lock, NULL, MUTEX_DRIVER, NULL);
		MUTEX_INIT(&hw_p->nxge_vlan_lock, NULL, MUTEX_DRIVER, NULL);
		MUTEX_INIT(&hw_p->nxge_mdio_lock, NULL, MUTEX_DRIVER, NULL);

		nxge_hw_list = hw_p;

		if (ddi_prop_lookup_string_array(DDI_DEV_T_ANY, nxgep->dip, 0,
		    "compatible", &prop_val, &prop_len) == DDI_PROP_SUCCESS) {
			for (i = 0; i < prop_len; i++) {
				if ((strcmp((caddr_t)prop_val[i],
				    NXGE_ROCK_COMPATIBLE) == 0)) {
					hw_p->platform_type = P_NEPTUNE_ROCK;
					NXGE_DEBUG_MSG((nxgep, MOD_CTL,
					    "ROCK hw_p->platform_type %d",
					    hw_p->platform_type));
					break;
				}
				NXGE_DEBUG_MSG((nxgep, MOD_CTL,
				    "nxge_init_common_dev: read compatible"
				    " property[%d] val[%s]",
				    i, (caddr_t)prop_val[i]));
			}

			ddi_prop_free(prop_val);
		}

		(void) nxge_scan_ports_phy(nxgep, nxge_hw_list);
	}

	MUTEX_EXIT(&nxge_common_lock);

	nxgep->platform_type = hw_p->platform_type;
	NXGE_DEBUG_MSG((nxgep, MOD_CTL, "nxgep->platform_type %d",
	    nxgep->platform_type));
	if (nxgep->niu_type != N2_NIU) {
		nxgep->niu_type = hw_p->niu_type;
	}

	NXGE_DEBUG_MSG((nxgep, MOD_CTL,
	    "==> nxge_init_common_device (nxge_hw_list) $%p",
	    nxge_hw_list));
	NXGE_DEBUG_MSG((nxgep, MOD_CTL, "<== nxge_init_common_device"));

	return (NXGE_OK);
}

static void
nxge_uninit_common_dev(p_nxge_t nxgep)
{
	p_nxge_hw_list_t	hw_p, h_hw_p;
	p_nxge_dma_pt_cfg_t	p_dma_cfgp;
	p_nxge_hw_pt_cfg_t	p_cfgp;
	dev_info_t		*p_dip;

	ASSERT(nxgep != NULL);

	NXGE_DEBUG_MSG((nxgep, MOD_CTL, "==> nxge_uninit_common_device"));
	if (nxgep->nxge_hw_p == NULL) {
		NXGE_DEBUG_MSG((nxgep, MOD_CTL,
		    "<== nxge_uninit_common_device (no common)"));
		return;
	}

	MUTEX_ENTER(&nxge_common_lock);
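	/*
	 * Walk the shared hardware list looking for our entry;
	 * h_hw_p trails one node behind so the entry can be unlinked
	 * when the last function releases it.
	 */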
	h_hw_p = nxge_hw_list;
	for (hw_p = nxge_hw_list; hw_p; hw_p = hw_p->next) {
		p_dip = hw_p->parent_devp;
		if (nxgep->nxge_hw_p == hw_p &&
		    p_dip == nxgep->p_dip &&
		    nxgep->nxge_hw_p->magic == NXGE_NEPTUNE_MAGIC &&
		    hw_p->magic == NXGE_NEPTUNE_MAGIC) {

			NXGE_DEBUG_MSG((nxgep, MOD_CTL,
			    "==> nxge_uninit_common_device:func # %d "
			    "hw_p $%p parent dip $%p "
			    "ndevs %d (found)",
			    nxgep->function_num,
			    hw_p,
			    p_dip,
			    hw_p->ndevs));

			/*
			 * Release the RDC table, a shared resource
			 * of the nxge hardware. The RDC table was
			 * assigned to this instance of nxge in
			 * nxge_use_cfg_dma_config().
			 */
			if (!isLDOMguest(nxgep)) {
				p_dma_cfgp =
				    (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;
				p_cfgp =
				    (p_nxge_hw_pt_cfg_t)&p_dma_cfgp->hw_config;
				(void) nxge_fzc_rdc_tbl_unbind(nxgep,
				    p_cfgp->def_mac_rxdma_grpid);

				/* Cleanup any outstanding groups. */
				nxge_grp_cleanup(nxgep);
			}

			if (hw_p->ndevs) {
				hw_p->ndevs--;
			}
			hw_p->nxge_p[nxgep->function_num] = NULL;
			if (!hw_p->ndevs) {
				KMEM_FREE(hw_p->tcam,
				    sizeof (tcam_flow_spec_t) *
				    hw_p->tcam_size);
				MUTEX_DESTROY(&hw_p->nxge_vlan_lock);
				MUTEX_DESTROY(&hw_p->nxge_tcam_lock);
				MUTEX_DESTROY(&hw_p->nxge_cfg_lock);
				MUTEX_DESTROY(&hw_p->nxge_mdio_lock);
				NXGE_DEBUG_MSG((nxgep, MOD_CTL,
				    "==> nxge_uninit_common_device: "
				    "func # %d "
				    "hw_p $%p parent dip $%p "
				    "ndevs %d (last)",
				    nxgep->function_num,
				    hw_p,
				    p_dip,
				    hw_p->ndevs));

				nxge_hio_uninit(nxgep);

				if (hw_p == nxge_hw_list) {
					NXGE_DEBUG_MSG((nxgep, MOD_CTL,
					    "==> nxge_uninit_common_device:"
					    "remove head func # %d "
					    "hw_p $%p parent dip $%p "
					    "ndevs %d (head)",
					    nxgep->function_num,
					    hw_p,
					    p_dip,
					    hw_p->ndevs));
					nxge_hw_list = hw_p->next;
				} else {
					NXGE_DEBUG_MSG((nxgep, MOD_CTL,
					    "==> nxge_uninit_common_device:"
					    "remove middle func # %d "
					    "hw_p $%p parent dip $%p "
					    "ndevs %d (middle)",
					    nxgep->function_num,
					    hw_p,
					    p_dip,
					    hw_p->ndevs));
					h_hw_p->next = hw_p->next;
				}

				nxgep->nxge_hw_p = NULL;
				KMEM_FREE(hw_p, sizeof (nxge_hw_list_t));
			}
			break;
		} else {
			h_hw_p = hw_p;
		}
	}

	MUTEX_EXIT(&nxge_common_lock);
	NXGE_DEBUG_MSG((nxgep, MOD_CTL,
	    "==> nxge_uninit_common_device (nxge_hw_list) $%p",
	    nxge_hw_list));

	NXGE_DEBUG_MSG((nxgep, MOD_CTL, "<== nxge_uninit_common_device"));
}

/*
 * Determines the number of ports from the niu_type or the platform type.
 * Returns the number of ports, or zero on failure.
 */
int
nxge_get_nports(p_nxge_t nxgep)
{
	int	nports = 0;

	switch (nxgep->niu_type) {
	case N2_NIU:
	case NEPTUNE_2_10GF:
		nports = 2;
		break;
	case NEPTUNE_4_1GC:
	case NEPTUNE_2_10GF_2_1GC:
	case NEPTUNE_1_10GF_3_1GC:
	case NEPTUNE_1_1GC_1_10GF_2_1GC:
	case NEPTUNE_2_10GF_2_1GRF:
		nports = 4;
		break;
	default:
		switch (nxgep->platform_type) {
		case P_NEPTUNE_NIU:
		case P_NEPTUNE_ATLAS_2PORT:
			nports = 2;
			break;
		case P_NEPTUNE_ATLAS_4PORT:
		case P_NEPTUNE_MARAMBA_P0:
		case P_NEPTUNE_MARAMBA_P1:
		case P_NEPTUNE_ROCK:
		case P_NEPTUNE_ALONSO:
			nports = 4;
			break;
		default:
			break;
		}
		break;
	}

	return (nports);
}

/*
 * The following two functions are to support
 * PSARC/2007/453 MSI-X interrupt limit override.
 */
static int
nxge_create_msi_property(p_nxge_t nxgep)
{
	int	nmsi;
	extern int ncpus;

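	/*
	 * The "#msix-request" property created below advertises how
	 * many MSI-X interrupts this device wants; per PSARC/2007/453
	 * it is presumably consumed by the parent PCIe nexus when it
	 * apportions the fabric's MSI-X resources.
	 */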
	NXGE_DEBUG_MSG((nxgep, MOD_CTL, "==>nxge_create_msi_property"));

	switch (nxgep->mac.portmode) {
	case PORT_10G_COPPER:
	case PORT_10G_FIBER:
	case PORT_10G_TN1010:
		(void) ddi_prop_create(DDI_DEV_T_NONE, nxgep->dip,
		    DDI_PROP_CANSLEEP, "#msix-request", NULL, 0);
		/*
		 * At most NXGE_MSIX_REQUEST_10G (8) MSI-X interrupts
		 * are requested. If fewer than 8 CPUs are present,
		 * the request is scaled down to the CPU count (the
		 * default behavior).
		 */
		NXGE_DEBUG_MSG((nxgep, MOD_CTL,
		    "==>nxge_create_msi_property (10G): nxge_msix_10g_intrs %d",
		    nxge_msix_10g_intrs));
		if ((nxge_msix_10g_intrs == 0) ||
		    (nxge_msix_10g_intrs > NXGE_MSIX_MAX_ALLOWED)) {
			nmsi = NXGE_MSIX_REQUEST_10G;
			NXGE_DEBUG_MSG((nxgep, MOD_CTL,
			    "==>nxge_create_msi_property (10G): reset to 8"));
		} else {
			nmsi = nxge_msix_10g_intrs;
		}

		/*
		 * If the number of interrupts requested is the
		 * default of 8, still honor the number of CPUs.
		 */
		if ((nmsi == NXGE_MSIX_REQUEST_10G) &&
		    (ncpus < nmsi)) {
			NXGE_DEBUG_MSG((nxgep, MOD_CTL,
			    "==>nxge_create_msi_property (10G): "
			    "clamp to ncpus"));
			nmsi = ncpus;
		}
		NXGE_DEBUG_MSG((nxgep, MOD_CTL,
		    "==>nxge_create_msi_property(10G): exists 0x%x (nmsi %d)",
		    ddi_prop_exists(DDI_DEV_T_NONE, nxgep->dip,
		    DDI_PROP_CANSLEEP, "#msix-request"), nmsi));
		break;

	default:
		(void) ddi_prop_create(DDI_DEV_T_NONE, nxgep->dip,
		    DDI_PROP_CANSLEEP, "#msix-request", NULL, 0);
		NXGE_DEBUG_MSG((nxgep, MOD_CTL,
		    "==>nxge_create_msi_property (1G): nxge_msix_1g_intrs %d",
		    nxge_msix_1g_intrs));
		if ((nxge_msix_1g_intrs == 0) ||
		    (nxge_msix_1g_intrs > NXGE_MSIX_MAX_ALLOWED)) {
			nmsi = NXGE_MSIX_REQUEST_1G;
			NXGE_DEBUG_MSG((nxgep, MOD_CTL,
			    "==>nxge_create_msi_property (1G): reset to 2"));
		} else {
			nmsi = nxge_msix_1g_intrs;
		}
		NXGE_DEBUG_MSG((nxgep, MOD_CTL,
		    "==>nxge_create_msi_property(1G): exists 0x%x (nmsi %d)",
		    ddi_prop_exists(DDI_DEV_T_NONE, nxgep->dip,
		    DDI_PROP_CANSLEEP, "#msix-request"), nmsi));
		break;
	}

	NXGE_DEBUG_MSG((nxgep, MOD_CTL, "<==nxge_create_msi_property"));
	return (nmsi);
}

/*
 * The following is a software workaround for the Neptune hardware's
 * interrupt bug: the hardware may generate spurious interrupts after
 * an interrupt handler is removed.
 */
#define	NXGE_PCI_PORT_LOGIC_OFFSET	0x98
#define	NXGE_PIM_RESET			(1ULL << 29)
#define	NXGE_GLU_RESET			(1ULL << 30)
#define	NXGE_NIU_RESET			(1ULL << 31)
#define	NXGE_PCI_RESET_ALL		(NXGE_PIM_RESET |	\
					NXGE_GLU_RESET |	\
					NXGE_NIU_RESET)

#define	NXGE_WAIT_QUITE_TIME		200000
#define	NXGE_WAIT_QUITE_RETRY		40
#define	NXGE_PCI_RESET_WAIT		1000000 /* one second */
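
/*
 * Assuming NXGE_DELAY takes microseconds (as the "one second" comment
 * above implies), the quiesce loop below waits at most
 * NXGE_WAIT_QUITE_TIME * NXGE_WAIT_QUITE_RETRY = 8 seconds per peer
 * function before giving up on the reset.
 */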

static void
nxge_niu_peu_reset(p_nxge_t nxgep)
{
	uint32_t	rvalue;
	p_nxge_hw_list_t hw_p;
	p_nxge_t	fnxgep;
	int		i, j;

	NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, "==> nxge_niu_peu_reset"));
	if ((hw_p = nxgep->nxge_hw_p) == NULL) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_niu_peu_reset: NULL hardware pointer"));
		return;
	}

	NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
	    "==> nxge_niu_peu_reset: flags 0x%x link timer id %d timer id %d",
	    hw_p->flags, nxgep->nxge_link_poll_timerid,
	    nxgep->nxge_timerid));

	MUTEX_ENTER(&hw_p->nxge_cfg_lock);
	/*
	 * Make sure the other instances on the same hardware have
	 * stopped issuing PIOs and are in a quiescent state.
	 */
	for (i = 0; i < NXGE_MAX_PORTS; i++) {
		fnxgep = hw_p->nxge_p[i];
		NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_niu_peu_reset: checking entry %d "
		    "nxgep $%p", i, fnxgep));
#ifdef NXGE_DEBUG
		if (fnxgep) {
			NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
			    "==> nxge_niu_peu_reset: entry %d (function %d) "
			    "link timer id %d hw timer id %d",
			    i, fnxgep->function_num,
			    fnxgep->nxge_link_poll_timerid,
			    fnxgep->nxge_timerid));
		}
#endif
		if (fnxgep && fnxgep != nxgep &&
		    (fnxgep->nxge_timerid || fnxgep->nxge_link_poll_timerid)) {
			NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
			    "==> nxge_niu_peu_reset: checking $%p "
			    "(function %d) timer ids",
			    fnxgep, fnxgep->function_num));
			for (j = 0; j < NXGE_WAIT_QUITE_RETRY; j++) {
				NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
				    "==> nxge_niu_peu_reset: waiting"));
				NXGE_DELAY(NXGE_WAIT_QUITE_TIME);
				if (!fnxgep->nxge_timerid &&
				    !fnxgep->nxge_link_poll_timerid) {
					break;
				}
			}
			NXGE_DELAY(NXGE_WAIT_QUITE_TIME);
			if (fnxgep->nxge_timerid ||
			    fnxgep->nxge_link_poll_timerid) {
				MUTEX_EXIT(&hw_p->nxge_cfg_lock);
				NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
				    "<== nxge_niu_peu_reset: cannot reset "
				    "hardware (devices are still in use)"));
				return;
			}
		}
	}

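	/*
	 * COMMON_RESET_NIU_PCI is set in the shared hw flags before
	 * the write so the chip is reset at most once, no matter how
	 * many of its functions detach.
	 */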
	if ((hw_p->flags & COMMON_RESET_NIU_PCI) != COMMON_RESET_NIU_PCI) {
		hw_p->flags |= COMMON_RESET_NIU_PCI;
		rvalue = pci_config_get32(nxgep->dev_regs->nxge_pciregh,
		    NXGE_PCI_PORT_LOGIC_OFFSET);
		NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_niu_peu_reset: read offset 0x%x (%d) "
		    "(data 0x%x)",
		    NXGE_PCI_PORT_LOGIC_OFFSET,
		    NXGE_PCI_PORT_LOGIC_OFFSET,
		    rvalue));

		rvalue |= NXGE_PCI_RESET_ALL;
		pci_config_put32(nxgep->dev_regs->nxge_pciregh,
		    NXGE_PCI_PORT_LOGIC_OFFSET, rvalue);
		NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_niu_peu_reset: RESETTING NIU: write NIU reset 0x%x",
		    rvalue));

		NXGE_DELAY(NXGE_PCI_RESET_WAIT);
	}

	MUTEX_EXIT(&hw_p->nxge_cfg_lock);
	NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_niu_peu_reset"));
}

static void
nxge_set_pci_replay_timeout(p_nxge_t nxgep)
{
	p_dev_regs_t	dev_regs;
	uint32_t	value;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_set_pci_replay_timeout"));

	if (!nxge_set_replay_timer) {
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "==> nxge_set_pci_replay_timeout: will not change "
		    "the timeout"));
		return;
	}

	dev_regs = nxgep->dev_regs;

	if (dev_regs == NULL || (dev_regs->nxge_pciregh == NULL)) {
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "==> nxge_set_pci_replay_timeout: NULL dev_regs $%p or "
		    "no PCI handle",
		    dev_regs));
		return;
	}

	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "==> nxge_set_pci_replay_timeout: dev_regs 0x%p pcireg 0x%p",
	    dev_regs, dev_regs->nxge_pciregh));
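
	/*
	 * Note that the new timeout bits are OR-ed into the current
	 * register value; any bits already set in that field remain
	 * set rather than being replaced.
	 */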
	value = (pci_config_get32(dev_regs->nxge_pciregh,
	    PCI_REPLAY_TIMEOUT_CFG_OFFSET) |
	    (nxge_replay_timeout << PCI_REPLAY_TIMEOUT_SHIFT));

	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "nxge_set_pci_replay_timeout: replay timeout value before set 0x%x "
	    "(timeout value to set 0x%x at offset 0x%x) value 0x%x",
	    pci_config_get32(dev_regs->nxge_pciregh,
	    PCI_REPLAY_TIMEOUT_CFG_OFFSET), nxge_replay_timeout,
	    PCI_REPLAY_TIMEOUT_CFG_OFFSET, value));

	pci_config_put32(dev_regs->nxge_pciregh, PCI_REPLAY_TIMEOUT_CFG_OFFSET,
	    value);

	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "nxge_set_pci_replay_timeout: replay timeout value after set 0x%x",
	    pci_config_get32(dev_regs->nxge_pciregh,
	    PCI_REPLAY_TIMEOUT_CFG_OFFSET)));

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_set_pci_replay_timeout"));
}

/*
 * quiesce(9E) entry point.
 *
 * This function is called when the system is single-threaded at high
 * PIL with preemption disabled. Therefore, this function must not
 * block.
 *
 * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
 * DDI_FAILURE indicates an error condition and should almost never happen.
 */
static int
nxge_quiesce(dev_info_t *dip)
{
	int instance = ddi_get_instance(dip);
	p_nxge_t nxgep = (p_nxge_t)ddi_get_soft_state(nxge_list, instance);

	if (nxgep == NULL)
		return (DDI_FAILURE);

	/* Turn off debugging */
	nxge_debug_level = NO_DEBUG;
	nxgep->nxge_debug_level = NO_DEBUG;
	npi_debug_level = NO_DEBUG;

	/*
	 * Stop the link monitor only when linkchkmode is interrupt based.
	 */
	if (nxgep->mac.linkchkmode == LINKCHK_INTR) {
		(void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);
	}

	(void) nxge_intr_hw_disable(nxgep);

	/*
	 * Reset the receive MAC side.
	 */
	(void) nxge_rx_mac_disable(nxgep);

	/* Disable and soft reset the IPP */
	if (!isLDOMguest(nxgep))
		(void) nxge_ipp_disable(nxgep);

	/*
	 * Reset the transmit/receive DMA side.
	 */
	(void) nxge_txdma_hw_mode(nxgep, NXGE_DMA_STOP);
	(void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_STOP);

	/*
	 * Reset the transmit MAC side.
	 */
	(void) nxge_tx_mac_disable(nxgep);

	return (DDI_SUCCESS);
}