xref: /titanic_50/usr/src/uts/common/io/hxge/hxge_ndd.c (revision 25c6ff4b77fcddf4097ce78a8277275ca603b46c)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #pragma ident	"%Z%%M%	%I%	%E% SMI"
27 
28 #include <hxge_impl.h>
29 #include <inet/common.h>
30 #include <inet/mi.h>
31 #include <inet/nd.h>
32 
33 extern uint64_t hpi_debug_level;
34 
35 #define	HXGE_PARAM_MAC_RW \
36 	HXGE_PARAM_RW | HXGE_PARAM_MAC | \
37 	HXGE_PARAM_NDD_WR_OK | HXGE_PARAM_READ_PROP
38 
39 #define	HXGE_PARAM_RXDMA_RW	HXGE_PARAM_RWP | HXGE_PARAM_RXDMA | \
40 	HXGE_PARAM_NDD_WR_OK | HXGE_PARAM_READ_PROP
41 
42 #define	HXGE_PARAM_L2CLASS_CFG	\
43 	HXGE_PARAM_RW | HXGE_PARAM_PROP_ARR32 | \
44 	HXGE_PARAM_READ_PROP | HXGE_PARAM_NDD_WR_OK
45 
46 #define	HXGE_PARAM_CLASS_RWS \
47 	HXGE_PARAM_RWS | HXGE_PARAM_READ_PROP
48 
49 #define	HXGE_PARAM_ARRAY_INIT_SIZE	0x20ULL
50 
51 #define	BASE_ANY	0
52 #define	BASE_BINARY	2
53 #define	BASE_HEX	16
54 #define	BASE_DECIMAL	10
55 #define	ALL_FF_64	0xFFFFFFFFFFFFFFFFULL
56 #define	ALL_FF_32	0xFFFFFFFFUL
57 
58 #define	HXGE_NDD_INFODUMP_BUFF_SIZE	2048	/* is 2k enough? */
59 #define	HXGE_NDD_INFODUMP_BUFF_8K	8192
60 #define	HXGE_NDD_INFODUMP_BUFF_16K	0x4000
61 #define	HXGE_NDD_INFODUMP_BUFF_64K	0x10000
62 
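/*
 * A parsed value is out of range when mi_strtol() consumed no
 * characters (vptr == eptr) or when the result lies outside the
 * [minimum, maximum] range of parameter 'pa'.
 */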
63 #define	PARAM_OUTOF_RANGE(vptr, eptr, rval, pa)	\
64 	((vptr == eptr) || (rval < pa->minimum) || (rval > pa->maximum))
65 
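/*
 * After an snprintf() into the dump mblk, advance its write pointer by
 * the number of bytes written ('plen') and shrink the remaining buffer
 * length ('rlen') by the same amount.
 */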
66 #define	ADVANCE_PRINT_BUFFER(pmp, plen, rlen) { \
67 	((mblk_t *)pmp)->b_wptr += plen; \
68 	rlen -= plen; \
69 }
70 
71 static int hxge_param_rx_intr_pkts(p_hxge_t hxgep, queue_t *,
72 	mblk_t *, char *, caddr_t);
73 static int hxge_param_rx_intr_time(p_hxge_t hxgep, queue_t *,
74 	mblk_t *, char *, caddr_t);
75 static int hxge_param_set_mac(p_hxge_t, queue_t *,
76 	mblk_t *, char *, caddr_t);
77 static int hxge_param_set_ether_usr(p_hxge_t hxgep, queue_t *, mblk_t *,
78 	char *, caddr_t);
79 static int hxge_param_set_ip_opt(p_hxge_t hxgep,
80 	queue_t *, mblk_t *, char *, caddr_t);
81 static int hxge_param_pfc_hash_init(p_hxge_t hxgep,
82 	queue_t *, mblk_t *, char *, caddr_t);
83 static int hxge_param_tcam_enable(p_hxge_t hxgep, queue_t *,
84 	mblk_t *, char *, caddr_t);
85 static int hxge_param_get_rxdma_info(p_hxge_t hxgep, queue_t *q,
86 	p_mblk_t mp, caddr_t cp);
87 static int hxge_param_set_vlan_ids(p_hxge_t hxgep, queue_t *q,
88 	mblk_t *mp, char *value, caddr_t cp);
89 static int hxge_param_get_vlan_ids(p_hxge_t hxgep, queue_t *q,
90 	p_mblk_t mp, caddr_t cp);
91 static int hxge_param_get_ip_opt(p_hxge_t hxgep,
92 	queue_t *, mblk_t *, caddr_t);
93 static int hxge_param_get_mac(p_hxge_t hxgep, queue_t *q, p_mblk_t mp,
94 	caddr_t cp);
95 static int hxge_param_get_debug_flag(p_hxge_t hxgep, queue_t *q,
96 	p_mblk_t mp, caddr_t cp);
97 static int hxge_param_set_hxge_debug_flag(p_hxge_t hxgep,
98 	queue_t *, mblk_t *, char *, caddr_t);
99 static int hxge_param_set_hpi_debug_flag(p_hxge_t hxgep,
100 	queue_t *, mblk_t *, char *, caddr_t);
101 static int hxge_param_dump_ptrs(p_hxge_t hxgep, queue_t *q,
102 	p_mblk_t mp, caddr_t cp);
103 
104 /*
105  * Global array of Hydra changeable parameters.
106  * This array is initialized to correspond to the default
107  * Hydra configuration.  It is copied into the per-instance
108  * parameter structure and modified according to the fcode and
109  * hxge.conf configuration.  Later, the parameters (at least
110  * some of them) are exported to ndd for display and
111  * run-time configuration.
112  */
113 
114 static hxge_param_t hxge_param_arr[] = {
115 	/* getf	setf	type	min	max	value	old	hw-name 	conf-name	*/
116 	{hxge_param_get_generic, NULL, HXGE_PARAM_READ,
117 		0, 999, 1000, 0, "instance", "instance"},
118 
119 	/* MTU cannot be propagated to the stack from here, so don't show it */
120 	{hxge_param_get_mac, hxge_param_set_mac,
121 		HXGE_PARAM_MAC_RW | HXGE_PARAM_DONT_SHOW,
122 		0, 1, 0, 0, "accept-jumbo", "accept_jumbo"},
123 
124 	{hxge_param_get_rxdma_info, NULL,
125 		HXGE_PARAM_READ | HXGE_PARAM_DONT_SHOW,
126 		HXGE_RBR_RBB_MIN, HXGE_RBR_RBB_MAX, HXGE_RBR_RBB_DEFAULT, 0,
127 		"rx-rbr-size", "rx_rbr_size"},
128 
129 	{hxge_param_get_rxdma_info, NULL,
130 		HXGE_PARAM_READ | HXGE_PARAM_DONT_SHOW,
131 		HXGE_RCR_MIN, HXGE_RCR_MAX, HXGE_RCR_DEFAULT, 0,
132 		"rx-rcr-size", "rx_rcr_size"},
133 
134 	{hxge_param_get_generic, hxge_param_rx_intr_time,
135 		HXGE_PARAM_RXDMA_RW,
136 		HXGE_RDC_RCR_TIMEOUT_MIN, HXGE_RDC_RCR_TIMEOUT_MAX,
137 		RXDMA_RCR_TO_DEFAULT, 0, "rxdma-intr-time", "rxdma_intr_time"},
138 
139 	{hxge_param_get_generic, hxge_param_rx_intr_pkts,
140 		HXGE_PARAM_RXDMA_RW,
141 		HXGE_RDC_RCR_THRESHOLD_MIN, HXGE_RDC_RCR_THRESHOLD_MAX,
142 		RXDMA_RCR_PTHRES_DEFAULT, 0,
143 		"rxdma-intr-pkts", "rxdma_intr_pkts"},
144 
145 	/* Hardware VLAN is not used currently, so don't show it */
146 	{hxge_param_get_vlan_ids, hxge_param_set_vlan_ids,
147 		HXGE_PARAM_L2CLASS_CFG | HXGE_PARAM_DONT_SHOW,
148 		VLAN_ID_MIN, VLAN_ID_MAX, 0, 0, "vlan-ids", "vlan_ids"},
149 
150 	/* Hardware VLAN is not used currently, so don't show it */
151 	{hxge_param_get_generic, hxge_param_set_generic,
152 		HXGE_PARAM_CLASS_RWS | HXGE_PARAM_DONT_SHOW,
153 		VLAN_ID_MIN, VLAN_ID_MAX, VLAN_ID_IMPLICIT, VLAN_ID_IMPLICIT,
154 		"implicit-vlan-id", "implicit_vlan_id"},
155 
156 	{hxge_param_get_generic, hxge_param_tcam_enable,
157 		HXGE_PARAM_CLASS_RWS | HXGE_PARAM_DONT_SHOW,
158 		0, 0x1, 0x0, 0, "tcam-enable", "tcam_enable"},
159 
160 	{hxge_param_get_generic, hxge_param_pfc_hash_init,
161 		HXGE_PARAM_CLASS_RWS | HXGE_PARAM_DONT_SHOW,
162 		0, ALL_FF_32, ALL_FF_32, 0,
163 		"hash-init-value", "hash_init_value"},
164 
165 	{hxge_param_get_generic, hxge_param_set_ether_usr,
166 		HXGE_PARAM_CLASS_RWS | HXGE_PARAM_DONT_SHOW,
167 		0, ALL_FF_32, 0x0, 0,
168 		"class-cfg-ether-usr1", "class_cfg_ether_usr1"},
169 
170 	{hxge_param_get_generic, hxge_param_set_ether_usr,
171 		HXGE_PARAM_CLASS_RWS | HXGE_PARAM_DONT_SHOW,
172 		0, ALL_FF_32, 0x0, 0,
173 		"class-cfg-ether-usr2", "class_cfg_ether_usr2"},
174 
175 	{hxge_param_get_ip_opt, hxge_param_set_ip_opt, HXGE_PARAM_CLASS_RWS,
176 		0, ALL_FF_32, HXGE_CLASS_TCAM_LOOKUP, 0,
177 		"class-opt-ipv4-tcp", "class_opt_ipv4_tcp"},
178 
179 	{hxge_param_get_ip_opt, hxge_param_set_ip_opt, HXGE_PARAM_CLASS_RWS,
180 		0, ALL_FF_32, HXGE_CLASS_TCAM_LOOKUP, 0,
181 		"class-opt-ipv4-udp", "class_opt_ipv4_udp"},
182 
183 	{hxge_param_get_ip_opt, hxge_param_set_ip_opt, HXGE_PARAM_CLASS_RWS,
184 		0, ALL_FF_32, HXGE_CLASS_TCAM_LOOKUP, 0,
185 		"class-opt-ipv4-ah", "class_opt_ipv4_ah"},
186 
187 	{hxge_param_get_ip_opt, hxge_param_set_ip_opt, HXGE_PARAM_CLASS_RWS,
188 		0, ALL_FF_32, HXGE_CLASS_TCAM_LOOKUP, 0,
189 		"class-opt-ipv4-sctp", "class_opt_ipv4_sctp"},
190 
191 	{hxge_param_get_ip_opt, hxge_param_set_ip_opt, HXGE_PARAM_CLASS_RWS,
192 		0, ALL_FF_32, HXGE_CLASS_TCAM_LOOKUP, 0,
193 		"class-opt-ipv6-tcp", "class_opt_ipv6_tcp"},
194 
195 	{hxge_param_get_ip_opt, hxge_param_set_ip_opt, HXGE_PARAM_CLASS_RWS,
196 		0, ALL_FF_32, HXGE_CLASS_TCAM_LOOKUP, 0,
197 		"class-opt-ipv6-udp", "class_opt_ipv6_udp"},
198 
199 	{hxge_param_get_ip_opt, hxge_param_set_ip_opt, HXGE_PARAM_CLASS_RWS,
200 		0, ALL_FF_32, HXGE_CLASS_TCAM_LOOKUP, 0,
201 		"class-opt-ipv6-ah", "class_opt_ipv6_ah"},
202 
203 	{hxge_param_get_ip_opt, hxge_param_set_ip_opt, HXGE_PARAM_CLASS_RWS,
204 		0, ALL_FF_32, HXGE_CLASS_TCAM_LOOKUP, 0,
205 		"class-opt-ipv6-sctp", "class_opt_ipv6_sctp"},
206 
207 	{hxge_param_get_debug_flag, hxge_param_set_hxge_debug_flag,
208 		HXGE_PARAM_RW | HXGE_PARAM_DONT_SHOW,
209 		0ULL, ALL_FF_64, 0ULL, 0ULL,
210 		"hxge-debug-flag", "hxge_debug_flag"},
211 
212 	{hxge_param_get_debug_flag, hxge_param_set_hpi_debug_flag,
213 		HXGE_PARAM_RW | HXGE_PARAM_DONT_SHOW,
214 		0ULL, ALL_FF_64, 0ULL, 0ULL,
215 		"hpi-debug-flag", "hpi_debug_flag"},
216 
217 	{hxge_param_dump_ptrs, NULL, HXGE_PARAM_READ | HXGE_PARAM_DONT_SHOW,
218 		0, 0x0fffffff, 0x0fffffff, 0, "dump-ptrs", "dump_ptrs"},
219 
220 	{NULL, NULL, HXGE_PARAM_READ | HXGE_PARAM_DONT_SHOW,
221 		0, 0x0fffffff, 0x0fffffff, 0, "end", "end"},
222 };
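
/*
 * The writable entries above are reachable through ndd(1M) via the
 * driver's ND_GET/ND_SET ioctl handling below.  A hypothetical session
 * (the device node name is illustrative only):
 *
 *	# ndd /dev/hxge0 rxdma_intr_time
 *	# ndd -set /dev/hxge0 rxdma_intr_time 8
 *
 * The per-parameter set routines parse the string value and reject
 * anything outside the minimum/maximum columns of this table.
 */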
223 
224 extern void *hxge_list;
225 
226 /*
227  * Update the NDD array from the soft properties.
228  */
229 void
230 hxge_get_param_soft_properties(p_hxge_t hxgep)
231 {
232 	p_hxge_param_t	param_arr;
233 	uint_t		prop_len;
234 	int		i, j;
235 	uint32_t	param_count;
236 	uint32_t	*int_prop_val;
237 
238 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, " ==> hxge_get_param_soft_properties"));
239 
240 	param_arr = hxgep->param_arr;
241 	param_count = hxgep->param_count;
242 	for (i = 0; i < param_count; i++) {
243 
244 		if ((param_arr[i].type & HXGE_PARAM_READ_PROP) == 0)
245 			continue;
246 
247 		if ((param_arr[i].type & HXGE_PARAM_PROP_STR))
248 			continue;
249 
250 		if ((param_arr[i].type & HXGE_PARAM_PROP_ARR32) ||
251 		    (param_arr[i].type & HXGE_PARAM_PROP_ARR64)) {
252 
253 			if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY,
254 			    hxgep->dip, 0, param_arr[i].fcode_name,
255 			    (int **)&int_prop_val, (uint_t *)&prop_len) ==
256 			    DDI_PROP_SUCCESS) {
257 				uint64_t *cfg_value;
258 				uint64_t prop_count;
259 
260 				if (prop_len > HXGE_PARAM_ARRAY_INIT_SIZE)
261 					prop_len = HXGE_PARAM_ARRAY_INIT_SIZE;
262 #if defined(__i386)
263 				cfg_value =
264 				    (uint64_t *)(int32_t)param_arr[i].value;
265 #else
266 				cfg_value = (uint64_t *)param_arr[i].value;
267 #endif
268 				for (j = 0; j < prop_len; j++) {
269 					cfg_value[j] = int_prop_val[j];
270 				}
271 				prop_count = prop_len;
272 				param_arr[i].type |=
273 				    (prop_count << HXGE_PARAM_ARRAY_CNT_SHIFT);
274 
275 				ddi_prop_free(int_prop_val);
276 			}
277 			continue;
278 		}
279 		if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, hxgep->dip, 0,
280 		    param_arr[i].fcode_name, (int **)&int_prop_val,
281 		    &prop_len) == DDI_PROP_SUCCESS) {
282 			if ((*int_prop_val >= param_arr[i].minimum) &&
283 			    (*int_prop_val <= param_arr[i].maximum))
284 				param_arr[i].value = *int_prop_val;
285 			ddi_prop_free(int_prop_val);
286 		}
287 		if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, hxgep->dip, 0,
288 		    param_arr[i].name, (int **)&int_prop_val, &prop_len) ==
289 		    DDI_PROP_SUCCESS) {
290 			if ((*int_prop_val >= param_arr[i].minimum) &&
291 			    (*int_prop_val <= param_arr[i].maximum))
292 				param_arr[i].value = *int_prop_val;
293 			ddi_prop_free(int_prop_val);
294 		}
295 	}
296 }
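
/*
 * Sketch of (hypothetical) hxge.conf entries that the loop above would
 * pick up through ddi_prop_lookup_int_array(9F):
 *
 *	rxdma-intr-time = 16;
 *	rxdma-intr-pkts = 8;
 *
 * Scalar properties are range-checked against the parameter's
 * minimum/maximum before being stored; HXGE_PARAM_PROP_ARR32/ARR64
 * properties are copied element by element, capped at
 * HXGE_PARAM_ARRAY_INIT_SIZE entries.
 */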
297 
298 static int
299 hxge_private_param_register(p_hxge_t hxgep, p_hxge_param_t param_arr)
300 {
301 	int		status = B_TRUE;
302 	int		channel;
303 	char		*prop_name;
304 	char		*end;
305 	uint32_t	name_chars;
306 
307 	HXGE_DEBUG_MSG((hxgep, NDD2_CTL, " hxge_private_param_register %s",
308 	    param_arr->name));
309 
310 	if ((param_arr->type & HXGE_PARAM_PRIV) != HXGE_PARAM_PRIV)
311 		return (B_TRUE);
312 	prop_name = param_arr->name;
313 	if (param_arr->type & HXGE_PARAM_RXDMA) {
314 		if (strncmp("rxdma_intr", prop_name, 10) == 0)
315 			return (B_TRUE);
316 		else
317 			return (B_FALSE);
318 	}
319 
320 	if (param_arr->type & HXGE_PARAM_TXDMA) {
321 		name_chars = strlen("txdma");
322 		if (strncmp("txdma", prop_name, name_chars) == 0) {
323 			prop_name += name_chars;
324 			channel = mi_strtol(prop_name, &end, 10);
325 			/* now check if this tdc is in config */
326 			HXGE_DEBUG_MSG((hxgep, NDD2_CTL,
327 			    " hxge_private_param_register: %d", channel));
328 			return (hxge_check_txdma_port_member(hxgep, channel));
329 		}
330 		return (B_FALSE);
331 	}
332 
333 	status = B_FALSE;
334 	HXGE_DEBUG_MSG((hxgep, NDD2_CTL, "<== hxge_private_param_register"));
335 
336 	return (status);
337 }
338 
339 void
340 hxge_setup_param(p_hxge_t hxgep)
341 {
342 	p_hxge_param_t	param_arr;
343 	int		i;
344 	pfi_t		set_pfi;
345 
346 	HXGE_DEBUG_MSG((hxgep, NDD_CTL, "==> hxge_setup_param"));
347 	/*
348 	 * Make sure the param_instance is set to a valid device instance.
349 	 */
350 	if (hxge_param_arr[param_instance].value == 1000)
351 		hxge_param_arr[param_instance].value = hxgep->instance;
352 
353 	param_arr = hxgep->param_arr;
354 	param_arr[param_instance].value = hxgep->instance;
355 
356 	for (i = 0; i < hxgep->param_count; i++) {
357 		if ((param_arr[i].type & HXGE_PARAM_PRIV) &&
358 		    (hxge_private_param_register(hxgep, &param_arr[i]) ==
359 		    B_FALSE)) {
360 			param_arr[i].setf = NULL;
361 			param_arr[i].getf = NULL;
362 		}
363 		if (param_arr[i].type & HXGE_PARAM_CMPLX)
364 			param_arr[i].setf = NULL;
365 
366 		if (param_arr[i].type & HXGE_PARAM_DONT_SHOW) {
367 			param_arr[i].setf = NULL;
368 			param_arr[i].getf = NULL;
369 		}
370 		set_pfi = (pfi_t)param_arr[i].setf;
371 
372 		if ((set_pfi) && (param_arr[i].type & HXGE_PARAM_INIT_ONLY)) {
373 			set_pfi = NULL;
374 		}
375 		if (!hxge_nd_load(&hxgep->param_list, param_arr[i].name,
376 		    (pfi_t)param_arr[i].getf, set_pfi,
377 		    (caddr_t)&param_arr[i])) {
378 			(void) hxge_nd_free(&hxgep->param_list);
379 			break;
380 		}
381 	}
382 
383 	HXGE_DEBUG_MSG((hxgep, NDD_CTL, "<== hxge_setup_param"));
384 }
385 
386 /*
387  * Called from the attach path; allocates memory for the parameter
388  * array and some of its members.
389  */
390 void
391 hxge_init_param(p_hxge_t hxgep)
392 {
393 	p_hxge_param_t	param_arr;
394 	int		i, alloc_size;
395 	uint64_t	alloc_count;
396 
397 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_init_param"));
398 	/*
399 	 * Make sure the param_instance is set to a valid device instance.
400 	 */
401 	if (hxge_param_arr[param_instance].value == 1000)
402 		hxge_param_arr[param_instance].value = hxgep->instance;
403 
404 	param_arr = hxgep->param_arr;
405 	if (param_arr == NULL) {
406 		param_arr = (p_hxge_param_t)KMEM_ZALLOC(
407 		    sizeof (hxge_param_arr), KM_SLEEP);
408 	}
409 	for (i = 0; i < sizeof (hxge_param_arr) / sizeof (hxge_param_t); i++) {
410 		param_arr[i] = hxge_param_arr[i];
411 		if ((param_arr[i].type & HXGE_PARAM_PROP_ARR32) ||
412 		    (param_arr[i].type & HXGE_PARAM_PROP_ARR64)) {
413 			alloc_count = HXGE_PARAM_ARRAY_INIT_SIZE;
414 			alloc_size = alloc_count * sizeof (uint64_t);
415 #if defined(__i386)
416 			param_arr[i].value =
417 			    (uint64_t)(uint32_t)KMEM_ZALLOC(alloc_size,
418 			    KM_SLEEP);
419 			param_arr[i].old_value =
420 			    (uint64_t)(uint32_t)KMEM_ZALLOC(alloc_size,
421 			    KM_SLEEP);
422 #else
423 			param_arr[i].value =
424 			    (uint64_t)KMEM_ZALLOC(alloc_size, KM_SLEEP);
425 			param_arr[i].old_value =
426 			    (uint64_t)KMEM_ZALLOC(alloc_size, KM_SLEEP);
427 #endif
428 			param_arr[i].type |=
429 			    (alloc_count << HXGE_PARAM_ARRAY_ALLOC_SHIFT);
430 		}
431 	}
432 
433 	hxgep->param_arr = param_arr;
434 	hxgep->param_count = sizeof (hxge_param_arr) / sizeof (hxge_param_t);
435 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_init_param: count %d",
436 	    hxgep->param_count));
437 }
438 
439 /*
440  * Called when the instance is detached; frees the memory allocated for the parameter array.
441  */
442 void
443 hxge_destroy_param(p_hxge_t hxgep)
444 {
445 	int		i;
446 	uint64_t	free_size, free_count;
447 
448 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_destroy_param"));
449 	/*
450 	 * Make sure the param_instance is set to a valid device instance.
451 	 */
452 	if (hxge_param_arr[param_instance].value == hxgep->instance) {
453 		for (i = 0; i <= hxge_param_arr[param_instance].maximum; i++) {
454 			if ((ddi_get_soft_state(hxge_list, i) != NULL) &&
455 			    (i != hxgep->instance))
456 				break;
457 		}
458 		hxge_param_arr[param_instance].value = i;
459 	}
460 	if (hxgep->param_list)
461 		hxge_nd_free(&hxgep->param_list);
462 	for (i = 0; i < hxgep->param_count; i++) {
463 		if ((hxgep->param_arr[i].type & HXGE_PARAM_PROP_ARR32) ||
464 		    (hxgep->param_arr[i].type & HXGE_PARAM_PROP_ARR64)) {
465 			free_count = ((hxgep->param_arr[i].type &
466 			    HXGE_PARAM_ARRAY_ALLOC_MASK) >>
467 			    HXGE_PARAM_ARRAY_ALLOC_SHIFT);
468 			free_count = HXGE_PARAM_ARRAY_INIT_SIZE;
469 			free_size = sizeof (uint64_t) * free_count;
470 #if defined(__i386)
471 			KMEM_FREE((void *)(uint32_t)
472 			    hxgep->param_arr[i].value, free_size);
473 			KMEM_FREE((void *)(uint32_t)
474 			    hxgep->param_arr[i].old_value, free_size);
475 #else
476 			KMEM_FREE((void *) hxgep->param_arr[i].value,
477 			    free_size);
478 			KMEM_FREE((void *) hxgep->param_arr[i].old_value,
479 			    free_size);
480 #endif
481 		}
482 	}
483 
484 	KMEM_FREE(hxgep->param_arr, sizeof (hxge_param_arr));
485 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_destroy_param"));
486 }
487 
488 /*
489  * Extracts the value from the 'hxge' parameter array and prints the
490  * parameter value. cp points to the required parameter.
491  */
492 /* ARGSUSED */
493 int
494 hxge_param_get_generic(p_hxge_t hxgep, queue_t *q, p_mblk_t mp, caddr_t cp)
495 {
496 	p_hxge_param_t pa = (p_hxge_param_t)cp;
497 
498 	HXGE_DEBUG_MSG((hxgep, NDD_CTL, " ==> hxge_param_get_generic name %s ",
499 	    pa->name));
500 
501 	if (pa->value > 0xffffffff)
502 		(void) mi_mpprintf(mp, "%x%08x", (int)(pa->value >> 32),
503 		    (int)(pa->value & 0xffffffff));
504 	else
505 		(void) mi_mpprintf(mp, "%x", (int)pa->value);
506 
507 	HXGE_DEBUG_MSG((hxgep, NDD_CTL, "<== hxge_param_get_generic"));
508 	return (0);
509 }
510 
511 /* ARGSUSED */
512 static int
513 hxge_param_get_mac(p_hxge_t hxgep, queue_t *q, p_mblk_t mp, caddr_t cp)
514 {
515 	p_hxge_param_t pa = (p_hxge_param_t)cp;
516 
517 	HXGE_DEBUG_MSG((hxgep, NDD_CTL, "==> hxge_param_get_mac"));
518 
519 	(void) mi_mpprintf(mp, "%d", (uint32_t)pa->value);
520 	HXGE_DEBUG_MSG((hxgep, NDD_CTL, "<== hxge_param_get_mac"));
521 	return (0);
522 }
523 
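/*
 * Dump receive DMA information for every configured RDC (logical to
 * hardware channel mapping, RBR/RCR ring pointers and RBR block count)
 * into the reply mblk.
 */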
524 /* ARGSUSED */
525 int
526 hxge_param_get_rxdma_info(p_hxge_t hxgep, queue_t *q, p_mblk_t mp, caddr_t cp)
527 {
528 	uint_t			print_len, buf_len;
529 	p_mblk_t		np;
530 	int			rdc;
531 	p_hxge_dma_pt_cfg_t	p_dma_cfgp;
532 	p_hxge_hw_pt_cfg_t	p_cfgp;
533 	int			buff_alloc_size = HXGE_NDD_INFODUMP_BUFF_SIZE;
534 
535 	p_rx_rcr_rings_t rx_rcr_rings;
536 	p_rx_rcr_ring_t *rcr_rings;
537 	p_rx_rbr_rings_t rx_rbr_rings;
538 	p_rx_rbr_ring_t *rbr_rings;
539 
540 	HXGE_DEBUG_MSG((hxgep, NDD_CTL, "==> hxge_param_get_rxdma_info"));
541 
542 	(void) mi_mpprintf(mp, "RXDMA Information\n");
543 
544 	if ((np = allocb(buff_alloc_size, BPRI_HI)) == NULL) {
545 		/* The following may work even if we cannot get a large buf. */
546 		(void) mi_mpprintf(mp, "%s\n", "out of buffer");
547 		return (0);
548 	}
549 	buf_len = buff_alloc_size;
550 
551 	mp->b_cont = np;
552 
553 	p_dma_cfgp = (p_hxge_dma_pt_cfg_t)&hxgep->pt_config;
554 	p_cfgp = (p_hxge_hw_pt_cfg_t)&p_dma_cfgp->hw_config;
555 
556 	rx_rcr_rings = hxgep->rx_rcr_rings;
557 	rcr_rings = rx_rcr_rings->rcr_rings;
558 	rx_rbr_rings = hxgep->rx_rbr_rings;
559 	rbr_rings = rx_rbr_rings->rbr_rings;
560 
561 	print_len = snprintf((char *)((mblk_t *)np)->b_wptr, buf_len,
562 	    "Total RDCs\t %d\n", p_cfgp->max_rdcs);
563 	((mblk_t *)np)->b_wptr += print_len;
564 	buf_len -= print_len;
565 	print_len = snprintf((char *)((mblk_t *)np)->b_wptr, buf_len,
566 	    "RDC\t HW RDC\t RBR ptr \t"
567 	    "chunks\t RCR ptr\n");
568 	((mblk_t *)np)->b_wptr += print_len;
569 	buf_len -= print_len;
570 	for (rdc = 0; rdc < p_cfgp->max_rdcs; rdc++) {
571 		print_len = snprintf((char *)((mblk_t *)np)->b_wptr, buf_len,
572 		    " %d\t  %d\t $%p\t 0x%x\t $%p\n",
573 		    rdc, hxgep->rdc[rdc], (void *)rbr_rings[rdc],
574 		    rbr_rings[rdc]->num_blocks, (void *)rcr_rings[rdc]);
575 		((mblk_t *)np)->b_wptr += print_len;
576 		buf_len -= print_len;
577 	}
578 	HXGE_DEBUG_MSG((hxgep, NDD_CTL, "<== hxge_param_get_rxdma_info"));
579 	return (0);
580 }
581 
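/*
 * Make sure the message 'mp' has at least 'size' bytes of room after
 * its last mblk's write pointer; if not, append a fresh 1K mblk.  On
 * success '*nmp' points to the mblk that has the space available.
 */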
582 int
583 hxge_mk_mblk_tail_space(p_mblk_t mp, p_mblk_t *nmp, size_t size)
584 {
585 	p_mblk_t tmp;
586 
587 	tmp = mp;
588 	while (tmp->b_cont)
589 		tmp = tmp->b_cont;
590 	if ((tmp->b_wptr + size) >= tmp->b_datap->db_lim) {
591 		tmp->b_cont = allocb(1024, BPRI_HI);
592 		tmp = tmp->b_cont;
593 		if (!tmp)
594 			return (ENOMEM);
595 	}
596 	*nmp = tmp;
597 	return (0);
598 }
599 
600 /*
601  * Generic set routine: parses the new value, validates it against the
602  * parameter's minimum/maximum, and stores it in the hxge parameter entry.
603  */
604 /* ARGSUSED */
605 int
606 hxge_param_set_generic(p_hxge_t hxgep, queue_t *q, mblk_t *mp,
607 	char *value, caddr_t cp)
608 {
609 	char		*end;
610 	uint32_t	new_value;
611 	p_hxge_param_t	pa = (p_hxge_param_t)cp;
612 
613 	HXGE_DEBUG_MSG((hxgep, IOC_CTL, " ==> hxge_param_set_generic"));
614 	new_value = (uint32_t)mi_strtol(value, &end, 10);
615 	if (end == value || new_value < pa->minimum ||
616 	    new_value > pa->maximum) {
617 		return (EINVAL);
618 	}
619 	pa->value = new_value;
620 	HXGE_DEBUG_MSG((hxgep, IOC_CTL, " <== hxge_param_set_generic"));
621 	return (0);
622 }
623 
624 /* ARGSUSED */
625 int
626 hxge_param_set_mac(p_hxge_t hxgep, queue_t *q, mblk_t *mp,
627 	char *value, caddr_t cp)
628 {
629 	char		*end;
630 	uint32_t	new_value;
631 	int		status = 0;
632 	p_hxge_param_t	pa = (p_hxge_param_t)cp;
633 
634 	HXGE_DEBUG_MSG((hxgep, NDD_CTL, "==> hxge_param_set_mac"));
635 	new_value = (uint32_t)mi_strtol(value, &end, BASE_DECIMAL);
636 	if (PARAM_OUTOF_RANGE(value, end, new_value, pa)) {
637 		return (EINVAL);
638 	}
639 
640 	if (pa->value != new_value) {
641 		pa->old_value = pa->value;
642 		pa->value = new_value;
643 	}
644 
645 	if (pa->value != pa->old_value) {
646 		RW_ENTER_WRITER(&hxgep->filter_lock);
647 		(void) hxge_rx_vmac_disable(hxgep);
648 		(void) hxge_tx_vmac_disable(hxgep);
649 
650 		/*
651 		 * Apply the new jumbo parameter here.
652 		 * The order of the following two calls is important.
653 		 */
654 		(void) hxge_tx_vmac_enable(hxgep);
655 		(void) hxge_rx_vmac_enable(hxgep);
656 		RW_EXIT(&hxgep->filter_lock);
657 	}
658 
659 	HXGE_DEBUG_MSG((hxgep, NDD_CTL, "<== hxge_param_set_mac"));
660 	return (status);
661 }
662 
663 /* ARGSUSED */
664 static int
665 hxge_param_rx_intr_pkts(p_hxge_t hxgep, queue_t *q,
666 	mblk_t *mp, char *value, caddr_t cp)
667 {
668 	char		*end;
669 	uint32_t	cfg_value;
670 	p_hxge_param_t	pa = (p_hxge_param_t)cp;
671 
672 	HXGE_DEBUG_MSG((hxgep, NDD_CTL, "==> hxge_param_rx_intr_pkts"));
673 
674 	cfg_value = (uint32_t)mi_strtol(value, &end, BASE_ANY);
675 
676 	if ((cfg_value > HXGE_RDC_RCR_THRESHOLD_MAX) ||
677 	    (cfg_value < HXGE_RDC_RCR_THRESHOLD_MIN)) {
678 		return (EINVAL);
679 	}
680 
681 	if ((pa->value != cfg_value)) {
682 		pa->old_value = pa->value;
683 		pa->value = cfg_value;
684 		hxgep->intr_threshold = pa->value;
685 	}
686 
687 	HXGE_DEBUG_MSG((hxgep, NDD_CTL, "<== hxge_param_rx_intr_pkts"));
688 	return (0);
689 }
690 
691 /* ARGSUSED */
692 static int
693 hxge_param_rx_intr_time(p_hxge_t hxgep, queue_t *q,
694 	mblk_t *mp, char *value, caddr_t cp)
695 {
696 	char		*end;
697 	uint32_t	cfg_value;
698 	p_hxge_param_t	pa = (p_hxge_param_t)cp;
699 
700 	HXGE_DEBUG_MSG((hxgep, NDD_CTL, "==> hxge_param_rx_intr_time"));
701 
702 	cfg_value = (uint32_t)mi_strtol(value, &end, BASE_ANY);
703 
704 	if ((cfg_value > HXGE_RDC_RCR_TIMEOUT_MAX) ||
705 	    (cfg_value < HXGE_RDC_RCR_TIMEOUT_MIN)) {
706 		return (EINVAL);
707 	}
708 
709 	if ((pa->value != cfg_value)) {
710 		pa->old_value = pa->value;
711 		pa->value = cfg_value;
712 		hxgep->intr_timeout = pa->value;
713 	}
714 
715 	HXGE_DEBUG_MSG((hxgep, NDD_CTL, "<== hxge_param_rx_intr_time"));
716 	return (0);
717 }
718 
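/*
 * Add a VLAN id to the vlan-ids parameter array.  The value string is
 * parsed as hex and interpreted as a hxge_param_map_t; the count of
 * configured ids is kept in the HXGE_PARAM_ARRAY_CNT field of the
 * parameter type.  A newly configured id is also programmed into the
 * PFC VLAN table through hpi_pfc_cfg_vlan_table_entry_set().
 */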
719 /* ARGSUSED */
720 static int
721 hxge_param_set_vlan_ids(p_hxge_t hxgep, queue_t *q, mblk_t *mp, char *value,
722     caddr_t cp)
723 {
724 	char			*end;
725 	uint32_t		status = 0, cfg_value;
726 	p_hxge_param_t		pa = (p_hxge_param_t)cp;
727 	uint32_t		cfg_it = B_FALSE;
728 	uint32_t		*val_ptr, *old_val_ptr;
729 	hxge_param_map_t	*vmap, *old_map;
730 	p_hxge_class_pt_cfg_t 	p_class_cfgp;
731 	uint64_t		cfgd_vlans;
732 	int			i, inc = 0, cfg_position;
733 	hxge_mv_cfg_t		*vlan_tbl;
734 	hpi_handle_t		handle;
735 
736 	HXGE_DEBUG_MSG((hxgep, NDD_CTL, "==> hxge_param_set_vlan_ids "));
737 
738 	p_class_cfgp = (p_hxge_class_pt_cfg_t)&hxgep->class_config;
739 	vlan_tbl = (hxge_mv_cfg_t *)&p_class_cfgp->vlan_tbl[0];
740 	handle = hxgep->hpi_reg_handle;
741 
742 	cfg_value = (uint32_t)mi_strtol(value, &end, BASE_HEX);
743 
744 	/* now do decoding */
745 	cfgd_vlans = ((pa->type & HXGE_PARAM_ARRAY_CNT_MASK) >>
746 	    HXGE_PARAM_ARRAY_CNT_SHIFT);
747 
748 	if (cfgd_vlans >= HXGE_PARAM_ARRAY_INIT_SIZE) {
749 		/*
750 		 * For now, we process only up to HXGE_PARAM_ARRAY_INIT_SIZE
751 		 * parameters.  In the future, we may want to expand
752 		 * the storage array and continue.
753 		 */
754 		return (EINVAL);
755 	}
756 
757 	vmap = (hxge_param_map_t *)&cfg_value;
758 	if ((vmap->param_id == 0) || (vmap->param_id > VLAN_ID_MAX)) {
759 		return (EINVAL);
760 	}
761 
762 	HXGE_DEBUG_MSG((hxgep, NDD_CTL, " hxge_param_set_vlan_ids id %d",
763 	    vmap->param_id));
764 #if defined(__i386)
765 	val_ptr = (uint32_t *)(uint32_t)pa->value;
766 	old_val_ptr = (uint32_t *)(uint32_t)pa->old_value;
767 #else
768 	val_ptr = (uint32_t *)pa->value;
769 	old_val_ptr = (uint32_t *)pa->old_value;
770 #endif
771 
772 	/* Search to see if this vlan id is already configured */
773 	for (i = 0; i < cfgd_vlans; i++) {
774 		old_map = (hxge_param_map_t *)&val_ptr[i];
775 		if ((old_map->param_id == 0) ||
776 		    (vmap->param_id == old_map->param_id) ||
777 		    (vlan_tbl[vmap->param_id].flag)) {
778 			cfg_position = i;
779 			break;
780 		}
781 	}
782 
783 	if (cfgd_vlans == 0) {
784 		cfg_position = 0;
785 		inc++;
786 	}
787 
788 	if (i == cfgd_vlans) {
789 		cfg_position = i;
790 		inc++;
791 	}
792 
793 	HXGE_DEBUG_MSG((hxgep, NDD2_CTL,
794 	    " set_vlan_ids mapping i %d cfgd_vlans %llx position %d ",
795 	    i, cfgd_vlans, cfg_position));
796 
797 	if (val_ptr[cfg_position] != cfg_value) {
798 		old_val_ptr[cfg_position] = val_ptr[cfg_position];
799 		val_ptr[cfg_position] = cfg_value;
800 		vlan_tbl[vmap->param_id].flag = 1;
801 		cfg_it = B_TRUE;
802 		if (inc) {
803 			cfgd_vlans++;
804 			pa->type &= ~HXGE_PARAM_ARRAY_CNT_MASK;
805 			pa->type |= (cfgd_vlans << HXGE_PARAM_ARRAY_CNT_SHIFT);
806 
807 		}
808 
809 		HXGE_DEBUG_MSG((hxgep, NDD2_CTL,
810 		    " after: param_set_vlan_ids cfg_vlans %llx position %d \n",
811 		    cfgd_vlans, cfg_position));
812 	}
813 
814 	if (cfg_it == B_TRUE) {
815 		status = hpi_pfc_cfg_vlan_table_entry_set(handle,
816 		    (vlan_id_t)vmap->param_id);
817 		if (status != HPI_SUCCESS)
818 			return (EINVAL);
819 	}
820 
821 	HXGE_DEBUG_MSG((hxgep, NDD_CTL, "<== hxge_param_set_vlan_ids"));
822 
823 	return (0);
824 }
825 
826 
827 /* ARGSUSED */
828 static int
829 hxge_param_get_vlan_ids(p_hxge_t hxgep, queue_t *q, mblk_t *mp, caddr_t cp)
830 {
831 	uint_t			print_len, buf_len;
832 	p_mblk_t		np;
833 	int			i;
834 	uint32_t		*val_ptr;
835 	hxge_param_map_t	*vmap;
836 	p_hxge_param_t		pa = (p_hxge_param_t)cp;
837 	p_hxge_class_pt_cfg_t 	p_class_cfgp;
838 	uint64_t		cfgd_vlans = 0;
839 	int buff_alloc_size = HXGE_NDD_INFODUMP_BUFF_SIZE * 32;
840 
841 	HXGE_DEBUG_MSG((hxgep, NDD_CTL, "==> hxge_param_get_vlan_ids"));
842 	(void) mi_mpprintf(mp, "VLAN Information\n");
843 
844 	if ((np = allocb(buff_alloc_size, BPRI_HI)) == NULL) {
845 		(void) mi_mpprintf(mp, "%s\n", "out of buffer");
846 		return (0);
847 	}
848 
849 	buf_len = buff_alloc_size;
850 	mp->b_cont = np;
851 	cfgd_vlans = (pa->type & HXGE_PARAM_ARRAY_CNT_MASK) >>
852 	    HXGE_PARAM_ARRAY_CNT_SHIFT;
853 
854 	i = (int)cfgd_vlans;
855 	p_class_cfgp = (p_hxge_class_pt_cfg_t)&hxgep->class_config;
856 	print_len = snprintf((char *)((mblk_t *)np)->b_wptr, buf_len,
857 	    "Configured VLANs %d\n VLAN ID\n", i);
858 	((mblk_t *)np)->b_wptr += print_len;
859 	buf_len -= print_len;
860 
861 #if defined(__i386)
862 	val_ptr = (uint32_t *)(uint32_t)pa->value;
863 #else
864 	val_ptr = (uint32_t *)pa->value;
865 #endif
866 
867 	for (i = 0; i < cfgd_vlans; i++) {
868 		vmap = (hxge_param_map_t *)&val_ptr[i];
869 		if (p_class_cfgp->vlan_tbl[vmap->param_id].flag) {
870 			print_len = snprintf((char *)((mblk_t *)np)->b_wptr,
871 			    buf_len, "  %d\n", vmap->param_id);
872 			((mblk_t *)np)->b_wptr += print_len;
873 			buf_len -= print_len;
874 		}
875 	}
876 
877 	HXGE_DEBUG_MSG((hxgep, NDD_CTL, "<== hxge_param_get_vlan_ids"));
878 
879 	return (0);
880 }
881 
882 /* ARGSUSED */
883 static int
884 hxge_param_tcam_enable(p_hxge_t hxgep, queue_t *q,
885 	mblk_t *mp, char *value, caddr_t cp)
886 {
887 	uint32_t	status = 0, cfg_value;
888 	p_hxge_param_t	pa = (p_hxge_param_t)cp;
889 	uint32_t	cfg_it = B_FALSE;
890 	char		*end;
891 
892 	HXGE_DEBUG_MSG((hxgep, NDD_CTL, "==> hxge_param_tcam_enable"));
893 
894 	cfg_value = (uint32_t)mi_strtol(value, &end, BASE_BINARY);
895 	if (pa->value != cfg_value) {
896 		pa->old_value = pa->value;
897 		pa->value = cfg_value;
898 		cfg_it = B_TRUE;
899 	}
900 	if (cfg_it == B_TRUE) {
901 		if (pa->value)
902 			status = hxge_pfc_config_tcam_enable(hxgep);
903 		else
904 			status = hxge_pfc_config_tcam_disable(hxgep);
905 		if (status != HXGE_OK)
906 			return (EINVAL);
907 	}
908 	HXGE_DEBUG_MSG((hxgep, NDD_CTL, " <== hxge_param_tcam_enable"));
909 	return (0);
910 }
911 
912 /* ARGSUSED */
913 static int
914 hxge_param_set_ether_usr(p_hxge_t hxgep, queue_t *q,
915 	mblk_t *mp, char *value, caddr_t cp)
916 {
917 	char		*end;
918 	uint32_t	status = 0, cfg_value;
919 	p_hxge_param_t	pa = (p_hxge_param_t)cp;
920 
921 	HXGE_DEBUG_MSG((hxgep, NDD_CTL, "==> hxge_param_set_ether_usr"));
922 
923 	cfg_value = (uint32_t)mi_strtol(value, &end, BASE_HEX);
924 	if (PARAM_OUTOF_RANGE(value, end, cfg_value, pa)) {
925 		return (EINVAL);
926 	}
927 	if (pa->value != cfg_value) {
928 		pa->old_value = pa->value;
929 		pa->value = cfg_value;
930 	}
931 
932 	HXGE_DEBUG_MSG((hxgep, NDD_CTL, "<== hxge_param_set_ether_usr"));
933 	return (status);
934 }
935 
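/*
 * Map a class parameter name (for example "class_opt_ipv4_tcp") to its
 * tcam_class_t value by walking the class parameters in the same order
 * as the TCAM_CLASS_TCP_IPV4..TCAM_CLASS_SCTP_IPV6 enumeration.
 * Returns -1 if the name is not a known class parameter.
 */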
936 static int
937 hxge_class_name_2value(p_hxge_t hxgep, char *name)
938 {
939 	int		i;
940 	int		class_instance = param_class_opt_ipv4_tcp;
941 	p_hxge_param_t	param_arr;
942 
943 	param_arr = hxgep->param_arr;
944 	for (i = TCAM_CLASS_TCP_IPV4; i <= TCAM_CLASS_SCTP_IPV6; i++) {
945 		if (strcmp(param_arr[class_instance].name, name) == 0)
946 			return (i);
947 		class_instance++;
948 	}
949 	return (-1);
950 }
951 
952 /* ARGSUSED */
953 static int
954 hxge_param_set_ip_opt(p_hxge_t hxgep, queue_t *q,
955 	mblk_t *mp, char *value, caddr_t cp)
956 {
957 	char		*end;
958 	uint32_t	status, cfg_value;
959 	p_hxge_param_t	pa = (p_hxge_param_t)cp;
960 	tcam_class_t	class;
961 	uint32_t	cfg_it = B_FALSE;
962 
963 	HXGE_DEBUG_MSG((hxgep, NDD_CTL, "==> hxge_param_set_ip_opt"));
964 
965 	cfg_value = (uint32_t)mi_strtol(value, &end, BASE_HEX);
966 	if (PARAM_OUTOF_RANGE(value, end, cfg_value, pa)) {
967 		return (EINVAL);
968 	}
969 	if (pa->value != cfg_value) {
970 		pa->old_value = pa->value;
971 		pa->value = cfg_value;
972 		cfg_it = B_TRUE;
973 	}
974 	if (cfg_it == B_TRUE) {
975 		/* do the actual hw setup  */
976 		class = hxge_class_name_2value(hxgep, pa->name);
977 		if (class == -1)
978 			return (EINVAL);
979 
980 		status = hxge_pfc_ip_class_config(hxgep, class, pa->value);
981 		if (status != HXGE_OK)
982 			return (EINVAL);
983 	}
984 	HXGE_DEBUG_MSG((hxgep, NDD_CTL, "<== hxge_param_set_ip_opt"));
985 	return (0);
986 }
987 
988 /* ARGSUSED */
989 static int
990 hxge_param_get_ip_opt(p_hxge_t hxgep, queue_t *q, mblk_t *mp, caddr_t cp)
991 {
992 	uint32_t	status, cfg_value;
993 	p_hxge_param_t	pa = (p_hxge_param_t)cp;
994 	tcam_class_t	class;
995 
996 	HXGE_DEBUG_MSG((hxgep, NDD_CTL, "==> hxge_param_get_ip_opt"));
997 
998 	/* look up the class and read the current hw configuration */
999 	class = hxge_class_name_2value(hxgep, pa->name);
1000 	if (class == -1)
1001 		return (EINVAL);
1002 	cfg_value = 0;
1003 	status = hxge_pfc_ip_class_config_get(hxgep, class, &cfg_value);
1004 	if (status != HXGE_OK)
1005 		return (EINVAL);
1006 	HXGE_DEBUG_MSG((hxgep, NDD_CTL,
1007 	    "hxge_param_get_ip_opt_get %x ", cfg_value));
1008 	pa->value = cfg_value;
1009 
1010 	(void) mi_mpprintf(mp, "%x", cfg_value);
1011 	HXGE_DEBUG_MSG((hxgep, NDD_CTL, "<== hxge_param_get_ip_opt status "));
1012 	return (0);
1013 }
1014 
1015 /* ARGSUSED */
1016 static int
1017 hxge_param_pfc_hash_init(p_hxge_t hxgep, queue_t *q, mblk_t *mp,
1018 	char *value, caddr_t cp)
1019 {
1020 	char		*end;
1021 	uint32_t	status, cfg_value;
1022 	p_hxge_param_t	pa = (p_hxge_param_t)cp;
1023 	uint32_t	cfg_it = B_FALSE;
1024 
1025 	HXGE_DEBUG_MSG((hxgep, NDD_CTL, "==> hxge_param_pfc_hash_init"));
1026 
1027 	cfg_value = (uint32_t)mi_strtol(value, &end, BASE_HEX);
1028 	if (PARAM_OUTOF_RANGE(value, end, cfg_value, pa)) {
1029 		return (EINVAL);
1030 	}
1031 
1032 	HXGE_DEBUG_MSG((hxgep, NDD_CTL,
1033 	    " hxge_param_pfc_hash_init value %x", cfg_value));
1034 	if (pa->value != cfg_value) {
1035 		pa->old_value = pa->value;
1036 		pa->value = cfg_value;
1037 		cfg_it = B_TRUE;
1038 	}
1039 
1040 	if (cfg_it == B_TRUE) {
1041 		status = hxge_pfc_set_hash(hxgep, (uint32_t)pa->value);
1042 		if (status != HXGE_OK)
1043 			return (EINVAL);
1044 	}
1045 	HXGE_DEBUG_MSG((hxgep, NDD_CTL, " <== hxge_param_pfc_hash_init"));
1046 	return (0);
1047 }
1048 
1049 /* ARGSUSED */
1050 static int
1051 hxge_param_set_hxge_debug_flag(p_hxge_t hxgep, queue_t *q,
1052 	mblk_t *mp, char *value, caddr_t cp)
1053 {
1054 	char		*end;
1055 	uint32_t	status = 0;
1056 	uint64_t	cfg_value = 0;
1057 	p_hxge_param_t	pa = (p_hxge_param_t)cp;
1058 	uint32_t	cfg_it = B_FALSE;
1059 
1060 	HXGE_DEBUG_MSG((hxgep, NDD_CTL, "==> hxge_param_set_hxge_debug_flag"));
1061 	cfg_value = mi_strtol(value, &end, BASE_HEX);
1062 
1063 	if (PARAM_OUTOF_RANGE(value, end, cfg_value, pa)) {
1064 		HXGE_DEBUG_MSG((hxgep, NDD_CTL,
1065 		    " hxge_param_set_hxge_debug_flag"
1066 		    " out of range %llx", cfg_value));
1067 		return (EINVAL);
1068 	}
1069 	if (pa->value != cfg_value) {
1070 		pa->old_value = pa->value;
1071 		pa->value = cfg_value;
1072 		cfg_it = B_TRUE;
1073 	}
1074 	if (cfg_it == B_TRUE)
1075 		hxgep->hxge_debug_level = pa->value;
1076 
1077 	HXGE_DEBUG_MSG((hxgep, NDD_CTL, "<== hxge_param_set_hxge_debug_flag"));
1078 	return (status);
1079 }
1080 
1081 /* ARGSUSED */
1082 static int
1083 hxge_param_get_debug_flag(p_hxge_t hxgep, queue_t *q, p_mblk_t mp, caddr_t cp)
1084 {
1085 	int		status = 0;
1086 	p_hxge_param_t	pa = (p_hxge_param_t)cp;
1087 
1088 	HXGE_DEBUG_MSG((hxgep, NDD_CTL, "==> hxge_param_get_debug_flag"));
1089 
1090 	if (pa->value > 0xffffffff)
1091 		(void) mi_mpprintf(mp, "%x%08x", (int)(pa->value >> 32),
1092 		    (int)(pa->value & 0xffffffff));
1093 	else
1094 		(void) mi_mpprintf(mp, "%x", (int)pa->value);
1095 
1096 
1097 	HXGE_DEBUG_MSG((hxgep, NDD_CTL, "<== hxge_param_get_debug_flag"));
1098 	return (status);
1099 }
1100 
1101 /* ARGSUSED */
1102 static int
1103 hxge_param_set_hpi_debug_flag(p_hxge_t hxgep, queue_t *q,
1104 	mblk_t *mp, char *value, caddr_t cp)
1105 {
1106 	char		*end;
1107 	uint32_t	status = 0;
1108 	uint64_t	cfg_value = 0;
1109 	p_hxge_param_t	pa = (p_hxge_param_t)cp;
1110 	uint32_t	cfg_it = B_FALSE;
1111 
1112 	HXGE_DEBUG_MSG((hxgep, NDD_CTL, "==> hxge_param_set_hpi_debug_flag"));
1113 	cfg_value = mi_strtol(value, &end, BASE_HEX);
1114 
1115 	if (PARAM_OUTOF_RANGE(value, end, cfg_value, pa)) {
1116 		HXGE_DEBUG_MSG((hxgep, NDD_CTL, " hxge_param_set_hpi_debug_flag"
1117 		    " out of range %llx", cfg_value));
1118 		return (EINVAL);
1119 	}
1120 	if (pa->value != cfg_value) {
1121 		pa->old_value = pa->value;
1122 		pa->value = cfg_value;
1123 		cfg_it = B_TRUE;
1124 	}
1125 	if (cfg_it == B_TRUE) {
1126 		hpi_debug_level = pa->value;
1127 	}
1128 	HXGE_DEBUG_MSG((hxgep, NDD_CTL, "<== hxge_param_set_hpi_debug_flag"));
1129 	return (status);
1130 }
1131 
1132 typedef struct block_info {
1133 	char *name;
1134 	uint32_t offset;
1135 } block_info_t;
1136 
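/*
 * Register block names and their offsets within the mapped register
 * space, terminated by an ALL_FF_32 sentinel; used by
 * hxge_param_dump_ptrs() to print each block's base address.
 */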
1137 block_info_t reg_block[] = {
1138 	{"PIO", PIO_BASE_ADDR},
1139 	{"PIO_LDSV", PIO_LDSV_BASE_ADDR},
1140 	{"PIO_LDMASK", PIO_LDMASK_BASE_ADDR},
1141 	{"PFC", PFC_BASE_ADDR},
1142 	{"RDC", RDC_BASE_ADDR},
1143 	{"TDC", TDC_BASE_ADDR},
1144 	{"VMAC", VMAC_BASE_ADDR},
1145 	{"END", ALL_FF_32},
1146 };
1147 
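/*
 * Dump driver pointers into the reply mblk: the hxge_t and dev_regs
 * pointers, the register block base addresses, and the RCR/RBR and TX
 * ring pointers for each configured channel.
 */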
1148 /* ARGSUSED */
1149 static int
1150 hxge_param_dump_ptrs(p_hxge_t hxgep, queue_t *q, p_mblk_t mp, caddr_t cp)
1151 {
1152 	uint_t			print_len, buf_len;
1153 	p_mblk_t		np;
1154 	int			rdc, tdc, block;
1155 	uint64_t		base;
1156 	p_hxge_dma_pt_cfg_t	p_dma_cfgp;
1157 	p_hxge_hw_pt_cfg_t	p_cfgp;
1158 	int			buff_alloc_size = HXGE_NDD_INFODUMP_BUFF_8K;
1159 	p_tx_ring_t		*tx_rings;
1160 	p_rx_rcr_rings_t	rx_rcr_rings;
1161 	p_rx_rcr_ring_t		*rcr_rings;
1162 	p_rx_rbr_rings_t	rx_rbr_rings;
1163 	p_rx_rbr_ring_t		*rbr_rings;
1164 
1165 	HXGE_DEBUG_MSG((hxgep, IOC_CTL, "==> hxge_param_dump_ptrs"));
1166 
1167 	(void) mi_mpprintf(mp, "ptr information\n");
1168 
1169 	if ((np = allocb(buff_alloc_size, BPRI_HI)) == NULL) {
1170 		/* The following may work even if we cannot get a large buf. */
1171 		(void) mi_mpprintf(mp, "%s\n", "out of buffer");
1172 		return (0);
1173 	}
1174 	buf_len = buff_alloc_size;
1175 
1176 	mp->b_cont = np;
1177 	p_dma_cfgp = (p_hxge_dma_pt_cfg_t)&hxgep->pt_config;
1178 	p_cfgp = (p_hxge_hw_pt_cfg_t)&p_dma_cfgp->hw_config;
1179 
1180 	rx_rcr_rings = hxgep->rx_rcr_rings;
1181 	rcr_rings = rx_rcr_rings->rcr_rings;
1182 	rx_rbr_rings = hxgep->rx_rbr_rings;
1183 	rbr_rings = rx_rbr_rings->rbr_rings;
1184 	print_len = snprintf((char *)((mblk_t *)np)->b_wptr, buf_len,
1185 	    "hxgep (hxge_t) $%p\n dev_regs (dev_regs_t) $%p\n",
1186 	    (void *)hxgep, (void *)hxgep->dev_regs);
1187 
1188 	ADVANCE_PRINT_BUFFER(np, print_len, buf_len);
1189 	/* do register pointers */
1190 	print_len = snprintf((char *)((mblk_t *)np)->b_wptr, buf_len,
1191 	    "reg base (hpi_reg_ptr_t) $%p\t pci reg (hpi_reg_ptr_t) $%p\n",
1192 	    (void *)hxgep->dev_regs->hxge_regp,
1193 	    (void *)hxgep->dev_regs->hxge_pciregp);
1194 
1195 	ADVANCE_PRINT_BUFFER(np, print_len, buf_len);
1196 
1197 	print_len = snprintf((char *)((mblk_t *)np)->b_wptr, buf_len,
1198 	    "\nBlock \t Offset \n");
1199 
1200 	ADVANCE_PRINT_BUFFER(np, print_len, buf_len);
1201 	block = 0;
1202 #if defined(__i386)
1203 	base = (uint64_t)(uint32_t)hxgep->dev_regs->hxge_regp;
1204 #else
1205 	base = (uint64_t)hxgep->dev_regs->hxge_regp;
1206 #endif
1207 	while (reg_block[block].offset != ALL_FF_32) {
1208 		print_len = snprintf((char *)((mblk_t *)np)->b_wptr, buf_len,
1209 		    "%9s\t 0x%llx\n", reg_block[block].name,
1210 		    (unsigned long long) (reg_block[block].offset + base));
1211 		ADVANCE_PRINT_BUFFER(np, print_len, buf_len);
1212 		block++;
1213 	}
1214 
1215 	print_len = snprintf((char *)((mblk_t *)np)->b_wptr, buf_len,
1216 	    "\nRDC\t rcrp (rx_rcr_ring_t)\t rbrp (rx_rbr_ring_t)\n");
1217 
1218 	ADVANCE_PRINT_BUFFER(np, print_len, buf_len);
1219 
1220 	for (rdc = 0; rdc < p_cfgp->max_rdcs; rdc++) {
1221 		print_len = snprintf((char *)((mblk_t *)np)->b_wptr, buf_len,
1222 		    " %d\t  $%p\t\t   $%p\n",
1223 		    rdc, (void *)rcr_rings[rdc], (void *)rbr_rings[rdc]);
1224 		ADVANCE_PRINT_BUFFER(np, print_len, buf_len);
1225 	}
1226 
1227 	print_len = snprintf((char *)((mblk_t *)np)->b_wptr, buf_len,
1228 	    "\nTDC\t tdcp (tx_ring_t)\n");
1229 
1230 	ADVANCE_PRINT_BUFFER(np, print_len, buf_len);
1231 	tx_rings = hxgep->tx_rings->rings;
1232 	for (tdc = 0; tdc < p_cfgp->max_tdcs; tdc++) {
1233 		print_len = snprintf((char *)((mblk_t *)np)->b_wptr, buf_len,
1234 		    " %d\t  $%p\n", tdc, (void *)tx_rings[tdc]);
1235 		ADVANCE_PRINT_BUFFER(np, print_len, buf_len);
1236 	}
1237 
1238 	print_len = snprintf((char *)((mblk_t *)np)->b_wptr, buf_len, "\n\n");
1239 
1240 	ADVANCE_PRINT_BUFFER(np, print_len, buf_len);
1241 	HXGE_DEBUG_MSG((hxgep, IOC_CTL, "<== hxge_param_dump_ptrs"));
1242 	return (0);
1243 }
1244 
1245 /*
1246  * Load 'name' into the named dispatch table pointed to by 'pparam'.
1247  * 'pparam' should be the address of a caddr_t cell.  If the table
1248  * does not exist (*pparam == NULL), a new table is allocated and
1249  * '*pparam' is set to point to it.  If there is not enough space in
1250  * the table for a new entry, more space is allocated.
1251  */
1252 boolean_t
1253 hxge_nd_load(caddr_t *pparam, char *name,
1254 	pfi_t get_pfi, pfi_t set_pfi, caddr_t data)
1255 {
1256 	ND	*nd;
1257 	NDE	*nde;
1258 
1259 	HXGE_DEBUG_MSG((NULL, NDD2_CTL, " ==> hxge_nd_load: %s", name));
1260 	if (!pparam)
1261 		return (B_FALSE);
1262 	if ((nd = (ND *) * pparam) == NULL) {
1263 		if ((nd = (ND *) KMEM_ZALLOC(sizeof (ND), KM_NOSLEEP)) == NULL)
1264 			return (B_FALSE);
1265 		*pparam = (caddr_t)nd;
1266 	}
1267 	if (nd->nd_tbl) {
1268 		for (nde = nd->nd_tbl; nde->nde_name; nde++) {
1269 			if (strcmp(name, nde->nde_name) == 0)
1270 				goto fill_it;
1271 		}
1272 	}
1273 	if (nd->nd_free_count <= 1) {
1274 		if ((nde = (NDE *) KMEM_ZALLOC(nd->nd_size +
1275 		    NDE_ALLOC_SIZE, KM_NOSLEEP)) == NULL)
1276 			return (B_FALSE);
1277 		nd->nd_free_count += NDE_ALLOC_COUNT;
1278 		if (nd->nd_tbl) {
1279 			bcopy((char *)nd->nd_tbl, (char *)nde, nd->nd_size);
1280 			KMEM_FREE((char *)nd->nd_tbl, nd->nd_size);
1281 		} else {
1282 			nd->nd_free_count--;
1283 			nde->nde_name = "?";
1284 			nde->nde_get_pfi = hxge_nd_get_names;
1285 			nde->nde_set_pfi = hxge_set_default;
1286 		}
1287 		nde->nde_data = (caddr_t)nd;
1288 		nd->nd_tbl = nde;
1289 		nd->nd_size += NDE_ALLOC_SIZE;
1290 	}
1291 	for (nde = nd->nd_tbl; nde->nde_name; nde++)
1292 		noop;
1293 	nd->nd_free_count--;
1294 fill_it:
1295 	nde->nde_name = name;
1296 	nde->nde_get_pfi = get_pfi;
1297 	nde->nde_set_pfi = set_pfi;
1298 	nde->nde_data = data;
1299 	HXGE_DEBUG_MSG((NULL, NDD2_CTL, " <== hxge_nd_load"));
1300 
1301 	return (B_TRUE);
1302 }
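
/*
 * Minimal registration sketch (mirroring hxge_setup_param() above);
 * 'pa' is assumed to point at an initialized hxge_param_t entry:
 *
 *	if (!hxge_nd_load(&hxgep->param_list, pa->name,
 *	    (pfi_t)pa->getf, (pfi_t)pa->setf, (caddr_t)pa))
 *		(void) hxge_nd_free(&hxgep->param_list);
 */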
1303 
1304 /*
1305  * Free the table pointed to by 'pparam'
1306  */
1307 void
1308 hxge_nd_free(caddr_t *pparam)
1309 {
1310 	ND *nd;
1311 
1312 	if ((nd = (ND *)*pparam) != NULL) {
1313 		if (nd->nd_tbl)
1314 			KMEM_FREE((char *)nd->nd_tbl, nd->nd_size);
1315 		KMEM_FREE((char *)nd, sizeof (ND));
1316 		*pparam = nil(caddr_t);
1317 	}
1318 }
1319 
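/*
 * Handle an ND_GET or ND_SET ioctl against the named dispatch table
 * 'param'.  The parameter name (and, for ND_SET, the new value) come
 * from the first continuation mblk; '-' characters in the request are
 * mapped to '_' before the table lookup.  Returns B_TRUE when the
 * request was matched and processed, B_FALSE otherwise.
 */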
1320 int
1321 hxge_nd_getset(p_hxge_t hxgep, queue_t *q, caddr_t param, p_mblk_t mp)
1322 {
1323 	int		err;
1324 	IOCP		iocp;
1325 	p_mblk_t	mp1, mp2;
1326 	ND		*nd;
1327 	NDE		*nde;
1328 	char		*valp;
1329 
1330 	size_t		avail;
1331 
1332 	if (!param) {
1333 		return (B_FALSE);
1334 	}
1335 	nd = (ND *) param;
1336 	iocp = (IOCP) mp->b_rptr;
1337 	if ((iocp->ioc_count == 0) || !(mp1 = mp->b_cont)) {
1338 		mp->b_datap->db_type = M_IOCACK;
1339 		iocp->ioc_count = 0;
1340 		iocp->ioc_error = EINVAL;
1341 		return (B_FALSE);
1342 	}
1343 	/*
1344 	 * NOTE - logic throughout nd_xxx assumes single data block for ioctl.
1345 	 * However, existing code sends in some big buffers.
1346 	 */
1347 	avail = iocp->ioc_count;
1348 	if (mp1->b_cont) {
1349 		freemsg(mp1->b_cont);
1350 		mp1->b_cont = NULL;
1351 	}
1352 	mp1->b_datap->db_lim[-1] = '\0';	/* Force null termination */
1353 	for (valp = (char *)mp1->b_rptr; *valp != '\0'; valp++) {
1354 		if (*valp == '-')
1355 			*valp = '_';
1356 	}
1357 
1358 	valp = (char *)mp1->b_rptr;
1359 
1360 	for (nde = nd->nd_tbl; /* */; nde++) {
1361 		if (!nde->nde_name)
1362 			return (B_FALSE);
1363 		if (strcmp(nde->nde_name, valp) == 0)
1364 			break;
1365 	}
1366 	err = EINVAL;
1367 	while (*valp++)
1368 		noop;
1369 	if (!*valp || valp >= (char *)mp1->b_wptr)
1370 		valp = nilp(char);
1371 	switch (iocp->ioc_cmd) {
1372 	case ND_GET:
1373 		/*
1374 		 * (temporary) hack: "*valp" is size of user buffer for
1375 		 * copyout. If result of action routine is too big, free excess
1376 		 * and return ioc_rval as buffer size needed. Return as many
1377 		 * mblocks as will fit, free the rest.  For backward
1378 		 * compatibility, assume size of original ioctl buffer if
1379 		 * "*valp" bad or not given.
1380 		 */
1381 		if (valp)
1382 			avail = mi_strtol(valp, (char **)0, 10);
1383 		/*
1384 		 * We overwrite the name/value with the reply data
1385 		 */
1386 		mp2 = mp1;
1387 		while (mp2) {
1388 			mp2->b_wptr = mp2->b_rptr;
1389 			mp2 = mp2->b_cont;
1390 		}
1391 
1392 		err = (*nde->nde_get_pfi) (hxgep, q, mp1, nde->nde_data);
1393 
1394 		if (!err) {
1395 			size_t size_out = 0;
1396 			ssize_t excess;
1397 
1398 			iocp->ioc_rval = 0;
1399 
1400 			/* Tack on the null */
1401 			err = hxge_mk_mblk_tail_space(mp1, &mp2, 1);
1402 			if (!err) {
1403 				*mp2->b_wptr++ = '\0';
1404 				size_out = msgdsize(mp1);
1405 				excess = size_out - avail;
1406 				if (excess > 0) {
1407 					iocp->ioc_rval = (int)size_out;
1408 					size_out -= excess;
1409 					(void) adjmsg(mp1, -(excess + 1));
1410 					err = hxge_mk_mblk_tail_space(
1411 					    mp1, &mp2, 1);
1412 					if (!err)
1413 						*mp2->b_wptr++ = '\0';
1414 					else
1415 						size_out = 0;
1416 				}
1417 			} else
1418 				size_out = 0;
1419 			iocp->ioc_count = size_out;
1420 		}
1421 		break;
1422 
1423 	case ND_SET:
1424 		if (valp) {
1425 			if (nde->nde_set_pfi) {
1426 				err = (*nde->nde_set_pfi) (hxgep, q, mp1, valp,
1427 				    nde->nde_data);
1428 				iocp->ioc_count = 0;
1429 				freemsg(mp1);
1430 				mp->b_cont = NULL;
1431 			}
1432 		}
1433 		break;
1434 
1435 	default:
1436 		break;
1437 	}
1438 	iocp->ioc_error = err;
1439 	mp->b_datap->db_type = M_IOCACK;
1440 	return (B_TRUE);
1441 }
1442 
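/*
 * Print every parameter name in the named dispatch table together with
 * a "read only", "write only" or "read and write" tag.  This routine
 * backs the "?" entry installed by hxge_nd_load() when the table is
 * first created.
 */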
1443 /* ARGSUSED */
1444 int
1445 hxge_nd_get_names(p_hxge_t hxgep, queue_t *q, p_mblk_t mp, caddr_t param)
1446 {
1447 	ND		*nd;
1448 	NDE		*nde;
1449 	char		*rwtag;
1450 	boolean_t	get_ok, set_ok;
1451 	size_t		param_len;
1452 	int		status = 0;
1453 
1454 	nd = (ND *) param;
1455 	if (!nd)
1456 		return (ENOENT);
1457 
1458 	for (nde = nd->nd_tbl; nde->nde_name; nde++) {
1459 		get_ok = (nde->nde_get_pfi != hxge_get_default) &&
1460 		    (nde->nde_get_pfi != NULL);
1461 		set_ok = (nde->nde_set_pfi != hxge_set_default) &&
1462 		    (nde->nde_set_pfi != NULL);
1463 		if (get_ok) {
1464 			if (set_ok)
1465 				rwtag = "read and write";
1466 			else
1467 				rwtag = "read only";
1468 		} else if (set_ok)
1469 			rwtag = "write only";
1470 		else {
1471 			continue;
1472 		}
1473 		param_len = strlen(rwtag);
1474 		param_len += strlen(nde->nde_name);
1475 		param_len += 4;
1476 
1477 		(void) mi_mpprintf(mp, "%s (%s)", nde->nde_name, rwtag);
1478 	}
1479 	return (status);
1480 }
1481 
1482 /* ARGSUSED */
1483 int
1484 hxge_get_default(p_hxge_t hxgep, queue_t *q, p_mblk_t mp, caddr_t data)
1485 {
1486 	return (EACCES);
1487 }
1488 
1489 /* ARGSUSED */
1490 int
1491 hxge_set_default(p_hxge_t hxgep, queue_t *q, p_mblk_t mp, char *value,
1492 	caddr_t data)
1493 {
1494 	return (EACCES);
1495 }
1496 
1497 void
1498 hxge_param_ioctl(p_hxge_t hxgep, queue_t *wq, mblk_t *mp, struct iocblk *iocp)
1499 {
1500 	int cmd;
1501 	int status = B_FALSE;
1502 
1503 	HXGE_DEBUG_MSG((hxgep, IOC_CTL, "==> hxge_param_ioctl"));
1504 	cmd = iocp->ioc_cmd;
1505 	switch (cmd) {
1506 	default:
1507 		HXGE_DEBUG_MSG((hxgep, IOC_CTL,
1508 		    "hxge_param_ioctl: bad cmd 0x%0x", cmd));
1509 		break;
1510 
1511 	case ND_GET:
1512 	case ND_SET:
1513 		HXGE_DEBUG_MSG((hxgep, IOC_CTL,
1514 		    "hxge_param_ioctl: cmd 0x%0x", cmd));
1515 		if (!hxge_nd_getset(hxgep, wq, hxgep->param_list, mp)) {
1516 			HXGE_DEBUG_MSG((hxgep, IOC_CTL,
1517 			    "false ret from hxge_nd_getset"));
1518 			break;
1519 		}
1520 		status = B_TRUE;
1521 		break;
1522 	}
1523 
1524 	if (status) {
1525 		qreply(wq, mp);
1526 	} else {
1527 		miocnak(wq, mp, 0, EINVAL);
1528 	}
1529 	HXGE_DEBUG_MSG((hxgep, IOC_CTL, "<== hxge_param_ioctl"));
1530 }
1531