xref: /titanic_51/usr/src/uts/common/io/hxge/hxge_ndd.c (revision 67dbe2be0c0f1e2eb428b89088bb5667e8f0b9f6)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #include <hxge_impl.h>
27 #include <inet/common.h>
28 #include <inet/mi.h>
29 #include <inet/nd.h>
30 
31 extern uint64_t hpi_debug_level;
32 
33 #define	HXGE_PARAM_MAC_RW \
34 	HXGE_PARAM_RW | HXGE_PARAM_MAC | \
35 	HXGE_PARAM_NDD_WR_OK | HXGE_PARAM_READ_PROP
36 
37 #define	HXGE_PARAM_RXDMA_RW	HXGE_PARAM_RWP | HXGE_PARAM_RXDMA | \
38 	HXGE_PARAM_NDD_WR_OK | HXGE_PARAM_READ_PROP
39 
40 #define	HXGE_PARAM_L2CLASS_CFG	\
41 	HXGE_PARAM_RW | HXGE_PARAM_PROP_ARR32 | \
42 	HXGE_PARAM_READ_PROP | HXGE_PARAM_NDD_WR_OK
43 
44 #define	HXGE_PARAM_CLASS_RWS \
45 	HXGE_PARAM_RWS | HXGE_PARAM_READ_PROP
46 
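/*
 * Array-type parameters (HXGE_PARAM_PROP_ARR32/ARR64) are backed by a
 * fixed allocation of HXGE_PARAM_ARRAY_INIT_SIZE (0x20 == 32) uint64_t
 * entries; the number of elements actually configured is packed into
 * the parameter's 'type' field via HXGE_PARAM_ARRAY_CNT_SHIFT.
 */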
47 #define	HXGE_PARAM_ARRAY_INIT_SIZE	0x20ULL
48 
49 #define	BASE_ANY	0
50 #define	BASE_BINARY	2
51 #define	BASE_HEX	16
52 #define	BASE_DECIMAL	10
53 #define	ALL_FF_64	0xFFFFFFFFFFFFFFFFULL
54 #define	ALL_FF_32	0xFFFFFFFFUL
55 
56 #define	HXGE_NDD_INFODUMP_BUFF_SIZE	2048	/* is 2k enough? */
57 #define	HXGE_NDD_INFODUMP_BUFF_8K	8192
58 #define	HXGE_NDD_INFODUMP_BUFF_16K	0x4000
59 #define	HXGE_NDD_INFODUMP_BUFF_64K	0x10000
60 
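/*
 * A parsed value is rejected when no characters were consumed
 * (vptr == eptr) or when it falls outside [pa->minimum, pa->maximum].
 */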
61 #define	PARAM_OUTOF_RANGE(vptr, eptr, rval, pa)	\
62 	((vptr == eptr) || (rval < pa->minimum) || (rval > pa->maximum))
63 
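/*
 * Advance the mblk write pointer past the 'plen' bytes just formatted
 * and shrink the remaining room 'rlen' accordingly.
 */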
64 #define	ADVANCE_PRINT_BUFFER(pmp, plen, rlen) { \
65 	((mblk_t *)pmp)->b_wptr += plen; \
66 	rlen -= plen; \
67 }
68 
69 int hxge_param_rx_intr_pkts(p_hxge_t hxgep, queue_t *,
70 	mblk_t *, char *, caddr_t);
71 int hxge_param_rx_intr_time(p_hxge_t hxgep, queue_t *,
72 	mblk_t *, char *, caddr_t);
73 static int hxge_param_set_mac(p_hxge_t, queue_t *,
74 	mblk_t *, char *, caddr_t);
75 static int hxge_param_set_ether_usr(p_hxge_t hxgep, queue_t *, mblk_t *,
76 	char *, caddr_t);
77 int hxge_param_set_ip_opt(p_hxge_t hxgep,
78 	queue_t *, mblk_t *, char *, caddr_t);
79 static int hxge_param_pfc_hash_init(p_hxge_t hxgep,
80 	queue_t *, mblk_t *, char *, caddr_t);
81 static int hxge_param_tcam_enable(p_hxge_t hxgep, queue_t *,
82 	mblk_t *, char *, caddr_t);
83 static int hxge_param_get_rxdma_info(p_hxge_t hxgep, queue_t *q,
84 	p_mblk_t mp, caddr_t cp);
85 static int hxge_param_set_vlan_ids(p_hxge_t hxgep, queue_t *q,
86 	mblk_t *mp, char *value, caddr_t cp);
87 static int hxge_param_get_vlan_ids(p_hxge_t hxgep, queue_t *q,
88 	p_mblk_t mp, caddr_t cp);
89 int hxge_param_get_ip_opt(p_hxge_t hxgep,
90 	queue_t *, mblk_t *, caddr_t);
91 static int hxge_param_get_mac(p_hxge_t hxgep, queue_t *q, p_mblk_t mp,
92 	caddr_t cp);
93 static int hxge_param_get_debug_flag(p_hxge_t hxgep, queue_t *q,
94 	p_mblk_t mp, caddr_t cp);
95 static int hxge_param_set_hxge_debug_flag(p_hxge_t hxgep,
96 	queue_t *, mblk_t *, char *, caddr_t);
97 static int hxge_param_set_hpi_debug_flag(p_hxge_t hxgep,
98 	queue_t *, mblk_t *, char *, caddr_t);
99 static int hxge_param_dump_ptrs(p_hxge_t hxgep, queue_t *q,
100 	p_mblk_t mp, caddr_t cp);
101 
102 /*
103  * Global array of Hydra changeable parameters.
104  * This array is initialized to correspond to the default
105  * Hydra configuration.  It is copied into the per-instance
106  * parameter structure and then modified according to the
107  * fcode and hxge.conf configuration.  Later, the parameters are
108  * exported to ndd for display and run-time configuration (at
109  * least some of them).
110  */
111 
112 static hxge_param_t hxge_param_arr[] = {
113 	/* getf, setf, type,	min, max, value, old, fcode-name, conf-name */
114 	{hxge_param_get_generic, NULL, HXGE_PARAM_READ,
115 		0, 999, 1000, 0, "instance", "instance"},
116 
117 	/* MTU cannot be propagated to the stack from here, so don't show it */
118 	{hxge_param_get_mac, hxge_param_set_mac,
119 		HXGE_PARAM_MAC_RW | HXGE_PARAM_DONT_SHOW,
120 		0, 1, 0, 0, "accept-jumbo", "accept_jumbo"},
121 
122 	{hxge_param_get_rxdma_info, NULL,
123 		HXGE_PARAM_READ | HXGE_PARAM_DONT_SHOW,
124 		HXGE_RBR_RBB_MIN, HXGE_RBR_RBB_MAX, HXGE_RBR_RBB_DEFAULT, 0,
125 		"rx-rbr-size", "rx_rbr_size"},
126 
127 	{hxge_param_get_rxdma_info, NULL,
128 		HXGE_PARAM_READ | HXGE_PARAM_DONT_SHOW,
129 		HXGE_RCR_MIN, HXGE_RCR_MAX, HXGE_RCR_DEFAULT, 0,
130 		"rx-rcr-size", "rx_rcr_size"},
131 
132 	{hxge_param_get_generic, hxge_param_rx_intr_time,
133 		HXGE_PARAM_RXDMA_RW,
134 		HXGE_RDC_RCR_TIMEOUT_MIN, HXGE_RDC_RCR_TIMEOUT_MAX,
135 		RXDMA_RCR_TO_DEFAULT, 0, "rxdma-intr-time", "rxdma_intr_time"},
136 
137 	{hxge_param_get_generic, hxge_param_rx_intr_pkts,
138 		HXGE_PARAM_RXDMA_RW,
139 		HXGE_RDC_RCR_THRESHOLD_MIN, HXGE_RDC_RCR_THRESHOLD_MAX,
140 		RXDMA_RCR_PTHRES_DEFAULT, 0,
141 		"rxdma-intr-pkts", "rxdma_intr_pkts"},
142 
143 	/* Hardware VLAN is not used currently, so don't show it */
144 	{hxge_param_get_vlan_ids, hxge_param_set_vlan_ids,
145 		HXGE_PARAM_L2CLASS_CFG | HXGE_PARAM_DONT_SHOW,
146 		VLAN_ID_MIN, VLAN_ID_MAX, 0, 0, "vlan-ids", "vlan_ids"},
147 
148 	/* Hardware VLAN is not used currently, so don't show it */
149 	{hxge_param_get_generic, hxge_param_set_generic,
150 		HXGE_PARAM_CLASS_RWS | HXGE_PARAM_DONT_SHOW,
151 		VLAN_ID_MIN, VLAN_ID_MAX, VLAN_ID_IMPLICIT, VLAN_ID_IMPLICIT,
152 		"implicit-vlan-id", "implicit_vlan_id"},
153 
154 	{hxge_param_get_generic, hxge_param_tcam_enable,
155 		HXGE_PARAM_CLASS_RWS | HXGE_PARAM_DONT_SHOW,
156 		0, 0x1, 0x0, 0, "tcam-enable", "tcam_enable"},
157 
158 	{hxge_param_get_generic, hxge_param_pfc_hash_init,
159 		HXGE_PARAM_CLASS_RWS | HXGE_PARAM_DONT_SHOW,
160 		0, ALL_FF_32, ALL_FF_32, 0,
161 		"hash-init-value", "hash_init_value"},
162 
163 	{hxge_param_get_generic, hxge_param_set_ether_usr,
164 		HXGE_PARAM_CLASS_RWS | HXGE_PARAM_DONT_SHOW,
165 		0, ALL_FF_32, 0x0, 0,
166 		"class-cfg-ether-usr1", "class_cfg_ether_usr1"},
167 
168 	{hxge_param_get_generic, hxge_param_set_ether_usr,
169 		HXGE_PARAM_CLASS_RWS | HXGE_PARAM_DONT_SHOW,
170 		0, ALL_FF_32, 0x0, 0,
171 		"class-cfg-ether-usr2", "class_cfg_ether_usr2"},
172 
173 	{hxge_param_get_ip_opt, hxge_param_set_ip_opt, HXGE_PARAM_CLASS_RWS,
174 		0, ALL_FF_32, HXGE_CLASS_TCAM_LOOKUP, 0,
175 		"class-opt-ipv4-tcp", "class_opt_ipv4_tcp"},
176 
177 	{hxge_param_get_ip_opt, hxge_param_set_ip_opt, HXGE_PARAM_CLASS_RWS,
178 		0, ALL_FF_32, HXGE_CLASS_TCAM_LOOKUP, 0,
179 		"class-opt-ipv4-udp", "class_opt_ipv4_udp"},
180 
181 	{hxge_param_get_ip_opt, hxge_param_set_ip_opt, HXGE_PARAM_CLASS_RWS,
182 		0, ALL_FF_32, HXGE_CLASS_TCAM_LOOKUP, 0,
183 		"class-opt-ipv4-ah", "class_opt_ipv4_ah"},
184 
185 	{hxge_param_get_ip_opt, hxge_param_set_ip_opt, HXGE_PARAM_CLASS_RWS,
186 		0, ALL_FF_32, HXGE_CLASS_TCAM_LOOKUP, 0,
187 		"class-opt-ipv4-sctp", "class_opt_ipv4_sctp"},
188 
189 	{hxge_param_get_ip_opt, hxge_param_set_ip_opt, HXGE_PARAM_CLASS_RWS,
190 		0, ALL_FF_32, HXGE_CLASS_TCAM_LOOKUP, 0,
191 		"class-opt-ipv6-tcp", "class_opt_ipv6_tcp"},
192 
193 	{hxge_param_get_ip_opt, hxge_param_set_ip_opt, HXGE_PARAM_CLASS_RWS,
194 		0, ALL_FF_32, HXGE_CLASS_TCAM_LOOKUP, 0,
195 		"class-opt-ipv6-udp", "class_opt_ipv6_udp"},
196 
197 	{hxge_param_get_ip_opt, hxge_param_set_ip_opt, HXGE_PARAM_CLASS_RWS,
198 		0, ALL_FF_32, HXGE_CLASS_TCAM_LOOKUP, 0,
199 		"class-opt-ipv6-ah", "class_opt_ipv6_ah"},
200 
201 	{hxge_param_get_ip_opt, hxge_param_set_ip_opt, HXGE_PARAM_CLASS_RWS,
202 		0, ALL_FF_32, HXGE_CLASS_TCAM_LOOKUP, 0,
203 		"class-opt-ipv6-sctp", "class_opt_ipv6_sctp"},
204 
205 	{hxge_param_get_debug_flag, hxge_param_set_hxge_debug_flag,
206 		HXGE_PARAM_RW | HXGE_PARAM_DONT_SHOW,
207 		0ULL, ALL_FF_64, 0ULL, 0ULL,
208 		"hxge-debug-flag", "hxge_debug_flag"},
209 
210 	{hxge_param_get_debug_flag, hxge_param_set_hpi_debug_flag,
211 		HXGE_PARAM_RW | HXGE_PARAM_DONT_SHOW,
212 		0ULL, ALL_FF_64, 0ULL, 0ULL,
213 		"hpi-debug-flag", "hpi_debug_flag"},
214 
215 	{hxge_param_dump_ptrs, NULL, HXGE_PARAM_READ | HXGE_PARAM_DONT_SHOW,
216 		0, 0x0fffffff, 0x0fffffff, 0, "dump-ptrs", "dump_ptrs"},
217 
218 	{NULL, NULL, HXGE_PARAM_READ | HXGE_PARAM_DONT_SHOW,
219 		0, 0x0fffffff, 0x0fffffff, 0, "end", "end"},
220 };
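/*
 * Illustrative ndd(1M) usage for the parameters above (a sketch only;
 * the device node name /dev/hxge0 is an assumption for instance 0):
 *
 *	ndd /dev/hxge0 '?'			- list registered parameters
 *	ndd -get /dev/hxge0 rxdma_intr_pkts	- read the RCR packet threshold
 *	ndd -set /dev/hxge0 rxdma_intr_pkts 20	- set it (the value is parsed
 *						  as hex by
 *						  hxge_param_rx_intr_pkts())
 *
 * Entries flagged HXGE_PARAM_DONT_SHOW have their get/set routines nulled
 * out in hxge_setup_param(), so they are not reachable through ndd.
 */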
221 
222 extern void *hxge_list;
223 
224 /*
225  * Update the NDD array from the soft properties.
226  */
227 void
228 hxge_get_param_soft_properties(p_hxge_t hxgep)
229 {
230 	p_hxge_param_t	param_arr;
231 	uint_t		prop_len;
232 	int		i, j;
233 	uint32_t	param_count;
234 	uint32_t	*int_prop_val;
235 
236 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, " ==> hxge_get_param_soft_properties"));
237 
238 	param_arr = hxgep->param_arr;
239 	param_count = hxgep->param_count;
240 	for (i = 0; i < param_count; i++) {
241 
242 		if ((param_arr[i].type & HXGE_PARAM_READ_PROP) == 0)
243 			continue;
244 
245 		if ((param_arr[i].type & HXGE_PARAM_PROP_STR))
246 			continue;
247 
248 		if ((param_arr[i].type & HXGE_PARAM_PROP_ARR32) ||
249 		    (param_arr[i].type & HXGE_PARAM_PROP_ARR64)) {
250 
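			/*
			 * Array-type property: copy up to
			 * HXGE_PARAM_ARRAY_INIT_SIZE 32-bit values into the
			 * 64-bit backing store and record the element count
			 * in the parameter's type field.
			 */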
251 			if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY,
252 			    hxgep->dip, 0, param_arr[i].fcode_name,
253 			    (int **)&int_prop_val, (uint_t *)&prop_len) ==
254 			    DDI_PROP_SUCCESS) {
255 				uint64_t *cfg_value;
256 				uint64_t prop_count;
257 
258 				if (prop_len > HXGE_PARAM_ARRAY_INIT_SIZE)
259 					prop_len = HXGE_PARAM_ARRAY_INIT_SIZE;
260 #if defined(__i386)
261 				cfg_value =
262 				    (uint64_t *)(int32_t)param_arr[i].value;
263 #else
264 				cfg_value = (uint64_t *)param_arr[i].value;
265 #endif
266 				for (j = 0; j < prop_len; j++) {
267 					cfg_value[j] = int_prop_val[j];
268 				}
269 				prop_count = prop_len;
270 				param_arr[i].type |=
271 				    (prop_count << HXGE_PARAM_ARRAY_CNT_SHIFT);
272 
273 				ddi_prop_free(int_prop_val);
274 			}
275 			continue;
276 		}
277 		if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, hxgep->dip, 0,
278 		    param_arr[i].fcode_name, (int **)&int_prop_val,
279 		    &prop_len) == DDI_PROP_SUCCESS) {
280 			if ((*int_prop_val >= param_arr[i].minimum) &&
281 			    (*int_prop_val <= param_arr[i].maximum))
282 				param_arr[i].value = *int_prop_val;
283 			ddi_prop_free(int_prop_val);
284 		}
285 		if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, hxgep->dip, 0,
286 		    param_arr[i].name, (int **)&int_prop_val, &prop_len) ==
287 		    DDI_PROP_SUCCESS) {
288 			if ((*int_prop_val >= param_arr[i].minimum) &&
289 			    (*int_prop_val <= param_arr[i].maximum))
290 				param_arr[i].value = *int_prop_val;
291 			ddi_prop_free(int_prop_val);
292 		}
293 	}
294 }
295 
296 static int
297 hxge_private_param_register(p_hxge_t hxgep, p_hxge_param_t param_arr)
298 {
299 	int		status = B_TRUE;
300 	int		channel;
301 	char		*prop_name;
302 	char		*end;
303 	uint32_t	name_chars;
304 
305 	HXGE_DEBUG_MSG((hxgep, NDD2_CTL, " hxge_private_param_register %s",
306 	    param_arr->name));
307 
308 	if ((param_arr->type & HXGE_PARAM_PRIV) != HXGE_PARAM_PRIV)
309 		return (B_TRUE);
310 	prop_name = param_arr->name;
311 	if (param_arr->type & HXGE_PARAM_RXDMA) {
312 		if (strncmp("rxdma_intr", prop_name, 10) == 0)
313 			return (B_TRUE);
314 		else
315 			return (B_FALSE);
316 	}
317 
318 	if (param_arr->type & HXGE_PARAM_TXDMA) {
319 		name_chars = strlen("txdma");
320 		if (strncmp("txdma", prop_name, name_chars) == 0) {
321 			prop_name += name_chars;
322 			channel = mi_strtol(prop_name, &end, 10);
323 			/* now check if this tdc is in the config */
324 			HXGE_DEBUG_MSG((hxgep, NDD2_CTL,
325 			    " hxge_private_param_register: %d", channel));
326 			return (hxge_check_txdma_port_member(hxgep, channel));
327 		}
328 		return (B_FALSE);
329 	}
330 
331 	status = B_FALSE;
332 	HXGE_DEBUG_MSG((hxgep, NDD2_CTL, "<== hxge_private_param_register"));
333 
334 	return (status);
335 }
336 
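/*
 * Register every parameter with the per-instance named dispatch list.
 * Private parameters that do not belong to this instance, complex
 * (HXGE_PARAM_CMPLX) parameters, DONT_SHOW parameters and INIT_ONLY
 * parameters get their set and/or get handlers suppressed first.
 */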
337 void
338 hxge_setup_param(p_hxge_t hxgep)
339 {
340 	p_hxge_param_t	param_arr;
341 	int		i;
342 	pfi_t		set_pfi;
343 
344 	HXGE_DEBUG_MSG((hxgep, NDD_CTL, "==> hxge_setup_param"));
345 	/*
346 	 * Make sure the param_instance is set to a valid device instance.
347 	 */
348 	if (hxge_param_arr[param_instance].value == 1000)
349 		hxge_param_arr[param_instance].value = hxgep->instance;
350 
351 	param_arr = hxgep->param_arr;
352 	param_arr[param_instance].value = hxgep->instance;
353 
354 	for (i = 0; i < hxgep->param_count; i++) {
355 		if ((param_arr[i].type & HXGE_PARAM_PRIV) &&
356 		    (hxge_private_param_register(hxgep, &param_arr[i]) ==
357 		    B_FALSE)) {
358 			param_arr[i].setf = NULL;
359 			param_arr[i].getf = NULL;
360 		}
361 		if (param_arr[i].type & HXGE_PARAM_CMPLX)
362 			param_arr[i].setf = NULL;
363 
364 		if (param_arr[i].type & HXGE_PARAM_DONT_SHOW) {
365 			param_arr[i].setf = NULL;
366 			param_arr[i].getf = NULL;
367 		}
368 		set_pfi = (pfi_t)param_arr[i].setf;
369 
370 		if ((set_pfi) && (param_arr[i].type & HXGE_PARAM_INIT_ONLY)) {
371 			set_pfi = NULL;
372 		}
373 		if (!hxge_nd_load(&hxgep->param_list, param_arr[i].name,
374 		    (pfi_t)param_arr[i].getf, set_pfi,
375 		    (caddr_t)&param_arr[i])) {
376 			(void) hxge_nd_free(&hxgep->param_list);
377 			break;
378 		}
379 	}
380 
381 	HXGE_DEBUG_MSG((hxgep, NDD_CTL, "<== hxge_setup_param"));
382 }
383 
384 /*
385  * Called from the attach function; it allocates memory for
386  * the parameter array and for its array-type members.
387  */
388 void
389 hxge_init_param(p_hxge_t hxgep)
390 {
391 	p_hxge_param_t	param_arr;
392 	int		i, alloc_size;
393 	uint64_t	alloc_count;
394 
395 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_init_param"));
396 	/*
397 	 * Make sure the param_instance is set to a valid device instance.
398 	 */
399 	if (hxge_param_arr[param_instance].value == 1000)
400 		hxge_param_arr[param_instance].value = hxgep->instance;
401 
402 	param_arr = hxgep->param_arr;
403 	if (param_arr == NULL) {
404 		param_arr = (p_hxge_param_t)KMEM_ZALLOC(
405 		    sizeof (hxge_param_arr), KM_SLEEP);
406 	}
407 	for (i = 0; i < sizeof (hxge_param_arr) / sizeof (hxge_param_t); i++) {
408 		param_arr[i] = hxge_param_arr[i];
409 		if ((param_arr[i].type & HXGE_PARAM_PROP_ARR32) ||
410 		    (param_arr[i].type & HXGE_PARAM_PROP_ARR64)) {
411 			alloc_count = HXGE_PARAM_ARRAY_INIT_SIZE;
412 			alloc_size = alloc_count * sizeof (uint64_t);
413 #if defined(__i386)
414 			param_arr[i].value =
415 			    (uint64_t)(uint32_t)KMEM_ZALLOC(alloc_size,
416 			    KM_SLEEP);
417 			param_arr[i].old_value =
418 			    (uint64_t)(uint32_t)KMEM_ZALLOC(alloc_size,
419 			    KM_SLEEP);
420 #else
421 			param_arr[i].value =
422 			    (uint64_t)KMEM_ZALLOC(alloc_size, KM_SLEEP);
423 			param_arr[i].old_value =
424 			    (uint64_t)KMEM_ZALLOC(alloc_size, KM_SLEEP);
425 #endif
426 			param_arr[i].type |=
427 			    (alloc_count << HXGE_PARAM_ARRAY_ALLOC_SHIFT);
428 		}
429 	}
430 
431 	hxgep->param_arr = param_arr;
432 	hxgep->param_count = sizeof (hxge_param_arr) / sizeof (hxge_param_t);
433 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_init_param: count %d",
434 	    hxgep->param_count));
435 }
436 
437 /*
438  * Called during detach/attach teardown; frees the parameter array memory.
439  */
440 void
441 hxge_destroy_param(p_hxge_t hxgep)
442 {
443 	int		i;
444 	uint64_t	free_size, free_count;
445 
446 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_destroy_param"));
447 	/*
448 	 * Make sure the param_instance is set to a valid device instance.
449 	 */
450 	if (hxge_param_arr[param_instance].value == hxgep->instance) {
451 		for (i = 0; i <= hxge_param_arr[param_instance].maximum; i++) {
452 			if ((ddi_get_soft_state(hxge_list, i) != NULL) &&
453 			    (i != hxgep->instance))
454 				break;
455 		}
456 		hxge_param_arr[param_instance].value = i;
457 	}
458 	if (hxgep->param_list)
459 		hxge_nd_free(&hxgep->param_list);
460 	for (i = 0; i < hxgep->param_count; i++) {
461 		if ((hxgep->param_arr[i].type & HXGE_PARAM_PROP_ARR32) ||
462 		    (hxgep->param_arr[i].type & HXGE_PARAM_PROP_ARR64)) {
463 			/* Array params are always allocated */
464 			/* HXGE_PARAM_ARRAY_INIT_SIZE entries in */
465 			/* hxge_init_param(); free exactly that many. */
466 			free_count = HXGE_PARAM_ARRAY_INIT_SIZE;
467 			free_size = sizeof (uint64_t) * free_count;
468 #if defined(__i386)
469 			KMEM_FREE((void *)(uint32_t)
470 			    hxgep->param_arr[i].value, free_size);
471 			KMEM_FREE((void *)(uint32_t)
472 			    hxgep->param_arr[i].old_value, free_size);
473 #else
474 			KMEM_FREE((void *) hxgep->param_arr[i].value,
475 			    free_size);
476 			KMEM_FREE((void *) hxgep->param_arr[i].old_value,
477 			    free_size);
478 #endif
479 		}
480 	}
481 
482 	KMEM_FREE(hxgep->param_arr, sizeof (hxge_param_arr));
483 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_destroy_param"));
484 }
485 
486 /*
487  * Extracts the value of the requested 'hxge' parameter and prints it.
488  * cp points to the parameter entry.
489  */
490 /* ARGSUSED */
491 int
492 hxge_param_get_generic(p_hxge_t hxgep, queue_t *q, p_mblk_t mp, caddr_t cp)
493 {
494 	p_hxge_param_t pa = (p_hxge_param_t)cp;
495 
496 	HXGE_DEBUG_MSG((hxgep, NDD_CTL, " ==> hxge_param_get_generic name %s ",
497 	    pa->name));
498 
499 	if (pa->value > 0xffffffff)
500 		(void) mi_mpprintf(mp, "%x%08x", (int)(pa->value >> 32),
501 		    (int)(pa->value & 0xffffffff));
502 	else
503 		(void) mi_mpprintf(mp, "%x", (int)pa->value);
504 
505 	HXGE_DEBUG_MSG((hxgep, NDD_CTL, "<== hxge_param_get_generic"));
506 	return (0);
507 }
508 
509 /* ARGSUSED */
510 static int
511 hxge_param_get_mac(p_hxge_t hxgep, queue_t *q, p_mblk_t mp, caddr_t cp)
512 {
513 	p_hxge_param_t pa = (p_hxge_param_t)cp;
514 
515 	HXGE_DEBUG_MSG((hxgep, NDD_CTL, "==> hxge_param_get_mac"));
516 
517 	(void) mi_mpprintf(mp, "%d", (uint32_t)pa->value);
518 	HXGE_DEBUG_MSG((hxgep, NDD_CTL, "<== hxge_param_get_mac"));
519 	return (0);
520 }
521 
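/*
 * Dump per-RDC information (logical to hardware RDC mapping, RBR/RCR
 * ring pointers and RBR block counts) into a freshly allocated mblk
 * chained onto the reply message.
 */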
522 /* ARGSUSED */
523 int
524 hxge_param_get_rxdma_info(p_hxge_t hxgep, queue_t *q, p_mblk_t mp, caddr_t cp)
525 {
526 	uint_t			print_len, buf_len;
527 	p_mblk_t		np;
528 	int			rdc;
529 	p_hxge_dma_pt_cfg_t	p_dma_cfgp;
530 	p_hxge_hw_pt_cfg_t	p_cfgp;
531 	int			buff_alloc_size = HXGE_NDD_INFODUMP_BUFF_SIZE;
532 
533 	p_rx_rcr_rings_t rx_rcr_rings;
534 	p_rx_rcr_ring_t *rcr_rings;
535 	p_rx_rbr_rings_t rx_rbr_rings;
536 	p_rx_rbr_ring_t *rbr_rings;
537 
538 	HXGE_DEBUG_MSG((hxgep, NDD_CTL, "==> hxge_param_get_rxdma_info"));
539 
540 	(void) mi_mpprintf(mp, "RXDMA Information\n");
541 
542 	if ((np = allocb(buff_alloc_size, BPRI_HI)) == NULL) {
543 		/* The following may work even if we cannot get a large buf. */
544 		(void) mi_mpprintf(mp, "%s\n", "out of buffer");
545 		return (0);
546 	}
547 	buf_len = buff_alloc_size;
548 
549 	mp->b_cont = np;
550 
551 	p_dma_cfgp = (p_hxge_dma_pt_cfg_t)&hxgep->pt_config;
552 	p_cfgp = (p_hxge_hw_pt_cfg_t)&p_dma_cfgp->hw_config;
553 
554 	rx_rcr_rings = hxgep->rx_rcr_rings;
555 	rcr_rings = rx_rcr_rings->rcr_rings;
556 	rx_rbr_rings = hxgep->rx_rbr_rings;
557 	rbr_rings = rx_rbr_rings->rbr_rings;
558 
559 	print_len = snprintf((char *)((mblk_t *)np)->b_wptr, buf_len,
560 	    "Total RDCs\t %d\n", p_cfgp->max_rdcs);
561 	((mblk_t *)np)->b_wptr += print_len;
562 	buf_len -= print_len;
563 	print_len = snprintf((char *)((mblk_t *)np)->b_wptr, buf_len,
564 	    "RDC\t HW RDC\t RBR ptr \t"
565 	    "chunks\t RCR ptr\n");
566 	((mblk_t *)np)->b_wptr += print_len;
567 	buf_len -= print_len;
568 	for (rdc = 0; rdc < p_cfgp->max_rdcs; rdc++) {
569 		print_len = snprintf((char *)((mblk_t *)np)->b_wptr, buf_len,
570 		    " %d\t  %d\t $%p\t 0x%x\t $%p\n",
571 		    rdc, hxgep->rdc[rdc], (void *)rbr_rings[rdc],
572 		    rbr_rings[rdc]->num_blocks, (void *)rcr_rings[rdc]);
573 		((mblk_t *)np)->b_wptr += print_len;
574 		buf_len -= print_len;
575 	}
576 	HXGE_DEBUG_MSG((hxgep, NDD_CTL, "<== hxge_param_get_rxdma_info"));
577 	return (0);
578 }
579 
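/*
 * Ensure there are at least 'size' bytes of room at the tail of the
 * mblk chain, allocating and linking a new 1 KB mblk when necessary;
 * *nmp is set to the mblk that now has the space.
 */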
580 int
581 hxge_mk_mblk_tail_space(p_mblk_t mp, p_mblk_t *nmp, size_t size)
582 {
583 	p_mblk_t tmp;
584 
585 	tmp = mp;
586 	while (tmp->b_cont)
587 		tmp = tmp->b_cont;
588 	if ((tmp->b_wptr + size) >= tmp->b_datap->db_lim) {
589 		tmp->b_cont = allocb(1024, BPRI_HI);
590 		tmp = tmp->b_cont;
591 		if (!tmp)
592 			return (ENOMEM);
593 	}
594 	*nmp = tmp;
595 	return (0);
596 }
597 
598 /*
599  * Sets a generic 'hxge' parameter to the value passed in through ndd,
600  * after range-checking it against the parameter's minimum and maximum.
601  */
602 /* ARGSUSED */
603 int
604 hxge_param_set_generic(p_hxge_t hxgep, queue_t *q, mblk_t *mp,
605 	char *value, caddr_t cp)
606 {
607 	char		*end;
608 	uint32_t	new_value;
609 	p_hxge_param_t	pa = (p_hxge_param_t)cp;
610 
611 	HXGE_DEBUG_MSG((hxgep, IOC_CTL, " ==> hxge_param_set_generic"));
612 	new_value = (uint32_t)mi_strtol(value, &end, BASE_DECIMAL);
613 	if (end == value || new_value < pa->minimum ||
614 	    new_value > pa->maximum) {
615 		return (EINVAL);
616 	}
617 	pa->value = new_value;
618 	HXGE_DEBUG_MSG((hxgep, IOC_CTL, " <== hxge_param_set_generic"));
619 	return (0);
620 }
621 
622 /* ARGSUSED */
623 int
624 hxge_param_set_mac(p_hxge_t hxgep, queue_t *q, mblk_t *mp,
625 	char *value, caddr_t cp)
626 {
627 	char		*end;
628 	uint32_t	new_value;
629 	int		status = 0;
630 	p_hxge_param_t	pa = (p_hxge_param_t)cp;
631 
632 	HXGE_DEBUG_MSG((hxgep, NDD_CTL, "==> hxge_param_set_mac"));
633 	new_value = (uint32_t)mi_strtol(value, &end, BASE_DECIMAL);
634 	if (PARAM_OUTOF_RANGE(value, end, new_value, pa)) {
635 		return (EINVAL);
636 	}
637 
638 	if (pa->value != new_value) {
639 		pa->old_value = pa->value;
640 		pa->value = new_value;
641 	}
642 
643 	if (pa->value != pa->old_value) {
644 		RW_ENTER_WRITER(&hxgep->filter_lock);
645 		(void) hxge_rx_vmac_disable(hxgep);
646 		(void) hxge_tx_vmac_disable(hxgep);
647 
648 		/*
649 		 * Apply the new jumbo parameter here.
650 		 * The order of the following two calls is important.
651 		 */
652 		(void) hxge_tx_vmac_enable(hxgep);
653 		(void) hxge_rx_vmac_enable(hxgep);
654 		RW_EXIT(&hxgep->filter_lock);
655 	}
656 
657 	HXGE_DEBUG_MSG((hxgep, NDD_CTL, "<== hxge_param_set_mac"));
658 	return (status);
659 }
660 
661 /* ARGSUSED */
662 int
663 hxge_param_rx_intr_pkts(p_hxge_t hxgep, queue_t *q,
664 	mblk_t *mp, char *value, caddr_t cp)
665 {
666 	char		*end;
667 	uint32_t	cfg_value;
668 	p_hxge_param_t	pa = (p_hxge_param_t)cp;
669 
670 	HXGE_DEBUG_MSG((hxgep, NDD_CTL, "==> hxge_param_rx_intr_pkts"));
671 
672 	if (strncasecmp(value, "0x", 2) == 0)
673 		value += 2;
674 
675 	cfg_value = (uint32_t)mi_strtol(value, &end, BASE_HEX);
676 
677 	if ((cfg_value > HXGE_RDC_RCR_THRESHOLD_MAX) ||
678 	    (cfg_value < HXGE_RDC_RCR_THRESHOLD_MIN)) {
679 		return (EINVAL);
680 	}
681 
682 	if ((pa->value != cfg_value)) {
683 		pa->old_value = pa->value;
684 		pa->value = cfg_value;
685 		hxgep->intr_threshold = pa->value;
686 	}
687 
688 	HXGE_DEBUG_MSG((hxgep, NDD_CTL, "<== hxge_param_rx_intr_pkts"));
689 	return (0);
690 }
691 
692 /* ARGSUSED */
693 int
694 hxge_param_rx_intr_time(p_hxge_t hxgep, queue_t *q,
695 	mblk_t *mp, char *value, caddr_t cp)
696 {
697 	char		*end;
698 	uint32_t	cfg_value;
699 	p_hxge_param_t	pa = (p_hxge_param_t)cp;
700 
701 	HXGE_DEBUG_MSG((hxgep, NDD_CTL, "==> hxge_param_rx_intr_time"));
702 
703 	if (strncasecmp(value, "0x", 2) == 0)
704 		value += 2;
705 
706 	cfg_value = (uint32_t)mi_strtol(value, &end, BASE_HEX);
707 
708 	if ((cfg_value > HXGE_RDC_RCR_TIMEOUT_MAX) ||
709 	    (cfg_value < HXGE_RDC_RCR_TIMEOUT_MIN)) {
710 		return (EINVAL);
711 	}
712 
713 	if ((pa->value != cfg_value)) {
714 		pa->old_value = pa->value;
715 		pa->value = cfg_value;
716 		hxgep->intr_timeout = pa->value;
717 	}
718 
719 	HXGE_DEBUG_MSG((hxgep, NDD_CTL, "<== hxge_param_rx_intr_time"));
720 	return (0);
721 }
722 
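/*
 * Add a VLAN ID to the vlan-ids array parameter.  The ndd value is
 * parsed as a hex hxge_param_map_t whose param_id field carries the
 * VLAN ID; the ID is appended to the array (unless already present),
 * the configured count in the parameter's type field is bumped, and
 * the PFC VLAN table entry is programmed through
 * hpi_pfc_cfg_vlan_table_entry_set().
 */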
723 /* ARGSUSED */
724 static int
725 hxge_param_set_vlan_ids(p_hxge_t hxgep, queue_t *q, mblk_t *mp, char *value,
726     caddr_t cp)
727 {
728 	char			*end;
729 	uint32_t		status = 0, cfg_value;
730 	p_hxge_param_t		pa = (p_hxge_param_t)cp;
731 	uint32_t		cfg_it = B_FALSE;
732 	uint32_t		*val_ptr, *old_val_ptr;
733 	hxge_param_map_t	*vmap, *old_map;
734 	p_hxge_class_pt_cfg_t 	p_class_cfgp;
735 	uint64_t		cfgd_vlans;
736 	int			i, inc = 0, cfg_position;
737 	hxge_mv_cfg_t		*vlan_tbl;
738 	hpi_handle_t		handle;
739 
740 	HXGE_DEBUG_MSG((hxgep, NDD_CTL, "==> hxge_param_set_vlan_ids "));
741 
742 	p_class_cfgp = (p_hxge_class_pt_cfg_t)&hxgep->class_config;
743 	vlan_tbl = (hxge_mv_cfg_t *)&p_class_cfgp->vlan_tbl[0];
744 	handle = hxgep->hpi_reg_handle;
745 
746 	if (strncasecmp(value, "0x", 2) == 0)
747 		value += 2;
748 
749 	cfg_value = (uint32_t)mi_strtol(value, &end, BASE_HEX);
750 
751 	/* now do decoding */
752 	cfgd_vlans = ((pa->type & HXGE_PARAM_ARRAY_CNT_MASK) >>
753 	    HXGE_PARAM_ARRAY_CNT_SHIFT);
754 
755 	if (cfgd_vlans >= HXGE_PARAM_ARRAY_INIT_SIZE) {
756 		/*
757 		 * For now, we process only up to HXGE_PARAM_ARRAY_INIT_SIZE
758 		 * parameters.  In the future, we may want to expand
759 		 * the storage array and continue.
760 		 */
761 		return (EINVAL);
762 	}
763 
764 	vmap = (hxge_param_map_t *)&cfg_value;
765 	if ((vmap->param_id == 0) || (vmap->param_id > VLAN_ID_MAX)) {
766 		return (EINVAL);
767 	}
768 
769 	HXGE_DEBUG_MSG((hxgep, NDD_CTL, " hxge_param_set_vlan_ids id %d",
770 	    vmap->param_id));
771 #if defined(__i386)
772 	val_ptr = (uint32_t *)(uint32_t)pa->value;
773 	old_val_ptr = (uint32_t *)(uint32_t)pa->old_value;
774 #else
775 	val_ptr = (uint32_t *)pa->value;
776 	old_val_ptr = (uint32_t *)pa->old_value;
777 #endif
778 
779 	/* Search to see if this vlan id is already configured */
780 	for (i = 0; i < cfgd_vlans; i++) {
781 		old_map = (hxge_param_map_t *)&val_ptr[i];
782 		if ((old_map->param_id == 0) ||
783 		    (vmap->param_id == old_map->param_id) ||
784 		    (vlan_tbl[vmap->param_id].flag)) {
785 			cfg_position = i;
786 			break;
787 		}
788 	}
789 
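	/*
	 * No matching or free slot was found above (i == cfgd_vlans), or the
	 * table is still empty: append at the next free position.  Both
	 * conditions hold when cfgd_vlans is 0, so 'inc' may reach 2, which
	 * is harmless since it is only tested for non-zero below.
	 */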
790 	if (cfgd_vlans == 0) {
791 		cfg_position = 0;
792 		inc++;
793 	}
794 
795 	if (i == cfgd_vlans) {
796 		cfg_position = i;
797 		inc++;
798 	}
799 
800 	HXGE_DEBUG_MSG((hxgep, NDD2_CTL,
801 	    " set_vlan_ids mapping i %d cfgd_vlans %llx position %d ",
802 	    i, cfgd_vlans, cfg_position));
803 
804 	if (val_ptr[cfg_position] != cfg_value) {
805 		old_val_ptr[cfg_position] = val_ptr[cfg_position];
806 		val_ptr[cfg_position] = cfg_value;
807 		vlan_tbl[vmap->param_id].flag = 1;
808 		cfg_it = B_TRUE;
809 		if (inc) {
810 			cfgd_vlans++;
811 			pa->type &= ~HXGE_PARAM_ARRAY_CNT_MASK;
812 			pa->type |= (cfgd_vlans << HXGE_PARAM_ARRAY_CNT_SHIFT);
813 
814 		}
815 
816 		HXGE_DEBUG_MSG((hxgep, NDD2_CTL,
817 		    " after: param_set_vlan_ids cfg_vlans %llx position %d \n",
818 		    cfgd_vlans, cfg_position));
819 	}
820 
821 	if (cfg_it == B_TRUE) {
822 		status = hpi_pfc_cfg_vlan_table_entry_set(handle,
823 		    (vlan_id_t)vmap->param_id);
824 		if (status != HPI_SUCCESS)
825 			return (EINVAL);
826 	}
827 
828 	HXGE_DEBUG_MSG((hxgep, NDD_CTL, "<== hxge_param_set_vlan_ids"));
829 
830 	return (0);
831 }
832 
833 
834 /* ARGSUSED */
835 static int
836 hxge_param_get_vlan_ids(p_hxge_t hxgep, queue_t *q, mblk_t *mp, caddr_t cp)
837 {
838 	uint_t			print_len, buf_len;
839 	p_mblk_t		np;
840 	int			i;
841 	uint32_t		*val_ptr;
842 	hxge_param_map_t	*vmap;
843 	p_hxge_param_t		pa = (p_hxge_param_t)cp;
844 	p_hxge_class_pt_cfg_t 	p_class_cfgp;
845 	uint64_t		cfgd_vlans = 0;
846 	int buff_alloc_size = HXGE_NDD_INFODUMP_BUFF_SIZE * 32;
847 
848 	HXGE_DEBUG_MSG((hxgep, NDD_CTL, "==> hxge_param_get_vlan_ids"));
849 	(void) mi_mpprintf(mp, "VLAN Information\n");
850 
851 	if ((np = allocb(buff_alloc_size, BPRI_HI)) == NULL) {
852 		(void) mi_mpprintf(mp, "%s\n", "out of buffer");
853 		return (0);
854 	}
855 
856 	buf_len = buff_alloc_size;
857 	mp->b_cont = np;
858 	cfgd_vlans = (pa->type & HXGE_PARAM_ARRAY_CNT_MASK) >>
859 	    HXGE_PARAM_ARRAY_CNT_SHIFT;
860 
861 	i = (int)cfgd_vlans;
862 	p_class_cfgp = (p_hxge_class_pt_cfg_t)&hxgep->class_config;
863 	print_len = snprintf((char *)((mblk_t *)np)->b_wptr, buf_len,
864 	    "Configured VLANs %d\n VLAN ID\n", i);
865 	((mblk_t *)np)->b_wptr += print_len;
866 	buf_len -= print_len;
867 
868 #if defined(__i386)
869 	val_ptr = (uint32_t *)(uint32_t)pa->value;
870 #else
871 	val_ptr = (uint32_t *)pa->value;
872 #endif
873 
874 	for (i = 0; i < cfgd_vlans; i++) {
875 		vmap = (hxge_param_map_t *)&val_ptr[i];
876 		if (p_class_cfgp->vlan_tbl[vmap->param_id].flag) {
877 			print_len = snprintf((char *)((mblk_t *)np)->b_wptr,
878 			    buf_len, "  %d\n", vmap->param_id);
879 			((mblk_t *)np)->b_wptr += print_len;
880 			buf_len -= print_len;
881 		}
882 	}
883 
884 	HXGE_DEBUG_MSG((hxgep, NDD_CTL, "<== hxge_param_get_vlan_ids"));
885 
886 	return (0);
887 }
888 
889 /* ARGSUSED */
890 static int
891 hxge_param_tcam_enable(p_hxge_t hxgep, queue_t *q,
892 	mblk_t *mp, char *value, caddr_t cp)
893 {
894 	uint32_t	status = 0, cfg_value;
895 	p_hxge_param_t	pa = (p_hxge_param_t)cp;
896 	uint32_t	cfg_it = B_FALSE;
897 	char		*end;
898 
899 	HXGE_DEBUG_MSG((hxgep, NDD_CTL, "==> hxge_param_tcam_enable"));
900 
901 	cfg_value = (uint32_t)mi_strtol(value, &end, BASE_BINARY);
902 	if (pa->value != cfg_value) {
903 		pa->old_value = pa->value;
904 		pa->value = cfg_value;
905 		cfg_it = B_TRUE;
906 	}
907 	if (cfg_it == B_TRUE) {
908 		if (pa->value)
909 			status = hxge_pfc_config_tcam_enable(hxgep);
910 		else
911 			status = hxge_pfc_config_tcam_disable(hxgep);
912 		if (status != HXGE_OK)
913 			return (EINVAL);
914 	}
915 	HXGE_DEBUG_MSG((hxgep, NDD_CTL, " <== hxge_param_tcam_enable"));
916 	return (0);
917 }
918 
919 /* ARGSUSED */
920 static int
921 hxge_param_set_ether_usr(p_hxge_t hxgep, queue_t *q,
922 	mblk_t *mp, char *value, caddr_t cp)
923 {
924 	char		*end;
925 	uint32_t	status = 0, cfg_value;
926 	p_hxge_param_t	pa = (p_hxge_param_t)cp;
927 
928 	HXGE_DEBUG_MSG((hxgep, NDD_CTL, "==> hxge_param_set_ether_usr"));
929 
930 	if (strncasecmp(value, "0x", 2) == 0)
931 		value += 2;
932 
933 	cfg_value = (uint32_t)mi_strtol(value, &end, BASE_HEX);
934 	if (PARAM_OUTOF_RANGE(value, end, cfg_value, pa)) {
935 		return (EINVAL);
936 	}
937 	if (pa->value != cfg_value) {
938 		pa->old_value = pa->value;
939 		pa->value = cfg_value;
940 	}
941 
942 	HXGE_DEBUG_MSG((hxgep, NDD_CTL, "<== hxge_param_set_ether_usr"));
943 	return (status);
944 }
945 
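/*
 * Map a class parameter name (e.g. "class_opt_ipv4_tcp") to its TCAM
 * class value by walking the contiguous class parameter entries that
 * start at param_class_opt_ipv4_tcp; returns -1 if the name is unknown.
 */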
946 static int
947 hxge_class_name_2value(p_hxge_t hxgep, char *name)
948 {
949 	int		i;
950 	int		class_instance = param_class_opt_ipv4_tcp;
951 	p_hxge_param_t	param_arr;
952 
953 	param_arr = hxgep->param_arr;
954 	for (i = TCAM_CLASS_TCP_IPV4; i <= TCAM_CLASS_SCTP_IPV6; i++) {
955 		if (strcmp(param_arr[class_instance].name, name) == 0)
956 			return (i);
957 		class_instance++;
958 	}
959 	return (-1);
960 }
961 
962 /* ARGSUSED */
963 int
964 hxge_param_set_ip_opt(p_hxge_t hxgep, queue_t *q,
965 	mblk_t *mp, char *value, caddr_t cp)
966 {
967 	char		*end;
968 	uint32_t	status, cfg_value;
969 	p_hxge_param_t	pa = (p_hxge_param_t)cp;
970 	tcam_class_t	class;
971 	uint32_t	cfg_it = B_FALSE;
972 
973 	HXGE_DEBUG_MSG((hxgep, NDD_CTL, "==> hxge_param_set_ip_opt"));
974 
975 	if (strncasecmp(value, "0x", 2) == 0)
976 		value += 2;
977 
978 	cfg_value = (uint32_t)mi_strtol(value, &end, BASE_HEX);
979 	if (PARAM_OUTOF_RANGE(value, end, cfg_value, pa)) {
980 		return (EINVAL);
981 	}
982 	if (pa->value != cfg_value) {
983 		pa->old_value = pa->value;
984 		pa->value = cfg_value;
985 		cfg_it = B_TRUE;
986 	}
987 	if (cfg_it == B_TRUE) {
988 		/* do the actual hw setup  */
989 		class = hxge_class_name_2value(hxgep, pa->name);
990 		if (class == -1)
991 			return (EINVAL);
992 
993 		status = hxge_pfc_ip_class_config(hxgep, class, pa->value);
994 		if (status != HXGE_OK)
995 			return (EINVAL);
996 	}
997 	HXGE_DEBUG_MSG((hxgep, NDD_CTL, "<== hxge_param_set_ip_opt"));
998 	return (0);
999 }
1000 
1001 /* ARGSUSED */
1002 int
1003 hxge_param_get_ip_opt(p_hxge_t hxgep, queue_t *q, mblk_t *mp, caddr_t cp)
1004 {
1005 	uint32_t	status, cfg_value;
1006 	p_hxge_param_t	pa = (p_hxge_param_t)cp;
1007 	tcam_class_t	class;
1008 
1009 	HXGE_DEBUG_MSG((hxgep, NDD_CTL, "==> hxge_param_get_ip_opt"));
1010 
1011 	/* look up the class for this parameter and read its current config */
1012 	class = hxge_class_name_2value(hxgep, pa->name);
1013 	if (class == -1)
1014 		return (EINVAL);
1015 	cfg_value = 0;
1016 	status = hxge_pfc_ip_class_config_get(hxgep, class, &cfg_value);
1017 	if (status != HXGE_OK)
1018 		return (EINVAL);
1019 	HXGE_DEBUG_MSG((hxgep, NDD_CTL,
1020 	    "hxge_param_get_ip_opt_get %x ", cfg_value));
1021 	pa->value = cfg_value;
1022 
1023 	if (mp != NULL)
1024 		(void) mi_mpprintf(mp, "%x", cfg_value);
1025 
1026 	HXGE_DEBUG_MSG((hxgep, NDD_CTL, "<== hxge_param_get_ip_opt status "));
1027 	return (0);
1028 }
1029 
1030 /* ARGSUSED */
1031 static int
1032 hxge_param_pfc_hash_init(p_hxge_t hxgep, queue_t *q, mblk_t *mp,
1033 	char *value, caddr_t cp)
1034 {
1035 	char		*end;
1036 	uint32_t	status, cfg_value;
1037 	p_hxge_param_t	pa = (p_hxge_param_t)cp;
1038 	uint32_t	cfg_it = B_FALSE;
1039 
1040 	HXGE_DEBUG_MSG((hxgep, NDD_CTL, "==> hxge_param_pfc_hash_init"));
1041 
1042 	if (strncasecmp(value, "0x", 2) == 0)
1043 		value += 2;
1044 
1045 	cfg_value = (uint32_t)mi_strtol(value, &end, BASE_HEX);
1046 	if (PARAM_OUTOF_RANGE(value, end, cfg_value, pa)) {
1047 		return (EINVAL);
1048 	}
1049 
1050 	HXGE_DEBUG_MSG((hxgep, NDD_CTL,
1051 	    " hxge_param_pfc_hash_init value %x", cfg_value));
1052 	if (pa->value != cfg_value) {
1053 		pa->old_value = pa->value;
1054 		pa->value = cfg_value;
1055 		cfg_it = B_TRUE;
1056 	}
1057 
1058 	if (cfg_it == B_TRUE) {
1059 		status = hxge_pfc_set_hash(hxgep, (uint32_t)pa->value);
1060 		if (status != HXGE_OK)
1061 			return (EINVAL);
1062 	}
1063 	HXGE_DEBUG_MSG((hxgep, NDD_CTL, " <== hxge_param_pfc_hash_init"));
1064 	return (0);
1065 }
1066 
1067 /* ARGSUSED */
1068 static int
1069 hxge_param_set_hxge_debug_flag(p_hxge_t hxgep, queue_t *q,
1070 	mblk_t *mp, char *value, caddr_t cp)
1071 {
1072 	char		*end;
1073 	uint32_t	status = 0;
1074 	uint64_t	cfg_value = 0;
1075 	p_hxge_param_t	pa = (p_hxge_param_t)cp;
1076 	uint32_t	cfg_it = B_FALSE;
1077 
1078 	HXGE_DEBUG_MSG((hxgep, NDD_CTL, "==> hxge_param_set_hxge_debug_flag"));
1079 
1080 	if (strncasecmp(value, "0x", 2) == 0)
1081 		value += 2;
1082 
1083 	cfg_value = mi_strtol(value, &end, BASE_HEX);
1084 
1085 	if (PARAM_OUTOF_RANGE(value, end, cfg_value, pa)) {
1086 		HXGE_DEBUG_MSG((hxgep, NDD_CTL,
1087 		    " hxge_param_set_hxge_debug_flag"
1088 		    " out of range %llx", cfg_value));
1089 		return (EINVAL);
1090 	}
1091 	if (pa->value != cfg_value) {
1092 		pa->old_value = pa->value;
1093 		pa->value = cfg_value;
1094 		cfg_it = B_TRUE;
1095 	}
1096 	if (cfg_it == B_TRUE)
1097 		hxgep->hxge_debug_level = pa->value;
1098 
1099 	HXGE_DEBUG_MSG((hxgep, NDD_CTL, "<== hxge_param_set_hxge_debug_flag"));
1100 	return (status);
1101 }
1102 
1103 /* ARGSUSED */
1104 static int
1105 hxge_param_get_debug_flag(p_hxge_t hxgep, queue_t *q, p_mblk_t mp, caddr_t cp)
1106 {
1107 	int		status = 0;
1108 	p_hxge_param_t	pa = (p_hxge_param_t)cp;
1109 
1110 	HXGE_DEBUG_MSG((hxgep, NDD_CTL, "==> hxge_param_get_debug_flag"));
1111 
1112 	if (pa->value > 0xffffffff)
1113 		(void) mi_mpprintf(mp, "%x%08x", (int)(pa->value >> 32),
1114 		    (int)(pa->value & 0xffffffff));
1115 	else
1116 		(void) mi_mpprintf(mp, "%x", (int)pa->value);
1117 
1118 
1119 	HXGE_DEBUG_MSG((hxgep, NDD_CTL, "<== hxge_param_get_debug_flag"));
1120 	return (status);
1121 }
1122 
1123 /* ARGSUSED */
1124 static int
1125 hxge_param_set_hpi_debug_flag(p_hxge_t hxgep, queue_t *q,
1126 	mblk_t *mp, char *value, caddr_t cp)
1127 {
1128 	char		*end;
1129 	uint32_t	status = 0;
1130 	uint64_t	cfg_value = 0;
1131 	p_hxge_param_t	pa = (p_hxge_param_t)cp;
1132 	uint32_t	cfg_it = B_FALSE;
1133 
1134 	HXGE_DEBUG_MSG((hxgep, NDD_CTL, "==> hxge_param_set_hpi_debug_flag"));
1135 
1136 	if (strncasecmp(value, "0x", 2) == 0)
1137 		value += 2;
1138 
1139 	cfg_value = mi_strtol(value, &end, BASE_HEX);
1140 
1141 	if (PARAM_OUTOF_RANGE(value, end, cfg_value, pa)) {
1142 		HXGE_DEBUG_MSG((hxgep, NDD_CTL, " hxge_param_set_hpi_debug_flag"
1143 		    " out of range %llx", cfg_value));
1144 		return (EINVAL);
1145 	}
1146 	if (pa->value != cfg_value) {
1147 		pa->old_value = pa->value;
1148 		pa->value = cfg_value;
1149 		cfg_it = B_TRUE;
1150 	}
1151 	if (cfg_it == B_TRUE) {
1152 		hpi_debug_level = pa->value;
1153 	}
1154 	HXGE_DEBUG_MSG((hxgep, NDD_CTL, "<== hxge_param_set_hpi_debug_flag"));
1155 	return (status);
1156 }
1157 
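/*
 * Register block names and base offsets used by hxge_param_dump_ptrs()
 * below; the table is terminated by an entry with offset ALL_FF_32.
 */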
1158 typedef struct block_info {
1159 	char *name;
1160 	uint32_t offset;
1161 } block_info_t;
1162 
1163 block_info_t reg_block[] = {
1164 	{"PIO", PIO_BASE_ADDR},
1165 	{"PIO_LDSV", PIO_LDSV_BASE_ADDR},
1166 	{"PIO_LDMASK", PIO_LDMASK_BASE_ADDR},
1167 	{"PFC", PFC_BASE_ADDR},
1168 	{"RDC", RDC_BASE_ADDR},
1169 	{"TDC", TDC_BASE_ADDR},
1170 	{"VMAC", VMAC_BASE_ADDR},
1171 	{"END", ALL_FF_32},
1172 };
1173 
1174 /* ARGSUSED */
1175 static int
1176 hxge_param_dump_ptrs(p_hxge_t hxgep, queue_t *q, p_mblk_t mp, caddr_t cp)
1177 {
1178 	uint_t			print_len, buf_len;
1179 	p_mblk_t		np;
1180 	int			rdc, tdc, block;
1181 	uint64_t		base;
1182 	p_hxge_dma_pt_cfg_t	p_dma_cfgp;
1183 	p_hxge_hw_pt_cfg_t	p_cfgp;
1184 	int			buff_alloc_size = HXGE_NDD_INFODUMP_BUFF_8K;
1185 	p_tx_ring_t		*tx_rings;
1186 	p_rx_rcr_rings_t	rx_rcr_rings;
1187 	p_rx_rcr_ring_t		*rcr_rings;
1188 	p_rx_rbr_rings_t	rx_rbr_rings;
1189 	p_rx_rbr_ring_t		*rbr_rings;
1190 
1191 	HXGE_DEBUG_MSG((hxgep, IOC_CTL, "==> hxge_param_dump_ptrs"));
1192 
1193 	(void) mi_mpprintf(mp, "ptr information\n");
1194 
1195 	if ((np = allocb(buff_alloc_size, BPRI_HI)) == NULL) {
1196 		/* The following may work even if we cannot get a large buf. */
1197 		(void) mi_mpprintf(mp, "%s\n", "out of buffer");
1198 		return (0);
1199 	}
1200 	buf_len = buff_alloc_size;
1201 
1202 	mp->b_cont = np;
1203 	p_dma_cfgp = (p_hxge_dma_pt_cfg_t)&hxgep->pt_config;
1204 	p_cfgp = (p_hxge_hw_pt_cfg_t)&p_dma_cfgp->hw_config;
1205 
1206 	rx_rcr_rings = hxgep->rx_rcr_rings;
1207 	rcr_rings = rx_rcr_rings->rcr_rings;
1208 	rx_rbr_rings = hxgep->rx_rbr_rings;
1209 	rbr_rings = rx_rbr_rings->rbr_rings;
1210 	print_len = snprintf((char *)((mblk_t *)np)->b_wptr, buf_len,
1211 	    "hxgep (hxge_t) $%p\n dev_regs (dev_regs_t) $%p\n",
1212 	    (void *)hxgep, (void *)hxgep->dev_regs);
1213 
1214 	ADVANCE_PRINT_BUFFER(np, print_len, buf_len);
1215 	/* do register pointers */
1216 	print_len = snprintf((char *)((mblk_t *)np)->b_wptr, buf_len,
1217 	    "reg base (hpi_reg_ptr_t) $%p\t pci reg (hpi_reg_ptr_t) $%p\n",
1218 	    (void *)hxgep->dev_regs->hxge_regp,
1219 	    (void *)hxgep->dev_regs->hxge_pciregp);
1220 
1221 	ADVANCE_PRINT_BUFFER(np, print_len, buf_len);
1222 
1223 	print_len = snprintf((char *)((mblk_t *)np)->b_wptr, buf_len,
1224 	    "\nBlock \t Offset \n");
1225 
1226 	ADVANCE_PRINT_BUFFER(np, print_len, buf_len);
1227 	block = 0;
1228 #if defined(__i386)
1229 	base = (uint64_t)(uint32_t)hxgep->dev_regs->hxge_regp;
1230 #else
1231 	base = (uint64_t)hxgep->dev_regs->hxge_regp;
1232 #endif
1233 	while (reg_block[block].offset != ALL_FF_32) {
1234 		print_len = snprintf((char *)((mblk_t *)np)->b_wptr, buf_len,
1235 		    "%9s\t 0x%llx\n", reg_block[block].name,
1236 		    (unsigned long long) (reg_block[block].offset + base));
1237 		ADVANCE_PRINT_BUFFER(np, print_len, buf_len);
1238 		block++;
1239 	}
1240 
1241 	print_len = snprintf((char *)((mblk_t *)np)->b_wptr, buf_len,
1242 	    "\nRDC\t rcrp (rx_rcr_ring_t)\t rbrp (rx_rbr_ring_t)\n");
1243 
1244 	ADVANCE_PRINT_BUFFER(np, print_len, buf_len);
1245 
1246 	for (rdc = 0; rdc < p_cfgp->max_rdcs; rdc++) {
1247 		print_len = snprintf((char *)((mblk_t *)np)->b_wptr, buf_len,
1248 		    " %d\t  $%p\t\t   $%p\n",
1249 		    rdc, (void *)rcr_rings[rdc], (void *)rbr_rings[rdc]);
1250 		ADVANCE_PRINT_BUFFER(np, print_len, buf_len);
1251 	}
1252 
1253 	print_len = snprintf((char *)((mblk_t *)np)->b_wptr, buf_len,
1254 	    "\nTDC\t tdcp (tx_ring_t)\n");
1255 
1256 	ADVANCE_PRINT_BUFFER(np, print_len, buf_len);
1257 	tx_rings = hxgep->tx_rings->rings;
1258 	for (tdc = 0; tdc < p_cfgp->max_tdcs; tdc++) {
1259 		print_len = snprintf((char *)((mblk_t *)np)->b_wptr, buf_len,
1260 		    " %d\t  $%p\n", tdc, (void *)tx_rings[tdc]);
1261 		ADVANCE_PRINT_BUFFER(np, print_len, buf_len);
1262 	}
1263 
1264 	print_len = snprintf((char *)((mblk_t *)np)->b_wptr, buf_len, "\n\n");
1265 
1266 	ADVANCE_PRINT_BUFFER(np, print_len, buf_len);
1267 	HXGE_DEBUG_MSG((hxgep, IOC_CTL, "<== hxge_param_dump_ptrs"));
1268 	return (0);
1269 }
1270 
1271 /*
1272  * Load 'name' into the named dispatch table pointed to by 'pparam'.
1273  * 'pparam' should be the address of a char pointer cell.  If the table
1274  * does not exist (*pparam == NULL), a new table is allocated and 'pparam'
1275  * is stuffed.  If there is not enough space in the table for a new
1276  * entry, more space is allocated.
1277  */
1278 boolean_t
1279 hxge_nd_load(caddr_t *pparam, char *name,
1280 	pfi_t get_pfi, pfi_t set_pfi, caddr_t data)
1281 {
1282 	ND	*nd;
1283 	NDE	*nde;
1284 
1285 	HXGE_DEBUG_MSG((NULL, NDD2_CTL, " ==> hxge_nd_load: %s", name));
1286 	if (!pparam)
1287 		return (B_FALSE);
1288 	if ((nd = (ND *) * pparam) == NULL) {
1289 		if ((nd = (ND *) KMEM_ZALLOC(sizeof (ND), KM_NOSLEEP)) == NULL)
1290 			return (B_FALSE);
1291 		*pparam = (caddr_t)nd;
1292 	}
1293 	if (nd->nd_tbl) {
1294 		for (nde = nd->nd_tbl; nde->nde_name; nde++) {
1295 			if (strcmp(name, nde->nde_name) == 0)
1296 				goto fill_it;
1297 		}
1298 	}
1299 	if (nd->nd_free_count <= 1) {
1300 		if ((nde = (NDE *) KMEM_ZALLOC(nd->nd_size +
1301 		    NDE_ALLOC_SIZE, KM_NOSLEEP)) == NULL)
1302 			return (B_FALSE);
1303 		nd->nd_free_count += NDE_ALLOC_COUNT;
1304 		if (nd->nd_tbl) {
1305 			bcopy((char *)nd->nd_tbl, (char *)nde, nd->nd_size);
1306 			KMEM_FREE((char *)nd->nd_tbl, nd->nd_size);
1307 		} else {
1308 			nd->nd_free_count--;
1309 			nde->nde_name = "?";
1310 			nde->nde_get_pfi = hxge_nd_get_names;
1311 			nde->nde_set_pfi = hxge_set_default;
1312 		}
1313 		nde->nde_data = (caddr_t)nd;
1314 		nd->nd_tbl = nde;
1315 		nd->nd_size += NDE_ALLOC_SIZE;
1316 	}
1317 	for (nde = nd->nd_tbl; nde->nde_name; nde++)
1318 		noop;
1319 	nd->nd_free_count--;
1320 fill_it:
1321 	nde->nde_name = name;
1322 	nde->nde_get_pfi = get_pfi;
1323 	nde->nde_set_pfi = set_pfi;
1324 	nde->nde_data = data;
1325 	HXGE_DEBUG_MSG((NULL, NDD2_CTL, " <== hxge_nd_load"));
1326 
1327 	return (B_TRUE);
1328 }
1329 
1330 /*
1331  * Free the table pointed to by 'pparam'
1332  */
1333 void
1334 hxge_nd_free(caddr_t *pparam)
1335 {
1336 	ND *nd;
1337 
1338 	if ((nd = (ND *)*pparam) != NULL) {
1339 		if (nd->nd_tbl)
1340 			KMEM_FREE((char *)nd->nd_tbl, nd->nd_size);
1341 		KMEM_FREE((char *)nd, sizeof (ND));
1342 		*pparam = nil(caddr_t);
1343 	}
1344 }
1345 
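/*
 * Process an ND_GET/ND_SET ioctl against the named dispatch table.
 * The first string in the M_IOCTL data block names the parameter
 * (dashes are mapped to underscores); for ND_SET the following string
 * is the new value, for ND_GET it is the size of the user's buffer.
 * Returns B_TRUE when an M_IOCACK reply has been set up in 'mp'.
 */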
1346 int
1347 hxge_nd_getset(p_hxge_t hxgep, queue_t *q, caddr_t param, p_mblk_t mp)
1348 {
1349 	int		err;
1350 	IOCP		iocp;
1351 	p_mblk_t	mp1, mp2;
1352 	ND		*nd;
1353 	NDE		*nde;
1354 	char		*valp;
1355 
1356 	size_t		avail;
1357 
1358 	if (!param) {
1359 		return (B_FALSE);
1360 	}
1361 	nd = (ND *) param;
1362 	iocp = (IOCP) mp->b_rptr;
1363 	if ((iocp->ioc_count == 0) || !(mp1 = mp->b_cont)) {
1364 		mp->b_datap->db_type = M_IOCACK;
1365 		iocp->ioc_count = 0;
1366 		iocp->ioc_error = EINVAL;
1367 		return (B_FALSE);
1368 	}
1369 	/*
1370 	 * NOTE - logic throughout nd_xxx assumes single data block for ioctl.
1371 	 * However, existing code sends in some big buffers.
1372 	 */
1373 	avail = iocp->ioc_count;
1374 	if (mp1->b_cont) {
1375 		freemsg(mp1->b_cont);
1376 		mp1->b_cont = NULL;
1377 	}
1378 	mp1->b_datap->db_lim[-1] = '\0';	/* Force null termination */
1379 	for (valp = (char *)mp1->b_rptr; *valp != '\0'; valp++) {
1380 		if (*valp == '-')
1381 			*valp = '_';
1382 	}
1383 
1384 	valp = (char *)mp1->b_rptr;
1385 
1386 	for (nde = nd->nd_tbl; /* */; nde++) {
1387 		if (!nde->nde_name)
1388 			return (B_FALSE);
1389 		if (strcmp(nde->nde_name, valp) == 0)
1390 			break;
1391 	}
1392 	err = EINVAL;
1393 	while (*valp++)
1394 		noop;
1395 	if (!*valp || valp >= (char *)mp1->b_wptr)
1396 		valp = nilp(char);
1397 	switch (iocp->ioc_cmd) {
1398 	case ND_GET:
1399 		if (*nde->nde_get_pfi == NULL)
1400 			return (B_FALSE);
1401 
1402 		/*
1403 		 * (temporary) hack: "*valp" is size of user buffer for
1404 		 * copyout. If result of action routine is too big, free excess
1405 		 * and return ioc_rval as buffer size needed. Return as many
1406 		 * mblocks as will fit, free the rest.  For backward
1407 		 * compatibility, assume size of original ioctl buffer if
1408 		 * "*valp" bad or not given.
1409 		 */
1410 		if (valp)
1411 			avail = mi_strtol(valp, (char **)0, 10);
1412 		/*
1413 		 * We overwrite the name/value with the reply data
1414 		 */
1415 		mp2 = mp1;
1416 		while (mp2) {
1417 			mp2->b_wptr = mp2->b_rptr;
1418 			mp2 = mp2->b_cont;
1419 		}
1420 
1421 		err = (*nde->nde_get_pfi) (hxgep, q, mp1, nde->nde_data);
1422 
1423 		if (!err) {
1424 			size_t size_out = 0;
1425 			ssize_t excess;
1426 
1427 			iocp->ioc_rval = 0;
1428 
1429 			/* Tack on the null */
1430 			err = hxge_mk_mblk_tail_space(mp1, &mp2, 1);
1431 			if (!err) {
1432 				*mp2->b_wptr++ = '\0';
1433 				size_out = msgdsize(mp1);
1434 				excess = size_out - avail;
1435 				if (excess > 0) {
1436 					iocp->ioc_rval = (int)size_out;
1437 					size_out -= excess;
1438 					(void) adjmsg(mp1, -(excess + 1));
1439 					err = hxge_mk_mblk_tail_space(
1440 					    mp1, &mp2, 1);
1441 					if (!err)
1442 						*mp2->b_wptr++ = '\0';
1443 					else
1444 						size_out = 0;
1445 				}
1446 			} else
1447 				size_out = 0;
1448 			iocp->ioc_count = size_out;
1449 		}
1450 		break;
1451 
1452 	case ND_SET:
1453 		if (valp) {
1454 			if (nde->nde_set_pfi) {
1455 				err = (*nde->nde_set_pfi) (hxgep, q, mp1, valp,
1456 				    nde->nde_data);
1457 				iocp->ioc_count = 0;
1458 				freemsg(mp1);
1459 				mp->b_cont = NULL;
1460 			}
1461 		}
1462 		break;
1463 
1464 	default:
1465 		break;
1466 	}
1467 	iocp->ioc_error = err;
1468 	mp->b_datap->db_type = M_IOCACK;
1469 	return (B_TRUE);
1470 }
1471 
1472 /* ARGSUSED */
1473 int
1474 hxge_nd_get_names(p_hxge_t hxgep, queue_t *q, p_mblk_t mp, caddr_t param)
1475 {
1476 	ND		*nd;
1477 	NDE		*nde;
1478 	char		*rwtag;
1479 	boolean_t	get_ok, set_ok;
1480 	size_t		param_len;
1481 	int		status = 0;
1482 
1483 	nd = (ND *) param;
1484 	if (!nd)
1485 		return (ENOENT);
1486 
1487 	for (nde = nd->nd_tbl; nde->nde_name; nde++) {
1488 		get_ok = (nde->nde_get_pfi != hxge_get_default) &&
1489 		    (nde->nde_get_pfi != NULL);
1490 		set_ok = (nde->nde_set_pfi != hxge_set_default) &&
1491 		    (nde->nde_set_pfi != NULL);
1492 		if (get_ok) {
1493 			if (set_ok)
1494 				rwtag = "read and write";
1495 			else
1496 				rwtag = "read only";
1497 		} else if (set_ok)
1498 			rwtag = "write only";
1499 		else {
1500 			continue;
1501 		}
1502 		param_len = strlen(rwtag);
1503 		param_len += strlen(nde->nde_name);
1504 		param_len += 4;
1505 
1506 		(void) mi_mpprintf(mp, "%s (%s)", nde->nde_name, rwtag);
1507 	}
1508 	return (status);
1509 }
1510 
1511 /* ARGSUSED */
1512 int
1513 hxge_get_default(p_hxge_t hxgep, queue_t *q, p_mblk_t mp, caddr_t data)
1514 {
1515 	return (EACCES);
1516 }
1517 
1518 /* ARGSUSED */
1519 int
1520 hxge_set_default(p_hxge_t hxgep, queue_t *q, p_mblk_t mp, char *value,
1521 	caddr_t data)
1522 {
1523 	return (EACCES);
1524 }
1525 
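/*
 * ndd ioctl entry point: ND_GET and ND_SET are dispatched through
 * hxge_nd_getset() and acknowledged with qreply(); any other command,
 * or a getset failure, is nak'd with EINVAL.
 */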
1526 void
1527 hxge_param_ioctl(p_hxge_t hxgep, queue_t *wq, mblk_t *mp, struct iocblk *iocp)
1528 {
1529 	int cmd;
1530 	int status = B_FALSE;
1531 
1532 	HXGE_DEBUG_MSG((hxgep, IOC_CTL, "==> hxge_param_ioctl"));
1533 	cmd = iocp->ioc_cmd;
1534 	switch (cmd) {
1535 	default:
1536 		HXGE_DEBUG_MSG((hxgep, IOC_CTL,
1537 		    "hxge_param_ioctl: bad cmd 0x%0x", cmd));
1538 		break;
1539 
1540 	case ND_GET:
1541 	case ND_SET:
1542 		HXGE_DEBUG_MSG((hxgep, IOC_CTL,
1543 		    "hxge_param_ioctl: cmd 0x%0x", cmd));
1544 		if (!hxge_nd_getset(hxgep, wq, hxgep->param_list, mp)) {
1545 			HXGE_DEBUG_MSG((hxgep, IOC_CTL,
1546 			    "false ret from hxge_nd_getset"));
1547 			break;
1548 		}
1549 		status = B_TRUE;
1550 		break;
1551 	}
1552 
1553 	if (status) {
1554 		qreply(wq, mp);
1555 	} else {
1556 		miocnak(wq, mp, 0, EINVAL);
1557 	}
1558 	HXGE_DEBUG_MSG((hxgep, IOC_CTL, "<== hxge_param_ioctl"));
1559 }
1560