xref: /titanic_41/usr/src/uts/common/io/hxge/hxge_virtual.c (revision 48258c6b4e17f36ab09fba0bd6307d1fec9dcbce)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #pragma ident	"%Z%%M%	%I%	%E% SMI"
27 
28 #include <hxge_impl.h>
29 #include <hxge_vmac.h>
30 #include <hxge_pfc.h>
31 #include <hpi_pfc.h>
32 
33 static hxge_status_t hxge_get_mac_addr_properties(p_hxge_t);
34 static void hxge_use_cfg_hydra_properties(p_hxge_t);
35 static void hxge_use_cfg_dma_config(p_hxge_t);
36 static void hxge_use_cfg_class_config(p_hxge_t);
37 static void hxge_set_hw_dma_config(p_hxge_t);
38 static void hxge_set_hw_class_config(p_hxge_t);
39 static void hxge_ldgv_setup(p_hxge_ldg_t *ldgp, p_hxge_ldv_t *ldvp, uint8_t ldv,
40 	uint8_t endldg, int *ngrps);
41 static hxge_status_t hxge_mmac_init(p_hxge_t);
42 
43 extern uint16_t hxge_rcr_timeout;
44 extern uint16_t hxge_rcr_threshold;
45 
46 extern uint32_t hxge_rbr_size;
47 extern uint32_t hxge_rcr_size;
48 
49 extern uint_t hxge_rx_intr();
50 extern uint_t hxge_tx_intr();
51 extern uint_t hxge_vmac_intr();
52 extern uint_t hxge_syserr_intr();
53 extern uint_t hxge_pfc_intr();
54 
55 uint_t hxge_nmac_intr(caddr_t arg1, caddr_t arg2);
56 
57 /*
58  * Entry point to populate configuration parameters into the master hxge
59  * data structure and to update the NDD parameter list.
60  */
61 hxge_status_t
62 hxge_get_config_properties(p_hxge_t hxgep)
63 {
64 	hxge_status_t		status = HXGE_OK;
65 
66 	HXGE_DEBUG_MSG((hxgep, VPD_CTL, " ==> hxge_get_config_properties"));
67 
68 	if (hxgep->hxge_hw_p == NULL) {
69 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
70 		    " hxge_get_config_properties: common hardware not set"));
71 		return (HXGE_ERROR);
72 	}
73 
74 	hxgep->classifier.tcam_size = TCAM_HXGE_TCAM_MAX_ENTRY;
75 
76 	status = hxge_get_mac_addr_properties(hxgep);
77 	if (status != HXGE_OK) {
78 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
79 		    " hxge_get_config_properties: mac addr properties failed"));
80 		return (status);
81 	}
82 
83 	HXGE_DEBUG_MSG((hxgep, VPD_CTL,
84 	    " ==> hxge_get_config_properties: Hydra"));
85 
86 	hxge_use_cfg_hydra_properties(hxgep);
87 
88 	HXGE_DEBUG_MSG((hxgep, VPD_CTL, " <== hxge_get_config_properties"));
89 	return (HXGE_OK);
90 }
91 
92 
93 static void
94 hxge_set_hw_vlan_class_config(p_hxge_t hxgep)
95 {
96 	int			i;
97 	p_hxge_param_t		param_arr;
98 	uint_t			vlan_cnt;
99 	int			*vlan_cfg_val;
100 	hxge_param_map_t	*vmap;
101 	char			*prop;
102 	p_hxge_class_pt_cfg_t 	p_class_cfgp;
103 	uint32_t		good_cfg[32];
104 	int			good_count = 0;
105 	hxge_mv_cfg_t		*vlan_tbl;
106 
107 	HXGE_DEBUG_MSG((hxgep, CFG_CTL, " ==> hxge_set_hw_vlan_config"));
108 	p_class_cfgp = (p_hxge_class_pt_cfg_t)&hxgep->class_config;
109 
110 	param_arr = hxgep->param_arr;
111 	prop = param_arr[param_vlan_ids].fcode_name;
112 
113 	/*
114 	 * uint32_t array, each array entry specifying a VLAN id
115 	 */
116 	for (i = 0; i <= VLAN_ID_MAX; i++) {
117 		p_class_cfgp->vlan_tbl[i].flag = 0;
118 	}
119 
120 	vlan_tbl = (hxge_mv_cfg_t *)&p_class_cfgp->vlan_tbl[0];
121 	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, hxgep->dip, 0, prop,
122 	    &vlan_cfg_val, &vlan_cnt) != DDI_PROP_SUCCESS) {
123 		return;
124 	}
125 
126 	for (i = 0; i < vlan_cnt; i++) {
127 		vmap = (hxge_param_map_t *)&vlan_cfg_val[i];
128 		if ((vmap->param_id) && (vmap->param_id <= VLAN_ID_MAX)) {
129 			HXGE_DEBUG_MSG((hxgep, CFG2_CTL,
130 			    " hxge_vlan_config vlan id %d", vmap->param_id));
131 
132 			good_cfg[good_count] = vlan_cfg_val[i];
133 			if (vlan_tbl[vmap->param_id].flag == 0)
134 				good_count++;
135 
136 			vlan_tbl[vmap->param_id].flag = 1;
137 		}
138 	}
139 
140 	ddi_prop_free(vlan_cfg_val);
141 	if (good_count != vlan_cnt) {
142 		(void) ddi_prop_update_int_array(DDI_DEV_T_NONE,
143 		    hxgep->dip, prop, (int *)good_cfg, good_count);
144 	}
145 
146 	HXGE_DEBUG_MSG((hxgep, CFG_CTL, " <== hxge_set_hw_vlan_config"));
147 }
148 
149 
150 /*
151  * Read param_vlan_ids and param_implicit_vlan_id properties from either
152  * hxge.conf or OBP. Update the soft properties. Populate these
153  * properties into the hxge data structure.
154  */
155 static void
156 hxge_use_cfg_vlan_class_config(p_hxge_t hxgep)
157 {
158 	uint_t		vlan_cnt;
159 	int		*vlan_cfg_val;
160 	int		status;
161 	p_hxge_param_t	param_arr;
162 	char		*prop;
163 	uint32_t	implicit_vlan_id = 0;
164 	int		*int_prop_val;
165 	uint_t		prop_len;
166 	p_hxge_param_t	pa;
167 
168 	HXGE_DEBUG_MSG((hxgep, CFG_CTL, " ==> hxge_use_cfg_vlan_config"));
169 	param_arr = hxgep->param_arr;
170 	prop = param_arr[param_vlan_ids].fcode_name;
171 
172 	status = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, hxgep->dip, 0, prop,
173 	    &vlan_cfg_val, &vlan_cnt);
174 	if (status == DDI_PROP_SUCCESS) {
175 		status = ddi_prop_update_int_array(DDI_DEV_T_NONE,
176 		    hxgep->dip, prop, vlan_cfg_val, vlan_cnt);
177 		ddi_prop_free(vlan_cfg_val);
178 	}
179 
180 	pa = &param_arr[param_implicit_vlan_id];
181 	prop = pa->fcode_name;
182 	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, hxgep->dip, 0, prop,
183 	    &int_prop_val, &prop_len) == DDI_PROP_SUCCESS) {
184 		implicit_vlan_id = (uint32_t)*int_prop_val;
185 		if ((implicit_vlan_id >= pa->minimum) ||
186 		    (implicit_vlan_id <= pa->maximum)) {
187 			status = ddi_prop_update_int(DDI_DEV_T_NONE, hxgep->dip,
188 			    prop, (int)implicit_vlan_id);
189 		}
190 		ddi_prop_free(int_prop_val);
191 	}
192 
193 	hxge_set_hw_vlan_class_config(hxgep);
194 
195 	HXGE_DEBUG_MSG((hxgep, CFG_CTL, " <== hxge_use_cfg_vlan_config"));
196 }
197 
198 /*
199  * Read in the configuration parameters from either hxge.conf or OBP and
200  * populate the master data structure hxge.
201  * Use these parameters to update the soft properties and the ndd array.
202  */
203 static void
204 hxge_use_cfg_hydra_properties(p_hxge_t hxgep)
205 {
206 	HXGE_DEBUG_MSG((hxgep, CFG_CTL, " ==> hxge_use_cfg_hydra_properties"));
207 
208 	(void) hxge_use_cfg_dma_config(hxgep);
209 	(void) hxge_use_cfg_vlan_class_config(hxgep);
210 	(void) hxge_use_cfg_class_config(hxgep);
211 
212 	/*
213 	 * Read in the hardware (fcode) properties and use these properties
214 	 * to update the ndd array.
215 	 */
216 	(void) hxge_get_param_soft_properties(hxgep);
217 	HXGE_DEBUG_MSG((hxgep, CFG_CTL, " <== hxge_use_cfg_hydra_properties"));
218 }
219 
220 
221 /*
222  * Read param_accept_jumbo, param_rxdma_intr_time, and param_rxdma_intr_pkts
223  * from either hxge.conf or OBP.
224  * Update the soft properties.
225  * Populate these properties into the hxge data structure for latter use.
226  */
227 static void
228 hxge_use_cfg_dma_config(p_hxge_t hxgep)
229 {
230 	int			tx_ndmas, rx_ndmas;
231 	p_hxge_dma_pt_cfg_t	p_dma_cfgp;
232 	p_hxge_hw_pt_cfg_t	p_cfgp;
233 	dev_info_t		*dip;
234 	p_hxge_param_t		param_arr;
235 	char			*prop;
236 	int 			*prop_val;
237 	uint_t 			prop_len;
238 
239 	HXGE_DEBUG_MSG((hxgep, CFG_CTL, " ==> hxge_use_cfg_dma_config"));
240 	param_arr = hxgep->param_arr;
241 
242 	p_dma_cfgp = (p_hxge_dma_pt_cfg_t)&hxgep->pt_config;
243 	p_cfgp = (p_hxge_hw_pt_cfg_t)&p_dma_cfgp->hw_config;
244 	dip = hxgep->dip;
245 
246 	tx_ndmas = 4;
247 	p_cfgp->start_tdc = 0;
248 	p_cfgp->max_tdcs =  hxgep->max_tdcs = tx_ndmas;
249 	hxgep->tdc_mask = (tx_ndmas - 1);
250 	HXGE_DEBUG_MSG((hxgep, CFG_CTL, "==> hxge_use_cfg_dma_config: "
251 	    "p_cfgp 0x%llx max_tdcs %d hxgep->max_tdcs %d",
252 	    p_cfgp, p_cfgp->max_tdcs, hxgep->max_tdcs));
253 
254 	rx_ndmas = 4;
255 	p_cfgp->start_rdc = 0;
256 	p_cfgp->max_rdcs =  hxgep->max_rdcs = rx_ndmas;
257 
258 	p_cfgp->start_ldg = 0;
259 	p_cfgp->max_ldgs = HXGE_INT_MAX_LDG;
260 
261 	HXGE_DEBUG_MSG((hxgep, CFG_CTL, "==> hxge_use_default_dma_config: "
262 	    "p_cfgp 0x%llx max_rdcs %d hxgep->max_rdcs %d",
263 	    p_cfgp, p_cfgp->max_rdcs, hxgep->max_rdcs));
264 
265 	HXGE_DEBUG_MSG((hxgep, CFG_CTL, "==> hxge_use_cfg_dma_config: "
266 	    "p_cfgp 0x%016llx start_ldg %d hxgep->max_ldgs %d ",
267 	    p_cfgp, p_cfgp->start_ldg,  p_cfgp->max_ldgs));
268 
269 	/*
270 	 * add code for individual rdc properties
271 	 */
272 	prop = param_arr[param_accept_jumbo].fcode_name;
273 
274 	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip, 0, prop,
275 	    &prop_val, &prop_len) == DDI_PROP_SUCCESS) {
276 		if ((prop_len > 0) && (prop_len <= p_cfgp->max_rdcs)) {
277 			(void) ddi_prop_update_int_array(DDI_DEV_T_NONE,
278 			    hxgep->dip, prop, prop_val, prop_len);
279 		}
280 		ddi_prop_free(prop_val);
281 	}
282 
283 	prop = param_arr[param_rxdma_intr_time].fcode_name;
284 
285 	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip, 0, prop,
286 	    &prop_val, &prop_len) == DDI_PROP_SUCCESS) {
287 		if ((prop_len > 0) && (prop_len <= p_cfgp->max_rdcs)) {
288 			(void) ddi_prop_update_int_array(DDI_DEV_T_NONE,
289 			    hxgep->dip, prop, prop_val, prop_len);
290 		}
291 		ddi_prop_free(prop_val);
292 	}
293 
294 	prop = param_arr[param_rxdma_intr_pkts].fcode_name;
295 
296 	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip, 0, prop,
297 	    &prop_val, &prop_len) == DDI_PROP_SUCCESS) {
298 		if ((prop_len > 0) && (prop_len <= p_cfgp->max_rdcs)) {
299 			(void) ddi_prop_update_int_array(DDI_DEV_T_NONE,
300 			    hxgep->dip, prop, prop_val, prop_len);
301 		}
302 		ddi_prop_free(prop_val);
303 	}
304 
305 	hxge_set_hw_dma_config(hxgep);
306 	HXGE_DEBUG_MSG((hxgep, CFG_CTL, "<== hxge_use_cfg_dma_config"));
307 }
308 
/*
 * Thin wrapper for the class-configuration step of property processing;
 * defers entirely to hxge_set_hw_class_config(), which loads the L2/L3
 * class and hash-init properties into hxgep->class_config.
 */
static void
hxge_use_cfg_class_config(p_hxge_t hxgep)
{
	hxge_set_hw_class_config(hxgep);
}
314 
315 static void
316 hxge_set_hw_dma_config(p_hxge_t hxgep)
317 {
318 	p_hxge_dma_pt_cfg_t	p_dma_cfgp;
319 	p_hxge_hw_pt_cfg_t	p_cfgp;
320 
321 	HXGE_DEBUG_MSG((hxgep, CFG_CTL, "==> hxge_set_hw_dma_config"));
322 
323 	p_dma_cfgp = (p_hxge_dma_pt_cfg_t)&hxgep->pt_config;
324 	p_cfgp = (p_hxge_hw_pt_cfg_t)&p_dma_cfgp->hw_config;
325 
326 	/* Transmit DMA Channels */
327 	hxgep->ntdc = p_cfgp->max_tdcs;
328 
329 	/* Receive DMA Channels */
330 	hxgep->nrdc = p_cfgp->max_rdcs;
331 
332 	p_dma_cfgp->rbr_size = hxge_rbr_size;
333 	p_dma_cfgp->rcr_size = hxge_rcr_size;
334 
335 	HXGE_DEBUG_MSG((hxgep, CFG_CTL, " <== hxge_set_hw_dma_config"));
336 }
337 
338 
339 boolean_t
340 hxge_check_rxdma_port_member(p_hxge_t hxgep, uint8_t rdc)
341 {
342 	p_hxge_dma_pt_cfg_t	p_dma_cfgp;
343 	p_hxge_hw_pt_cfg_t	p_cfgp;
344 	int			status = B_TRUE;
345 
346 	HXGE_DEBUG_MSG((hxgep, CFG2_CTL, "==> hxge_check_rxdma_port_member"));
347 
348 	p_dma_cfgp = (p_hxge_dma_pt_cfg_t)&hxgep->pt_config;
349 	p_cfgp = (p_hxge_hw_pt_cfg_t)&p_dma_cfgp->hw_config;
350 
351 	/* Receive DMA Channels */
352 	if (rdc < p_cfgp->max_rdcs)
353 		status = B_TRUE;
354 	HXGE_DEBUG_MSG((hxgep, CFG2_CTL, " <== hxge_check_rxdma_port_member"));
355 
356 	return (status);
357 }
358 
359 boolean_t
360 hxge_check_txdma_port_member(p_hxge_t hxgep, uint8_t tdc)
361 {
362 	p_hxge_dma_pt_cfg_t	p_dma_cfgp;
363 	p_hxge_hw_pt_cfg_t	p_cfgp;
364 	int			status = B_FALSE;
365 
366 	HXGE_DEBUG_MSG((hxgep, CFG2_CTL, "==> hxge_check_txdma_port_member"));
367 
368 	p_dma_cfgp = (p_hxge_dma_pt_cfg_t)&hxgep->pt_config;
369 	p_cfgp = (p_hxge_hw_pt_cfg_t)&p_dma_cfgp->hw_config;
370 
371 	/* Receive DMA Channels */
372 	if (tdc < p_cfgp->max_tdcs)
373 		status = B_TRUE;
374 	HXGE_DEBUG_MSG((hxgep, CFG2_CTL, " <== hxge_check_txdma_port_member"));
375 
376 	return (status);
377 }
378 
379 
380 /*
381  * Read the L2 classes, L3 classes, and initial hash from either hxge.conf
382  * or OBP. Populate these properties into the hxge data structure for latter
383  * use. Note that we are not updating these soft properties.
384  */
385 static void
386 hxge_set_hw_class_config(p_hxge_t hxgep)
387 {
388 	int			i, j;
389 	p_hxge_param_t		param_arr;
390 	int			*int_prop_val;
391 	uint32_t		cfg_value;
392 	char			*prop;
393 	p_hxge_class_pt_cfg_t	p_class_cfgp;
394 	int			start_prop, end_prop;
395 	uint_t			prop_cnt;
396 
397 	HXGE_DEBUG_MSG((hxgep, CFG_CTL, " ==> hxge_set_hw_class_config"));
398 
399 	p_class_cfgp = (p_hxge_class_pt_cfg_t)&hxgep->class_config;
400 
401 	param_arr = hxgep->param_arr;
402 
403 	/*
404 	 * L2 class configuration. User configurable ether types
405 	 */
406 	start_prop =  param_class_cfg_ether_usr1;
407 	end_prop = param_class_cfg_ether_usr2;
408 
409 	for (i = start_prop; i <= end_prop; i++) {
410 		prop = param_arr[i].fcode_name;
411 		if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, hxgep->dip,
412 		    0, prop, &int_prop_val, &prop_cnt) == DDI_PROP_SUCCESS) {
413 			cfg_value =  (uint32_t)*int_prop_val;
414 			ddi_prop_free(int_prop_val);
415 		} else {
416 			cfg_value = (uint32_t)param_arr[i].value;
417 		}
418 
419 		j = (i - start_prop) + TCAM_CLASS_ETYPE_1;
420 		p_class_cfgp->class_cfg[j] = cfg_value;
421 	}
422 
423 	/*
424 	 * Use properties from either .conf or the NDD param array. Only bits
425 	 * 2 and 3 are significant
426 	 */
427 	start_prop =  param_class_opt_ipv4_tcp;
428 	end_prop = param_class_opt_ipv6_sctp;
429 
430 	for (i = start_prop; i <= end_prop; i++) {
431 		prop = param_arr[i].fcode_name;
432 		if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, hxgep->dip,
433 		    0, prop, &int_prop_val, &prop_cnt) == DDI_PROP_SUCCESS) {
434 			cfg_value =  (uint32_t)*int_prop_val;
435 			ddi_prop_free(int_prop_val);
436 		} else {
437 			cfg_value = (uint32_t)param_arr[i].value;
438 		}
439 
440 		j = (i - start_prop) + TCAM_CLASS_TCP_IPV4;
441 		p_class_cfgp->class_cfg[j] = cfg_value;
442 	}
443 
444 	prop = param_arr[param_hash_init_value].fcode_name;
445 
446 	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, hxgep->dip, 0, prop,
447 	    &int_prop_val, &prop_cnt) == DDI_PROP_SUCCESS) {
448 		cfg_value =  (uint32_t)*int_prop_val;
449 		ddi_prop_free(int_prop_val);
450 	} else {
451 		cfg_value = (uint32_t)param_arr[param_hash_init_value].value;
452 	}
453 
454 	p_class_cfgp->init_hash = (uint32_t)cfg_value;
455 
456 	HXGE_DEBUG_MSG((hxgep, CFG_CTL, " <== hxge_set_hw_class_config"));
457 }
458 
459 
460 /*
461  * Interrupts related interface functions.
462  */
/*
 * Build the logical-device / logical-group tables for interrupt dispatch.
 *
 * A "logical device" (ldv) is one interrupt source: each RDC, each TDC,
 * plus VMAC, PFC, NMAC (link status) and the system-error source.  Logical
 * devices are distributed over at most *navail_p logical groups (ldg);
 * hxge_ldgv_setup() packs any overflow devices into the last group.
 *
 * navail_p    (in)  number of interrupt vectors available.
 * nrequired_p (out) number of groups actually used.
 *
 * Returns HXGE_OK on success, HXGE_ERROR when no vectors or no devices/
 * groups are configured.
 */
hxge_status_t
hxge_ldgv_init(p_hxge_t hxgep, int *navail_p, int *nrequired_p)
{
	uint8_t			ldv, i, maxldvs, maxldgs, start, end, nldvs;
	int			ldg, endldg, ngrps;
	uint8_t			channel;
	p_hxge_dma_pt_cfg_t	p_dma_cfgp;
	p_hxge_hw_pt_cfg_t	p_cfgp;
	p_hxge_ldgv_t		ldgvp;
	p_hxge_ldg_t		ldgp, ptr;
	p_hxge_ldv_t		ldvp;
	hxge_status_t		status = HXGE_OK;

	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_ldgv_init"));
	if (!*navail_p) {
		*nrequired_p = 0;
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "<== hxge_ldgv_init:no avail"));
		return (HXGE_ERROR);
	}
	p_dma_cfgp = (p_hxge_dma_pt_cfg_t)&hxgep->pt_config;
	p_cfgp = (p_hxge_hw_pt_cfg_t)&p_dma_cfgp->hw_config;

	/* each DMA channels */
	nldvs = p_cfgp->max_tdcs + p_cfgp->max_rdcs;

	/* vmac */
	nldvs++;

	/* pfc */
	nldvs++;

	/* nmac for the link status register only */
	nldvs++;

	/* system error interrupts. */
	nldvs++;

	maxldvs = nldvs;
	maxldgs = p_cfgp->max_ldgs;

	if (!maxldvs || !maxldgs) {
		/* No devices configured. */
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "<== hxge_ldgv_init: "
		    "no logical devices or groups configured."));
		return (HXGE_ERROR);
	}
	/*
	 * Allocate the group and device arrays on first call.
	 * NOTE(review): if ldgvp already exists, ldgp/ldvp below are left
	 * uninitialized before use — looks like this path assumes a fresh
	 * (NULL) ldgvp on every invocation; confirm against callers.
	 */
	ldgvp = hxgep->ldgvp;
	if (ldgvp == NULL) {
		ldgvp = KMEM_ZALLOC(sizeof (hxge_ldgv_t), KM_SLEEP);
		hxgep->ldgvp = ldgvp;
		ldgvp->maxldgs = maxldgs;
		ldgvp->maxldvs = maxldvs;
		ldgp = ldgvp->ldgp =
		    KMEM_ZALLOC(sizeof (hxge_ldg_t) * maxldgs, KM_SLEEP);
		ldvp = ldgvp->ldvp =
		    KMEM_ZALLOC(sizeof (hxge_ldv_t) * maxldvs, KM_SLEEP);
	}

	ldgvp->ndma_ldvs = p_cfgp->max_tdcs + p_cfgp->max_rdcs;
	ldgvp->tmres = HXGE_TIMER_RESO;

	HXGE_DEBUG_MSG((hxgep, INT_CTL,
	    "==> hxge_ldgv_init: maxldvs %d maxldgs %d nldvs %d",
	    maxldvs, maxldgs, nldvs));

	/* Initialize every group: armed, sequential ldg numbers, no ldvs. */
	ldg = p_cfgp->start_ldg;
	ptr = ldgp;
	for (i = 0; i < maxldgs; i++) {
		ptr->arm = B_TRUE;
		ptr->vldg_index = i;
		ptr->ldg_timer = HXGE_TIMER_LDG;
		ptr->ldg = ldg++;
		ptr->sys_intr_handler = hxge_intr;
		ptr->nldvs = 0;
		ptr->hxgep = hxgep;
		HXGE_DEBUG_MSG((hxgep, INT_CTL,
		    "==> hxge_ldgv_init: maxldvs %d maxldgs %d ldg %d",
		    maxldvs, maxldgs, ptr->ldg));
		HXGE_DEBUG_MSG((hxgep, INT_CTL,
		    "==> hxge_ldv_init: timer %d", ptr->ldg_timer));
		ptr++;
	}

	/* Use at most as many groups as there are vectors available. */
	ldg = p_cfgp->start_ldg;
	if (maxldgs > *navail_p) {
		ngrps = *navail_p;
	} else {
		ngrps = maxldgs;
	}
	endldg = ldg + ngrps;

	/*
	 * Receive DMA channels.
	 */
	channel = p_cfgp->start_rdc;
	start = p_cfgp->start_rdc + HXGE_RDMA_LD_START;
	end = start + p_cfgp->max_rdcs;
	nldvs = 0;
	ldgvp->nldvs = 0;
	ldgp->ldvp = NULL;
	*nrequired_p = 0;
	ptr = ldgp;

	/*
	 * Start with RDC to configure logical devices for each group.
	 */
	for (i = 0, ldv = start; ldv < end; i++, ldv++) {
		ldvp->is_rxdma = B_TRUE;
		ldvp->ldv = ldv;

		/*
		 * If non-seq needs to change the following code
		 */
		ldvp->channel = channel++;
		ldvp->vdma_index = i;
		ldvp->ldv_intr_handler = hxge_rx_intr;
		ldvp->ldv_ldf_masks = 0;
		ldvp->use_timer = B_FALSE;
		ldvp->hxgep = hxgep;
		/* hxge_ldgv_setup advances ptr/ldvp as groups fill up. */
		hxge_ldgv_setup(&ptr, &ldvp, ldv, endldg, nrequired_p);
		nldvs++;
	}

	/*
	 * Transmit DMA channels.
	 */
	channel = p_cfgp->start_tdc;
	start = p_cfgp->start_tdc + HXGE_TDMA_LD_START;
	end = start + p_cfgp->max_tdcs;
	for (i = 0, ldv = start; ldv < end; i++, ldv++) {
		ldvp->is_txdma = B_TRUE;
		ldvp->ldv = ldv;
		ldvp->channel = channel++;
		ldvp->vdma_index = i;
		ldvp->ldv_intr_handler = hxge_tx_intr;
		ldvp->ldv_ldf_masks = 0;
		ldvp->use_timer = B_FALSE;
		ldvp->hxgep = hxgep;
		hxge_ldgv_setup(&ptr, &ldvp, ldv, endldg, nrequired_p);
		nldvs++;
	}

	/*
	 * VMAC
	 */
	ldvp->is_vmac = B_TRUE;
	ldvp->ldv_intr_handler = hxge_vmac_intr;
	ldvp->ldv_ldf_masks = 0;
	ldv = HXGE_VMAC_LD;
	ldvp->ldv = ldv;
	ldvp->use_timer = B_FALSE;
	ldvp->hxgep = hxgep;
	hxge_ldgv_setup(&ptr, &ldvp, ldv, endldg, nrequired_p);
	nldvs++;

	HXGE_DEBUG_MSG((hxgep, INT_CTL,
	    "==> hxge_ldgv_init: nldvs %d navail %d nrequired %d",
	    nldvs, *navail_p, *nrequired_p));

	/*
	 * PFC
	 */
	ldvp->is_pfc = B_TRUE;
	ldvp->ldv_intr_handler = hxge_pfc_intr;
	ldvp->ldv_ldf_masks = 0;
	ldv = HXGE_PFC_LD;
	ldvp->ldv = ldv;
	ldvp->use_timer = B_FALSE;
	ldvp->hxgep = hxgep;
	hxge_ldgv_setup(&ptr, &ldvp, ldv, endldg, nrequired_p);
	nldvs++;

	HXGE_DEBUG_MSG((hxgep, INT_CTL,
	    "==> hxge_ldgv_init: nldvs %d navail %d nrequired %d",
	    nldvs, *navail_p, *nrequired_p));

	/*
	 * NMAC
	 */
	ldvp->ldv_intr_handler = hxge_nmac_intr;
	ldvp->ldv_ldf_masks = 0;
	ldv = HXGE_NMAC_LD;
	ldvp->ldv = ldv;
	ldvp->use_timer = B_FALSE;
	ldvp->hxgep = hxgep;
	hxge_ldgv_setup(&ptr, &ldvp, ldv, endldg, nrequired_p);
	nldvs++;

	HXGE_DEBUG_MSG((hxgep, INT_CTL,
	    "==> hxge_ldgv_init: nldvs %d navail %d nrequired %d",
	    nldvs, *navail_p, *nrequired_p));

	/*
	 * System error interrupts.
	 */
	ldv = HXGE_SYS_ERROR_LD;
	ldvp->ldv = ldv;
	ldvp->is_syserr = B_TRUE;
	ldvp->ldv_intr_handler = hxge_syserr_intr;
	ldvp->ldv_ldf_masks = 0;
	ldvp->hxgep = hxgep;
	ldvp->use_timer = B_FALSE;
	/* Remember the syserr device; it is consulted directly elsewhere. */
	ldgvp->ldvp_syserr = ldvp;

	/* Reset PEU error mask to allow PEU error interrupts */
	HXGE_REG_WR64(hxgep->hpi_handle, PEU_INTR_MASK, 0x0);

	/*
	 * Unmask the system interrupt states.
	 */
	(void) hxge_fzc_sys_err_mask_set(hxgep, B_FALSE);
	(void) hxge_ldgv_setup(&ptr, &ldvp, ldv, endldg, nrequired_p);
	nldvs++;

	ldgvp->ldg_intrs = *nrequired_p;

	HXGE_DEBUG_MSG((hxgep, INT_CTL,
	    "==> hxge_ldgv_init: nldvs %d navail %d nrequired %d",
	    nldvs, *navail_p, *nrequired_p));
	HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_ldgv_init"));
	return (status);
}
686 
687 hxge_status_t
688 hxge_ldgv_uninit(p_hxge_t hxgep)
689 {
690 	p_hxge_ldgv_t		ldgvp;
691 
692 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_ldgv_uninit"));
693 	ldgvp = hxgep->ldgvp;
694 	if (ldgvp == NULL) {
695 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
696 		    "<== hxge_ldgv_uninit: no logical group configured."));
697 		return (HXGE_OK);
698 	}
699 
700 	if (ldgvp->ldgp) {
701 		KMEM_FREE(ldgvp->ldgp, sizeof (hxge_ldg_t) * ldgvp->maxldgs);
702 	}
703 	if (ldgvp->ldvp) {
704 		KMEM_FREE(ldgvp->ldvp, sizeof (hxge_ldv_t) * ldgvp->maxldvs);
705 	}
706 
707 	KMEM_FREE(ldgvp, sizeof (hxge_ldgv_t));
708 	hxgep->ldgvp = NULL;
709 
710 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_ldgv_uninit"));
711 	return (HXGE_OK);
712 }
713 
714 hxge_status_t
715 hxge_intr_ldgv_init(p_hxge_t hxgep)
716 {
717 	hxge_status_t	status = HXGE_OK;
718 
719 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_intr_ldgv_init"));
720 	/*
721 	 * Configure the logical device group numbers, state vectors
722 	 * and interrupt masks for each logical device.
723 	 */
724 	status = hxge_fzc_intr_init(hxgep);
725 
726 	/*
727 	 * Configure logical device masks and timers.
728 	 */
729 	status = hxge_intr_mask_mgmt(hxgep);
730 
731 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_intr_ldgv_init"));
732 	return (status);
733 }
734 
/*
 * Program the per-device LDF interrupt masks and the per-group arm bit /
 * timer into the hardware for every configured logical group.
 *
 * Walks ldgvp->ldgp (one entry per group) and ldgvp->ldvp (devices laid
 * out consecutively, grouped by ldg) in lockstep: the ldvp cursor is NOT
 * reset between groups, so the device ordering must match the group
 * ordering established by hxge_ldgv_init().
 *
 * Returns HXGE_OK, or (HXGE_ERROR | rs) on the first HPI failure.
 */
hxge_status_t
hxge_intr_mask_mgmt(p_hxge_t hxgep)
{
	p_hxge_ldgv_t	ldgvp;
	p_hxge_ldg_t	ldgp;
	p_hxge_ldv_t	ldvp;
	hpi_handle_t	handle;
	int		i, j;
	hpi_status_t	rs = HPI_SUCCESS;

	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_intr_mask_mgmt"));

	if ((ldgvp = hxgep->ldgvp) == NULL) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "<== hxge_intr_mask_mgmt: Null ldgvp"));
		return (HXGE_ERROR);
	}
	handle = HXGE_DEV_HPI_HANDLE(hxgep);
	ldgp = ldgvp->ldgp;
	ldvp = ldgvp->ldvp;
	if (ldgp == NULL || ldvp == NULL) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "<== hxge_intr_mask_mgmt: Null ldgp or ldvp"));
		return (HXGE_ERROR);
	}

	HXGE_DEBUG_MSG((hxgep, INT_CTL,
	    "==> hxge_intr_mask_mgmt: # of intrs %d ", ldgvp->ldg_intrs));
	/* Initialize masks. */
	HXGE_DEBUG_MSG((hxgep, INT_CTL,
	    "==> hxge_intr_mask_mgmt(Hydra): # intrs %d ", ldgvp->ldg_intrs));
	for (i = 0; i < ldgvp->ldg_intrs; i++, ldgp++) {
		HXGE_DEBUG_MSG((hxgep, INT_CTL,
		    "==> hxge_intr_mask_mgmt(Hydra): # ldv %d in group %d",
		    ldgp->nldvs, ldgp->ldg));
		/* ldvp advances across groups; devices are contiguous. */
		for (j = 0; j < ldgp->nldvs; j++, ldvp++) {
			HXGE_DEBUG_MSG((hxgep, INT_CTL,
			    "==> hxge_intr_mask_mgmt: set ldv # %d "
			    "for ldg %d", ldvp->ldv, ldgp->ldg));
			rs = hpi_intr_mask_set(handle, ldvp->ldv,
			    ldvp->ldv_ldf_masks);
			if (rs != HPI_SUCCESS) {
				HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
				    "<== hxge_intr_mask_mgmt: set mask failed "
				    " rs 0x%x ldv %d mask 0x%x",
				    rs, ldvp->ldv, ldvp->ldv_ldf_masks));
				return (HXGE_ERROR | rs);
			}
			HXGE_DEBUG_MSG((hxgep, INT_CTL,
			    "==> hxge_intr_mask_mgmt: set mask OK "
			    " rs 0x%x ldv %d mask 0x%x",
			    rs, ldvp->ldv, ldvp->ldv_ldf_masks));
		}
	}

	/* Second pass: rewind to the first group for timer/arm setup. */
	ldgp = ldgvp->ldgp;
	/* Configure timer and arm bit */
	for (i = 0; i < hxgep->ldgvp->ldg_intrs; i++, ldgp++) {
		rs = hpi_intr_ldg_mgmt_set(handle, ldgp->ldg,
		    ldgp->arm, ldgp->ldg_timer);
		if (rs != HPI_SUCCESS) {
			HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
			    "<== hxge_intr_mask_mgmt: set timer failed "
			    " rs 0x%x dg %d timer 0x%x",
			    rs, ldgp->ldg, ldgp->ldg_timer));
			return (HXGE_ERROR | rs);
		}
		HXGE_DEBUG_MSG((hxgep, INT_CTL,
		    "==> hxge_intr_mask_mgmt: set timer OK "
		    " rs 0x%x ldg %d timer 0x%x",
		    rs, ldgp->ldg, ldgp->ldg_timer));
	}

	HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_fzc_intr_mask_mgmt"));
	return (HXGE_OK);
}
811 
/*
 * Enable (on == B_TRUE) or disable all logical-device interrupts: clears
 * or sets each device's LDF mask and arms/disarms every group.
 *
 * Same lockstep ldgp/ldvp walk as hxge_intr_mask_mgmt(): the ldvp cursor
 * runs continuously across groups and must match the layout built by
 * hxge_ldgv_init().
 *
 * Returns HXGE_OK, or (HXGE_ERROR | rs) on the first HPI failure.
 */
hxge_status_t
hxge_intr_mask_mgmt_set(p_hxge_t hxgep, boolean_t on)
{
	p_hxge_ldgv_t	ldgvp;
	p_hxge_ldg_t	ldgp;
	p_hxge_ldv_t	ldvp;
	hpi_handle_t	handle;
	int		i, j;
	hpi_status_t	rs = HPI_SUCCESS;

	HXGE_DEBUG_MSG((hxgep, INT_CTL,
	    "==> hxge_intr_mask_mgmt_set (%d)", on));

	if ((ldgvp = hxgep->ldgvp) == NULL) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "==> hxge_intr_mask_mgmt_set: Null ldgvp"));
		return (HXGE_ERROR);
	}
	handle = HXGE_DEV_HPI_HANDLE(hxgep);
	ldgp = ldgvp->ldgp;
	ldvp = ldgvp->ldvp;
	if (ldgp == NULL || ldvp == NULL) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "<== hxge_intr_mask_mgmt_set: Null ldgp or ldvp"));
		return (HXGE_ERROR);
	}

	/* set masks. */
	for (i = 0; i < ldgvp->ldg_intrs; i++, ldgp++) {
		HXGE_DEBUG_MSG((hxgep, INT_CTL,
		    "==> hxge_intr_mask_mgmt_set: flag %d ldg %d"
		    "set mask nldvs %d", on, ldgp->ldg, ldgp->nldvs));
		for (j = 0; j < ldgp->nldvs; j++, ldvp++) {
			HXGE_DEBUG_MSG((hxgep, INT_CTL,
			    "==> hxge_intr_mask_mgmt_set: "
			    "for %d %d flag %d", i, j, on));
			/* Mask of 0 enables; LD_IM_MASK disables. */
			if (on) {
				ldvp->ldv_ldf_masks = 0;
				HXGE_DEBUG_MSG((hxgep, INT_CTL,
				    "==> hxge_intr_mask_mgmt_set: "
				    "ON mask off"));
			} else {
				ldvp->ldv_ldf_masks = (uint8_t)LD_IM_MASK;
				HXGE_DEBUG_MSG((hxgep, INT_CTL,
				    "==> hxge_intr_mask_mgmt_set:mask on"));
			}

			/*
			 * Bringup - NMAC constantly interrupts since hydrad
			 * is not available yet. When hydrad is available
			 * and handles the interrupts, we will delete the
			 * following two lines
			 */
			if (ldvp->ldv_intr_handler == hxge_nmac_intr)
				ldvp->ldv_ldf_masks = (uint8_t)LD_IM_MASK;

			rs = hpi_intr_mask_set(handle, ldvp->ldv,
			    ldvp->ldv_ldf_masks);
			if (rs != HPI_SUCCESS) {
				HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
				    "==> hxge_intr_mask_mgmt_set: "
				    "set mask failed rs 0x%x ldv %d mask 0x%x",
				    rs, ldvp->ldv, ldvp->ldv_ldf_masks));
				return (HXGE_ERROR | rs);
			}
			HXGE_DEBUG_MSG((hxgep, INT_CTL,
			    "==> hxge_intr_mask_mgmt_set: flag %d"
			    "set mask OK ldv %d mask 0x%x",
			    on, ldvp->ldv, ldvp->ldv_ldf_masks));
		}
	}

	/* Second pass over the groups: update and program the arm bit. */
	ldgp = ldgvp->ldgp;
	/* set the arm bit */
	for (i = 0; i < hxgep->ldgvp->ldg_intrs; i++, ldgp++) {
		if (on && !ldgp->arm) {
			ldgp->arm = B_TRUE;
		} else if (!on && ldgp->arm) {
			ldgp->arm = B_FALSE;
		}
		rs = hpi_intr_ldg_mgmt_set(handle, ldgp->ldg,
		    ldgp->arm, ldgp->ldg_timer);
		if (rs != HPI_SUCCESS) {
			HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
			    "<== hxge_intr_mask_mgmt_set: "
			    "set timer failed rs 0x%x ldg %d timer 0x%x",
			    rs, ldgp->ldg, ldgp->ldg_timer));
			return (HXGE_ERROR | rs);
		}
		HXGE_DEBUG_MSG((hxgep, INT_CTL,
		    "==> hxge_intr_mask_mgmt_set: OK (flag %d) "
		    "set timer ldg %d timer 0x%x",
		    on, ldgp->ldg, ldgp->ldg_timer));
	}

	HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_intr_mask_mgmt_set"));
	return (HXGE_OK);
}
910 
911 /*
912  * For Big Endian systems, the mac address will be from OBP. For Little
913  * Endian (x64) systems, it will be retrieved from the card since it cannot
914  * be programmed into PXE.
915  * This function also populates the MMAC parameters.
916  */
/*
 * Determine the blade's MAC addresses and initialize the multi-MAC pool.
 *
 * Big-endian (SPARC/OBP): start from the system address, then override
 * with the card's "local-mac-address" property when present and honored.
 * Little-endian (x86): read the addresses straight from the card via
 * hxge_pfc_mac_addrs_get(), since they cannot be programmed into PXE.
 *
 * Returns HXGE_OK, or HXGE_ERROR when the supported-MAC count cannot be
 * read or the MMAC pool fails to initialize.
 */
static hxge_status_t
hxge_get_mac_addr_properties(p_hxge_t hxgep)
{
#if defined(_BIG_ENDIAN)
	uchar_t		*prop_val;
	uint_t		prop_len;
#endif
	uint32_t	num_macs;
	hxge_status_t	status;

	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_get_mac_addr_properties "));
#if defined(_BIG_ENDIAN)
	/*
	 * Get the ethernet address.
	 */
	(void) localetheraddr((struct ether_addr *)NULL, &hxgep->ouraddr);

	/*
	 * Check if it is an adapter with its own local mac address
	 * If it is present, override the system mac address.
	 */
	if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, hxgep->dip, 0,
	    "local-mac-address", &prop_val, &prop_len) == DDI_PROP_SUCCESS) {
		if (prop_len == ETHERADDRL) {
			hxgep->factaddr = *(p_ether_addr_t)prop_val;
			HXGE_DEBUG_MSG((hxgep, DDI_CTL, "Local mac address = "
			    "%02x:%02x:%02x:%02x:%02x:%02x",
			    prop_val[0], prop_val[1], prop_val[2],
			    prop_val[3], prop_val[4], prop_val[5]));
		}
		ddi_prop_free(prop_val);
	}
	/* "local-mac-address?" selects factory vs. system address. */
	if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, hxgep->dip, 0,
	    "local-mac-address?", &prop_val, &prop_len) == DDI_PROP_SUCCESS) {
		if (strncmp("true", (caddr_t)prop_val, (size_t)prop_len) == 0) {
			hxgep->ouraddr = hxgep->factaddr;
			HXGE_DEBUG_MSG((hxgep, DDI_CTL,
			    "Using local MAC address"));
		}
		ddi_prop_free(prop_val);
	} else {
		hxgep->ouraddr = hxgep->factaddr;
	}
#else
	(void) hxge_pfc_mac_addrs_get(hxgep);
	hxgep->ouraddr = hxgep->factaddr;
#endif

	/*
	 * Get the number of MAC addresses the Hydra supports per blade.
	 */
	if (hxge_pfc_num_macs_get(hxgep, &num_macs) == HXGE_OK) {
		hxgep->hxge_mmac_info.num_mmac = (uint8_t)num_macs;
	} else {
		HXGE_ERROR_MSG((NULL, HXGE_ERR_CTL,
		    "hxge_get_mac_addr_properties: get macs failed"));
		return (HXGE_ERROR);
	}

	/*
	 * Initialize alt. mac addr. in the mac pool
	 */
	status = hxge_mmac_init(hxgep);
	if (status != HXGE_OK) {
		HXGE_ERROR_MSG((NULL, HXGE_ERR_CTL,
		    "hxge_get_mac_addr_properties: init mmac failed"));
		return (HXGE_ERROR);
	}

	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_get_mac_addr_properties "));
	return (HXGE_OK);
}
989 
990 static void
991 hxge_ldgv_setup(p_hxge_ldg_t *ldgp, p_hxge_ldv_t *ldvp, uint8_t ldv,
992 	uint8_t endldg, int *ngrps)
993 {
994 	HXGE_DEBUG_MSG((NULL, INT_CTL, "==> hxge_ldgv_setup"));
995 	/* Assign the group number for each device. */
996 	(*ldvp)->ldg_assigned = (*ldgp)->ldg;
997 	(*ldvp)->ldgp = *ldgp;
998 	(*ldvp)->ldv = ldv;
999 
1000 	HXGE_DEBUG_MSG((NULL, INT_CTL,
1001 	    "==> hxge_ldgv_setup: ldv %d endldg %d ldg %d, ldvp $%p",
1002 	    ldv, endldg, (*ldgp)->ldg, (*ldgp)->ldvp));
1003 
1004 	(*ldgp)->nldvs++;
1005 	if ((*ldgp)->ldg == (endldg - 1)) {
1006 		if ((*ldgp)->ldvp == NULL) {
1007 			(*ldgp)->ldvp = *ldvp;
1008 			*ngrps += 1;
1009 			HXGE_DEBUG_MSG((NULL, INT_CTL,
1010 			    "==> hxge_ldgv_setup: ngrps %d", *ngrps));
1011 		}
1012 		HXGE_DEBUG_MSG((NULL, INT_CTL,
1013 		    "==> hxge_ldgv_setup: ldvp $%p ngrps %d",
1014 		    *ldvp, *ngrps));
1015 		++*ldvp;
1016 	} else {
1017 		(*ldgp)->ldvp = *ldvp;
1018 		*ngrps += 1;
1019 		HXGE_DEBUG_MSG((NULL, INT_CTL, "==> hxge_ldgv_setup(done): "
1020 		    "ldv %d endldg %d ldg %d, ldvp $%p",
1021 		    ldv, endldg, (*ldgp)->ldg, (*ldgp)->ldvp));
1022 		(*ldvp) = ++*ldvp;
1023 		(*ldgp) = ++*ldgp;
1024 		HXGE_DEBUG_MSG((NULL, INT_CTL,
1025 		    "==> hxge_ldgv_setup: new ngrps %d", *ngrps));
1026 	}
1027 
1028 	HXGE_DEBUG_MSG((NULL, INT_CTL, "==> hxge_ldgv_setup: "
1029 	    "ldg %d nldvs %d ldv %d ldvp $%p endldg %d ngrps %d",
1030 	    (*ldgp)->ldg, (*ldgp)->nldvs, ldv, ldvp, endldg, *ngrps));
1031 
1032 	HXGE_DEBUG_MSG((NULL, INT_CTL, "<== hxge_ldgv_setup"));
1033 }
1034 
1035 /*
1036  * Note: This function assumes the following distribution of mac
1037  * addresses for a hydra blade:
1038  *
1039  *      -------------
1040  *    0|            |0 - local-mac-address for blade
1041  *      -------------
1042  *     |            |1 - Start of alt. mac addr. for blade
1043  *     |            |
1044  *     |            |
1045  *     |            |15
1046  *     --------------
1047  */
1048 
1049 static hxge_status_t
1050 hxge_mmac_init(p_hxge_t hxgep)
1051 {
1052 	int slot;
1053 	hxge_mmac_t *mmac_info;
1054 
1055 	mmac_info = (hxge_mmac_t *)&hxgep->hxge_mmac_info;
1056 
1057 	/* Set flags for unique MAC */
1058 	mmac_info->mac_pool[0].flags |= MMAC_SLOT_USED | MMAC_VENDOR_ADDR;
1059 	mmac_info->num_factory_mmac = 1;
1060 
1061 	/*
1062 	 * Skip the factory/default address which is in slot 0.
1063 	 * Initialze all other mac addr. to "AVAILABLE" state.
1064 	 * Clear flags of all alternate MAC slots.
1065 	 */
1066 	for (slot = 1; slot < mmac_info->num_mmac; slot++) {
1067 		(void) hpi_pfc_clear_mac_address(hxgep->hpi_handle, slot);
1068 		mmac_info->mac_pool[slot].flags = 0;
1069 	}
1070 
1071 	/* Exclude the factory mac address */
1072 	mmac_info->naddrfree = mmac_info->num_mmac - 1;
1073 
1074 	/* Initialize the first two parameters for mmac kstat */
1075 	hxgep->statsp->mmac_stats.mmac_max_cnt = mmac_info->num_mmac;
1076 	hxgep->statsp->mmac_stats.mmac_avail_cnt = mmac_info->naddrfree;
1077 
1078 	return (HXGE_OK);
1079 }
1080 
1081 /*ARGSUSED*/
1082 uint_t
1083 hxge_nmac_intr(caddr_t arg1, caddr_t arg2)
1084 {
1085 	p_hxge_t		hxgep = (p_hxge_t)arg2;
1086 	hpi_handle_t		handle;
1087 	p_hxge_stats_t		statsp;
1088 	cip_link_stat_t		link_stat;
1089 
1090 	HXGE_DEBUG_MSG((hxgep, MAC_INT_CTL, "==> hxge_nmac_intr"));
1091 
1092 	handle = HXGE_DEV_HPI_HANDLE(hxgep);
1093 	statsp = (p_hxge_stats_t)hxgep->statsp;
1094 
1095 	HXGE_REG_RD32(handle, CIP_LINK_STAT, &link_stat.value);
1096 	HXGE_DEBUG_MSG((hxgep, MAC_INT_CTL, "hxge_nmac_intr: status is 0x%x",
1097 	    link_stat.value));
1098 
1099 	if (link_stat.bits.xpcs0_link_up) {
1100 		mac_link_update(hxgep->mach, LINK_STATE_UP);
1101 		statsp->mac_stats.link_up = 1;
1102 	} else {
1103 		mac_link_update(hxgep->mach, LINK_STATE_DOWN);
1104 		statsp->mac_stats.link_up = 0;
1105 	}
1106 
1107 	HXGE_DEBUG_MSG((hxgep, MAC_INT_CTL, "<== hxge_nmac_intr"));
1108 	return (DDI_INTR_CLAIMED);
1109 }
1110