xref: /illumos-gate/usr/src/uts/common/io/nxge/nxge_hio_guest.c (revision 24fe0b3bf671e123467ce1df0b67cadd3614c8e4)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 /*
28  * nxge_hio_guest.c
29  *
30  * This file manages the virtualization resources for a guest domain.
31  *
32  */
33 
34 #include <sys/nxge/nxge_impl.h>
35 #include <sys/nxge/nxge_fzc.h>
36 #include <sys/nxge/nxge_rxdma.h>
37 #include <sys/nxge/nxge_txdma.h>
38 
39 #include <sys/nxge/nxge_hio.h>
40 
41 /*
42  * nxge_hio_unregister
43  *
44  *	Unregister with the VNET module.
45  *
46  * Arguments:
47  * 	nxge
48  *
49  * Notes:
50  *	We must uninitialize all DMA channels associated with the VR, too.
51  *
52  *	We're assuming that the channels will be disabled & unassigned
53  *	in the service domain, after we're done here.
54  *
55  * Context:
56  *	Guest domain
57  */
58 void
59 nxge_hio_unregister(
60 	nxge_t *nxge)
61 {
62 	nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
63 
64 	if (nhd == 0) {
65 		return;
66 	}
67 
68 #if defined(sun4v)
69 	/* Unregister with vNet. */
70 	if (nhd->hio.vio.unregister) {
71 		if (nxge->hio_vr)
72 			(*nhd->hio.vio.unregister)(nxge->hio_vr->vhp);
73 	}
74 #endif
75 }
76 
77 /*
78  * nxge_guest_regs_map
79  *
80  *	Map in a guest domain's register set(s).
81  *
82  * Arguments:
83  * 	nxge
84  *
85  * Notes:
86  *	Note that we set <is_vraddr> to TRUE.
87  *
88  * Context:
89  *	Guest domain
90  */
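/*
 * Device access attributes used for the guest's VR register mapping
 * below: little-endian structural access with strict ordering.
 */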
91 static ddi_device_acc_attr_t nxge_guest_register_access_attributes = {
92 	DDI_DEVICE_ATTR_V0,
93 	DDI_STRUCTURE_LE_ACC,
94 	DDI_STRICTORDER_ACC,
95 };
96 
97 int
98 nxge_guest_regs_map(
99 	nxge_t *nxge)
100 {
101 	dev_regs_t 	*regs;
102 	off_t		regsize;
103 	int rv;
104 
105 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_guest_regs_map"));
106 
107 	/* So we can allocate properly-aligned memory. */
108 	nxge->niu_type = N2_NIU; /* Version 1.0 only */
109 	nxge->function_num = nxge->instance; /* HIOXXX Looking for ideas. */
110 
111 	nxge->dev_regs = KMEM_ZALLOC(sizeof (dev_regs_t), KM_SLEEP);
112 	regs = nxge->dev_regs;
113 
114 	if ((rv = ddi_dev_regsize(nxge->dip, 0, &regsize)) != DDI_SUCCESS) {
115 		NXGE_ERROR_MSG((nxge, HIO_CTL, "ddi_dev_regsize() failed"));
116 		return (NXGE_ERROR);
117 	}
118 
119 	rv = ddi_regs_map_setup(nxge->dip, 0, (caddr_t *)&regs->nxge_regp, 0, 0,
120 	    &nxge_guest_register_access_attributes, &regs->nxge_regh);
121 
122 	if (rv != DDI_SUCCESS) {
123 		NXGE_ERROR_MSG((nxge, HIO_CTL, "ddi_regs_map_setup() failed"));
124 		return (NXGE_ERROR);
125 	}
126 
127 	nxge->npi_handle.regh = regs->nxge_regh;
128 	nxge->npi_handle.regp = (npi_reg_ptr_t)regs->nxge_regp;
129 	nxge->npi_handle.is_vraddr = B_TRUE;
130 	nxge->npi_handle.function.instance = nxge->instance;
131 	nxge->npi_handle.function.function = nxge->function_num;
132 	nxge->npi_handle.nxgep = (void *)nxge;
133 
134 	/* NPI_REG_ADD_HANDLE_SET() */
135 	nxge->npi_reg_handle.regh = regs->nxge_regh;
136 	nxge->npi_reg_handle.regp = (npi_reg_ptr_t)regs->nxge_regp;
137 	nxge->npi_reg_handle.is_vraddr = B_TRUE;
138 	nxge->npi_reg_handle.function.instance = nxge->instance;
139 	nxge->npi_reg_handle.function.function = nxge->function_num;
140 	nxge->npi_reg_handle.nxgep = (void *)nxge;
141 
142 	/* NPI_VREG_ADD_HANDLE_SET() */
143 	nxge->npi_vreg_handle.regh = regs->nxge_regh;
144 	nxge->npi_vreg_handle.regp = (npi_reg_ptr_t)regs->nxge_regp;
145 	nxge->npi_vreg_handle.is_vraddr = B_TRUE;
146 	nxge->npi_vreg_handle.function.instance = nxge->instance;
147 	nxge->npi_vreg_handle.function.function = nxge->function_num;
148 	nxge->npi_vreg_handle.nxgep = (void *)nxge;
149 
150 	regs->nxge_vir_regp = regs->nxge_regp;
151 	regs->nxge_vir_regh = regs->nxge_regh;
152 
153 	/*
154 	 * We do NOT set the PCI, MSI-X, 2nd Virtualization,
155 	 * or FCODE reg variables.
156 	 */
157 
158 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_guest_regs_map"));
159 
160 	return (NXGE_OK);
161 }
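
/*
 * Illustrative call pattern only (the real caller lives elsewhere in the
 * driver): a guest attach path would map the VR registers before touching
 * any DMA state, and unmap them again on detach, e.g.
 *
 *	if (nxge_guest_regs_map(nxge) != NXGE_OK)
 *		return (DDI_FAILURE);
 *	...
 *	nxge_guest_regs_map_free(nxge);
 */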
162 
163 void
164 nxge_guest_regs_map_free(
165 	nxge_t *nxge)
166 {
167 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_guest_regs_map_free"));
168 
169 	if (nxge->dev_regs) {
170 		if (nxge->dev_regs->nxge_regh) {
171 			NXGE_DEBUG_MSG((nxge, DDI_CTL,
172 			    "==> nxge_unmap_regs: device registers"));
173 			ddi_regs_map_free(&nxge->dev_regs->nxge_regh);
174 			nxge->dev_regs->nxge_regh = NULL;
175 		}
176 		kmem_free(nxge->dev_regs, sizeof (dev_regs_t));
177 		nxge->dev_regs = 0;
178 	}
179 
180 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_guest_regs_map_free"));
181 }
182 
183 #if defined(sun4v)
184 
185 /*
186  * -------------------------------------------------------------
187  * Local prototypes
188  * -------------------------------------------------------------
189  */
190 static nxge_hio_dc_t *nxge_guest_dc_alloc(
191 	nxge_t *, nxge_hio_vr_t *, nxge_grp_type_t);
192 
193 static void res_map_parse(nxge_t *, nxge_grp_type_t, uint64_t);
194 static void nxge_check_guest_state(nxge_hio_vr_t *);
195 
196 /*
197  * nxge_hio_vr_add
198  *
199  *	If we have been given a virtualization region (VR),
200  *	then initialize it.
201  *
202  * Arguments:
203  * 	nxge
204  *
205  * Notes:
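 *	Reads the hypervisor cookie from this node's "reg" property,
 *	claims a free VR slot in the shared nxge_hio_data_t, discovers
 *	which virtual Tx/Rx DMA channels the VR owns, and registers the
 *	resulting hybrid resource with vio_net.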
206  *
207  * Context:
208  *	Guest domain
209  */
210 /* ARGSUSED */
211 int
212 nxge_hio_vr_add(nxge_t *nxge)
213 {
214 	extern mac_callbacks_t nxge_m_callbacks;
215 
216 	nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
217 	nxge_hio_vr_t *vr;
218 	nxge_hio_dc_t *dc;
219 
220 	int *reg_val;
221 	uint_t reg_len;
222 	uint8_t vr_index;
223 
224 	nxhv_vr_fp_t *fp;
225 	uint64_t vr_address, vr_size;
226 	uint32_t cookie;
227 
228 	nxhv_dc_fp_t *tx, *rx;
229 	uint64_t tx_map, rx_map;
230 
231 	uint64_t hv_rv;
232 
233 	/* Variables needed to register with vnet. */
234 	mac_register_t *mac_info;
235 	ether_addr_t mac_addr;
236 	nx_vio_fp_t *vio;
237 
238 	int i;
239 
240 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_vr_add"));
241 
242 	/*
243 	 * Get our HV cookie.
244 	 */
245 	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, nxge->dip,
246 	    0, "reg", &reg_val, &reg_len) != DDI_PROP_SUCCESS) {
247 		NXGE_DEBUG_MSG((nxge, VPD_CTL, "`reg' property not found"));
248 		return (NXGE_ERROR);
249 	}
250 
251 	cookie = (uint32_t)(reg_val[0]);
252 	ddi_prop_free(reg_val);
253 
254 	fp = &nhd->hio.vr;
255 	hv_rv = (*fp->getinfo)(cookie, &vr_address, &vr_size);
256 	if (hv_rv != 0) {
257 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
258 		    "vr->getinfo() failed"));
259 		return (NXGE_ERROR);
260 	}
261 
262 	/*
263 	 * In the guest domain, we can use any VR data structure
264 	 * we want, because we're not supposed to know which VR
265 	 * the service domain has allocated to us.
266 	 *
267 	 * In the current version, the least significant nybble of
268 	 * the cookie is the VR region, but that could change
269 	 * very easily.
270 	 *
271 	 * In the future, a guest may have more than one VR allocated
272 	 * to it, which is why we go through this exercise.
273 	 */
274 	MUTEX_ENTER(&nhd->lock);
275 	for (vr_index = 0; vr_index < FUNC_VIR_MAX; vr_index++) {
276 		if (nhd->vr[vr_index].nxge == 0) {
277 			nhd->vr[vr_index].nxge = (uintptr_t)nxge;
278 			break;
279 		}
280 	}
281 	MUTEX_EXIT(&nhd->lock);
282 
283 	if (vr_index == FUNC_VIR_MAX) {
284 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_vr_add "
285 		    "no VRs available"));
286 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
287 		    "nxge_hio_vr_add(%d): cookie(0x%x)\n",
288 		    nxge->instance, cookie));
289 		return (NXGE_ERROR);
290 	}
291 
292 	vr = &nhd->vr[vr_index];
293 
294 	vr->nxge = (uintptr_t)nxge;
295 	vr->cookie = (uint32_t)cookie;
296 	vr->address = vr_address;
297 	vr->size = vr_size;
298 	vr->region = vr_index;
299 
300 	/*
301 	 * This is redundant data, but useful nonetheless.  It helps
302 	 * us to keep track of which RDCs & TDCs belong to us.
303 	 */
304 	if (nxge->tx_set.lg.count == 0)
305 		(void) nxge_grp_add(nxge, NXGE_TRANSMIT_GROUP);
306 	if (nxge->rx_set.lg.count == 0)
307 		(void) nxge_grp_add(nxge, NXGE_RECEIVE_GROUP);
308 
309 	/*
310 	 * See nxge_intr.c.
311 	 */
312 	if (nxge_hio_intr_init(nxge) != NXGE_OK) {
313 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
314 		    "nxge_hio_intr_init() failed"));
315 		return (NXGE_ERROR);
316 	}
317 
318 	/*
319 	 * Now we find out which RDCs & TDCs have been allocated to us.
320 	 */
321 	tx = &nhd->hio.tx;
322 	if (tx->get_map) {
323 		/*
324 		 * The map we get back is a bitmap of the
325 		 * virtual Tx DMA channels we own -
326 		 * they are NOT real channel numbers.
327 		 */
328 		hv_rv = (*tx->get_map)(vr->cookie, &tx_map);
329 		if (hv_rv != 0) {
330 			NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
331 			    "tx->get_map() failed"));
332 			return (NXGE_ERROR);
333 		}
334 		res_map_parse(nxge, NXGE_TRANSMIT_GROUP, tx_map);
335 
336 		/*
337 		 * For each channel, mark these two fields
338 		 * while we have the VR data structure.
339 		 */
340 		for (i = 0; i < VP_CHANNEL_MAX; i++) {
341 			if ((1 << i) & tx_map) {
342 				dc = nxge_guest_dc_alloc(nxge, vr,
343 				    NXGE_TRANSMIT_GROUP);
344 				if (dc == 0) {
345 					NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
346 					    "DC add failed"));
347 					return (NXGE_ERROR);
348 				}
349 				dc->channel = (nxge_channel_t)i;
350 			}
351 		}
352 	}
353 
354 	rx = &nhd->hio.rx;
355 	if (rx->get_map) {
356 		/*
357 		 * I repeat, the map we get back is a bitmap of
358 		 * the virtual Rx DMA channels we own -
359 		 * they are NOT real channel numbers.
360 		 */
361 		hv_rv = (*rx->get_map)(vr->cookie, &rx_map);
362 		if (hv_rv != 0) {
363 			NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
364 			    "rx->get_map() failed"));
365 			return (NXGE_ERROR);
366 		}
367 		res_map_parse(nxge, NXGE_RECEIVE_GROUP, rx_map);
368 
369 		/*
370 		 * For each channel, mark these two fields
371 		 * while we have the VR data structure.
372 		 */
373 		for (i = 0; i < VP_CHANNEL_MAX; i++) {
374 			if ((1 << i) & rx_map) {
375 				dc = nxge_guest_dc_alloc(nxge, vr,
376 				    NXGE_RECEIVE_GROUP);
377 				if (dc == 0) {
378 					NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
379 					    "DC add failed"));
380 					return (NXGE_ERROR);
381 				}
382 				dc->channel = (nxge_channel_t)i;
383 			}
384 		}
385 	}
386 
387 	/*
388 	 * Register with vnet.
389 	 */
390 	if ((mac_info = mac_alloc(MAC_VERSION)) == NULL)
391 		return (NXGE_ERROR);
392 
393 	mac_info->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
394 	mac_info->m_driver = nxge;
395 	mac_info->m_dip = nxge->dip;
396 	mac_info->m_src_addr = KMEM_ZALLOC(MAXMACADDRLEN, KM_SLEEP);
397 	mac_info->m_dst_addr = KMEM_ZALLOC(MAXMACADDRLEN, KM_SLEEP);
398 	(void) memset(mac_info->m_src_addr, 0xff, MAXMACADDRLEN);
399 	mac_info->m_callbacks = &nxge_m_callbacks;
400 	mac_info->m_min_sdu = 0;
401 	mac_info->m_max_sdu = NXGE_MTU_DEFAULT_MAX -
402 	    sizeof (struct ether_header) - ETHERFCSL - 4;
403 
404 	(void) memset(&mac_addr, 0xff, sizeof (mac_addr));
405 
406 	/* Register with vio_net. */
407 	vio = &nhd->hio.vio;
408 	if ((*vio->__register)(mac_info, VIO_NET_RES_HYBRID,
409 	    nxge->hio_mac_addr, mac_addr, &vr->vhp, &vio->cb)) {
410 		NXGE_DEBUG_MSG((nxge, HIO_CTL, "HIO registration failed"));
411 		KMEM_FREE(mac_info->m_src_addr, MAXMACADDRLEN);
412 		KMEM_FREE(mac_info->m_dst_addr, MAXMACADDRLEN);
413 		mac_free(mac_info);
414 		return (NXGE_ERROR);
415 	}
416 
417 	KMEM_FREE(mac_info->m_src_addr, MAXMACADDRLEN);
418 	KMEM_FREE(mac_info->m_dst_addr, MAXMACADDRLEN);
419 	mac_free(mac_info);
420 
421 	nxge->hio_vr = vr;	/* For faster lookups. */
422 
423 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_vr_add"));
424 
425 	return (NXGE_OK);
426 }
427 
428 /*
429  * nxge_guest_dc_alloc
430  *
431  *	Find a free nxge_hio_dc_t data structure.
432  *
433  * Arguments:
434  * 	nxge
 * 	vr	The VR to which the allocated channel will belong.
435  * 	type	TRANSMIT or RECEIVE.
436  *
437  * Notes:
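 *	The <nhd> rdc/tdc tables are shared by every VR in this guest,
 *	so the search is done under <nhd>'s lock.  Returns 0 (NULL) if
 *	no free entry is available.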
438  *
439  * Context:
440  *	Guest domain
441  */
442 nxge_hio_dc_t *
443 nxge_guest_dc_alloc(
444 	nxge_t *nxge,
445 	nxge_hio_vr_t *vr,
446 	nxge_grp_type_t type)
447 {
448 	nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
449 	nxge_hio_dc_t *dc;
450 	int limit, i;
451 
452 	/*
453 	 * In the guest domain, there may be more than one VR,
454 	 * each one of which will be using the same slots, or
455 	 * virtual channel numbers.  So the <nhd>'s rdc & tdc
456 	 * tables must be shared.
457 	 */
458 	if (type == NXGE_TRANSMIT_GROUP) {
459 		dc = &nhd->tdc[0];
460 		limit = NXGE_MAX_TDCS;
461 	} else {
462 		dc = &nhd->rdc[0];
463 		limit = NXGE_MAX_RDCS;
464 	}
465 
466 	MUTEX_ENTER(&nhd->lock);
467 	for (i = 0; i < limit; i++, dc++) {
468 		if (dc->vr == 0) {
469 			dc->vr = vr;
470 			dc->cookie = vr->cookie;
471 			MUTEX_EXIT(&nhd->lock);
472 			return (dc);
473 		}
474 	}
475 	MUTEX_EXIT(&nhd->lock);
476 
477 	return (0);
478 }
479 
480 /*
481  * res_map_parse
482  *
483  *	Parse a resource map.  The resources are DMA channels, receive
484  *	or transmit, depending on <type>.
485  *
486  * Arguments:
487  * 	nxge
488  * 	type	Transmit or receive.
489  *	res_map	The resource map to parse.
490  *
491  * Notes:
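 *	Only the low 8 bits of <res_map> are examined; each set bit is
 *	a virtual channel (slot) owned by this guest, not a physical
 *	channel number.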
492  *
493  * Context:
494  *	Guest domain
495  */
496 void
497 res_map_parse(
498 	nxge_t *nxge,
499 	nxge_grp_type_t type,
500 	uint64_t res_map)
501 {
502 	uint8_t slots, mask, slot;
503 	int first = 0, count;
504 
505 	nxge_hw_pt_cfg_t *hardware;
506 	nxge_grp_t *group;
507 
508 	/* Slots are numbered 0 - 7. */
509 	slots = (uint8_t)(res_map & 0xff);
510 
511 	/* Count the set bits in the bitmap and note the first set slot. */
512 	for (slot = 0, count = 0, mask = 1; slot < 8; slot++) {
513 		if (slots & mask)
514 			count++;
515 		if (count == 1)
516 			first = slot;
517 		mask <<= 1;
518 	}
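	/*
	 * For example, a res_map of 0x18 (slots 3 and 4 set) yields
	 * first = 3 and count = 2.
	 */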
519 
520 	hardware = &nxge->pt_config.hw_config;
521 	group = (type == NXGE_TRANSMIT_GROUP) ?
522 	    nxge->tx_set.group[0] : nxge->rx_set.group[0];
523 
524 	/*
525 	 * A guest domain has one Tx & one Rx group, so far.
526 	 * In the future, there may be more than one.
527 	 */
528 	if (type == NXGE_TRANSMIT_GROUP) {
529 		nxge_dma_pt_cfg_t *port = &nxge->pt_config;
530 		nxge_tdc_grp_t *tdc_grp = &nxge->pt_config.tdc_grps[0];
531 
532 		hardware->tdc.start = first;
533 		hardware->tdc.count = count;
534 		hardware->tdc.owned = count;
535 
536 		tdc_grp->start_tdc = first;
537 		tdc_grp->max_tdcs = (uint8_t)count;
538 		tdc_grp->grp_index = group->index;
539 		tdc_grp->map = slots;
540 
541 		group->map = slots;
542 
543 		/*
544 		 * Pointless in a guest domain.  This bitmap is used
545 		 * in only one place: nxge_txc_init(),
546 		 * a service-domain-only function.
547 		 */
548 		port->tx_dma_map = slots;
549 
550 		nxge->tx_set.owned.map |= slots;
551 	} else {
552 		nxge_rdc_grp_t *rdc_grp = &nxge->pt_config.rdc_grps[0];
553 
554 		hardware->start_rdc = first;
555 		hardware->max_rdcs = count;
556 
557 		rdc_grp->start_rdc = (uint8_t)first;
558 		rdc_grp->max_rdcs = (uint8_t)count;
559 		rdc_grp->def_rdc = (uint8_t)first;
560 
561 		rdc_grp->map = slots;
562 		group->map = slots;
563 
564 		nxge->rx_set.owned.map |= slots;
565 	}
566 }
567 
568 /*
569  * nxge_hio_vr_release
570  *
571  *	Release a virtualization region (VR).
572  *
573  * Arguments:
574  * 	nxge
575  *
576  * Notes:
577  *	We must uninitialize all DMA channels associated with the VR, too.
578  *
579  *	The service domain will re-initialize these DMA channels later.
580  *	See nxge_hio.c:nxge_hio_share_free() for details.
581  *
582  * Context:
583  *	Guest domain
584  */
585 int
586 nxge_hio_vr_release(nxge_t *nxge)
587 {
588 	nxge_hio_data_t	*nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
589 	int		vr_index;
590 
591 	NXGE_DEBUG_MSG((nxge, MEM2_CTL, "==> nxge_hio_vr_release"));
592 
593 	if (nxge->hio_vr == NULL) {
594 		return (NXGE_OK);
595 	}
596 
597 	/*
598 	 * Uninitialize interrupts.
599 	 */
600 	nxge_hio_intr_uninit(nxge);
601 
602 	/*
603 	 * Uninitialize the receive DMA channels.
604 	 */
605 	nxge_uninit_rxdma_channels(nxge);
606 
607 	/*
608 	 * Uninitialize the transmit DMA channels.
609 	 */
610 	nxge_uninit_txdma_channels(nxge);
611 
612 	/*
613 	 * Remove both groups. Assumption: only two groups!
614 	 */
615 	if (nxge->rx_set.group[0] != NULL)
616 		nxge_grp_remove(nxge, nxge->rx_set.group[0]);
617 	if (nxge->tx_set.group[0] != NULL)
618 		nxge_grp_remove(nxge, nxge->tx_set.group[0]);
619 
620 	NXGE_DEBUG_MSG((nxge, MEM2_CTL, "<== nxge_hio_vr_release"));
621 
622 	/*
623 	 * Clean up.
624 	 */
625 	MUTEX_ENTER(&nhd->lock);
626 	for (vr_index = 0; vr_index < FUNC_VIR_MAX; vr_index++) {
627 		if (nhd->vr[vr_index].nxge == (uintptr_t)nxge) {
628 			nhd->vr[vr_index].nxge = NULL;
629 			break;
630 		}
631 	}
632 	MUTEX_EXIT(&nhd->lock);
633 
634 	return (NXGE_OK);
635 }
636 
637 #if defined(NIU_LP_WORKAROUND)
638 /*
639  * nxge_tdc_lp_conf
640  *
641  *	Configure the logical pages for a TDC.
642  *
643  * Arguments:
644  * 	nxge
645  * 	channel	The TDC to configure.
646  *
647  * Notes:
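 *	Two logical pages are programmed through the hypervisor for this
 *	channel: page 0 maps the transmit data buffers and page 1 maps
 *	the control buffers.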
648  *
649  * Context:
650  *	Guest domain
651  */
652 nxge_status_t
653 nxge_tdc_lp_conf(
654 	p_nxge_t nxge,
655 	int channel)
656 {
657 	nxge_hio_dc_t		*dc;
658 	nxge_dma_common_t	*data;
659 	nxge_dma_common_t	*control;
660 	tx_ring_t 		*ring;
661 
662 	uint64_t		hv_rv;
663 	uint64_t		ra, size;
664 
665 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_tdc_lp_conf"));
666 
667 	ring = nxge->tx_rings->rings[channel];
668 
669 	if (ring->hv_set) {
670 		/* This shouldn't happen. */
671 		return (NXGE_OK);
672 	}
673 
674 	if (!(dc = nxge_grp_dc_find(nxge, VP_BOUND_TX, channel)))
675 		return (NXGE_ERROR);
676 
677 	/*
678 	 * Initialize logical page 0 for data buffers.
679 	 *
680 	 * <orig_ioaddr_pp> & <orig_alength> are initialized in
681 	 * nxge_main.c:nxge_dma_mem_alloc().
682 	 */
683 	data = nxge->tx_buf_pool_p->dma_buf_pool_p[channel];
684 	ring->hv_tx_buf_base_ioaddr_pp = (uint64_t)data->orig_ioaddr_pp;
685 	ring->hv_tx_buf_ioaddr_size = (uint64_t)data->orig_alength;
686 
687 	hv_rv = hv_niu_vrtx_logical_page_conf(dc->cookie,
688 	    (uint64_t)channel, 0,
689 	    ring->hv_tx_buf_base_ioaddr_pp,
690 	    ring->hv_tx_buf_ioaddr_size);
691 
692 	if (hv_rv != 0) {
693 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
694 		    "<== nxge_tdc_lp_conf: channel %d "
695 		    "(page 0 data buf) hv_rv 0x%llx "
696 		    "ioaddr_pp $%p size 0x%llx ",
697 		    channel, hv_rv,
698 		    ring->hv_tx_buf_base_ioaddr_pp,
699 		    ring->hv_tx_buf_ioaddr_size));
700 		return (NXGE_ERROR | hv_rv);
701 	}
702 
703 	ra = size = 0;
704 	hv_rv = hv_niu_vrtx_logical_page_info(dc->cookie,
705 	    (uint64_t)channel, 0, &ra, &size);
706 
707 	NXGE_DEBUG_MSG((nxge, HIO_CTL,
708 	    "==> nxge_tdc_lp_conf: channel %d "
709 	    "(page 0 data buf) hv_rv 0x%llx "
710 	    "set ioaddr_pp $%p set size 0x%llx "
711 	    "get ra ioaddr_pp $%p get size 0x%llx ",
712 	    channel, hv_rv, ring->hv_tx_buf_base_ioaddr_pp,
713 	    ring->hv_tx_buf_ioaddr_size, ra, size));
714 
715 	/*
716 	 * Initialize logical page 1 for control buffers.
717 	 */
718 	control = nxge->tx_cntl_pool_p->dma_buf_pool_p[channel];
719 	ring->hv_tx_cntl_base_ioaddr_pp = (uint64_t)control->orig_ioaddr_pp;
720 	ring->hv_tx_cntl_ioaddr_size = (uint64_t)control->orig_alength;
721 
722 	hv_rv = hv_niu_vrtx_logical_page_conf(dc->cookie,
723 	    (uint64_t)channel, (uint64_t)1,
724 	    ring->hv_tx_cntl_base_ioaddr_pp,
725 	    ring->hv_tx_cntl_ioaddr_size);
726 
727 	if (hv_rv != 0) {
728 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
729 		    "<== nxge_tdc_lp_conf: channel %d "
730 		    "(page 1 cntl buf) hv_rv 0x%llx "
731 		    "ioaddr_pp $%p size 0x%llx ",
732 		    channel, hv_rv,
733 		    ring->hv_tx_cntl_base_ioaddr_pp,
734 		    ring->hv_tx_cntl_ioaddr_size));
735 		return (NXGE_ERROR | hv_rv);
736 	}
737 
738 	ra = size = 0;
739 	hv_rv = hv_niu_vrtx_logical_page_info(dc->cookie,
740 	    (uint64_t)channel, (uint64_t)1, &ra, &size);
741 
742 	NXGE_DEBUG_MSG((nxge, HIO_CTL,
743 	    "==> nxge_tdc_lp_conf: channel %d "
744 	    "(page 1 cntl buf) hv_rv 0x%llx "
745 	    "set ioaddr_pp $%p set size 0x%llx "
746 	    "get ra ioaddr_pp $%p get size 0x%llx ",
747 	    channel, hv_rv, ring->hv_tx_cntl_base_ioaddr_pp,
748 	    ring->hv_tx_cntl_ioaddr_size, ra, size));
749 
750 	ring->hv_set = B_TRUE;
751 
752 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_tdc_lp_conf"));
753 
754 	return (NXGE_OK);
755 }
756 
757 /*
758  * nxge_rdc_lp_conf
759  *
760  *	Configure an RDC's logical pages.
761  *
762  * Arguments:
763  * 	nxge
764  * 	channel	The RDC to configure.
765  *
766  * Notes:
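 *	Same logical page layout as nxge_tdc_lp_conf(): page 0 for the
 *	receive data buffers, page 1 for the control buffers.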
767  *
768  * Context:
769  *	Guest domain
770  */
771 nxge_status_t
772 nxge_rdc_lp_conf(
773 	p_nxge_t nxge,
774 	int channel)
775 {
776 	nxge_hio_dc_t		*dc;
777 	nxge_dma_common_t	*data;
778 	nxge_dma_common_t	*control;
779 	rx_rbr_ring_t		*ring;
780 
781 	uint64_t		hv_rv;
782 	uint64_t		ra, size;
783 
784 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_rdc_lp_conf"));
785 
786 	ring = nxge->rx_rbr_rings->rbr_rings[channel];
787 
788 	if (ring->hv_set) {
789 		return (NXGE_OK);
790 	}
791 
792 	if (!(dc = nxge_grp_dc_find(nxge, VP_BOUND_RX, channel)))
793 		return (NXGE_ERROR);
794 
795 	/*
796 	 * Initialize logical page 0 for data buffers.
797 	 *
798 	 * <orig_ioaddr_pp> & <orig_alength> are initialized in
799 	 * nxge_main.c:nxge_dma_mem_alloc().
800 	 */
801 	data = nxge->rx_buf_pool_p->dma_buf_pool_p[channel];
802 	ring->hv_rx_buf_base_ioaddr_pp = (uint64_t)data->orig_ioaddr_pp;
803 	ring->hv_rx_buf_ioaddr_size = (uint64_t)data->orig_alength;
804 
805 	hv_rv = hv_niu_vrrx_logical_page_conf(dc->cookie,
806 	    (uint64_t)channel, 0,
807 	    ring->hv_rx_buf_base_ioaddr_pp,
808 	    ring->hv_rx_buf_ioaddr_size);
809 
810 	if (hv_rv != 0) {
811 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
812 		    "<== nxge_rdc_lp_conf: channel %d "
813 		    "(page 0 data buf) hv_rv 0x%llx "
814 		    "ioaddr_pp $%p size 0x%llx ",
815 		    channel, hv_rv,
816 		    ring->hv_rx_buf_base_ioaddr_pp,
817 		    ring->hv_rx_buf_ioaddr_size));
818 		return (NXGE_ERROR | hv_rv);
819 	}
820 
821 	ra = size = 0;
822 	hv_rv = hv_niu_vrrx_logical_page_info(dc->cookie,
823 	    (uint64_t)channel, 0, &ra, &size);
824 
825 	NXGE_DEBUG_MSG((nxge, HIO_CTL,
826 	    "==> nxge_rdc_lp_conf: channel %d "
827 	    "(page 0 data buf) hv_rv 0x%llx "
828 	    "set ioaddr_pp $%p set size 0x%llx "
829 	    "get ra ioaddr_pp $%p get size 0x%llx ",
830 	    channel, hv_rv, ring->hv_rx_buf_base_ioaddr_pp,
831 	    ring->hv_rx_buf_ioaddr_size, ra, size));
832 
833 	/*
834 	 * Initialize logical page 1 for control buffers.
835 	 */
836 	control = nxge->rx_cntl_pool_p->dma_buf_pool_p[channel];
837 	ring->hv_rx_cntl_base_ioaddr_pp = (uint64_t)control->orig_ioaddr_pp;
838 	ring->hv_rx_cntl_ioaddr_size = (uint64_t)control->orig_alength;
839 
840 	hv_rv = hv_niu_vrrx_logical_page_conf(dc->cookie,
841 	    (uint64_t)channel, (uint64_t)1,
842 	    ring->hv_rx_cntl_base_ioaddr_pp,
843 	    ring->hv_rx_cntl_ioaddr_size);
844 
845 	if (hv_rv != 0) {
846 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
847 		    "<== nxge_rdc_lp_conf: channel %d "
848 		    "(page 1 cntl buf) hv_rv 0x%llx "
849 		    "ioaddr_pp $%p size 0x%llx ",
850 		    channel, hv_rv,
851 		    ring->hv_rx_cntl_base_ioaddr_pp,
852 		    ring->hv_rx_cntl_ioaddr_size));
853 		return (NXGE_ERROR | hv_rv);
854 	}
855 
856 	ra = size = 0;
857 	hv_rv = hv_niu_vrrx_logical_page_info(dc->cookie,
858 	    (uint64_t)channel, (uint64_t)1, &ra, &size);
859 
860 	NXGE_DEBUG_MSG((nxge, HIO_CTL,
861 	    "==> nxge_rdc_lp_conf: channel %d "
862 	    "(page 1 cntl buf) hv_rv 0x%llx "
863 	    "set ioaddr_pp $%p set size 0x%llx "
864 	    "get ra ioaddr_pp $%p get size 0x%llx ",
865 	    channel, hv_rv, ring->hv_rx_cntl_base_ioaddr_pp,
866 	    ring->hv_rx_cntl_ioaddr_size, ra, size));
867 
868 	ring->hv_set = B_TRUE;
869 
870 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_rdc_lp_conf"));
871 
872 	return (NXGE_OK);
873 }
874 #endif	/* defined(NIU_LP_WORKAROUND) */
875 
876 /*
877  * This value is in milliseconds.
878  */
879 #define	NXGE_GUEST_TIMER	500 /* 1/2 second, for now */
880 
881 /*
882  * nxge_hio_start_timer
883  *
884  *	Start the timer which checks for Tx hangs.
885  *
886  * Arguments:
887  * 	nxge
888  *
889  * Notes:
890  *	This function is called from nxge_attach().
891  *
892  *	This function kicks off the guest domain equivalent of
893  *	nxge_check_hw_state().  It is called only once, from attach.
894  *
895  * Context:
896  *	Guest domain
897  */
898 void
899 nxge_hio_start_timer(
900 	nxge_t *nxge)
901 {
902 	nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
903 	nxge_hio_vr_t *vr;
904 	int region;
905 
906 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_start_timer"));
907 
908 	MUTEX_ENTER(&nhd->lock);
909 
910 	/*
911 	 * Find our VR data structure.  (We are currently assuming
912 	 * one VR per guest domain.  That may change in the future.)
913 	 */
914 	for (region = FUNC0_VIR0; region < NXGE_VR_SR_MAX; region++) {
915 		if (nhd->vr[region].nxge == (uintptr_t)nxge)
916 			break;
917 	}
918 
919 	MUTEX_EXIT(&nhd->lock);
920 
921 	if (region == NXGE_VR_SR_MAX) {
922 		return;
923 	}
924 
925 	vr = (nxge_hio_vr_t *)&nhd->vr[region];
926 
927 	nxge->nxge_timerid = timeout((void(*)(void *))nxge_check_guest_state,
928 	    (void *)vr, drv_usectohz(1000 * NXGE_GUEST_TIMER));
929 
930 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_start_timer"));
931 }
932 
933 /*
934  * nxge_check_guest_state
935  *
936  *	Essentially, check for Tx hangs.  In the future, if we are
937  *	polling the hardware, we may do so here.
938  *
939  * Arguments:
940  * 	vr	The virtualization region (VR) data structure.
941  *
942  * Notes:
943  *	This function is the guest domain equivalent of
944  *	nxge_check_hw_state().  Since we have no hardware to
945  * 	check, we simply call nxge_check_tx_hang().
946  *
947  * Context:
948  *	Guest domain
949  */
950 void
951 nxge_check_guest_state(
952 	nxge_hio_vr_t *vr)
953 {
954 	nxge_t *nxge = (nxge_t *)vr->nxge;
955 
956 	NXGE_DEBUG_MSG((nxge, SYSERR_CTL, "==> nxge_check_guest_state"));
957 
958 	MUTEX_ENTER(nxge->genlock);
959 	nxge->nxge_timerid = 0;
960 
961 	if (nxge->nxge_mac_state == NXGE_MAC_STARTED) {
962 		nxge_check_tx_hang(nxge);
963 
964 		nxge->nxge_timerid = timeout((void(*)(void *))
965 		    nxge_check_guest_state, (caddr_t)vr,
966 		    drv_usectohz(1000 * NXGE_GUEST_TIMER));
967 	}
968 
969 nxge_check_guest_state_exit:
970 	MUTEX_EXIT(nxge->genlock);
971 	NXGE_DEBUG_MSG((nxge, SYSERR_CTL, "<== nxge_check_guest_state"));
972 }
973 
974 nxge_status_t
975 nxge_hio_rdc_intr_arm(p_nxge_t nxge, boolean_t arm)
976 {
977 	nxge_grp_t	*group;
978 	uint32_t	channel;
979 	nxge_hio_dc_t	*dc;
980 	nxge_ldg_t	*ldgp;
981 
982 	/*
983 	 * Validate the state of the guest interface before
984 	 * proceeding.
985 	 */
986 	if (!isLDOMguest(nxge))
987 		return (NXGE_ERROR);
988 	if (nxge->nxge_mac_state != NXGE_MAC_STARTED)
989 		return (NXGE_ERROR);
990 
991 	/*
992 	 * In a guest domain, we only ever deal with group 0
993 	 * for an instance of nxge.
994 	 */
995 	group = nxge->rx_set.group[0];
996 
997 	/*
998 	 * Arm or disarm the RDCs for the group.
999 	 */
1000 	for (channel = 0; channel < NXGE_MAX_RDCS; channel++) {
1001 		if ((1 << channel) & group->map) {
1002 			/*
1003 			 * Get the RDC.
1004 			 */
1005 			dc = nxge_grp_dc_find(nxge, VP_BOUND_RX, channel);
1006 			if (dc == NULL)
1007 				return (NXGE_ERROR);
1008 
1009 			/*
1010 			 * Get the RDC's ldg group.
1011 			 */
1012 			ldgp = &nxge->ldgvp->ldgp[dc->ldg.vector];
1013 			if (ldgp == NULL)
1014 				return (NXGE_ERROR);
1015 
1016 			/*
1017 			 * Set the state of the group.
1018 			 */
1019 			ldgp->arm = arm;
1020 
1021 			nxge_hio_ldgimgn(nxge, ldgp);
1022 		}
1023 	}
1024 
1025 	return (NXGE_OK);
1026 }
1027 
1028 nxge_status_t
1029 nxge_hio_rdc_enable(p_nxge_t nxge)
1030 {
1031 	nxge_grp_t	*group;
1032 	npi_handle_t	handle;
1033 	uint32_t	channel;
1034 	npi_status_t	rval;
1035 
1036 	/*
1037 	 * Validate the state of the guest interface before
1038 	 * proceeding.
1039 	 */
1040 	if (!isLDOMguest(nxge))
1041 		return (NXGE_ERROR);
1042 	if (nxge->nxge_mac_state != NXGE_MAC_STARTED)
1043 		return (NXGE_ERROR);
1044 
1045 	/*
1046 	 * In a guest domain, we only ever deal with group 0
1047 	 * for an instance of nxge.
1048 	 */
1049 	group = nxge->rx_set.group[0];
1050 
1051 	/*
1052 	 * Get the PIO handle.
1053 	 */
1054 	handle = NXGE_DEV_NPI_HANDLE(nxge);
1055 
1056 	for (channel = 0; channel < NXGE_MAX_RDCS; channel++) {
1057 		/*
1058 		 * If this channel is in the map, then enable
1059 		 * it.
1060 		 */
1061 		if ((1 << channel) & group->map) {
1062 			/*
1063 			 * Enable the RDC and clear the empty bit.
1064 			 */
1065 			rval = npi_rxdma_cfg_rdc_enable(handle, channel);
1066 			if (rval != NPI_SUCCESS)
1067 				return (NXGE_ERROR);
1068 
1069 			(void) npi_rxdma_channel_rbr_empty_clear(handle,
1070 			    channel);
1071 		}
1072 	}
1073 
1074 	return (NXGE_OK);
1075 }
1076 #endif	/* defined(sun4v) */
1077