/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * nxge_hio_guest.c
 *
 * This file manages the virtualization resources for a guest domain.
 */

#include <sys/nxge/nxge_impl.h>
#include <sys/nxge/nxge_fzc.h>
#include <sys/nxge/nxge_rxdma.h>
#include <sys/nxge/nxge_txdma.h>
#include <sys/nxge/nxge_hio.h>

/*
 * nxge_guest_regs_map
 *
 *	Map in a guest domain's register set(s).
 *
 * Arguments:
 * 	nxge
 *
 * Notes:
 *	All NPI handles are created with <is_vraddr> set to B_TRUE,
 *	since a guest domain sees only virtual region (VR) addresses.
 *
 * Context:
 *	Guest domain
 */
static ddi_device_acc_attr_t nxge_guest_register_access_attributes = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC,
};

int
nxge_guest_regs_map(nxge_t *nxge)
{
	dev_regs_t	*regs;
	off_t		regsize;
	int		rv;

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_guest_regs_map"));

	/* So we can allocate properly-aligned memory. */
	nxge->niu_type = N2_NIU; /* Version 1.0 only */
	nxge->function_num = nxge->instance; /* HIOXXX Looking for ideas. */

	nxge->dev_regs = KMEM_ZALLOC(sizeof (dev_regs_t), KM_SLEEP);
	regs = nxge->dev_regs;

	if ((rv = ddi_dev_regsize(nxge->dip, 0, &regsize)) != DDI_SUCCESS) {
		NXGE_ERROR_MSG((nxge, HIO_CTL, "ddi_dev_regsize() failed"));
		kmem_free(nxge->dev_regs, sizeof (dev_regs_t));
		nxge->dev_regs = NULL;
		return (NXGE_ERROR);
	}

	rv = ddi_regs_map_setup(nxge->dip, 0, (caddr_t *)&regs->nxge_regp, 0, 0,
	    &nxge_guest_register_access_attributes, &regs->nxge_regh);

	if (rv != DDI_SUCCESS) {
		NXGE_ERROR_MSG((nxge, HIO_CTL, "ddi_regs_map_setup() failed"));
		kmem_free(nxge->dev_regs, sizeof (dev_regs_t));
		nxge->dev_regs = NULL;
		return (NXGE_ERROR);
	}

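	/*
	 * All three NPI handles below alias the same mapped register
	 * set; a guest domain maps only its VR's registers.
	 */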
	nxge->npi_handle.regh = regs->nxge_regh;
	nxge->npi_handle.regp = (npi_reg_ptr_t)regs->nxge_regp;
	nxge->npi_handle.is_vraddr = B_TRUE;
	nxge->npi_handle.function.instance = nxge->instance;
	nxge->npi_handle.function.function = nxge->function_num;
	nxge->npi_handle.nxgep = (void *)nxge;

	/* NPI_REG_ADD_HANDLE_SET() */
	nxge->npi_reg_handle.regh = regs->nxge_regh;
	nxge->npi_reg_handle.regp = (npi_reg_ptr_t)regs->nxge_regp;
	nxge->npi_reg_handle.is_vraddr = B_TRUE;
	nxge->npi_reg_handle.function.instance = nxge->instance;
	nxge->npi_reg_handle.function.function = nxge->function_num;
	nxge->npi_reg_handle.nxgep = (void *)nxge;

	/* NPI_VREG_ADD_HANDLE_SET() */
	nxge->npi_vreg_handle.regh = regs->nxge_regh;
	nxge->npi_vreg_handle.regp = (npi_reg_ptr_t)regs->nxge_regp;
	nxge->npi_vreg_handle.is_vraddr = B_TRUE;
	nxge->npi_vreg_handle.function.instance = nxge->instance;
	nxge->npi_vreg_handle.function.function = nxge->function_num;
	nxge->npi_vreg_handle.nxgep = (void *)nxge;

	regs->nxge_vir_regp = regs->nxge_regp;
	regs->nxge_vir_regh = regs->nxge_regh;

	/*
	 * We do NOT set the PCI, MSI-X, 2nd Virtualization,
	 * or FCODE reg variables.
	 */

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_guest_regs_map"));

	return (NXGE_OK);
}

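/*
 * nxge_guest_regs_map_free
 *
 *	Unmap a guest domain's register set and free <dev_regs>.
 *
 * Arguments:
 * 	nxge
 *
 * Context:
 *	Guest domain
 */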
void
nxge_guest_regs_map_free(
	nxge_t *nxge)
{
	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_guest_regs_map_free"));

	if (nxge->dev_regs) {
		if (nxge->dev_regs->nxge_regh) {
			NXGE_DEBUG_MSG((nxge, DDI_CTL,
			    "==> nxge_guest_regs_map_free: device registers"));
			ddi_regs_map_free(&nxge->dev_regs->nxge_regh);
			nxge->dev_regs->nxge_regh = NULL;
		}
		kmem_free(nxge->dev_regs, sizeof (dev_regs_t));
		nxge->dev_regs = NULL;
	}

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_guest_regs_map_free"));
}

#if defined(sun4v)

/*
 * -------------------------------------------------------------
 * Local prototypes
 * -------------------------------------------------------------
 */
static nxge_hio_dc_t *nxge_guest_dc_alloc(
	nxge_t *, nxge_hio_vr_t *, nxge_grp_type_t);

static void res_map_parse(nxge_t *, nxge_grp_type_t, uint64_t);
static void nxge_check_guest_state(nxge_hio_vr_t *);

/*
 * nxge_hio_vr_add
 *
 *	If we have been given a virtualization region (VR),
 *	then initialize it.
 *
 * Arguments:
 * 	nxge
 *
 * Notes:
 *
 * Context:
 *	Guest domain
 */
int
nxge_hio_vr_add(nxge_t *nxge)
{
	extern nxge_status_t	nxge_mac_register(p_nxge_t);

	nxge_hio_data_t		*nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
	nxge_hio_vr_t		*vr;
	nxge_hio_dc_t		*dc;
	int			*reg_val;
	uint_t			reg_len;
	uint8_t			vr_index;
	nxhv_vr_fp_t		*fp;
	uint64_t		vr_address, vr_size;
	uint32_t		cookie;
	nxhv_dc_fp_t		*tx, *rx;
	uint64_t		tx_map, rx_map;
	uint64_t		hv_rv;
	int			i;
	nxge_status_t		status;

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_vr_add"));

	if (nhd->type == NXGE_HIO_TYPE_SERVICE) {
		/*
		 * Only a guest domain may add a VR; the service domain
		 * that exported it must not call this function.  The
		 * ASSERT below fires deliberately in DEBUG builds.
		 */
		ASSERT(nhd->type == NXGE_HIO_TYPE_GUEST);
		return (DDI_FAILURE);
	}

	/*
	 * Get our HV cookie.
	 */
	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, nxge->dip,
	    0, "reg", &reg_val, &reg_len) != DDI_PROP_SUCCESS) {
		NXGE_DEBUG_MSG((nxge, VPD_CTL, "`reg' property not found"));
		return (DDI_FAILURE);
	}

	cookie = (uint32_t)(reg_val[0]);
	ddi_prop_free(reg_val);

	fp = &nhd->hio.vr;
	hv_rv = (*fp->getinfo)(cookie, &vr_address, &vr_size);
	if (hv_rv != 0) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
		    "vr->getinfo() failed"));
		return (DDI_FAILURE);
	}

	/*
	 * In the guest domain, we can use any VR data structure
	 * we want, because we're not supposed to know which VR
	 * the service domain has allocated to us.
	 *
	 * In the current version, the least significant nybble of
	 * the cookie is the VR region, but that could change
	 * very easily.
	 *
	 * In the future, a guest may have more than one VR allocated
	 * to it, which is why we go through this exercise.
	 */
	MUTEX_ENTER(&nhd->lock);
	for (vr_index = 0; vr_index < FUNC_VIR_MAX; vr_index++) {
		if (nhd->vr[vr_index].nxge == 0) {
			nhd->vr[vr_index].nxge = (uintptr_t)nxge;
			break;
		}
	}
	MUTEX_EXIT(&nhd->lock);

	if (vr_index == FUNC_VIR_MAX) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_vr_add "
		    "no VRs available"));
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
		    "nxge_hio_vr_add(%d): cookie(0x%x)\n",
		    nxge->instance, cookie));
		return (DDI_FAILURE);
	}

	vr = &nhd->vr[vr_index];

	vr->nxge = (uintptr_t)nxge;
	vr->cookie = (uint32_t)cookie;
	vr->address = vr_address;
	vr->size = vr_size;
	vr->region = vr_index;

	/*
	 * This is redundant data, but useful nonetheless.  It helps
	 * us to keep track of which RDCs & TDCs belong to us.
	 */
	if (nxge->tx_set.lg.count == 0)
		(void) nxge_grp_add(nxge, NXGE_TRANSMIT_GROUP);
	if (nxge->rx_set.lg.count == 0)
		(void) nxge_grp_add(nxge, NXGE_RECEIVE_GROUP);

	/*
	 * See nxge_intr.c.
	 */
	if (nxge_hio_intr_init(nxge) != NXGE_OK) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
		    "nxge_hio_intr_init() failed"));
		return (DDI_FAILURE);
	}

	/*
	 * Now we find out which RDCs & TDCs have been allocated to us.
	 */
	tx = &nhd->hio.tx;
	if (tx->get_map) {
		/*
		 * The map we get back is a bitmap of the
		 * virtual Tx DMA channels we own -
		 * they are NOT real channel numbers.
		 */
		hv_rv = (*tx->get_map)(vr->cookie, &tx_map);
		if (hv_rv != 0) {
			NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
			    "tx->get_map() failed"));
			return (DDI_FAILURE);
		}
		res_map_parse(nxge, NXGE_TRANSMIT_GROUP, tx_map);

		/*
		 * For each channel in the map, allocate a DC and
		 * record its virtual channel number while we still
		 * have the VR data structure at hand.
		 */
		for (i = 0; i < VP_CHANNEL_MAX; i++) {
			if ((1 << i) & tx_map) {
				dc = nxge_guest_dc_alloc(nxge, vr,
				    NXGE_TRANSMIT_GROUP);
				if (dc == NULL) {
					NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
					    "DC add failed"));
					return (DDI_FAILURE);
				}
				dc->channel = (nxge_channel_t)i;
			}
		}
	}

	rx = &nhd->hio.rx;
	if (rx->get_map) {
		/*
		 * Likewise, the map we get back is a bitmap of
		 * the virtual Rx DMA channels we own -
		 * they are NOT real channel numbers.
		 */
		hv_rv = (*rx->get_map)(vr->cookie, &rx_map);
		if (hv_rv != 0) {
			NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
			    "rx->get_map() failed"));
			return (DDI_FAILURE);
		}
		res_map_parse(nxge, NXGE_RECEIVE_GROUP, rx_map);

		/*
		 * For each channel in the map, allocate a DC and
		 * record its virtual channel number while we still
		 * have the VR data structure at hand.
		 */
		for (i = 0; i < VP_CHANNEL_MAX; i++) {
			if ((1 << i) & rx_map) {
				dc = nxge_guest_dc_alloc(nxge, vr,
				    NXGE_RECEIVE_GROUP);
				if (dc == NULL) {
					NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
					    "DC add failed"));
					return (DDI_FAILURE);
				}
				dc->channel = (nxge_channel_t)i;
			}
		}
	}

	status = nxge_mac_register(nxge);
	if (status != NXGE_OK) {
		cmn_err(CE_WARN, "nxge(%d): nxge_mac_register failed\n",
		    nxge->instance);
		return (DDI_FAILURE);
	}

	nxge->hio_vr = vr;	/* For faster lookups. */

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_vr_add"));

	return (DDI_SUCCESS);
}

/*
 * nxge_guest_dc_alloc
 *
 *	Find a free nxge_hio_dc_t data structure.
 *
 * Arguments:
 * 	nxge
 * 	vr	The VR that will own this DMA channel.
 * 	type	TRANSMIT or RECEIVE.
 *
 * Notes:
 *
 * Context:
 *	Guest domain
 */
static nxge_hio_dc_t *
nxge_guest_dc_alloc(
	nxge_t *nxge,
	nxge_hio_vr_t *vr,
	nxge_grp_type_t type)
{
	nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
	nxge_hio_dc_t *dc;
	int limit, i;

	/*
	 * In the guest domain, there may be more than one VR,
	 * each of which will be using the same slots, or
	 * virtual channel numbers.  So the <nhd>'s rdc & tdc
	 * tables must be shared.
	 */
	if (type == NXGE_TRANSMIT_GROUP) {
		dc = &nhd->tdc[0];
		limit = NXGE_MAX_TDCS;
	} else {
		dc = &nhd->rdc[0];
		limit = NXGE_MAX_RDCS;
	}

	MUTEX_ENTER(&nhd->lock);
	for (i = 0; i < limit; i++, dc++) {
		if (dc->vr == NULL) {
			dc->vr = vr;
			dc->cookie = vr->cookie;
			MUTEX_EXIT(&nhd->lock);
			return (dc);
		}
	}
	MUTEX_EXIT(&nhd->lock);

	return (NULL);
}

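/*
 * nxge_hio_get_dc_htable_idx
 *
 *	Look up a DMA channel and return its LDG vector, or -1 if
 *	the channel cannot be found.
 *
 * Arguments:
 * 	nxge
 * 	type	Transmit or receive.
 * 	channel	The channel to look up.
 *
 * Context:
 *	Guest domain
 */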
int
nxge_hio_get_dc_htable_idx(nxge_t *nxge, vpc_type_t type, uint32_t channel)
{
	nxge_hio_dc_t	*dc;

	ASSERT(isLDOMguest(nxge));

	dc = nxge_grp_dc_find(nxge, type, channel);
	if (dc == NULL)
		return (-1);

	return (dc->ldg.vector);
}

/*
 * res_map_parse
 *
 *	Parse a resource map.  The resources are DMA channels, receive
 *	or transmit, depending on <type>.
 *
 * Arguments:
 * 	nxge
 * 	type	Transmit or receive.
 *	res_map	The resource map to parse.
 *
 * Notes:
 *
 * Context:
 *	Guest domain
 */
static void
res_map_parse(
	nxge_t *nxge,
	nxge_grp_type_t type,
	uint64_t res_map)
{
	uint8_t slots, mask, slot;
	int first = 0, count;

	nxge_hw_pt_cfg_t *hardware;
	nxge_grp_t *group;

	/* Slots are numbered 0 - 7. */
	slots = (uint8_t)(res_map & 0xff);

	/*
	 * Count the bits set in the bitmap, and remember the first
	 * (lowest-numbered) slot that is set.
	 */
	for (slot = 0, count = 0, mask = 1; slot < 8; slot++) {
		if (slots & mask) {
			if (count == 0)
				first = slot;
			count++;
		}
		mask <<= 1;
	}
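
	/*
	 * For example, a res_map of 0x0c yields slots = 0x0c
	 * (channels 2 and 3), so first = 2 and count = 2.
	 */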

	hardware = &nxge->pt_config.hw_config;
	group = (type == NXGE_TRANSMIT_GROUP) ?
	    nxge->tx_set.group[0] : nxge->rx_set.group[0];

	/*
	 * A guest domain has one Tx & one Rx group, so far.
	 * In the future, there may be more than one.
	 */
	if (type == NXGE_TRANSMIT_GROUP) {
		nxge_dma_pt_cfg_t *port = &nxge->pt_config;
		nxge_tdc_grp_t *tdc_grp = &nxge->pt_config.tdc_grps[0];

		hardware->tdc.start = first;
		hardware->tdc.count = count;
		hardware->tdc.owned = count;

		tdc_grp->start_tdc = first;
		tdc_grp->max_tdcs = (uint8_t)count;
		tdc_grp->grp_index = group->index;
		tdc_grp->map = slots;

		group->map = slots;

		/*
		 * Pointless in a guest domain.  This bitmap is used
		 * in only one place: nxge_txc_init(),
		 * a service-domain-only function.
		 */
		port->tx_dma_map = slots;

		nxge->tx_set.owned.map |= slots;
	} else {
		nxge_rdc_grp_t *rdc_grp = &nxge->pt_config.rdc_grps[0];

		hardware->start_rdc = first;
		hardware->max_rdcs = count;

		rdc_grp->start_rdc = (uint8_t)first;
		rdc_grp->max_rdcs = (uint8_t)count;
		rdc_grp->def_rdc = (uint8_t)first;

		rdc_grp->map = slots;
		group->map = slots;

		nxge->rx_set.owned.map |= slots;
	}
}

/*
 * nxge_hio_vr_release
 *
 *	Release a virtualization region (VR).
 *
 * Arguments:
 * 	nxge
 *
 * Notes:
 *	We must uninitialize all DMA channels associated with the VR, too.
 *
 *	The service domain will re-initialize these DMA channels later.
 *	See nxge_hio.c:nxge_hio_share_free() for details.
 *
 * Context:
 *	Guest domain
 */
int
nxge_hio_vr_release(nxge_t *nxge)
{
	nxge_hio_data_t	*nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
	int		vr_index;

	NXGE_DEBUG_MSG((nxge, MEM2_CTL, "==> nxge_hio_vr_release"));

	if (nxge->hio_vr == NULL) {
		return (NXGE_OK);
	}

	/*
	 * Uninitialize interrupts.
	 */
	nxge_hio_intr_uninit(nxge);

	/*
	 * Uninitialize the receive DMA channels.
	 */
	nxge_uninit_rxdma_channels(nxge);

	/*
	 * Uninitialize the transmit DMA channels.
	 */
	nxge_uninit_txdma_channels(nxge);

	/*
	 * Remove both groups. Assumption: only two groups!
	 */
	if (nxge->rx_set.group[0] != NULL)
		nxge_grp_remove(nxge, nxge->rx_set.group[0]);
	if (nxge->tx_set.group[0] != NULL)
		nxge_grp_remove(nxge, nxge->tx_set.group[0]);

	NXGE_DEBUG_MSG((nxge, MEM2_CTL, "<== nxge_hio_vr_release"));

	/*
	 * Clean up.
	 */
	MUTEX_ENTER(&nhd->lock);
	for (vr_index = 0; vr_index < FUNC_VIR_MAX; vr_index++) {
		if (nhd->vr[vr_index].nxge == (uintptr_t)nxge) {
			nhd->vr[vr_index].nxge = (uintptr_t)NULL;
			break;
		}
	}
	MUTEX_EXIT(&nhd->lock);

	return (NXGE_OK);
}

#if defined(NIU_LP_WORKAROUND)
/*
 * nxge_tdc_lp_conf
 *
 *	Configure the logical pages for a TDC.
 *
 * Arguments:
 * 	nxge
 * 	channel	The TDC to configure.
 *
 * Notes:
 *
 * Context:
 *	Guest domain
 */
nxge_status_t
nxge_tdc_lp_conf(
	p_nxge_t nxge,
	int channel)
{
	nxge_hio_dc_t		*dc;
	nxge_dma_common_t	*data;
	nxge_dma_common_t	*control;
	tx_ring_t		*ring;

	uint64_t		hv_rv;
	uint64_t		ra, size;

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_tdc_lp_conf"));

	ring = nxge->tx_rings->rings[channel];

	if (ring->hv_set) {
		/* This shouldn't happen. */
		return (NXGE_OK);
	}

	if (!(dc = nxge_grp_dc_find(nxge, VP_BOUND_TX, channel)))
		return (NXGE_ERROR);

	/*
	 * Initialize logical page 0 for data buffers.
	 *
	 * <orig_ioaddr_pp> & <orig_alength> are initialized in
	 * nxge_main.c:nxge_dma_mem_alloc().
	 */
	data = nxge->tx_buf_pool_p->dma_buf_pool_p[channel];
	ring->hv_tx_buf_base_ioaddr_pp = (uint64_t)data->orig_ioaddr_pp;
	ring->hv_tx_buf_ioaddr_size = (uint64_t)data->orig_alength;

	hv_rv = hv_niu_vrtx_logical_page_conf(dc->cookie,
	    (uint64_t)channel, 0,
	    ring->hv_tx_buf_base_ioaddr_pp,
	    ring->hv_tx_buf_ioaddr_size);

	if (hv_rv != 0) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
		    "<== nxge_tdc_lp_conf: channel %d "
		    "(page 0 data buf) hv_rv 0x%llx "
		    "ioaddr_pp $%p size 0x%llx ",
		    channel, hv_rv,
		    ring->hv_tx_buf_base_ioaddr_pp,
		    ring->hv_tx_buf_ioaddr_size));
		return (NXGE_ERROR | hv_rv);
	}

	ra = size = 0;
	hv_rv = hv_niu_vrtx_logical_page_info(dc->cookie,
	    (uint64_t)channel, 0, &ra, &size);

	NXGE_DEBUG_MSG((nxge, HIO_CTL,
	    "==> nxge_tdc_lp_conf: channel %d "
	    "(page 0 data buf) hv_rv 0x%llx "
	    "set ioaddr_pp $%p set size 0x%llx "
	    "get ra ioaddr_pp $%p get size 0x%llx ",
	    channel, hv_rv, ring->hv_tx_buf_base_ioaddr_pp,
	    ring->hv_tx_buf_ioaddr_size, ra, size));

	/*
	 * Initialize logical page 1 for control buffers.
	 */
	control = nxge->tx_cntl_pool_p->dma_buf_pool_p[channel];
	ring->hv_tx_cntl_base_ioaddr_pp = (uint64_t)control->orig_ioaddr_pp;
	ring->hv_tx_cntl_ioaddr_size = (uint64_t)control->orig_alength;

	hv_rv = hv_niu_vrtx_logical_page_conf(dc->cookie,
	    (uint64_t)channel, (uint64_t)1,
	    ring->hv_tx_cntl_base_ioaddr_pp,
	    ring->hv_tx_cntl_ioaddr_size);

	if (hv_rv != 0) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
		    "<== nxge_tdc_lp_conf: channel %d "
		    "(page 1 cntl buf) hv_rv 0x%llx "
		    "ioaddr_pp $%p size 0x%llx ",
		    channel, hv_rv,
		    ring->hv_tx_cntl_base_ioaddr_pp,
		    ring->hv_tx_cntl_ioaddr_size));
		return (NXGE_ERROR | hv_rv);
	}

	ra = size = 0;
	hv_rv = hv_niu_vrtx_logical_page_info(dc->cookie,
	    (uint64_t)channel, (uint64_t)1, &ra, &size);

	NXGE_DEBUG_MSG((nxge, HIO_CTL,
	    "==> nxge_tdc_lp_conf: channel %d "
	    "(page 1 cntl buf) hv_rv 0x%llx "
	    "set ioaddr_pp $%p set size 0x%llx "
	    "get ra ioaddr_pp $%p get size 0x%llx ",
	    channel, hv_rv, ring->hv_tx_cntl_base_ioaddr_pp,
	    ring->hv_tx_cntl_ioaddr_size, ra, size));

	ring->hv_set = B_TRUE;

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_tdc_lp_conf"));

	return (NXGE_OK);
}

/*
 * nxge_rdc_lp_conf
 *
 *	Configure an RDC's logical pages.
 *
 * Arguments:
 * 	nxge
 * 	channel	The RDC to configure.
 *
 * Notes:
 *
 * Context:
 *	Guest domain
 */
nxge_status_t
nxge_rdc_lp_conf(
	p_nxge_t nxge,
	int channel)
{
	nxge_hio_dc_t		*dc;
	nxge_dma_common_t	*data;
	nxge_dma_common_t	*control;
	rx_rbr_ring_t		*ring;

	uint64_t		hv_rv;
	uint64_t		ra, size;

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_rdc_lp_conf"));

	ring = nxge->rx_rbr_rings->rbr_rings[channel];

	if (ring->hv_set) {
		return (NXGE_OK);
	}

	if (!(dc = nxge_grp_dc_find(nxge, VP_BOUND_RX, channel)))
		return (NXGE_ERROR);

	/*
	 * Initialize logical page 0 for data buffers.
	 *
	 * <orig_ioaddr_pp> & <orig_alength> are initialized in
	 * nxge_main.c:nxge_dma_mem_alloc().
	 */
	data = nxge->rx_buf_pool_p->dma_buf_pool_p[channel];
	ring->hv_rx_buf_base_ioaddr_pp = (uint64_t)data->orig_ioaddr_pp;
	ring->hv_rx_buf_ioaddr_size = (uint64_t)data->orig_alength;

	hv_rv = hv_niu_vrrx_logical_page_conf(dc->cookie,
	    (uint64_t)channel, 0,
	    ring->hv_rx_buf_base_ioaddr_pp,
	    ring->hv_rx_buf_ioaddr_size);

	if (hv_rv != 0) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
		    "<== nxge_rdc_lp_conf: channel %d "
		    "(page 0 data buf) hv_rv 0x%llx "
		    "ioaddr_pp $%p size 0x%llx ",
		    channel, hv_rv,
		    ring->hv_rx_buf_base_ioaddr_pp,
		    ring->hv_rx_buf_ioaddr_size));
		return (NXGE_ERROR | hv_rv);
	}

	ra = size = 0;
	hv_rv = hv_niu_vrrx_logical_page_info(dc->cookie,
	    (uint64_t)channel, 0, &ra, &size);

	NXGE_DEBUG_MSG((nxge, HIO_CTL,
	    "==> nxge_rdc_lp_conf: channel %d "
	    "(page 0 data buf) hv_rv 0x%llx "
	    "set ioaddr_pp $%p set size 0x%llx "
	    "get ra ioaddr_pp $%p get size 0x%llx ",
	    channel, hv_rv, ring->hv_rx_buf_base_ioaddr_pp,
	    ring->hv_rx_buf_ioaddr_size, ra, size));

	/*
	 * Initialize logical page 1 for control buffers.
	 */
	control = nxge->rx_cntl_pool_p->dma_buf_pool_p[channel];
	ring->hv_rx_cntl_base_ioaddr_pp = (uint64_t)control->orig_ioaddr_pp;
	ring->hv_rx_cntl_ioaddr_size = (uint64_t)control->orig_alength;

	hv_rv = hv_niu_vrrx_logical_page_conf(dc->cookie,
	    (uint64_t)channel, (uint64_t)1,
	    ring->hv_rx_cntl_base_ioaddr_pp,
	    ring->hv_rx_cntl_ioaddr_size);

	if (hv_rv != 0) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
		    "<== nxge_rdc_lp_conf: channel %d "
		    "(page 1 cntl buf) hv_rv 0x%llx "
		    "ioaddr_pp $%p size 0x%llx ",
		    channel, hv_rv,
		    ring->hv_rx_cntl_base_ioaddr_pp,
		    ring->hv_rx_cntl_ioaddr_size));
		return (NXGE_ERROR | hv_rv);
	}

	ra = size = 0;
	hv_rv = hv_niu_vrrx_logical_page_info(dc->cookie,
	    (uint64_t)channel, (uint64_t)1, &ra, &size);

	NXGE_DEBUG_MSG((nxge, HIO_CTL,
	    "==> nxge_rdc_lp_conf: channel %d "
	    "(page 1 cntl buf) hv_rv 0x%llx "
	    "set ioaddr_pp $%p set size 0x%llx "
	    "get ra ioaddr_pp $%p get size 0x%llx ",
	    channel, hv_rv, ring->hv_rx_cntl_base_ioaddr_pp,
	    ring->hv_rx_cntl_ioaddr_size, ra, size));

	ring->hv_set = B_TRUE;

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_rdc_lp_conf"));

	return (NXGE_OK);
}
#endif	/* defined(NIU_LP_WORKAROUND) */

/*
 * This value is in milliseconds.
 */
#define	NXGE_GUEST_TIMER	500 /* 1/2 second, for now */

/*
 * nxge_hio_start_timer
 *
 *	Start the timer which checks for Tx hangs.
 *
 * Arguments:
 * 	nxge
 *
 * Notes:
 *	This function kicks off the guest domain equivalent of
 *	nxge_check_hw_state().  It is called only once, from
 *	nxge_attach().
 *
 * Context:
 *	Guest domain
 */
void
nxge_hio_start_timer(
	nxge_t *nxge)
{
	nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
	nxge_hio_vr_t *vr;
	int region;

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_start_timer"));

	MUTEX_ENTER(&nhd->lock);

	/*
	 * Find our VR data structure.  (We are currently assuming
	 * one VR per guest domain.  That may change in the future.)
	 */
	for (region = FUNC0_VIR0; region < NXGE_VR_SR_MAX; region++) {
		if (nhd->vr[region].nxge == (uintptr_t)nxge)
			break;
	}

	MUTEX_EXIT(&nhd->lock);

	if (region == NXGE_VR_SR_MAX) {
		return;
	}

	vr = &nhd->vr[region];

	nxge->nxge_timerid = timeout((void(*)(void *))nxge_check_guest_state,
	    (void *)vr, drv_usectohz(1000 * NXGE_GUEST_TIMER));

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_start_timer"));
}

/*
 * nxge_check_guest_state
 *
 *	Essentially, check for Tx hangs.  In the future, if we are
 *	polling the hardware, we may do so here.
 *
 * Arguments:
 * 	vr	The virtualization region (VR) data structure.
 *
 * Notes:
 *	This function is the guest domain equivalent of
 *	nxge_check_hw_state().  Since we have no hardware to
 *	check, we simply call nxge_check_tx_hang().
 *
 * Context:
 *	Guest domain
 */
static void
nxge_check_guest_state(
	nxge_hio_vr_t *vr)
{
	nxge_t *nxge = (nxge_t *)vr->nxge;

	NXGE_DEBUG_MSG((nxge, SYSERR_CTL, "==> nxge_check_guest_state"));

	MUTEX_ENTER(nxge->genlock);
	nxge->nxge_timerid = 0;

	if (nxge->nxge_mac_state == NXGE_MAC_STARTED) {
		nxge_check_tx_hang(nxge);

		nxge->nxge_timerid = timeout((void(*)(void *))
		    nxge_check_guest_state, (caddr_t)vr,
		    drv_usectohz(1000 * NXGE_GUEST_TIMER));
	}

	MUTEX_EXIT(nxge->genlock);
	NXGE_DEBUG_MSG((nxge, SYSERR_CTL, "<== nxge_check_guest_state"));
}

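/*
 * nxge_hio_rdc_intr_arm
 *
 *	Arm or disarm the logical device groups (LDGs) of the RDCs
 *	that belong to this guest's group.
 *
 * Arguments:
 * 	nxge
 * 	arm	B_TRUE to arm, B_FALSE to disarm.
 *
 * Context:
 *	Guest domain
 */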
nxge_status_t
nxge_hio_rdc_intr_arm(p_nxge_t nxge, boolean_t arm)
{
	nxge_grp_t	*group;
	uint32_t	channel;
	nxge_hio_dc_t	*dc;
	nxge_ldg_t	*ldgp;

	/*
	 * Validate the state of the guest interface before
	 * proceeding.
	 */
	if (!isLDOMguest(nxge))
		return (NXGE_ERROR);
	if (nxge->nxge_mac_state != NXGE_MAC_STARTED)
		return (NXGE_ERROR);

	/*
	 * In a guest domain, we always and only deal with
	 * group 0 for an instance of nxge.
	 */
	group = nxge->rx_set.group[0];

	/*
	 * Arm (or disarm) each RDC in the group.
	 */
	for (channel = 0; channel < NXGE_MAX_RDCS; channel++) {
		if ((1 << channel) & group->map) {
			/*
			 * Get the RDC.
			 */
			dc = nxge_grp_dc_find(nxge, VP_BOUND_RX, channel);
			if (dc == NULL)
				return (NXGE_ERROR);

			/*
			 * Get the RDC's ldg group.
			 */
			ldgp = &nxge->ldgvp->ldgp[dc->ldg.vector];
			if (ldgp == NULL)
				return (NXGE_ERROR);

			/*
			 * Set the state of the group.
			 */
			ldgp->arm = arm;

			nxge_hio_ldgimgn(nxge, ldgp);
		}
	}

	return (NXGE_OK);
}

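/*
 * nxge_hio_rdc_enable
 *
 *	Enable each RDC that belongs to this guest's group.
 *
 * Arguments:
 * 	nxge
 *
 * Context:
 *	Guest domain
 */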
nxge_status_t
nxge_hio_rdc_enable(p_nxge_t nxge)
{
	nxge_grp_t	*group;
	npi_handle_t	handle;
	uint32_t	channel;
	npi_status_t	rval;

	/*
	 * Validate the state of the guest interface before
	 * proceeding.
	 */
	if (!isLDOMguest(nxge))
		return (NXGE_ERROR);
	if (nxge->nxge_mac_state != NXGE_MAC_STARTED)
		return (NXGE_ERROR);

	/*
	 * In a guest domain, we always and only deal with
	 * group 0 for an instance of nxge.
	 */
	group = nxge->rx_set.group[0];

	/*
	 * Get the PIO handle.
	 */
	handle = NXGE_DEV_NPI_HANDLE(nxge);

	for (channel = 0; channel < NXGE_MAX_RDCS; channel++) {
		/*
		 * If this channel is in the map, then enable
		 * it.
		 */
		if ((1 << channel) & group->map) {
			/*
			 * Enable the RDC and clear the empty bit.
			 */
			rval = npi_rxdma_cfg_rdc_enable(handle, channel);
			if (rval != NPI_SUCCESS)
				return (NXGE_ERROR);

			(void) npi_rxdma_channel_rbr_empty_clear(handle,
			    channel);
		}
	}

	return (NXGE_OK);
}
#endif	/* defined(sun4v) */