xref: /illumos-gate/usr/src/uts/common/io/nxge/nxge_hio_guest.c (revision 4eaa471005973e11a6110b69fe990530b3b95a38)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 /*
28  * nxge_hio_guest.c
29  *
30  * This file manages the virtualization resources for a guest domain.
31  *
32  */
33 
34 #include <sys/nxge/nxge_impl.h>
35 #include <sys/nxge/nxge_fzc.h>
36 #include <sys/nxge/nxge_rxdma.h>
37 #include <sys/nxge/nxge_txdma.h>
38 #include <sys/nxge/nxge_hio.h>
39 
40 /*
41  * nxge_guest_regs_map
42  *
43  *	Map in a guest domain's register set(s).
44  *
45  * Arguments:
46  * 	nxge
47  *
48  * Notes:
49  *	We set <is_vraddr> to B_TRUE.
50  *
51  * Context:
52  *	Guest domain
53  */
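/*
 * Register access attributes for the guest's VR mapping: structural
 * little-endian access with strict ordering.
 */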
54 static ddi_device_acc_attr_t nxge_guest_register_access_attributes = {
55 	DDI_DEVICE_ATTR_V0,
56 	DDI_STRUCTURE_LE_ACC,
57 	DDI_STRICTORDER_ACC,
58 };
59 
60 int
61 nxge_guest_regs_map(nxge_t *nxge)
62 {
63 	dev_regs_t 	*regs;
64 	off_t		regsize;
65 	int rv;
66 
67 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_guest_regs_map"));
68 
69 	/* So we can allocate properly-aligned memory. */
70 	nxge->niu_type = N2_NIU; /* Version 1.0 only */
71 	nxge->function_num = nxge->instance; /* HIOXXX Looking for ideas. */
72 
73 	nxge->dev_regs = KMEM_ZALLOC(sizeof (dev_regs_t), KM_SLEEP);
74 	regs = nxge->dev_regs;
75 
76 	if ((rv = ddi_dev_regsize(nxge->dip, 0, &regsize)) != DDI_SUCCESS) {
77 		NXGE_ERROR_MSG((nxge, HIO_CTL, "ddi_dev_regsize() failed"));
78 		return (NXGE_ERROR);
79 	}
80 
81 	rv = ddi_regs_map_setup(nxge->dip, 0, (caddr_t *)&regs->nxge_regp, 0, 0,
82 	    &nxge_guest_register_access_attributes, &regs->nxge_regh);
83 
84 	if (rv != DDI_SUCCESS) {
85 		NXGE_ERROR_MSG((nxge, HIO_CTL, "ddi_regs_map_setup() failed"));
86 		return (NXGE_ERROR);
87 	}
88 
89 	nxge->npi_handle.regh = regs->nxge_regh;
90 	nxge->npi_handle.regp = (npi_reg_ptr_t)regs->nxge_regp;
91 	nxge->npi_handle.is_vraddr = B_TRUE;
92 	nxge->npi_handle.function.instance = nxge->instance;
93 	nxge->npi_handle.function.function = nxge->function_num;
94 	nxge->npi_handle.nxgep = (void *)nxge;
95 
96 	/* NPI_REG_ADD_HANDLE_SET() */
97 	nxge->npi_reg_handle.regh = regs->nxge_regh;
98 	nxge->npi_reg_handle.regp = (npi_reg_ptr_t)regs->nxge_regp;
99 	nxge->npi_reg_handle.is_vraddr = B_TRUE;
100 	nxge->npi_reg_handle.function.instance = nxge->instance;
101 	nxge->npi_reg_handle.function.function = nxge->function_num;
102 	nxge->npi_reg_handle.nxgep = (void *)nxge;
103 
104 	/* NPI_VREG_ADD_HANDLE_SET() */
105 	nxge->npi_vreg_handle.regh = regs->nxge_regh;
106 	nxge->npi_vreg_handle.regp = (npi_reg_ptr_t)regs->nxge_regp;
107 	nxge->npi_vreg_handle.is_vraddr = B_TRUE;
108 	nxge->npi_vreg_handle.function.instance = nxge->instance;
109 	nxge->npi_vreg_handle.function.function = nxge->function_num;
110 	nxge->npi_vreg_handle.nxgep = (void *)nxge;
111 
112 	regs->nxge_vir_regp = regs->nxge_regp;
113 	regs->nxge_vir_regh = regs->nxge_regh;
114 
115 	/*
116 	 * We do NOT set the PCI, MSI-X, 2nd Virtualization,
117 	 * or FCODE reg variables.
118 	 */
119 
120 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_guest_regs_map"));
121 
122 	return (NXGE_OK);
123 }
124 
125 void
126 nxge_guest_regs_map_free(
127 	nxge_t *nxge)
128 {
129 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_guest_regs_map_free"));
130 
131 	if (nxge->dev_regs) {
132 		if (nxge->dev_regs->nxge_regh) {
133 			NXGE_DEBUG_MSG((nxge, DDI_CTL,
134 			    "==> nxge_unmap_regs: device registers"));
135 			ddi_regs_map_free(&nxge->dev_regs->nxge_regh);
136 			nxge->dev_regs->nxge_regh = NULL;
137 		}
138 		kmem_free(nxge->dev_regs, sizeof (dev_regs_t));
139 		nxge->dev_regs = 0;
140 	}
141 
142 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_guest_regs_map_free"));
143 }
144 
145 #if defined(sun4v)
146 
147 /*
148  * -------------------------------------------------------------
149  * Local prototypes
150  * -------------------------------------------------------------
151  */
152 static nxge_hio_dc_t *nxge_guest_dc_alloc(
153 	nxge_t *, nxge_hio_vr_t *, nxge_grp_type_t);
154 
155 static void res_map_parse(nxge_t *, nxge_grp_type_t, uint64_t);
156 static void nxge_check_guest_state(nxge_hio_vr_t *);
157 
158 /*
159  * nxge_hio_vr_add
160  *
161  *	If we have been given a virtualization region (VR),
162  *	then initialize it.
163  *
164  * Arguments:
165  * 	nxge
166  *
167  * Notes:
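 *	We record the VR, discover which virtual TDCs & RDCs have been
 *	assigned to us, set up interrupts, and register with the MAC
 *	layer via nxge_mac_register().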
168  *
169  * Context:
170  *	Guest domain
171  */
172 /* ARGSUSED */
173 int
174 nxge_hio_vr_add(nxge_t *nxge)
175 {
176 	extern nxge_status_t	nxge_mac_register(p_nxge_t);
177 
178 	nxge_hio_data_t		*nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
179 	nxge_hio_vr_t		*vr;
180 	nxge_hio_dc_t		*dc;
181 	int			*reg_val;
182 	uint_t			reg_len;
183 	uint8_t			vr_index;
184 	nxhv_vr_fp_t		*fp;
185 	uint64_t		vr_address, vr_size;
186 	uint32_t		cookie;
187 	nxhv_dc_fp_t		*tx, *rx;
188 	uint64_t		tx_map, rx_map;
189 	uint64_t		hv_rv;
190 	int			i;
191 	nxge_status_t		status;
192 
193 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_vr_add"));
194 
195 	/*
196 	 * Get our HV cookie.
197 	 */
198 	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, nxge->dip,
199 	    0, "reg", &reg_val, &reg_len) != DDI_PROP_SUCCESS) {
200 		NXGE_DEBUG_MSG((nxge, VPD_CTL, "`reg' property not found"));
201 		return (NXGE_ERROR);
202 	}
203 
204 	cookie = (uint32_t)(reg_val[0]);
205 	ddi_prop_free(reg_val);
206 
207 	fp = &nhd->hio.vr;
208 	hv_rv = (*fp->getinfo)(cookie, &vr_address, &vr_size);
209 	if (hv_rv != 0) {
210 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
211 		    "vr->getinfo() failed"));
212 		return (NXGE_ERROR);
213 	}
214 
215 	/*
216 	 * In the guest domain, we can use any VR data structure
217 	 * we want, because we're not supposed to know which VR
218 	 * the service domain has allocated to us.
219 	 *
220 	 * In the current version, the least significant nybble of
221 	 * the cookie is the VR region, but that could change
222 	 * very easily.
223 	 *
224 	 * In the future, a guest may have more than one VR allocated
225 	 * to it, which is why we go through this exercise.
226 	 */
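	/*
	 * For illustration only: with the encoding described above, the
	 * region could be recovered as
	 *
	 *	vr_region = cookie & 0xf;
	 *
	 * (the least significant nybble).  We deliberately do not depend
	 * on that layout; we simply claim the first free VR slot below.
	 */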
227 	MUTEX_ENTER(&nhd->lock);
228 	for (vr_index = 0; vr_index < FUNC_VIR_MAX; vr_index++) {
229 		if (nhd->vr[vr_index].nxge == 0) {
230 			nhd->vr[vr_index].nxge = (uintptr_t)nxge;
231 			break;
232 		}
233 	}
234 	MUTEX_EXIT(&nhd->lock);
235 
236 	if (vr_index == FUNC_VIR_MAX) {
237 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_vr_add "
238 		    "no VRs available"));
239 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
240 		    "nxge_hio_vr_add(%d): cookie(0x%x)\n",
241 		    nxge->instance, cookie));
242 		return (NXGE_ERROR);
243 	}
244 
245 	vr = &nhd->vr[vr_index];
246 
247 	vr->nxge = (uintptr_t)nxge;
248 	vr->cookie = (uint32_t)cookie;
249 	vr->address = vr_address;
250 	vr->size = vr_size;
251 	vr->region = vr_index;
252 
253 	/*
254 	 * This is redundant data, but useful nonetheless.  It helps
255 	 * us to keep track of which RDCs & TDCs belong to us.
256 	 */
257 	if (nxge->tx_set.lg.count == 0)
258 		(void) nxge_grp_add(nxge, NXGE_TRANSMIT_GROUP);
259 	if (nxge->rx_set.lg.count == 0)
260 		(void) nxge_grp_add(nxge, NXGE_RECEIVE_GROUP);
261 
262 	/*
263 	 * See nxge_intr.c.
264 	 */
265 	if (nxge_hio_intr_init(nxge) != NXGE_OK) {
266 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
267 		    "nxge_hio_intr_init() failed"));
268 		return (NXGE_ERROR);
269 	}
270 
271 	/*
272 	 * Now we find out which RDCs & TDCs have been allocated to us.
273 	 */
274 	tx = &nhd->hio.tx;
275 	if (tx->get_map) {
276 		/*
277 		 * The map we get back is a bitmap of the
278 		 * virtual Tx DMA channels we own -
279 		 * they are NOT real channel numbers.
280 		 */
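		/*
		 * For example, a tx_map of 0x3 would mean that virtual
		 * Tx channels 0 and 1 belong to this VR.
		 */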
281 		hv_rv = (*tx->get_map)(vr->cookie, &tx_map);
282 		if (hv_rv != 0) {
283 			NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
284 			    "tx->get_map() failed"));
285 			return (NXGE_ERROR);
286 		}
287 		res_map_parse(nxge, NXGE_TRANSMIT_GROUP, tx_map);
288 
289 		/*
290 		 * For each channel, mark these two fields
291 		 * while we have the VR data structure.
292 		 */
293 		for (i = 0; i < VP_CHANNEL_MAX; i++) {
294 			if ((1 << i) & tx_map) {
295 				dc = nxge_guest_dc_alloc(nxge, vr,
296 				    NXGE_TRANSMIT_GROUP);
297 				if (dc == 0) {
298 					NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
299 					    "DC add failed"));
300 					return (NXGE_ERROR);
301 				}
302 				dc->channel = (nxge_channel_t)i;
303 			}
304 		}
305 	}
306 
307 	rx = &nhd->hio.rx;
308 	if (rx->get_map) {
309 		/*
310 		 * I repeat, the map we get back is a bitmap of
311 		 * the virtual Rx DMA channels we own -
312 		 * they are NOT real channel numbers.
313 		 */
314 		hv_rv = (*rx->get_map)(vr->cookie, &rx_map);
315 		if (hv_rv != 0) {
316 			NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
317 			    "rx->get_map() failed"));
318 			return (NXGE_ERROR);
319 		}
320 		res_map_parse(nxge, NXGE_RECEIVE_GROUP, rx_map);
321 
322 		/*
323 		 * For each channel, mark these two fields
324 		 * while we have the VR data structure.
325 		 */
326 		for (i = 0; i < VP_CHANNEL_MAX; i++) {
327 			if ((1 << i) & rx_map) {
328 				dc = nxge_guest_dc_alloc(nxge, vr,
329 				    NXGE_RECEIVE_GROUP);
330 				if (dc == 0) {
331 					NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
332 					    "DC add failed"));
333 					return (NXGE_ERROR);
334 				}
335 				dc->channel = (nxge_channel_t)i;
336 			}
337 		}
338 	}
339 
340 	status = nxge_mac_register(nxge);
341 	if (status != NXGE_OK) {
342 		cmn_err(CE_WARN, "nxge(%d): nxge_mac_register failed\n",
343 		    nxge->instance);
344 		return (status);
345 	}
346 
347 	nxge->hio_vr = vr;	/* For faster lookups. */
348 
349 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_vr_add"));
350 
351 	return (NXGE_OK);
352 }
353 
354 /*
355  * nxge_guest_dc_alloc
356  *
357  *	Find a free nxge_hio_dc_t data structure.
358  *
359  * Arguments:
360  * 	nxge
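 * 	vr	The VR that will own this channel.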
361  * 	type	TRANSMIT or RECEIVE.
362  *
363  * Notes:
364  *
365  * Context:
366  *	Guest domain
367  */
368 nxge_hio_dc_t *
369 nxge_guest_dc_alloc(
370 	nxge_t *nxge,
371 	nxge_hio_vr_t *vr,
372 	nxge_grp_type_t type)
373 {
374 	nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
375 	nxge_hio_dc_t *dc;
376 	int limit, i;
377 
378 	/*
379 	 * In the guest domain, there may be more than one VR,
380 	 * each one of which will be using the same slots, or
381 	 * virtual channel numbers.  So the <nhd>'s rdc & tdc
382 	 * tables must be shared.
383 	 */
384 	if (type == NXGE_TRANSMIT_GROUP) {
385 		dc = &nhd->tdc[0];
386 		limit = NXGE_MAX_TDCS;
387 	} else {
388 		dc = &nhd->rdc[0];
389 		limit = NXGE_MAX_RDCS;
390 	}
391 
392 	MUTEX_ENTER(&nhd->lock);
393 	for (i = 0; i < limit; i++, dc++) {
394 		if (dc->vr == 0) {
395 			dc->vr = vr;
396 			dc->cookie = vr->cookie;
397 			MUTEX_EXIT(&nhd->lock);
398 			return (dc);
399 		}
400 	}
401 	MUTEX_EXIT(&nhd->lock);
402 
403 	return (0);
404 }
405 
406 /*
407  * res_map_parse
408  *
409  *	Parse a resource map.  The resources are DMA channels, receive
410  *	or transmit, depending on <type>.
411  *
412  * Arguments:
413  * 	nxge
414  * 	type	Transmit or receive.
415  *	res_map	The resource map to parse.
416  *
417  * Notes:
418  *
419  * Context:
420  *	Guest domain
421  */
422 void
423 res_map_parse(
424 	nxge_t *nxge,
425 	nxge_grp_type_t type,
426 	uint64_t res_map)
427 {
428 	uint8_t slots, mask, slot;
429 	int first, count;
430 
431 	nxge_hw_pt_cfg_t *hardware;
432 	nxge_grp_t *group;
433 
434 	/* Slots are numbered 0 - 7. */
435 	slots = (uint8_t)(res_map & 0xff);
436 
437 	/* Count the number of bits in the bitmap. */
438 	for (slot = 0, count = 0, mask = 1; slot < 8; slot++) {
439 		if (slots & mask)
440 			count++;
441 		if (count == 1)
442 			first = slot;
443 		mask <<= 1;
444 	}
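	/*
	 * For example, a res_map of 0x30 has slots 4 and 5 set, so the
	 * loop above yields first = 4 and count = 2.  (This calculation
	 * assumes that the slots in the map are contiguous.)
	 */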
445 
446 	hardware = &nxge->pt_config.hw_config;
447 	group = (type == NXGE_TRANSMIT_GROUP) ?
448 	    nxge->tx_set.group[0] : nxge->rx_set.group[0];
449 
450 	/*
451 	 * A guest domain has one Tx & one Rx group, so far.
452 	 * In the future, there may be more than one.
453 	 */
454 	if (type == NXGE_TRANSMIT_GROUP) {
455 		nxge_dma_pt_cfg_t *port = &nxge->pt_config;
456 		nxge_tdc_grp_t *tdc_grp = &nxge->pt_config.tdc_grps[0];
457 
458 		hardware->tdc.start = first;
459 		hardware->tdc.count = count;
460 		hardware->tdc.owned = count;
461 
462 		tdc_grp->start_tdc = first;
463 		tdc_grp->max_tdcs = (uint8_t)count;
464 		tdc_grp->grp_index = group->index;
465 		tdc_grp->map = slots;
466 
467 		group->map = slots;
468 
469 		/*
470 		 * Pointless in a guest domain.  This bitmap is used
471 		 * in only one place: nxge_txc_init(),
472 		 * a service-domain-only function.
473 		 */
474 		port->tx_dma_map = slots;
475 
476 		nxge->tx_set.owned.map |= slots;
477 	} else {
478 		nxge_rdc_grp_t *rdc_grp = &nxge->pt_config.rdc_grps[0];
479 
480 		hardware->start_rdc = first;
481 		hardware->max_rdcs = count;
482 
483 		rdc_grp->start_rdc = (uint8_t)first;
484 		rdc_grp->max_rdcs = (uint8_t)count;
485 		rdc_grp->def_rdc = (uint8_t)first;
486 
487 		rdc_grp->map = slots;
488 		group->map = slots;
489 
490 		nxge->rx_set.owned.map |= slots;
491 	}
492 }
493 
494 /*
495  * nxge_hio_vr_release
496  *
497  *	Release a virtualization region (VR).
498  *
499  * Arguments:
500  * 	nxge
501  *
502  * Notes:
503  *	We must uninitialize all DMA channels associated with the VR, too.
504  *
505  *	The service domain will re-initialize these DMA channels later.
506  *	See nxge_hio.c:nxge_hio_share_free() for details.
507  *
508  * Context:
509  *	Guest domain
510  */
511 int
512 nxge_hio_vr_release(nxge_t *nxge)
513 {
514 	nxge_hio_data_t	*nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
515 	int		vr_index;
516 
517 	NXGE_DEBUG_MSG((nxge, MEM2_CTL, "==> nxge_hio_vr_release"));
518 
519 	if (nxge->hio_vr == NULL) {
520 		return (NXGE_OK);
521 	}
522 
523 	/*
524 	 * Uninitialize interrupts.
525 	 */
526 	nxge_hio_intr_uninit(nxge);
527 
528 	/*
529 	 * Uninitialize the receive DMA channels.
530 	 */
531 	nxge_uninit_rxdma_channels(nxge);
532 
533 	/*
534 	 * Uninitialize the transmit DMA channels.
535 	 */
536 	nxge_uninit_txdma_channels(nxge);
537 
538 	/*
539 	 * Remove both groups. Assumption: only two groups!
540 	 */
541 	if (nxge->rx_set.group[0] != NULL)
542 		nxge_grp_remove(nxge, nxge->rx_set.group[0]);
543 	if (nxge->tx_set.group[0] != NULL)
544 		nxge_grp_remove(nxge, nxge->tx_set.group[0]);
545 
546 	NXGE_DEBUG_MSG((nxge, MEM2_CTL, "<== nxge_hio_vr_release"));
547 
548 	/*
549 	 * Clean up.
550 	 */
551 	MUTEX_ENTER(&nhd->lock);
552 	for (vr_index = 0; vr_index < FUNC_VIR_MAX; vr_index++) {
553 		if (nhd->vr[vr_index].nxge == (uintptr_t)nxge) {
554 			nhd->vr[vr_index].nxge = NULL;
555 			break;
556 		}
557 	}
558 	MUTEX_EXIT(&nhd->lock);
559 
560 	return (NXGE_OK);
561 }
562 
563 #if defined(NIU_LP_WORKAROUND)
564 /*
565  * nxge_tdc_lp_conf
566  *
567  *	Configure the logical pages for a TDC.
568  *
569  * Arguments:
570  * 	nxge
571  * 	channel	The TDC to configure.
572  *
573  * Notes:
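 *	Logical page 0 maps the channel's data buffers; logical page 1
 *	maps its control buffers.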
574  *
575  * Context:
576  *	Guest domain
577  */
578 nxge_status_t
579 nxge_tdc_lp_conf(
580 	p_nxge_t nxge,
581 	int channel)
582 {
583 	nxge_hio_dc_t		*dc;
584 	nxge_dma_common_t	*data;
585 	nxge_dma_common_t	*control;
586 	tx_ring_t 		*ring;
587 
588 	uint64_t		hv_rv;
589 	uint64_t		ra, size;
590 
591 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_tdc_lp_conf"));
592 
593 	ring = nxge->tx_rings->rings[channel];
594 
595 	if (ring->hv_set) {
596 		/* This shouldn't happen. */
597 		return (NXGE_OK);
598 	}
599 
600 	if (!(dc = nxge_grp_dc_find(nxge, VP_BOUND_TX, channel)))
601 		return (NXGE_ERROR);
602 
603 	/*
604 	 * Initialize logical page 0 for data buffers.
605 	 *
606 	 * <orig_ioaddr_pp> & <orig_alength> are initialized in
607 	 * nxge_main.c:nxge_dma_mem_alloc().
608 	 */
609 	data = nxge->tx_buf_pool_p->dma_buf_pool_p[channel];
610 	ring->hv_tx_buf_base_ioaddr_pp = (uint64_t)data->orig_ioaddr_pp;
611 	ring->hv_tx_buf_ioaddr_size = (uint64_t)data->orig_alength;
612 
613 	hv_rv = hv_niu_vrtx_logical_page_conf(dc->cookie,
614 	    (uint64_t)channel, 0,
615 	    ring->hv_tx_buf_base_ioaddr_pp,
616 	    ring->hv_tx_buf_ioaddr_size);
617 
618 	if (hv_rv != 0) {
619 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
620 		    "<== nxge_tdc_lp_conf: channel %d "
621 		    "(page 0 data buf) hv: %d "
622 		    "ioaddr_pp $%p size 0x%llx ",
623 		    channel, hv_rv,
624 		    ring->hv_tx_buf_base_ioaddr_pp,
625 		    ring->hv_tx_buf_ioaddr_size));
626 		return (NXGE_ERROR | hv_rv);
627 	}
628 
629 	ra = size = 0;
630 	hv_rv = hv_niu_vrtx_logical_page_info(dc->cookie,
631 	    (uint64_t)channel, 0, &ra, &size);
632 
633 	NXGE_DEBUG_MSG((nxge, HIO_CTL,
634 	    "==> nxge_tdc_lp_conf: channel %d "
635 	    "(page 0 data buf) hv_rv 0x%llx "
636 	    "set ioaddr_pp $%p set size 0x%llx "
637 	    "get ra ioaddr_pp $%p get size 0x%llx ",
638 	    channel, hv_rv, ring->hv_tx_buf_base_ioaddr_pp,
639 	    ring->hv_tx_buf_ioaddr_size, ra, size));
640 
641 	/*
642 	 * Initialize logical page 1 for control buffers.
643 	 */
644 	control = nxge->tx_cntl_pool_p->dma_buf_pool_p[channel];
645 	ring->hv_tx_cntl_base_ioaddr_pp = (uint64_t)control->orig_ioaddr_pp;
646 	ring->hv_tx_cntl_ioaddr_size = (uint64_t)control->orig_alength;
647 
648 	hv_rv = hv_niu_vrtx_logical_page_conf(dc->cookie,
649 	    (uint64_t)channel, (uint64_t)1,
650 	    ring->hv_tx_cntl_base_ioaddr_pp,
651 	    ring->hv_tx_cntl_ioaddr_size);
652 
653 	if (hv_rv != 0) {
654 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
655 		    "<== nxge_tdc_lp_conf: channel %d "
656 		    "(page 1 cntl buf) hv_rv 0x%llx "
657 		    "ioaddr_pp $%p size 0x%llx ",
658 		    channel, hv_rv,
659 		    ring->hv_tx_cntl_base_ioaddr_pp,
660 		    ring->hv_tx_cntl_ioaddr_size));
661 		return (NXGE_ERROR | hv_rv);
662 	}
663 
664 	ra = size = 0;
665 	hv_rv = hv_niu_vrtx_logical_page_info(dc->cookie,
666 	    (uint64_t)channel, (uint64_t)1, &ra, &size);
667 
668 	NXGE_DEBUG_MSG((nxge, HIO_CTL,
669 	    "==> nxge_tdc_lp_conf: channel %d "
670 	    "(page 1 cntl buf) hv_rv 0x%llx "
671 	    "set ioaddr_pp $%p set size 0x%llx "
672 	    "get ra ioaddr_pp $%p get size 0x%llx ",
673 	    channel, hv_rv, ring->hv_tx_cntl_base_ioaddr_pp,
674 	    ring->hv_tx_cntl_ioaddr_size, ra, size));
675 
676 	ring->hv_set = B_TRUE;
677 
678 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_tdc_lp_conf"));
679 
680 	return (NXGE_OK);
681 }
682 
683 /*
684  * nxge_rdc_lp_conf
685  *
686  *	Configure an RDC's logical pages.
687  *
688  * Arguments:
689  * 	nxge
690  * 	channel	The RDC to configure.
691  *
692  * Notes:
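 *	Logical page 0 maps the channel's data buffers; logical page 1
 *	maps its control buffers.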
693  *
694  * Context:
695  *	Guest domain
696  */
697 nxge_status_t
698 nxge_rdc_lp_conf(
699 	p_nxge_t nxge,
700 	int channel)
701 {
702 	nxge_hio_dc_t		*dc;
703 	nxge_dma_common_t	*data;
704 	nxge_dma_common_t	*control;
705 	rx_rbr_ring_t		*ring;
706 
707 	uint64_t		hv_rv;
708 	uint64_t		ra, size;
709 
710 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_rdc_lp_conf"));
711 
712 	ring = nxge->rx_rbr_rings->rbr_rings[channel];
713 
714 	if (ring->hv_set) {
715 		return (NXGE_OK);
716 	}
717 
718 	if (!(dc = nxge_grp_dc_find(nxge, VP_BOUND_RX, channel)))
719 		return (NXGE_ERROR);
720 
721 	/*
722 	 * Initialize logical page 0 for data buffers.
723 	 *
724 	 * <orig_ioaddr_pp> & <orig_alength> are initialized in
725 	 * nxge_main.c:nxge_dma_mem_alloc().
726 	 */
727 	data = nxge->rx_buf_pool_p->dma_buf_pool_p[channel];
728 	ring->hv_rx_buf_base_ioaddr_pp = (uint64_t)data->orig_ioaddr_pp;
729 	ring->hv_rx_buf_ioaddr_size = (uint64_t)data->orig_alength;
730 
731 	hv_rv = hv_niu_vrrx_logical_page_conf(dc->cookie,
732 	    (uint64_t)channel, 0,
733 	    ring->hv_rx_buf_base_ioaddr_pp,
734 	    ring->hv_rx_buf_ioaddr_size);
735 
736 	if (hv_rv != 0) {
737 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
738 		    "<== nxge_rdc_lp_conf: channel %d "
739 		    "(page 0 data buf) hv_rv 0x%llx "
740 		    "ioaddr_pp $%p size 0x%llx ",
741 		    channel, hv_rv,
742 		    ring->hv_rx_buf_base_ioaddr_pp,
743 		    ring->hv_rx_buf_ioaddr_size));
744 		return (NXGE_ERROR | hv_rv);
745 	}
746 
747 	ra = size = 0;
748 	hv_rv = hv_niu_vrrx_logical_page_info(dc->cookie,
749 	    (uint64_t)channel, 0, &ra, &size);
750 
751 	NXGE_DEBUG_MSG((nxge, HIO_CTL,
752 	    "==> nxge_rdc_lp_conf: channel %d "
753 	    "(page 0 data buf) hv_rv 0x%llx "
754 	    "set ioaddr_pp $%p set size 0x%llx "
755 	    "get ra ioaddr_pp $%p get size 0x%llx ",
756 	    channel, hv_rv, ring->hv_rx_buf_base_ioaddr_pp,
757 	    ring->hv_rx_buf_ioaddr_size, ra, size));
758 
759 	/*
760 	 * Initialize logical page 1 for control buffers.
761 	 */
762 	control = nxge->rx_cntl_pool_p->dma_buf_pool_p[channel];
763 	ring->hv_rx_cntl_base_ioaddr_pp = (uint64_t)control->orig_ioaddr_pp;
764 	ring->hv_rx_cntl_ioaddr_size = (uint64_t)control->orig_alength;
765 
766 	hv_rv = hv_niu_vrrx_logical_page_conf(dc->cookie,
767 	    (uint64_t)channel, (uint64_t)1,
768 	    ring->hv_rx_cntl_base_ioaddr_pp,
769 	    ring->hv_rx_cntl_ioaddr_size);
770 
771 	if (hv_rv != 0) {
772 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
773 		    "<== nxge_rdc_lp_conf: channel %d "
774 		    "(page 1 cntl buf) hv_rv 0x%llx "
775 		    "ioaddr_pp $%p size 0x%llx ",
776 		    channel, hv_rv,
777 		    ring->hv_rx_cntl_base_ioaddr_pp,
778 		    ring->hv_rx_cntl_ioaddr_size));
779 		return (NXGE_ERROR | hv_rv);
780 	}
781 
782 	ra = size = 0;
783 	hv_rv = hv_niu_vrrx_logical_page_info(dc->cookie,
784 	    (uint64_t)channel, (uint64_t)1, &ra, &size);
785 
786 	NXGE_DEBUG_MSG((nxge, HIO_CTL,
787 	    "==> nxge_rdc_lp_conf: channel %d "
788 	    "(page 1 cntl buf) hv_rv 0x%llx "
789 	    "set ioaddr_pp $%p set size 0x%llx "
790 	    "get ra ioaddr_pp $%p get size 0x%llx ",
791 	    channel, hv_rv, ring->hv_rx_cntl_base_ioaddr_pp,
792 	    ring->hv_rx_cntl_ioaddr_size, ra, size));
793 
794 	ring->hv_set = B_TRUE;
795 
796 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_rdc_lp_conf"));
797 
798 	return (NXGE_OK);
799 }
800 #endif	/* defined(NIU_LP_WORKAROUND) */
801 
802 /*
803  * This value is in milliseconds.
804  */
805 #define	NXGE_GUEST_TIMER	500 /* 1/2 second, for now */
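/*
 * For example, with NXGE_GUEST_TIMER at 500, the timeout(9F) calls
 * below pass drv_usectohz(1000 * NXGE_GUEST_TIMER), i.e. the number
 * of clock ticks in half a second.
 */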
806 
807 /*
808  * nxge_hio_start_timer
809  *
810  *	Start the timer which checks for Tx hangs.
811  *
812  * Arguments:
813  * 	nxge
814  *
815  * Notes:
816  *	This function is called from nxge_attach().
817  *
818  *	This function kicks off the guest domain equivalent of
819  *	nxge_check_hw_state().  It is called only once, from attach.
820  *
821  * Context:
822  *	Guest domain
823  */
824 void
825 nxge_hio_start_timer(
826 	nxge_t *nxge)
827 {
828 	nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
829 	nxge_hio_vr_t *vr;
830 	int region;
831 
832 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_start_timer"));
833 
834 	MUTEX_ENTER(&nhd->lock);
835 
836 	/*
837 	 * Find our VR data structure.  (We are currently assuming
838 	 * one VR per guest domain.  That may change in the future.)
839 	 */
840 	for (region = FUNC0_VIR0; region < NXGE_VR_SR_MAX; region++) {
841 		if (nhd->vr[region].nxge == (uintptr_t)nxge)
842 			break;
843 	}
844 
845 	MUTEX_EXIT(&nhd->lock);
846 
847 	if (region == NXGE_VR_SR_MAX) {
848 		return;
849 	}
850 
851 	vr = (nxge_hio_vr_t *)&nhd->vr[region];
852 
853 	nxge->nxge_timerid = timeout((void(*)(void *))nxge_check_guest_state,
854 	    (void *)vr, drv_usectohz(1000 * NXGE_GUEST_TIMER));
855 
856 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_start_timer"));
857 }
858 
859 /*
860  * nxge_check_guest_state
861  *
862  *	Essentially, check for Tx hangs.  In the future, if we are
863  *	polling the hardware, we may do so here.
864  *
865  * Arguments:
866  * 	vr	The virtualization region (VR) data structure.
867  *
868  * Notes:
869  *	This function is the guest domain equivalent of
870  *	nxge_check_hw_state().  Since we have no hardware to
871  * 	check, we simply call nxge_check_tx_hang().
872  *
873  * Context:
874  *	Guest domain
875  */
876 void
877 nxge_check_guest_state(
878 	nxge_hio_vr_t *vr)
879 {
880 	nxge_t *nxge = (nxge_t *)vr->nxge;
881 
882 	NXGE_DEBUG_MSG((nxge, SYSERR_CTL, "==> nxge_check_guest_state"));
883 
884 	MUTEX_ENTER(nxge->genlock);
885 	nxge->nxge_timerid = 0;
886 
887 	if (nxge->nxge_mac_state == NXGE_MAC_STARTED) {
888 		nxge_check_tx_hang(nxge);
889 
890 		nxge->nxge_timerid = timeout((void(*)(void *))
891 		    nxge_check_guest_state, (caddr_t)vr,
892 		    drv_usectohz(1000 * NXGE_GUEST_TIMER));
893 	}
894 
895 nxge_check_guest_state_exit:
896 	MUTEX_EXIT(nxge->genlock);
897 	NXGE_DEBUG_MSG((nxge, SYSERR_CTL, "<== nxge_check_guest_state"));
898 }
899 
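/*
 * nxge_hio_rdc_intr_arm
 *
 *	Arm or disarm the logical device groups (LDGs) servicing the
 *	RDCs owned by this guest.
 *
 * Arguments:
 * 	nxge
 * 	arm	B_TRUE to arm, B_FALSE to disarm.
 *
 * Notes:
 *	For example, the guest's start path might arm all of its RDCs
 *	with nxge_hio_rdc_intr_arm(nxge, B_TRUE).
 *
 * Context:
 *	Guest domain
 */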
900 nxge_status_t
901 nxge_hio_rdc_intr_arm(p_nxge_t nxge, boolean_t arm)
902 {
903 	nxge_grp_t	*group;
904 	uint32_t	channel;
905 	nxge_hio_dc_t	*dc;
906 	nxge_ldg_t	*ldgp;
907 
908 	/*
909 	 * Validate the state of the guest interface before
910 	 * proceeding.
911 	 */
912 	if (!isLDOMguest(nxge))
913 		return (NXGE_ERROR);
914 	if (nxge->nxge_mac_state != NXGE_MAC_STARTED)
915 		return (NXGE_ERROR);
916 
917 	/*
918 	 * In a guest domain, we only ever deal with
919 	 * group 0 for an instance of nxge.
920 	 */
921 	group = nxge->rx_set.group[0];
922 
923 	/*
924 	 * Arm or disarm the RDCs for the group.
925 	 */
926 	for (channel = 0; channel < NXGE_MAX_RDCS; channel++) {
927 		if ((1 << channel) & group->map) {
928 			/*
929 			 * Get the RDC.
930 			 */
931 			dc = nxge_grp_dc_find(nxge, VP_BOUND_RX, channel);
932 			if (dc == NULL)
933 				return (NXGE_ERROR);
934 
935 			/*
936 			 * Get the RDC's ldg group.
937 			 */
938 			ldgp = &nxge->ldgvp->ldgp[dc->ldg.vector];
939 			if (ldgp == NULL)
940 				return (NXGE_ERROR);
941 
942 			/*
943 			 * Set the state of the group.
944 			 */
945 			ldgp->arm = arm;
946 
947 			nxge_hio_ldgimgn(nxge, ldgp);
948 		}
949 	}
950 
951 	return (NXGE_OK);
952 }
953 
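/*
 * nxge_hio_rdc_enable
 *
 *	Enable every RDC owned by this guest and clear its RBR empty bit.
 *
 * Arguments:
 * 	nxge
 *
 * Notes:
 *
 * Context:
 *	Guest domain
 */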
954 nxge_status_t
955 nxge_hio_rdc_enable(p_nxge_t nxge)
956 {
957 	nxge_grp_t	*group;
958 	npi_handle_t	handle;
959 	uint32_t	channel;
960 	npi_status_t	rval;
961 
962 	/*
963 	 * Validate the state of the guest interface before
964 	 * proceeding.
965 	 */
966 	if (!isLDOMguest(nxge))
967 		return (NXGE_ERROR);
968 	if (nxge->nxge_mac_state != NXGE_MAC_STARTED)
969 		return (NXGE_ERROR);
970 
971 	/*
972 	 * In a guest domain, we only ever deal with
973 	 * group 0 for an instance of nxge.
974 	 */
975 	group = nxge->rx_set.group[0];
976 
977 	/*
978 	 * Get the PIO handle.
979 	 */
980 	handle = NXGE_DEV_NPI_HANDLE(nxge);
981 
982 	for (channel = 0; channel < NXGE_MAX_RDCS; channel++) {
983 		/*
984 		 * If this channel is in the map, then enable
985 		 * it.
986 		 */
987 		if ((1 << channel) & group->map) {
988 			/*
989 			 * Enable the RDC and clear the empty bit.
990 			 */
991 			rval = npi_rxdma_cfg_rdc_enable(handle, channel);
992 			if (rval != NPI_SUCCESS)
993 				return (NXGE_ERROR);
994 
995 			(void) npi_rxdma_channel_rbr_empty_clear(handle,
996 			    channel);
997 		}
998 	}
999 
1000 	return (NXGE_OK);
1001 }
1002 #endif	/* defined(sun4v) */
1003