/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * nxge_hio_guest.c
 *
 * This file manages the virtualization resources for a guest domain.
 *
 */

#include <sys/nxge/nxge_impl.h>
#include <sys/nxge/nxge_fzc.h>
#include <sys/nxge/nxge_rxdma.h>
#include <sys/nxge/nxge_txdma.h>

#include <sys/nxge/nxge_hio.h>

/*
 * nxge_hio_unregister
 *
 *	Unregister with the VNET module.
 *
 * Arguments:
 * 	nxge
 *
 * Notes:
 *	We must uninitialize all DMA channels associated with the VR, too.
 *
 *	We're assuming that the channels will be disabled & unassigned
 *	in the service domain, after we're done here.
 *
 * Context:
 *	Guest domain
 */
void
nxge_hio_unregister(
	nxge_t *nxge)
{
	nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;

	if (nhd == NULL) {
		return;
	}

#if defined(sun4v)
	/* Unregister with vNet. */
	if (nhd->hio.vio.unregister) {
		if (nxge->hio_vr)
			(*nhd->hio.vio.unregister)(nxge->hio_vr->vhp);
	}
#endif
}

/*
 * nxge_guest_regs_map
 *
 *	Map in a guest domain's register set(s).
 *
 * Arguments:
 * 	nxge
 *
 * Notes:
 *	Note that we set <is_vraddr> to TRUE.
 *
 * Context:
 *	Guest domain
 */
static ddi_device_acc_attr_t nxge_guest_register_access_attributes = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC,
};
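
/*
 * A note on the access attributes above: DDI_STRUCTURE_LE_ACC declares
 * that the device's registers are little-endian (the DDI framework
 * byte-swaps as needed on a big-endian host), and DDI_STRICTORDER_ACC
 * requires that register accesses reach the device in program order,
 * without reordering, caching, or merging.
 */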

int
nxge_guest_regs_map(
	nxge_t *nxge)
{
	dev_regs_t	*regs;
	off_t		regsize;
	int rv;

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_guest_regs_map"));

	/* So we can allocate properly-aligned memory. */
	nxge->niu_type = N2_NIU; /* Version 1.0 only */
	nxge->function_num = nxge->instance; /* HIOXXX Looking for ideas. */

	nxge->dev_regs = KMEM_ZALLOC(sizeof (dev_regs_t), KM_SLEEP);
	regs = nxge->dev_regs;

	if ((rv = ddi_dev_regsize(nxge->dip, 0, &regsize)) != DDI_SUCCESS) {
		NXGE_ERROR_MSG((nxge, HIO_CTL, "ddi_dev_regsize() failed"));
		/* Don't leak the dev_regs allocation on a failed attach. */
		kmem_free(nxge->dev_regs, sizeof (dev_regs_t));
		nxge->dev_regs = NULL;
		return (NXGE_ERROR);
	}

	rv = ddi_regs_map_setup(nxge->dip, 0, (caddr_t *)&regs->nxge_regp, 0, 0,
	    &nxge_guest_register_access_attributes, &regs->nxge_regh);

	if (rv != DDI_SUCCESS) {
		NXGE_ERROR_MSG((nxge, HIO_CTL, "ddi_regs_map_setup() failed"));
		kmem_free(nxge->dev_regs, sizeof (dev_regs_t));
		nxge->dev_regs = NULL;
		return (NXGE_ERROR);
	}

	nxge->npi_handle.regh = regs->nxge_regh;
	nxge->npi_handle.regp = (npi_reg_ptr_t)regs->nxge_regp;
	nxge->npi_handle.is_vraddr = B_TRUE;
	nxge->npi_handle.function.instance = nxge->instance;
	nxge->npi_handle.function.function = nxge->function_num;
	nxge->npi_handle.nxgep = (void *)nxge;

	/* NPI_REG_ADD_HANDLE_SET() */
	nxge->npi_reg_handle.regh = regs->nxge_regh;
	nxge->npi_reg_handle.regp = (npi_reg_ptr_t)regs->nxge_regp;
	nxge->npi_reg_handle.is_vraddr = B_TRUE;
	nxge->npi_reg_handle.function.instance = nxge->instance;
	nxge->npi_reg_handle.function.function = nxge->function_num;
	nxge->npi_reg_handle.nxgep = (void *)nxge;

	/* NPI_VREG_ADD_HANDLE_SET() */
	nxge->npi_vreg_handle.regh = regs->nxge_regh;
	nxge->npi_vreg_handle.regp = (npi_reg_ptr_t)regs->nxge_regp;
	nxge->npi_vreg_handle.is_vraddr = B_TRUE;
	nxge->npi_vreg_handle.function.instance = nxge->instance;
	nxge->npi_vreg_handle.function.function = nxge->function_num;
	nxge->npi_vreg_handle.nxgep = (void *)nxge;

	regs->nxge_vir_regp = regs->nxge_regp;
	regs->nxge_vir_regh = regs->nxge_regh;

	/*
	 * We do NOT set the PCI, MSI-X, 2nd Virtualization,
	 * or FCODE reg variables.
	 */

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_guest_regs_map"));

	return (NXGE_OK);
}

void
nxge_guest_regs_map_free(
	nxge_t *nxge)
{
	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_guest_regs_map_free"));

	if (nxge->dev_regs) {
		if (nxge->dev_regs->nxge_regh) {
			NXGE_DEBUG_MSG((nxge, DDI_CTL,
			    "==> nxge_unmap_regs: device registers"));
			ddi_regs_map_free(&nxge->dev_regs->nxge_regh);
			nxge->dev_regs->nxge_regh = NULL;
		}
		kmem_free(nxge->dev_regs, sizeof (dev_regs_t));
		nxge->dev_regs = NULL;
	}

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_guest_regs_map_free"));
}

#if defined(sun4v)

/*
 * -------------------------------------------------------------
 * Local prototypes
 * -------------------------------------------------------------
 */
static nxge_hio_dc_t *nxge_guest_dc_alloc(
	nxge_t *, nxge_hio_vr_t *, nxge_grp_type_t);

static void res_map_parse(nxge_t *, nxge_grp_type_t, uint64_t);
static void nxge_check_guest_state(nxge_hio_vr_t *);

/*
 * nxge_hio_vr_add
 *
 *	If we have been given a virtualization region (VR),
 *	then initialize it.
 *
 * Arguments:
 * 	nxge
 *
 * Notes:
 *
 * Context:
 *	Guest domain
 */
/* ARGSUSED */

int
nxge_hio_vr_add(nxge_t *nxge)
{
	extern mac_callbacks_t nxge_m_callbacks;

	nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
	nxge_hio_vr_t *vr;
	nxge_hio_dc_t *dc;

	int *reg_val;
	uint_t reg_len;
	uint8_t vr_index;

	nxhv_vr_fp_t *fp;
	uint64_t vr_address, vr_size;
	uint32_t cookie;

	nxhv_dc_fp_t *tx, *rx;
	uint64_t tx_map, rx_map;

	uint64_t hv_rv;

	/* Variables needed to register with vnet. */
	mac_register_t *mac_info;
	ether_addr_t mac_addr;
	nx_vio_fp_t *vio;

	int i;

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_vr_add"));

	/*
	 * Get our HV cookie.
	 */
	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, nxge->dip,
	    0, "reg", &reg_val, &reg_len) != DDI_PROP_SUCCESS) {
		NXGE_DEBUG_MSG((nxge, VPD_CTL, "`reg' property not found"));
		return (NXGE_ERROR);
	}

	cookie = (uint32_t)reg_val[0];
	ddi_prop_free(reg_val);

	fp = &nhd->hio.vr;
	hv_rv = (*fp->getinfo)(cookie, &vr_address, &vr_size);
	if (hv_rv != 0) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
		    "vr->getinfo() failed"));
		return (NXGE_ERROR);
	}

	/*
	 * In the guest domain, we can use any VR data structure
	 * we want, because we're not supposed to know which VR
	 * the service domain has allocated to us.
	 *
	 * In the current version, the least significant nybble of
	 * the cookie is the VR region, but that could change
	 * very easily.
	 *
	 * In the future, a guest may have more than one VR allocated
	 * to it, which is why we go through this exercise.
	 */
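	/*
	 * For example (illustrative only): a cookie of 0x123 would
	 * currently correspond to VR region 3 (the low nybble).  We
	 * ignore that here and simply claim the first free slot below.
	 */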
	MUTEX_ENTER(&nhd->lock);
	for (vr_index = 0; vr_index < FUNC_VIR_MAX; vr_index++) {
		if (nhd->vr[vr_index].nxge == 0) {
			nhd->vr[vr_index].nxge = (uintptr_t)nxge;
			break;
		}
	}
	MUTEX_EXIT(&nhd->lock);

	if (vr_index == FUNC_VIR_MAX) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_vr_add "
		    "no VRs available"));
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
		    "nxge_hio_vr_add(%d): cookie(0x%x)\n",
		    nxge->instance, cookie));
		return (NXGE_ERROR);
	}

	vr = &nhd->vr[vr_index];

	vr->nxge = (uintptr_t)nxge;
	vr->cookie = (uint32_t)cookie;
	vr->address = vr_address;
	vr->size = vr_size;
	vr->region = vr_index;

	/*
	 * This is redundant data, but useful nonetheless.  It helps
	 * us to keep track of which RDCs & TDCs belong to us.
	 */
	if (nxge->tx_set.lg.count == 0)
		(void) nxge_grp_add(nxge, NXGE_TRANSMIT_GROUP);
	if (nxge->rx_set.lg.count == 0)
		(void) nxge_grp_add(nxge, NXGE_RECEIVE_GROUP);

	/*
	 * See nxge_intr.c.
	 */
	if (nxge_hio_intr_init(nxge) != NXGE_OK) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
		    "nxge_hio_intr_init() failed"));
		return (NXGE_ERROR);
	}

	/*
	 * Now we find out which RDCs & TDCs have been allocated to us.
	 */
	tx = &nhd->hio.tx;
	if (tx->get_map) {
		/*
		 * The map we get back is a bitmap of the
		 * virtual Tx DMA channels we own -
		 * they are NOT real channel numbers.
		 */
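		/*
		 * For example, a tx_map of 0x3 would mean we own
		 * virtual channels 0 and 1 of this VR; only the
		 * service domain knows which physical TDCs those
		 * correspond to.
		 */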
		hv_rv = (*tx->get_map)(vr->cookie, &tx_map);
		if (hv_rv != 0) {
			NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
			    "tx->get_map() failed"));
			return (NXGE_ERROR);
		}
		res_map_parse(nxge, NXGE_TRANSMIT_GROUP, tx_map);

		/*
		 * For each channel, mark these two fields
		 * while we have the VR data structure.
		 */
		for (i = 0; i < VP_CHANNEL_MAX; i++) {
			if ((1 << i) & tx_map) {
				dc = nxge_guest_dc_alloc(nxge, vr,
				    NXGE_TRANSMIT_GROUP);
				if (dc == NULL) {
					NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
					    "DC add failed"));
					return (NXGE_ERROR);
				}
				dc->channel = (nxge_channel_t)i;
			}
		}
	}

	rx = &nhd->hio.rx;
	if (rx->get_map) {
		/*
		 * I repeat, the map we get back is a bitmap of
		 * the virtual Rx DMA channels we own -
		 * they are NOT real channel numbers.
		 */
		hv_rv = (*rx->get_map)(vr->cookie, &rx_map);
		if (hv_rv != 0) {
			NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
			    "rx->get_map() failed"));
			return (NXGE_ERROR);
		}
		res_map_parse(nxge, NXGE_RECEIVE_GROUP, rx_map);

		/*
		 * For each channel, mark these two fields
		 * while we have the VR data structure.
		 */
		for (i = 0; i < VP_CHANNEL_MAX; i++) {
			if ((1 << i) & rx_map) {
				dc = nxge_guest_dc_alloc(nxge, vr,
				    NXGE_RECEIVE_GROUP);
				if (dc == NULL) {
					NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
					    "DC add failed"));
					return (NXGE_ERROR);
				}
				dc->channel = (nxge_channel_t)i;
			}
		}
	}

	/*
	 * Register with vnet.
	 */
	if ((mac_info = mac_alloc(MAC_VERSION)) == NULL)
		return (NXGE_ERROR);

	mac_info->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
	mac_info->m_driver = nxge;
	mac_info->m_dip = nxge->dip;
	mac_info->m_src_addr = KMEM_ZALLOC(MAXMACADDRLEN, KM_SLEEP);
	mac_info->m_dst_addr = KMEM_ZALLOC(MAXMACADDRLEN, KM_SLEEP);
	/*
	 * Fill the entire address buffer; `sizeof (MAXMACADDRLEN)'
	 * would only cover the size of the constant itself.
	 */
	(void) memset(mac_info->m_src_addr, 0xff, MAXMACADDRLEN);
	mac_info->m_callbacks = &nxge_m_callbacks;
	mac_info->m_min_sdu = 0;
	mac_info->m_max_sdu = NXGE_MTU_DEFAULT_MAX -
	    sizeof (struct ether_header) - ETHERFCSL - 4;
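	/*
	 * The SDU arithmetic above: start from the largest frame the
	 * driver supports (NXGE_MTU_DEFAULT_MAX) and subtract the
	 * Ethernet header, the FCS, and 4 more bytes (presumably
	 * allowance for a VLAN tag) to get the payload size that we
	 * can offer to vnet.
	 */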

	(void) memset(&mac_addr, 0xff, sizeof (mac_addr));

	/* Register with vio_net. */
	vio = &nhd->hio.vio;
	if ((*vio->__register)(mac_info, VIO_NET_RES_HYBRID,
	    nxge->hio_mac_addr, mac_addr, &vr->vhp, &vio->cb)) {
		NXGE_DEBUG_MSG((nxge, HIO_CTL, "HIO registration() failed"));
		/* Don't leak the MAC registration info on failure. */
		kmem_free(mac_info->m_src_addr, MAXMACADDRLEN);
		kmem_free(mac_info->m_dst_addr, MAXMACADDRLEN);
		mac_free(mac_info);
		return (NXGE_ERROR);
	}

	nxge->hio_vr = vr;	/* For faster lookups. */

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_vr_add"));

	return (NXGE_OK);
}

/*
 * nxge_guest_dc_alloc
 *
 *	Find a free nxge_hio_dc_t data structure.
 *
 * Arguments:
 * 	nxge
 * 	vr	The VR that will own this DMA channel.
 * 	type	TRANSMIT or RECEIVE.
 *
 * Notes:
 *
 * Context:
 *	Guest domain
 */
static nxge_hio_dc_t *
nxge_guest_dc_alloc(
	nxge_t *nxge,
	nxge_hio_vr_t *vr,
	nxge_grp_type_t type)
{
	nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
	nxge_hio_dc_t *dc;
	int limit, i;

	/*
	 * In the guest domain, there may be more than one VR, each
	 * of which will be using the same slots, or virtual channel
	 * numbers.  So the <nhd>'s rdc & tdc tables must be shared.
	 */
	if (type == NXGE_TRANSMIT_GROUP) {
		dc = &nhd->tdc[0];
		limit = NXGE_MAX_TDCS;
	} else {
		dc = &nhd->rdc[0];
		limit = NXGE_MAX_RDCS;
	}

	MUTEX_ENTER(&nhd->lock);
	for (i = 0; i < limit; i++, dc++) {
		if (dc->vr == NULL) {
			dc->vr = vr;
			dc->cookie = vr->cookie;
			MUTEX_EXIT(&nhd->lock);
			return (dc);
		}
	}
	MUTEX_EXIT(&nhd->lock);

	return (NULL);
}

/*
 * res_map_parse
 *
 *	Parse a resource map.  The resources are DMA channels, receive
 *	or transmit, depending on <type>.
 *
 * Arguments:
 * 	nxge
 * 	type	Transmit or receive.
 *	res_map	The resource map to parse.
 *
 * Notes:
 *
 * Context:
 *	Guest domain
 */
static void
res_map_parse(
	nxge_t *nxge,
	nxge_grp_type_t type,
	uint64_t res_map)
{
	uint8_t slots, mask, slot;
	int first, count;

	nxge_hw_pt_cfg_t *hardware;
	nxge_grp_t *group;

	/* Slots are numbered 0 - 7. */
	slots = (uint8_t)(res_map & 0xff);

	/*
	 * Count the number of bits in the bitmap, and remember the
	 * first set bit.  Note that <first> must be updated only when
	 * a set bit is found; otherwise it would drift past the first
	 * slot whenever subsequent slots are unused.
	 */
	first = 0;
	for (slot = 0, count = 0, mask = 1; slot < 8; slot++) {
		if (slots & mask) {
			count++;
			if (count == 1)
				first = slot;
		}
		mask <<= 1;
	}
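	/*
	 * Worked example: a res_map of 0x0c sets slots 2 and 3, so
	 * the loop above leaves first = 2 and count = 2.
	 */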

	hardware = &nxge->pt_config.hw_config;
	group = (type == NXGE_TRANSMIT_GROUP) ?
	    nxge->tx_set.group[0] : nxge->rx_set.group[0];

	/*
	 * A guest domain has one Tx & one Rx group, so far.
	 * In the future, there may be more than one.
	 */
	if (type == NXGE_TRANSMIT_GROUP) {
		nxge_dma_pt_cfg_t *port = &nxge->pt_config;

		hardware->tdc.start = first;
		hardware->tdc.count = count;
		hardware->tdc.owned = count;

		group->map = slots;

		/*
		 * Pointless in a guest domain.  This bitmap is used
		 * in only one place: nxge_txc_init(),
		 * a service-domain-only function.
		 */
		port->tx_dma_map = slots;

		nxge->tx_set.owned.map |= slots;
	} else {
		nxge_rdc_grp_t *rdc_grp = &nxge->pt_config.rdc_grps[0];

		hardware->start_rdc = first;
		hardware->max_rdcs = count;

		rdc_grp->start_rdc = (uint8_t)first;
		rdc_grp->max_rdcs = (uint8_t)count;
		rdc_grp->def_rdc = (uint8_t)first;

		rdc_grp->map = slots;
		group->map = slots;

		nxge->rx_set.owned.map |= slots;
	}
}

/*
 * nxge_hio_vr_release
 *
 *	Release a virtualization region (VR).
 *
 * Arguments:
 * 	nxge
 *
 * Notes:
 *	We must uninitialize all DMA channels associated with the VR, too.
 *
 *	The service domain will re-initialize these DMA channels later.
 *	See nxge_hio.c:nxge_hio_share_free() for details.
 *
 * Context:
 *	Guest domain
 */
int
nxge_hio_vr_release(nxge_t *nxge)
{
	nxge_hio_data_t	*nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
	int		vr_index;

	NXGE_DEBUG_MSG((nxge, MEM2_CTL, "==> nxge_hio_vr_release"));

	if (nxge->hio_vr == NULL) {
		return (NXGE_OK);
	}

	/*
	 * Uninitialize interrupts.
	 */
	nxge_hio_intr_uninit(nxge);

	/*
	 * Uninitialize the receive DMA channels.
	 */
	nxge_uninit_rxdma_channels(nxge);

	/*
	 * Uninitialize the transmit DMA channels.
	 */
	nxge_uninit_txdma_channels(nxge);

	/*
	 * Remove both groups. Assumption: only two groups!
	 */
	if (nxge->rx_set.group[0] != NULL)
		nxge_grp_remove(nxge, nxge->rx_set.group[0]);
	if (nxge->tx_set.group[0] != NULL)
		nxge_grp_remove(nxge, nxge->tx_set.group[0]);

	NXGE_DEBUG_MSG((nxge, MEM2_CTL, "<== nxge_hio_vr_release"));

	/*
	 * Clean up.
	 */
	MUTEX_ENTER(&nhd->lock);
	for (vr_index = 0; vr_index < FUNC_VIR_MAX; vr_index++) {
		if (nhd->vr[vr_index].nxge == (uintptr_t)nxge) {
			nhd->vr[vr_index].nxge = 0;
			break;
		}
	}
	MUTEX_EXIT(&nhd->lock);

	return (NXGE_OK);
}

#if defined(NIU_LP_WORKAROUND)
/*
 * nxge_tdc_lp_conf
 *
 *	Configure the logical pages for a TDC.
 *
 * Arguments:
 * 	nxge
 * 	channel	The TDC to configure.
 *
 * Notes:
 *
 * Context:
 *	Guest domain
 */
nxge_status_t
nxge_tdc_lp_conf(
	p_nxge_t nxge,
	int channel)
{
	nxge_hio_dc_t		*dc;
	nxge_dma_common_t	*data;
	nxge_dma_common_t	*control;
	tx_ring_t		*ring;

	uint64_t		hv_rv;
	uint64_t		ra, size;

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_tdc_lp_conf"));

	ring = nxge->tx_rings->rings[channel];

	if (ring->hv_set) {
		/* This shouldn't happen. */
		return (NXGE_OK);
	}

	if (!(dc = nxge_grp_dc_find(nxge, VP_BOUND_TX, channel)))
		return (NXGE_ERROR);

	/*
	 * Initialize logical page 0 for data buffers.
	 *
	 * <orig_ioaddr_pp> & <orig_alength> are initialized in
	 * nxge_main.c:nxge_dma_mem_alloc().
	 */
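	/*
	 * A sketch of the mechanism: each logical page registers an
	 * I/O address range with the hypervisor, which validates the
	 * channel's DMA addresses against it.  Page 0 covers the data
	 * buffers; page 1, below, covers the control buffers.
	 */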
	data = nxge->tx_buf_pool_p->dma_buf_pool_p[channel];
	ring->hv_tx_buf_base_ioaddr_pp = (uint64_t)data->orig_ioaddr_pp;
	ring->hv_tx_buf_ioaddr_size = (uint64_t)data->orig_alength;

	hv_rv = hv_niu_vrtx_logical_page_conf(dc->cookie,
	    (uint64_t)channel, 0,
	    ring->hv_tx_buf_base_ioaddr_pp,
	    ring->hv_tx_buf_ioaddr_size);

	if (hv_rv != 0) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
		    "<== nxge_tdc_lp_conf: channel %d "
		    "(page 0 data buf) hv_rv 0x%llx "
		    "ioaddr_pp $%p size 0x%llx ",
		    channel, hv_rv,
		    ring->hv_tx_buf_base_ioaddr_pp,
		    ring->hv_tx_buf_ioaddr_size));
		return (NXGE_ERROR | hv_rv);
	}

	ra = size = 0;
	hv_rv = hv_niu_vrtx_logical_page_info(dc->cookie,
	    (uint64_t)channel, 0, &ra, &size);

	NXGE_DEBUG_MSG((nxge, HIO_CTL,
	    "==> nxge_tdc_lp_conf: channel %d "
	    "(page 0 data buf) hv_rv 0x%llx "
	    "set ioaddr_pp $%p set size 0x%llx "
	    "get ra ioaddr_pp $%p get size 0x%llx ",
	    channel, hv_rv, ring->hv_tx_buf_base_ioaddr_pp,
	    ring->hv_tx_buf_ioaddr_size, ra, size));

	/*
	 * Initialize logical page 1 for control buffers.
	 */
	control = nxge->tx_cntl_pool_p->dma_buf_pool_p[channel];
	ring->hv_tx_cntl_base_ioaddr_pp = (uint64_t)control->orig_ioaddr_pp;
	ring->hv_tx_cntl_ioaddr_size = (uint64_t)control->orig_alength;

	hv_rv = hv_niu_vrtx_logical_page_conf(dc->cookie,
	    (uint64_t)channel, (uint64_t)1,
	    ring->hv_tx_cntl_base_ioaddr_pp,
	    ring->hv_tx_cntl_ioaddr_size);

	if (hv_rv != 0) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
		    "<== nxge_tdc_lp_conf: channel %d "
		    "(page 1 cntl buf) hv_rv 0x%llx "
		    "ioaddr_pp $%p size 0x%llx ",
		    channel, hv_rv,
		    ring->hv_tx_cntl_base_ioaddr_pp,
		    ring->hv_tx_cntl_ioaddr_size));
		return (NXGE_ERROR | hv_rv);
	}

	ra = size = 0;
	hv_rv = hv_niu_vrtx_logical_page_info(dc->cookie,
	    (uint64_t)channel, (uint64_t)1, &ra, &size);

	NXGE_DEBUG_MSG((nxge, HIO_CTL,
	    "==> nxge_tdc_lp_conf: channel %d "
	    "(page 1 cntl buf) hv_rv 0x%llx "
	    "set ioaddr_pp $%p set size 0x%llx "
	    "get ra ioaddr_pp $%p get size 0x%llx ",
	    channel, hv_rv, ring->hv_tx_cntl_base_ioaddr_pp,
	    ring->hv_tx_cntl_ioaddr_size, ra, size));

	ring->hv_set = B_TRUE;

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_tdc_lp_conf"));

	return (NXGE_OK);
}

/*
 * nxge_rdc_lp_conf
 *
 *	Configure an RDC's logical pages.
 *
 * Arguments:
 * 	nxge
 * 	channel	The RDC to configure.
 *
 * Notes:
 *
 * Context:
 *	Guest domain
 */
nxge_status_t
nxge_rdc_lp_conf(
	p_nxge_t nxge,
	int channel)
{
	nxge_hio_dc_t		*dc;
	nxge_dma_common_t	*data;
	nxge_dma_common_t	*control;
	rx_rbr_ring_t		*ring;

	uint64_t		hv_rv;
	uint64_t		ra, size;

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_rdc_lp_conf"));

	ring = nxge->rx_rbr_rings->rbr_rings[channel];

	if (ring->hv_set) {
		return (NXGE_OK);
	}

	if (!(dc = nxge_grp_dc_find(nxge, VP_BOUND_RX, channel)))
		return (NXGE_ERROR);

	/*
	 * Initialize logical page 0 for data buffers.
	 *
	 * <orig_ioaddr_pp> & <orig_alength> are initialized in
	 * nxge_main.c:nxge_dma_mem_alloc().
	 */
	data = nxge->rx_buf_pool_p->dma_buf_pool_p[channel];
	ring->hv_rx_buf_base_ioaddr_pp = (uint64_t)data->orig_ioaddr_pp;
	ring->hv_rx_buf_ioaddr_size = (uint64_t)data->orig_alength;

	hv_rv = hv_niu_vrrx_logical_page_conf(dc->cookie,
	    (uint64_t)channel, 0,
	    ring->hv_rx_buf_base_ioaddr_pp,
	    ring->hv_rx_buf_ioaddr_size);

	if (hv_rv != 0) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
		    "<== nxge_rdc_lp_conf: channel %d "
		    "(page 0 data buf) hv_rv 0x%llx "
		    "ioaddr_pp $%p size 0x%llx ",
		    channel, hv_rv,
		    ring->hv_rx_buf_base_ioaddr_pp,
		    ring->hv_rx_buf_ioaddr_size));
		return (NXGE_ERROR | hv_rv);
	}

	ra = size = 0;
	hv_rv = hv_niu_vrrx_logical_page_info(dc->cookie,
	    (uint64_t)channel, 0, &ra, &size);

	NXGE_DEBUG_MSG((nxge, HIO_CTL,
	    "==> nxge_rdc_lp_conf: channel %d "
	    "(page 0 data buf) hv_rv 0x%llx "
	    "set ioaddr_pp $%p set size 0x%llx "
	    "get ra ioaddr_pp $%p get size 0x%llx ",
	    channel, hv_rv, ring->hv_rx_buf_base_ioaddr_pp,
	    ring->hv_rx_buf_ioaddr_size, ra, size));

	/*
	 * Initialize logical page 1 for control buffers.
	 */
	control = nxge->rx_cntl_pool_p->dma_buf_pool_p[channel];
	ring->hv_rx_cntl_base_ioaddr_pp = (uint64_t)control->orig_ioaddr_pp;
	ring->hv_rx_cntl_ioaddr_size = (uint64_t)control->orig_alength;

	hv_rv = hv_niu_vrrx_logical_page_conf(dc->cookie,
	    (uint64_t)channel, (uint64_t)1,
	    ring->hv_rx_cntl_base_ioaddr_pp,
	    ring->hv_rx_cntl_ioaddr_size);

	if (hv_rv != 0) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
		    "<== nxge_rdc_lp_conf: channel %d "
		    "(page 1 cntl buf) hv_rv 0x%llx "
		    "ioaddr_pp $%p size 0x%llx ",
		    channel, hv_rv,
		    ring->hv_rx_cntl_base_ioaddr_pp,
		    ring->hv_rx_cntl_ioaddr_size));
		return (NXGE_ERROR | hv_rv);
	}

	ra = size = 0;
	hv_rv = hv_niu_vrrx_logical_page_info(dc->cookie,
	    (uint64_t)channel, (uint64_t)1, &ra, &size);

	NXGE_DEBUG_MSG((nxge, HIO_CTL,
	    "==> nxge_rdc_lp_conf: channel %d "
	    "(page 1 cntl buf) hv_rv 0x%llx "
	    "set ioaddr_pp $%p set size 0x%llx "
	    "get ra ioaddr_pp $%p get size 0x%llx ",
	    channel, hv_rv, ring->hv_rx_cntl_base_ioaddr_pp,
	    ring->hv_rx_cntl_ioaddr_size, ra, size));

	ring->hv_set = B_TRUE;

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_rdc_lp_conf"));

	return (NXGE_OK);
}
#endif	/* defined(NIU_LP_WORKAROUND) */

/*
 * This value is in milliseconds.
 */
#define	NXGE_GUEST_TIMER	500 /* 1/2 second, for now */
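
/*
 * For reference: the timeout(9F) calls below convert this value to
 * clock ticks with drv_usectohz(1000 * NXGE_GUEST_TIMER), i.e.
 * 500000 microseconds, or 50 ticks at the default 100Hz clock rate.
 */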

/*
 * nxge_hio_start_timer
 *
 *	Start the timer which checks for Tx hangs.
 *
 * Arguments:
 * 	nxge
 *
 * Notes:
 *	This function is called from nxge_attach().
 *
 *	This function kicks off the guest domain equivalent of
 *	nxge_check_hw_state().  It is called only once, from attach.
 *
 * Context:
 *	Guest domain
 */
void
nxge_hio_start_timer(
	nxge_t *nxge)
{
	nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
	nxge_hio_vr_t *vr;
	int region;

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_start_timer"));

	MUTEX_ENTER(&nhd->lock);

	/*
	 * Find our VR data structure.  (We are currently assuming
	 * one VR per guest domain.  That may change in the future.)
	 */
	for (region = FUNC0_VIR0; region < NXGE_VR_SR_MAX; region++) {
		if (nhd->vr[region].nxge == (uintptr_t)nxge)
			break;
	}

	MUTEX_EXIT(&nhd->lock);

	if (region == NXGE_VR_SR_MAX) {
		return;
	}

	vr = (nxge_hio_vr_t *)&nhd->vr[region];

	nxge->nxge_timerid = timeout((void(*)(void *))nxge_check_guest_state,
	    (void *)vr, drv_usectohz(1000 * NXGE_GUEST_TIMER));

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_start_timer"));
}

/*
 * nxge_check_guest_state
 *
 *	Essentially, check for Tx hangs.  In the future, if we are
 *	polling the hardware, we may do so here.
 *
 * Arguments:
 * 	vr	The virtualization region (VR) data structure.
 *
 * Notes:
 *	This function is the guest domain equivalent of
 *	nxge_check_hw_state().  Since we have no hardware to
 * 	check, we simply call nxge_check_tx_hang().
 *
 * Context:
 *	Guest domain
 */
static void
nxge_check_guest_state(
	nxge_hio_vr_t *vr)
{
	nxge_t *nxge = (nxge_t *)vr->nxge;

	NXGE_DEBUG_MSG((nxge, SYSERR_CTL, "==> nxge_check_guest_state"));

	MUTEX_ENTER(nxge->genlock);

	nxge->nxge_timerid = 0;

	if (nxge->nxge_mac_state == NXGE_MAC_STARTED) {
		nxge_check_tx_hang(nxge);

		nxge->nxge_timerid = timeout((void(*)(void *))
		    nxge_check_guest_state, (void *)vr,
		    drv_usectohz(1000 * NXGE_GUEST_TIMER));
	}

	MUTEX_EXIT(nxge->genlock);
	NXGE_DEBUG_MSG((nxge, SYSERR_CTL, "<== nxge_check_guest_state"));
}

#endif	/* defined(sun4v) */