/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
 */

#ifndef	_SYS_ROOTNEX_H
#define	_SYS_ROOTNEX_H

/*
 * x86 root nexus implementation specific state
 */

#include <sys/types.h>
#include <sys/conf.h>
#include <sys/modctl.h>
#include <sys/sunddi.h>
#include <sys/iommulib.h>
#include <sys/sdt.h>

#ifdef	__cplusplus
extern "C" {
#endif


/* size of buffer used for ctlop reportdev */
#define	REPORTDEV_BUFSIZE	1024

/* min and max interrupt vectors */
#define	VEC_MIN			1
#define	VEC_MAX			255

/* atomic increment/decrement to keep track of outstanding binds, etc */
#ifdef DEBUG
#define	ROOTNEX_DPROF_INC(addr)		atomic_inc_64(addr)
#define	ROOTNEX_DPROF_DEC(addr)		atomic_dec_64(addr)
#define	ROOTNEX_DPROBE1(name, type1, arg1) \
	DTRACE_PROBE1(name, type1, arg1)
#define	ROOTNEX_DPROBE2(name, type1, arg1, type2, arg2) \
	DTRACE_PROBE2(name, type1, arg1, type2, arg2)
#define	ROOTNEX_DPROBE3(name, type1, arg1, type2, arg2, type3, arg3) \
	DTRACE_PROBE3(name, type1, arg1, type2, arg2, type3, arg3)
#define	ROOTNEX_DPROBE4(name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4) \
	DTRACE_PROBE4(name, type1, arg1, type2, arg2, type3, arg3, type4, arg4)
#else
#define	ROOTNEX_DPROF_INC(addr)
#define	ROOTNEX_DPROF_DEC(addr)
#define	ROOTNEX_DPROBE1(name, type1, arg1)
#define	ROOTNEX_DPROBE2(name, type1, arg1, type2, arg2)
#define	ROOTNEX_DPROBE3(name, type1, arg1, type2, arg2, type3, arg3)
#define	ROOTNEX_DPROBE4(name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4)
#endif

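/*
 * Illustrative sketch (not compiled): how rootnex.c is expected to use the
 * probe macros above. In DEBUG kernels they expand to DTrace SDT probes;
 * otherwise they compile away entirely. The function, probe name, and
 * arguments below are made up for illustration.
 */
#if 0
static void
rootnex_example_probe(dev_info_t *rdip, size_t size, uint_t cookie_cnt)
{
	/* fires only in DEBUG kernels; probe name is hypothetical */
	ROOTNEX_DPROBE3(rootnex__example__bind, dev_info_t *, rdip,
	    size_t, size, uint_t, cookie_cnt);
}
#endif
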
/* set in dmac_type to signify that this cookie uses the copy buffer */
#define	ROOTNEX_USES_COPYBUF		0x80000000

/*
 * integer or boolean property name and value. A few static rootnex properties
 * are created during rootnex attach from an array of rootnex_intprop_t.
 */
typedef struct rootnex_intprop_s {
	char	*prop_name;
	int	prop_value;
} rootnex_intprop_t;

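/*
 * Illustrative sketch (not compiled): the kind of static table this is used
 * for and how attach might walk it. The array contents, helper name, and
 * property names here are hypothetical; e_ddi_prop_update_int() is the
 * standard DDI call for creating an integer property.
 */
#if 0
static rootnex_intprop_t rootnex_example_intprp[] = {
	{ "PAGESIZE",		PAGESIZE },
	{ "MMU_PAGESIZE",	MMU_PAGESIZE },
};

static void
rootnex_example_add_props(dev_info_t *dip)
{
	uint_t i;

	for (i = 0;
	    i < sizeof (rootnex_example_intprp) / sizeof (rootnex_intprop_t);
	    i++) {
		(void) e_ddi_prop_update_int(DDI_DEV_T_NONE, dip,
		    rootnex_example_intprp[i].prop_name,
		    rootnex_example_intprp[i].prop_value);
	}
}
#endif
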
/*
 * sgl related information which is visible to rootnex_get_sgl(). Trying to
 * isolate get_sgl() as much as possible so it can be easily replaced.
 */
typedef struct rootnex_sglinfo_s {
	/*
	 * Used to simplify calculations to get the maximum number
	 * of cookies.
	 */
	boolean_t	si_cancross;

	/*
	 * These are passed into rootnex_get_sgl().
	 *
	 * si_min_addr - the minimum physical address
	 * si_max_addr - the maximum physical address
	 * si_max_cookie_size - the maximum size of a physically contiguous
	 *    piece of memory that we can handle in a sgl.
	 * si_segmask - segment mask to determine if we cross a segment boundary
	 * si_flags - dma_attr_flags
	 * si_max_pages - max number of pages this sgl could occupy (which
	 *    is also the maximum number of cookies we might see).
	 */
	uint64_t	si_min_addr;
	uint64_t	si_max_addr;
	uint64_t	si_max_cookie_size;
	uint64_t	si_segmask;
	uint_t		si_flags;
	uint_t		si_max_pages;

	/*
	 * These are returned by rootnex_get_sgl().
	 *
	 * si_bounce_on_seg - whether we need to use the bounce buffer for
	 *    pages above ddi_dma_seg
	 * si_copybuf_req - amount of copy buffer needed by the buffer.
	 * si_buf_offset - The initial offset into the first page of the buffer.
	 *    It's set in get sgl and used in the bind slow path to help
	 *    calculate the current page index & offset from the current offset
	 *    which is relative to the start of the buffer.
	 * si_asp - address space of buffer passed in.
	 * si_sgl_size - The actual number of cookies in the sgl. This does
	 *    not reflect any sharing that we might do on window boundaries.
	 */
	boolean_t	si_bounce_on_seg;
	size_t		si_copybuf_req;
	off_t		si_buf_offset;
	struct as	*si_asp;
	uint_t		si_sgl_size;
} rootnex_sglinfo_t;

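/*
 * Illustrative sketch (not compiled): how the bind path might seed the input
 * half of rootnex_sglinfo_t from a driver's ddi_dma_attr_t before calling
 * rootnex_get_sgl(). The helper name and the page-count estimate are
 * assumptions for illustration; the ddi_dma_attr_t fields are the standard
 * DDI ones. On return, si_sgl_size holds the number of cookies actually
 * built and si_copybuf_req how much copy buffer the bind will need.
 */
#if 0
static void
rootnex_example_seed_sglinfo(rootnex_sglinfo_t *si, ddi_dma_attr_t *attr,
    size_t size, off_t offset)
{
	si->si_min_addr = attr->dma_attr_addr_lo;
	si->si_max_addr = attr->dma_attr_addr_hi;
	si->si_max_cookie_size = attr->dma_attr_count_max;
	si->si_segmask = attr->dma_attr_seg;
	si->si_flags = attr->dma_attr_flags;

	/* worst case: one cookie per page touched by (offset, size) */
	si->si_max_pages = btopr(offset + size);
}
#endif
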
/*
 * When we have to use the copy buffer, we allocate one of these structures per
 * buffer page to track which pages need the copy buffer, what the kernel
 * virtual address is (which the device can't reach), and what the copy buffer
 * virtual address is (where the device dma's to/from). For 32-bit kernels,
 * since we can't use seg kpm, we also need to keep the page_t around and track
 * whether the page is currently mapped into KVA space; this matters for
 * buffers which don't already have a kva and when we have multiple windows
 * because we used up all our copy buffer space.
 */
typedef struct rootnex_pgmap_s {
	boolean_t	pm_uses_copybuf;
#if !defined(__amd64)
	boolean_t	pm_mapped;
	page_t		*pm_pp;
	caddr_t		pm_vaddr;
#endif
	caddr_t		pm_kaddr;
	caddr_t		pm_cbaddr;
} rootnex_pgmap_t;

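/*
 * Illustrative sketch (not compiled): what a dma_sync of one page that uses
 * the copy buffer boils down to, based on the description above. The helper
 * name is hypothetical; pm_kaddr/pm_cbaddr come from rootnex_pgmap_t and
 * bcopy() is the standard kernel routine.
 */
#if 0
static void
rootnex_example_sync_page(rootnex_pgmap_t *pmap, size_t psize, uint_t flags)
{
	if (!pmap->pm_uses_copybuf)
		return;

	if (flags == DDI_DMA_SYNC_FORDEV) {
		/* caller wrote via the kernel va; stage it for the device */
		bcopy(pmap->pm_kaddr, pmap->pm_cbaddr, psize);
	} else {
		/* device wrote into the copy buffer; copy back for the CPU */
		bcopy(pmap->pm_cbaddr, pmap->pm_kaddr, psize);
	}
}
#endif
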
/*
 * We only need to trim a buffer when we have multiple windows. Each window has
 * trim state. We might have trimmed the end of the previous window, leaving the
 * first cookie of this window trimmed [tr_trim_first] (which basically means we
 * won't start with a new cookie), or we might need to trim the end of the
 * current window [tr_trim_last] (which basically means we won't end with a
 * complete cookie). We keep the same state for the first & last cookie in a
 * window (a window can have one or more cookies). However, when we trim the
 * last cookie, we keep a pointer to the last cookie in the trim state since we
 * only need this info when we trim. The pointer to the first cookie in the
 * window is in the window state since we need to know what the first cookie in
 * the window is in various places.
 *
 * If we do trim a cookie, we save away the physical address and size of the
 * cookie so that we can overwrite the cookie when we switch windows (the
 * space for a cookie which is in two windows is shared between the windows).
 * We keep around the same information for the last page in a window.
 *
 * If we happened to trim on a page that uses the copy buffer, and that page
 * is also in the middle of a window boundary because we have filled up the
 * copy buffer, we need to remember the copy buffer address for both windows
 * since the same page will have different copy buffer addresses in the two
 * windows. We need to do the same for kaddr in the 32-bit kernel since we
 * have a limited kva space which we map to.
 */
typedef struct rootnex_trim_s {
	boolean_t		tr_trim_first;
	boolean_t		tr_trim_last;
	ddi_dma_cookie_t	*tr_last_cookie;
	uint64_t		tr_first_paddr;
	uint64_t		tr_last_paddr;
	size_t			tr_first_size;
	size_t			tr_last_size;

	boolean_t		tr_first_copybuf_win;
	boolean_t		tr_last_copybuf_win;
	uint_t			tr_first_pidx;
	uint_t			tr_last_pidx;
	caddr_t			tr_first_cbaddr;
	caddr_t			tr_last_cbaddr;
#if !defined(__amd64)
	caddr_t			tr_first_kaddr;
	caddr_t			tr_last_kaddr;
#endif
} rootnex_trim_t;

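/*
 * Illustrative sketch (not compiled): when switching to a window whose first
 * cookie was trimmed, the shared cookie is rewritten from the saved trim
 * state as described above. The helper name is hypothetical;
 * dmac_laddress/dmac_size are the standard ddi_dma_cookie_t fields.
 */
#if 0
static void
rootnex_example_rewrite_first_cookie(rootnex_trim_t *trim,
    ddi_dma_cookie_t *first_cookie)
{
	if (trim->tr_trim_first) {
		/* cookie space is shared with the previous window; re-point it */
		first_cookie->dmac_laddress = trim->tr_first_paddr;
		first_cookie->dmac_size = trim->tr_first_size;
	}
}
#endif
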
/*
 * per window state. A bound DMA handle can have multiple windows. Each window
 * has the following state: whether this window needs to sync, the offset into
 * the buffer where the window starts, the size of the window, a pointer to the
 * first cookie in the window, the number of cookies in the window, and the
 * trim state for the window. For the 32-bit kernel, we also track whether we
 * need to remap the copy buffer when we switch to this window.
 */
typedef struct rootnex_window_s {
	boolean_t		wd_dosync;
	uint_t			wd_cookie_cnt;
	off_t			wd_offset;
	size_t			wd_size;
	ddi_dma_cookie_t	*wd_first_cookie;
	rootnex_trim_t		wd_trim;
#if !defined(__amd64)
	boolean_t		wd_remap_copybuf;
#endif
} rootnex_window_t;

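/*
 * Illustrative sketch (not compiled): the driver-visible view of these
 * windows. A leaf driver that got DDI_DMA_PARTIAL_MAP back from its bind
 * walks the windows with the standard DDI calls below; each ddi_dma_getwin()
 * call lands on one rootnex_window_t inside the rootnex. Error handling is
 * omitted for brevity and the function name is hypothetical.
 */
#if 0
static void
example_walk_windows(ddi_dma_handle_t handle)
{
	ddi_dma_cookie_t cookie;
	uint_t nwin, ccount, win, i;
	size_t len;
	off_t off;

	(void) ddi_dma_numwin(handle, &nwin);
	for (win = 0; win < nwin; win++) {
		/* activates window "win" and returns its first cookie */
		(void) ddi_dma_getwin(handle, win, &off, &len, &cookie,
		    &ccount);
		for (i = 1; i < ccount; i++)
			ddi_dma_nextcookie(handle, &cookie);
	}
}
#endif
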
/* per dma handle private state */
typedef struct rootnex_dma_s {
	/*
	 * sgl related state used to build and describe the sgl.
	 *
	 * dp_partial_required - used in the bind slow path to identify if we
	 *    need to do a partial mapping or not.
	 * dp_trim_required - used in the bind slow path to identify if we
	 *    need to trim when switching to a new window. This should only be
	 *    set when partial is set.
	 * dp_granularity_power_2 - set in alloc handle and used in bind slow
	 *    path to determine if we & or % to calculate the trim.
	 * dp_dma - copy of dma "object" passed in during bind
	 * dp_maxxfer - trimmed dma_attr_maxxfer so that it is a whole
	 *    multiple of granularity
	 * dp_sglinfo - See rootnex_sglinfo_t above.
	 */
	boolean_t		dp_partial_required;
	boolean_t		dp_trim_required;
	boolean_t		dp_granularity_power_2;
	uint64_t		dp_maxxfer;

	boolean_t		dp_dvma_used;
	ddi_dma_obj_t		dp_dma;
	ddi_dma_obj_t		dp_dvma;
	rootnex_sglinfo_t	dp_sglinfo;

	/*
	 * Copy buffer related state
	 *
	 * dp_copybuf_size - the actual size of the copy buffer that we are
	 *    using. This can be smaller than the requested amount
	 *    (si_copybuf_req), i.e. bind size > max copy buffer size.
	 * dp_cbaddr - kernel address of copy buffer. Used to determine where
	 *    to copy to/from.
	 * dp_cbsize - the "real" size returned from the copy buffer alloc.
	 *    Set in the copybuf alloc and used to free copybuf.
	 * dp_pgmap - page map used in sync to determine which pages in the
	 *    buffer use the copy buffer and what addresses to use to copy to/
	 *    from.
	 * dp_cb_remaping - whether this bind causes us to have to remap
	 *    the copybuf when switching to new windows. This is only used in
	 *    the 32-bit kernel since we use seg kpm in the 64-bit kernel for
	 *    this case.
	 * dp_kva - kernel heap arena vmem space for mapping to buffers which
	 *    we don't have a kernel VA to bcopy to/from. This is only used in
	 *    the 32-bit kernel since we use seg kpm in the 64-bit kernel for
	 *    this case.
	 */
	size_t			dp_copybuf_size;
	caddr_t			dp_cbaddr;
	size_t			dp_cbsize;
	rootnex_pgmap_t		*dp_pgmap;
#if !defined(__amd64)
	boolean_t		dp_cb_remaping;
	caddr_t			dp_kva;
#endif

	/*
	 * window related state. The window state array pointer may point into
	 * the pre allocated state, or we may have had to allocate the window
	 * array on the fly because it wouldn't fit. If we allocate it, we'll
	 * use dp_need_to_free_window and dp_window_size during cleanup.
	 * dp_current_win keeps track of the current window. dp_max_win is the
	 * maximum number of windows we could have.
	 */
	uint_t			dp_current_win;
	rootnex_window_t	*dp_window;
	boolean_t		dp_need_to_free_window;
	uint_t			dp_window_size;
	uint_t			dp_max_win;

	/* dip of driver which "owns" handle. set to rdip in alloc_handle() */
	dev_info_t		*dp_dip;

	/*
	 * dp_mutex and dp_inuse are only used to see if a driver is trying to
	 * bind to an already bound dma handle. dp_mutex is only used to
	 * protect dp_inuse.
	 */
	kmutex_t		dp_mutex;
	boolean_t		dp_inuse;

	/*
	 * cookie related state. The pointer to the cookies (dp_cookies) may
	 * be a pointer into the pre allocated state, or we may have had to
	 * allocate the cookie array on the fly because it wouldn't fit. If
	 * we allocate it, we'll use dp_need_to_free_cookie and dp_cookie_size
	 * during cleanup. dp_current_cookie is only used in the obsoleted
	 * interfaces to determine when we've used up all the cookies in a
	 * window during nextseg().
	 */
	size_t			dp_cookie_size;
	ddi_dma_cookie_t	*dp_cookies;
	boolean_t		dp_need_to_free_cookie;
	uint_t			dp_current_cookie; /* for obsoleted I/Fs */
	ddi_dma_cookie_t	*dp_saved_cookies;
	boolean_t		dp_need_to_switch_cookies;

	void			*dp_iommu_private;

	/*
	 * pre allocated space for the bind state, allocated during alloc
	 * handle. For a lot of devices, this will save us from having to do
	 * kmem_alloc's during the bind most of the time. kmem_alloc's can be
	 * expensive when the cpu count goes up, since xcalls are expensive
	 * on x86.
	 */
	uchar_t			*dp_prealloc_buffer;

	/*
	 * sleep flags set on bind and unset on unbind
	 */
	int			dp_sleep_flags;
} rootnex_dma_t;

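/*
 * Illustrative sketch (not compiled): the &-versus-% decision that
 * dp_granularity_power_2 drives in the bind slow path, per the comment
 * above. The helper name is hypothetical; it returns how many bytes must be
 * trimmed off a window so its size stays a whole multiple of the DMA
 * granularity.
 */
#if 0
static size_t
rootnex_example_trim_amount(rootnex_dma_t *dma, uint64_t granularity,
    size_t window_size)
{
	if (dma->dp_granularity_power_2) {
		/* power of two: mask off the low bits instead of dividing */
		return (window_size & (granularity - 1));
	}
	return (window_size % granularity);
}
#endif
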
/*
 * profile/performance counters. Most things will be dtrace probes, but there
 * are a couple of things we want to keep track of all the time. We track the
 * total number of active handles and binds (i.e. an alloc without a free or
 * a bind without an unbind) since rootnex attach. We also track the total
 * number of binds which have failed since rootnex attach.
 */
typedef enum {
	ROOTNEX_CNT_ACTIVE_HDLS = 0,
	ROOTNEX_CNT_ACTIVE_BINDS = 1,
	ROOTNEX_CNT_ALLOC_FAIL = 2,
	ROOTNEX_CNT_BIND_FAIL = 3,
	ROOTNEX_CNT_SYNC_FAIL = 4,
	ROOTNEX_CNT_GETWIN_FAIL = 5,

	/* This one must be last */
	ROOTNEX_CNT_LAST
} rootnex_cnt_t;

/*
 * global driver state.
 *   r_dmahdl_cache - dma_handle kmem_cache
 *   r_dvma_call_list_id - ddi_set_callback() id
 *   r_peekpoke_mutex - serialize peeks and pokes.
 *   r_dip - rootnex dip
 *   r_reserved_msg_printed - ctlops reserve message threshold
 *   r_counters - profile/performance counters
 */
typedef struct rootnex_state_s {
	uint_t			r_prealloc_cookies;
	uint_t			r_prealloc_size;
	kmem_cache_t		*r_dmahdl_cache;
	uintptr_t		r_dvma_call_list_id;
	kmutex_t		r_peekpoke_mutex;
	dev_info_t		*r_dip;
	ddi_iblock_cookie_t	r_err_ibc;
	boolean_t		r_reserved_msg_printed;
	uint64_t		r_counters[ROOTNEX_CNT_LAST];
	iommulib_nexhandle_t	r_iommulib_handle;
} rootnex_state_t;

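/*
 * Illustrative sketch (not compiled): the counter discipline described above
 * the rootnex_cnt_t enum, using the r_counters[] array and the DEBUG-only
 * ROOTNEX_DPROF_* macros. The global variable and function names are
 * hypothetical.
 */
#if 0
static rootnex_state_t *rootnex_state;

static void
rootnex_example_count_alloc(boolean_t failed)
{
	if (failed) {
		ROOTNEX_DPROF_INC(
		    &rootnex_state->r_counters[ROOTNEX_CNT_ALLOC_FAIL]);
		return;
	}
	/* active handles: incremented on alloc, decremented on free */
	ROOTNEX_DPROF_INC(
	    &rootnex_state->r_counters[ROOTNEX_CNT_ACTIVE_HDLS]);
}

static void
rootnex_example_count_free(void)
{
	ROOTNEX_DPROF_DEC(
	    &rootnex_state->r_counters[ROOTNEX_CNT_ACTIVE_HDLS]);
}
#endif
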
#ifdef	__cplusplus
}
#endif

#endif	/* _SYS_ROOTNEX_H */