/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#ifndef	_SYS_ROOTNEX_H
#define	_SYS_ROOTNEX_H

/*
 * x86 root nexus implementation specific state
 */

#include <sys/types.h>
#include <sys/conf.h>
#include <sys/modctl.h>
#include <sys/sunddi.h>
#include <sys/iommulib.h>

#ifdef	__cplusplus
extern "C" {
#endif


/* size of buffer used for ctlop reportdev */
#define	REPORTDEV_BUFSIZE	1024

/* min and max interrupt vectors */
#define	VEC_MIN			1
#define	VEC_MAX			255

/* atomic increment/decrement to keep track of outstanding binds, etc */
#define	ROOTNEX_PROF_INC(addr)		atomic_inc_64(addr)
#define	ROOTNEX_PROF_DEC(addr)		atomic_add_64(addr, -1)

/* set in dmac_type to signify that this cookie uses the copy buffer */
#define	ROOTNEX_USES_COPYBUF		0x80000000

/*
 * integer or boolean property name and value. A few static rootnex properties
 * are created during rootnex attach from an array of rootnex_intprop_t.
 */
typedef struct rootnex_intprop_s {
	char	*prop_name;
	int	prop_value;
} rootnex_intprop_t;
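
/*
 * For illustration, a minimal sketch of how such an array might be declared
 * and published at attach time. The property names and values below are
 * hypothetical, and e_ddi_prop_update_int() is simply one plausible way to
 * create the properties; this is not the rootnex's actual property list.
 *
 *	static rootnex_intprop_t example_intprop[] = {
 *		{ "example-prop-a", 1 },
 *		{ "example-prop-b", 256 },
 *	};
 *
 *	static void
 *	example_add_props(dev_info_t *dip)
 *	{
 *		int i;
 *
 *		for (i = 0; i < sizeof (example_intprop) /
 *		    sizeof (rootnex_intprop_t); i++) {
 *			(void) e_ddi_prop_update_int(DDI_DEV_T_NONE, dip,
 *			    example_intprop[i].prop_name,
 *			    example_intprop[i].prop_value);
 *		}
 *	}
 */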

/*
 * sgl related information which is visible to rootnex_get_sgl(). We try to
 * isolate get_sgl() as much as possible so that it can be easily replaced.
 */
typedef struct rootnex_sglinfo_s {
	/*
	 * These are passed into rootnex_get_sgl().
	 *
	 * si_min_addr - the minimum physical address
	 * si_max_addr - the maximum physical address
	 * si_max_cookie_size - the maximum size of a physically contiguous
	 *    piece of memory that we can handle in a sgl.
	 * si_segmask - segment mask to determine if we cross a segment boundary
	 * si_max_pages - max number of pages this sgl could occupy (which
	 *    is also the maximum number of cookies we might see).
	 */
	uint64_t	si_min_addr;
	uint64_t	si_max_addr;
	uint64_t	si_max_cookie_size;
	uint64_t	si_segmask;
	uint_t		si_max_pages;

	/*
	 * These are returned by rootnex_get_sgl().
	 *
	 * si_copybuf_req - amount of copy buffer needed by the buffer.
	 * si_buf_offset - The initial offset into the first page of the buffer.
	 *    It's set in rootnex_get_sgl() and used in the bind slow path to
	 *    help calculate the current page index & offset from the current
	 *    offset, which is relative to the start of the buffer.
	 * si_asp - address space of the buffer passed in.
	 * si_sgl_size - The actual number of cookies in the sgl. This does
	 *    not reflect any sharing that we might do on window boundaries.
	 */
	size_t		si_copybuf_req;
	off_t		si_buf_offset;
	struct as	*si_asp;
	uint_t		si_sgl_size;
} rootnex_sglinfo_t;
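
/*
 * For illustration, a rough sketch of how the input fields above might be
 * seeded from a driver's DMA attributes before calling rootnex_get_sgl().
 * The "attr" pointer (a ddi_dma_attr_t *) and "buf_size" (the size of the
 * buffer being bound) are assumed names, and the real bind path derives
 * si_max_cookie_size and si_max_pages from several limits, so treat the
 * assignments below as a simplified assumption rather than the actual code:
 *
 *	rootnex_sglinfo_t si;
 *
 *	si.si_min_addr = attr->dma_attr_addr_lo;
 *	si.si_max_addr = attr->dma_attr_addr_hi;
 *	si.si_segmask = attr->dma_attr_seg;
 *	si.si_max_cookie_size = attr->dma_attr_count_max + 1;
 *	si.si_max_pages = mmu_btopr(buf_size) + 1;
 */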

/*
 * When we have to use the copy buffer, we allocate one of these structures per
 * buffer page to track which pages need the copy buffer, what the kernel
 * virtual address is (which the device can't reach), and what the copy buffer
 * virtual address is (where the device dma's to/from). For 32-bit kernels,
 * since we can't use seg kpm, we also need to keep the page_t around and track
 * whether we currently have the page mapped into KVA space; this matters for
 * buffers which don't already have a kva and when we have multiple windows
 * because we used up all of our copy buffer space.
 */
typedef struct rootnex_pgmap_s {
	boolean_t	pm_uses_copybuf;
#if !defined(__amd64)
	boolean_t	pm_mapped;
	page_t		*pm_pp;
	caddr_t		pm_vaddr;
#endif
	caddr_t		pm_kaddr;
	caddr_t		pm_cbaddr;
} rootnex_pgmap_t;
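
/*
 * For illustration, a sketch of how this page map might drive a sync
 * operation: for each page in the window that uses the copy buffer, data is
 * bcopy'd between the real buffer (pm_kaddr) and the copy buffer (pm_cbaddr),
 * with the direction picked by the sync flag. The "dma", "pidx", "psize", and
 * "cache_flags" names are assumed, and this is a simplified sketch of the
 * flow, not the actual sync code:
 *
 *	rootnex_pgmap_t *pmap = &dma->dp_pgmap[pidx];
 *
 *	if (pmap->pm_uses_copybuf) {
 *		if (cache_flags == DDI_DMA_SYNC_FORDEV)
 *			bcopy(pmap->pm_kaddr, pmap->pm_cbaddr, psize);
 *		else
 *			bcopy(pmap->pm_cbaddr, pmap->pm_kaddr, psize);
 *	}
 */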

/*
 * We only need to trim a buffer when we have multiple windows. Each window has
 * trim state. We might have trimmed the end of the previous window, leaving the
 * first cookie of this window trimmed [tr_trim_first] (which basically means we
 * won't start with a new cookie), or we might need to trim the end of the
 * current window [tr_trim_last] (which basically means we won't end with a
 * complete cookie). We keep the same state for the first & last cookie in a
 * window (a window can have one or more cookies). However, when we trim the
 * last cookie, we keep a pointer to the last cookie in the trim state since we
 * only need this info when we trim. The pointer to the first cookie in the
 * window is in the window state since we need to know what the first cookie in
 * the window is in various places.
 *
 * If we do trim a cookie, we save away the physical address and size of the
 * cookie so that we can overwrite the cookie when we switch windows (the
 * space for a cookie which is in two windows is shared between the windows).
 * We keep around the same information for the last page in a window.
 *
 * If we happen to trim on a page that uses the copy buffer, and that page
 * is also in the middle of a window boundary because we have filled up the
 * copy buffer, we need to remember the copy buffer address for both windows
 * since the same page will have different copy buffer addresses in the two
 * windows. We need to do the same for kaddr in the 32-bit kernel since we
 * have a limited kva space which we map to.
 */
typedef struct rootnex_trim_s {
	boolean_t		tr_trim_first;
	boolean_t		tr_trim_last;
	ddi_dma_cookie_t	*tr_last_cookie;
	uint64_t		tr_first_paddr;
	uint64_t		tr_last_paddr;
	size_t			tr_first_size;
	size_t			tr_last_size;

	boolean_t		tr_first_copybuf_win;
	boolean_t		tr_last_copybuf_win;
	uint_t			tr_first_pidx;
	uint_t			tr_last_pidx;
	caddr_t			tr_first_cbaddr;
	caddr_t			tr_last_cbaddr;
#if !defined(__amd64)
	caddr_t			tr_first_kaddr;
	caddr_t			tr_last_kaddr;
#endif
} rootnex_trim_t;
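
/*
 * For illustration, a sketch of how the saved trim state might be used when
 * switching to a window whose first cookie was trimmed. Because that cookie's
 * storage is shared with the previous window, its address and size are
 * rewritten from the trim state when the window is activated. The "dma" and
 * "winnum" names are assumed, and this is a simplified assumption of the
 * window-switch path (it uses the window state defined below), not the
 * actual code:
 *
 *	rootnex_window_t *window = &dma->dp_window[winnum];
 *	rootnex_trim_t *trim = &window->wd_trim;
 *
 *	if (trim->tr_trim_first) {
 *		window->wd_first_cookie->dmac_laddress = trim->tr_first_paddr;
 *		window->wd_first_cookie->dmac_size = trim->tr_first_size;
 *	}
 */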

/*
 * per window state. A bound DMA handle can have multiple windows. Each window
 * will have the following state. We track if this window needs to sync,
 * the offset into the buffer where the window starts, the size of the window,
 * a pointer to the first cookie in the window, the number of cookies in the
 * window, and the trim state for the window. For the 32-bit kernel, we keep
 * track of whether we need to remap the copy buffer when we switch to this
 * window.
 */
typedef struct rootnex_window_s {
	boolean_t		wd_dosync;
	uint_t			wd_cookie_cnt;
	off_t			wd_offset;
	size_t			wd_size;
	ddi_dma_cookie_t	*wd_first_cookie;
	rootnex_trim_t		wd_trim;
#if !defined(__amd64)
	boolean_t		wd_remap_copybuf;
#endif
} rootnex_window_t;
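
/*
 * From a leaf driver's point of view, these windows are what
 * ddi_dma_getwin(9F) walks after a partial bind. A hedged sketch of the usual
 * pattern is below; inside the loop the driver would program the device with
 * the returned cookie and then call ddi_dma_nextcookie() for the remaining
 * ccount - 1 cookies. The function and variable names are illustrative only:
 *
 *	static void
 *	example_walk_windows(ddi_dma_handle_t handle)
 *	{
 *		ddi_dma_cookie_t cookie;
 *		uint_t ccount, win, nwin;
 *		off_t off;
 *		size_t len;
 *
 *		(void) ddi_dma_numwin(handle, &nwin);
 *		for (win = 0; win < nwin; win++) {
 *			if (ddi_dma_getwin(handle, win, &off, &len, &cookie,
 *			    &ccount) != DDI_SUCCESS)
 *				break;
 *		}
 *	}
 */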

/* per dma handle private state */
typedef struct rootnex_dma_s {
	/*
	 * sgl related state used to build and describe the sgl.
	 *
	 * dp_partial_required - used in the bind slow path to identify if we
	 *    need to do a partial mapping or not.
	 * dp_trim_required - used in the bind slow path to identify if we
	 *    need to trim when switching to a new window. This should only be
	 *    set when partial is set.
	 * dp_granularity_power_2 - set in alloc handle and used in the bind
	 *    slow path to determine whether we use & or % to calculate the
	 *    trim.
	 * dp_dma - copy of the dma "object" passed in during bind
	 * dp_maxxfer - trimmed dma_attr_maxxfer so that it is a whole
	 *    multiple of granularity
	 * dp_sglinfo - See rootnex_sglinfo_t above.
	 */
	boolean_t		dp_partial_required;
	boolean_t		dp_trim_required;
	boolean_t		dp_granularity_power_2;
	uint64_t		dp_maxxfer;
	ddi_dma_obj_t		dp_dma;
	rootnex_sglinfo_t	dp_sglinfo;

	/*
	 * Copy buffer related state
	 *
	 * dp_copybuf_size - the actual size of the copy buffer that we are
	 *    using. This can be smaller than dp_copybuf_req, i.e. bind size >
	 *    max copy buffer size.
	 * dp_cbaddr - kernel address of the copy buffer. Used to determine
	 *    where to copy to/from.
	 * dp_cbsize - the "real" size returned from the copy buffer alloc.
	 *    Set in the copybuf alloc and used to free the copybuf.
	 * dp_pgmap - page map used in sync to determine which pages in the
	 *    buffer use the copy buffer and what addresses to use to copy to/
	 *    from.
	 * dp_cb_remaping - set if this bind requires us to remap the copybuf
	 *    when switching to new windows. This is only used in the 32-bit
	 *    kernel since we use seg kpm in the 64-bit kernel for this case.
	 * dp_kva - kernel heap arena vmem space for mapping buffers for which
	 *    we don't have a kernel VA to bcopy to/from. This is only used in
	 *    the 32-bit kernel since we use seg kpm in the 64-bit kernel for
	 *    this case.
	 */
	size_t			dp_copybuf_size;
	caddr_t			dp_cbaddr;
	size_t			dp_cbsize;
	rootnex_pgmap_t		*dp_pgmap;
#if !defined(__amd64)
	boolean_t		dp_cb_remaping;
	caddr_t			dp_kva;
#endif

	/*
	 * window related state. dp_window points to the window state array,
	 * which may be a pointer into the pre allocated state, or we may have
	 * had to allocate the window array on the fly because it wouldn't fit.
	 * If we allocate it, we'll use dp_need_to_free_window and
	 * dp_window_size during cleanup. dp_current_win keeps track of the
	 * current window. dp_max_win is the maximum number of windows we
	 * could have.
	 */
	uint_t			dp_current_win;
	rootnex_window_t	*dp_window;
	boolean_t		dp_need_to_free_window;
	uint_t			dp_window_size;
	uint_t			dp_max_win;

	/* dip of driver which "owns" handle. Set to rdip in alloc_handle() */
	dev_info_t		*dp_dip;

	/*
	 * dp_mutex and dp_inuse are only used to see if a driver is trying to
	 * bind to an already bound dma handle. dp_mutex is only used to
	 * protect dp_inuse.
	 */
	kmutex_t		dp_mutex;
	boolean_t		dp_inuse;

	/*
	 * cookie related state. The pointer to the cookies (dp_cookies) may
	 * be a pointer into the pre allocated state, or we may have had to
	 * allocate the cookie array on the fly because it wouldn't fit. If
	 * we allocate it, we'll use dp_need_to_free_cookie and dp_cookie_size
	 * during cleanup. dp_current_cookie is only used in the obsoleted
	 * interfaces to determine when we've used up all the cookies in a
	 * window during nextseg().
	 */
	size_t			dp_cookie_size;
	ddi_dma_cookie_t	*dp_cookies;
	boolean_t		dp_need_to_free_cookie;
	uint_t			dp_current_cookie; /* for obsoleted I/Fs */
	ddi_dma_cookie_t	*dp_saved_cookies;
	boolean_t		dp_need_to_switch_cookies;

	/*
	 * pre allocated space for the bind state, allocated during alloc
	 * handle. For a lot of devices, this will save us from having to do
	 * kmem_alloc's during the bind most of the time. kmem_alloc's can be
	 * expensive when the cpu count goes up since xcalls are expensive
	 * on x86.
	 */
	uchar_t			*dp_prealloc_buffer;

	/*
	 * intel iommu related state
	 * dp_dvma_cookies saves the dvma allocated for this handle. It is
	 * sized by si_max_pages, set when the handle is bound, and freed
	 * at unbind.
	 */
	void			*dp_dvma_cookies;

	/*
	 * sleep flags set on bind and unset on unbind
	 */
	int			dp_sleep_flags;
} rootnex_dma_t;
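
/*
 * The dp_granularity_power_2 flag above exists because the trim calculation
 * needs the remainder of a size modulo the DMA granularity, and when the
 * granularity is a power of two that remainder can be computed with a mask
 * instead of a division. A small sketch of the assumed calculation, where
 * "dma", "attr", and "size" are illustrative names for the handle state, the
 * driver's DMA attributes, and the length being checked:
 *
 *	size_t trim_sz;
 *
 *	if (dma->dp_granularity_power_2)
 *		trim_sz = size & (attr->dma_attr_granular - 1);
 *	else
 *		trim_sz = size % attr->dma_attr_granular;
 */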

/*
 * profile/performance counters. Most things will be dtrace probes, but there
 * are a couple of things we want to keep track of all the time. We track the
 * total number of active handles and binds (i.e. an alloc without a free or
 * a bind without an unbind) since rootnex attach. We also track the total
 * number of binds which have failed since rootnex attach.
 */
typedef enum {
	ROOTNEX_CNT_ACTIVE_HDLS = 0,
	ROOTNEX_CNT_ACTIVE_BINDS = 1,
	ROOTNEX_CNT_ALLOC_FAIL = 2,
	ROOTNEX_CNT_BIND_FAIL = 3,
	ROOTNEX_CNT_SYNC_FAIL = 4,
	ROOTNEX_CNT_GETWIN_FAIL = 5,

	/* This one must be last */
	ROOTNEX_CNT_LAST
} rootnex_cnt_t;
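
/*
 * These counters are what the ROOTNEX_PROF_INC()/ROOTNEX_PROF_DEC() macros
 * above operate on. For example, the alloc handle path would bump the active
 * handle count roughly like this (a sketch only; "rootnex_state" is assumed
 * to be a pointer to the driver's global state, defined below):
 *
 *	ROOTNEX_PROF_INC(&rootnex_state->r_counters[ROOTNEX_CNT_ACTIVE_HDLS]);
 *
 * and the free handle path would undo it:
 *
 *	ROOTNEX_PROF_DEC(&rootnex_state->r_counters[ROOTNEX_CNT_ACTIVE_HDLS]);
 */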

/*
 * global driver state.
 *   r_dmahdl_cache - dma_handle kmem_cache
 *   r_dvma_call_list_id - ddi_set_callback() id
 *   r_peekpoke_mutex - serialize peeks and pokes.
 *   r_dip - rootnex dip
 *   r_reserved_msg_printed - ctlops reserve message threshold
 *   r_counters - profile/performance counters
 *   r_intel_iommu_enabled - intel iommu enabled
 */
typedef struct rootnex_state_s {
	uint_t			r_prealloc_cookies;
	uint_t			r_prealloc_size;
	kmem_cache_t		*r_dmahdl_cache;
	uintptr_t		r_dvma_call_list_id;
	kmutex_t		r_peekpoke_mutex;
	dev_info_t		*r_dip;
	ddi_iblock_cookie_t	r_err_ibc;
	boolean_t		r_reserved_msg_printed;
	uint64_t		r_counters[ROOTNEX_CNT_LAST];
	boolean_t		r_intel_iommu_enabled;
	iommulib_nexhandle_t	r_iommulib_handle;
} rootnex_state_t;


#ifdef	__cplusplus
}
#endif

#endif	/* _SYS_ROOTNEX_H */