xref: /titanic_52/usr/src/uts/i86pc/sys/ioat.h (revision b9bd317cda1afb3a01f4812de73e8cec888cbbd7)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #ifndef _SYS_IOAT_H
28 #define	_SYS_IOAT_H
29 
30 #pragma ident	"%Z%%M%	%I%	%E% SMI"
31 
32 #ifdef __cplusplus
33 extern "C" {
34 #endif
35 
36 #include <sys/types.h>
37 #include <sys/dcopy.h>
38 #include <sys/dcopy_device.h>
39 
40 
/*
 * ioat ioctls.  The command group is 'T' shifted into the high byte; the
 * low byte selects the operation.
 */
#define	IOATIOC			('T'<< 8)
typedef enum {
	IOAT_IOCTL_WRITE_REG	= (IOATIOC | 0x0),	/* write a device register */
	IOAT_IOCTL_READ_REG	= (IOATIOC | 0x1),	/* read a device register */
	IOAT_IOCTL_TEST		= (IOATIOC | 0x2)	/* driver test hook */
} ioat_ioctl_enum_t;
48 
/*
 * Argument for IOAT_IOCTL_WRITE_REG / IOAT_IOCTL_READ_REG: a register
 * access described by offset and width, with the value written or read
 * back in `data'.
 */
typedef struct ioat_ioctl_reg_s {
	uint_t		size;	/* access width; presumably bytes -- confirm in driver */
	uint_t		addr;	/* register offset */
	uint64_t	data;	/* value to write / value read back */
} ioat_ioctl_reg_t;
typedef ioat_ioctl_reg_t ioat_ioctl_wrreg_t;	/* IOAT_IOCTL_WRITE_REG arg */
typedef ioat_ioctl_reg_t ioat_ioctl_rdreg_t;	/* IOAT_IOCTL_READ_REG arg */
56 
57 #ifdef _KERNEL
58 /* *** Driver Private Below *** */
59 
/* IOAT_DMACAPABILITY flags (bits read from the 32-bit capability register) */
#define	IOAT_DMACAP_PAGEBREAK	0x1	/* page-break support */
#define	IOAT_DMACAP_CRC		0x2	/* CRC support */
#define	IOAT_DMACAP_MARKERSKIP	0x4	/* marker-skipping support */
#define	IOAT_DMACAP_XOR		0x8	/* XOR support */
#define	IOAT_DMACAP_DCA		0x10	/* Direct Cache Access support */

/* IOAT_INTRCTL bits */
#define	IOAT_INTRCTL_MASTER_EN	0x1	/* master interrupt enable */
#define	IOAT_INTRCTL_INTR_STAT	0x2	/* interrupt status */
70 
/*
 * MMIO Registers -- byte offsets into the device's general register space
 * (mapped at is_genregs); the trailing comment gives the access width.
 */
#define	IOAT_CHANCNT		0x0	/* 8-bit */
#define	IOAT_XFERCAP		0x1	/* 8-bit */
#define	IOAT_GENCTRL		0x2	/* 8-bit */
#define	IOAT_INTRCTL		0x3	/* 8-bit, see IOAT_INTRCTL_* above */
#define	IOAT_CBVER		0x4	/* 32-bit */
#define	IOAT_ATTNSTATUS		0x4	/* 32-bit */
#define	IOAT_CBVER		0x8	/* 8-bit, see ioat_version_t */
#define	IOAT_PERPORT_OFF	0xA	/* 16-bit */
#define	IOAT_INTRDELAY		0xC	/* 16-bit */
#define	IOAT_CSSTATUS		0xE	/* 16-bit */
#define	IOAT_DMACAPABILITY	0x10	/* 32-bit, see IOAT_DMACAP_* above */

/* presumably the byte stride between per-channel register blocks -- confirm */
#define	IOAT_CHANNELREG_OFFSET	0x80
84 
/*
 * Channel Registers -- byte offsets relative to a channel's register block
 * (ic_regs).  The IOAT_V1_* and IOAT_V2_* groups differ between the two
 * hardware generations (see ioat_version_t).
 */
#define	IOAT_CHAN_CTL		0x0	/* 16-bit */
#define	IOAT_CHAN_COMP		0x2	/* 16-bit */
#define	IOAT_CHAN_CMPL_LO	0x18	/* 32-bit */
#define	IOAT_CHAN_CMPL_HI	0x1C	/* 32-bit */
#define	IOAT_CHAN_ERR		0x28	/* 32-bit */
#define	IOAT_CHAN_ERRMASK	0x2C	/* 32-bit */
#define	IOAT_CHAN_DCACTRL	0x30	/* 32-bit */

/* CB v1 per-channel register offsets */
#define	IOAT_V1_CHAN_STS_LO	0x4	/* 32-bit */
#define	IOAT_V1_CHAN_STS_HI	0x8	/* 32-bit */
#define	IOAT_V1_CHAN_ADDR_LO	0x0C	/* 32-bit */
#define	IOAT_V1_CHAN_ADDR_HI	0x10	/* 32-bit */
#define	IOAT_V1_CHAN_CMD	0x14	/* 8-bit */

/* CB v2 per-channel register offsets */
#define	IOAT_V2_CHAN_CMD	0x4	/* 8-bit */
#define	IOAT_V2_CHAN_CNT	0x6	/* 16-bit */
#define	IOAT_V2_CHAN_STS_LO	0x8	/* 32-bit */
#define	IOAT_V2_CHAN_STS_HI	0xC	/* 32-bit */
#define	IOAT_V2_CHAN_ADDR_LO	0x10	/* 32-bit */
#define	IOAT_V2_CHAN_ADDR_HI	0x14	/* 32-bit */
106 
/*
 * Completion status word layout: bits 63:6 hold the physical address of a
 * descriptor (64-byte aligned, matching the 64-byte descriptor size); the
 * low 6 bits hold transfer state, of which bits 1-2 indicate failure.
 */
#define	IOAT_CHAN_STS_ADDR_MASK		0xFFFFFFFFFFFFFFC0
#define	IOAT_CHAN_STS_XFER_MASK		0x3F
#define	IOAT_CHAN_STS_FAIL_MASK		0x6

/*
 * Index (in 64-byte descriptors) of the completed descriptor reported in
 * *ic_cmpl, relative to the ring base (cr_phys_desc).  NOTE: the previous
 * version of this macro referenced a local variable `ring' that had to
 * exist in the caller's scope; it now reaches the ring through
 * (channel)->ic_ring so it is self-contained, and the argument is
 * parenthesized for macro hygiene.
 */
#define	IOAT_CMPL_INDEX(channel)	\
	(((*(channel)->ic_cmpl & IOAT_CHAN_STS_ADDR_MASK) - \
	(channel)->ic_ring->cr_phys_desc) >> 6)

/* non-zero when the completion word reports a channel failure */
#define	IOAT_CMPL_FAILED(channel)	\
	(*(channel)->ic_cmpl & IOAT_CHAN_STS_FAIL_MASK)
115 
116 
/*
 * Generic 64-byte hardware descriptor (4 + 4 + 7*8 bytes).  The DCA and
 * DMA variants below overlay this same 64-byte slot with operation-
 * specific fields; dd_ctrl and dd_next_desc are common to all variants.
 */
typedef struct ioat_chan_desc_s {
	uint32_t	dd_res0;
	uint32_t	dd_ctrl;	/* operation/control bits (IOAT_DESC_CTRL_*) */
	uint64_t	dd_res1;
	uint64_t	dd_res2;
	uint64_t	dd_next_desc;	/* link to next descriptor; presumably physical addr */
	uint64_t	dd_res4;
	uint64_t	dd_res5;
	uint64_t	dd_res6;
	uint64_t	dd_res7;
} ioat_chan_desc_t;
128 
/* dca dd_ctrl bits */
#define	IOAT_DESC_CTRL_OP_CNTX	((uint32_t)0xFF << 24)	/* opcode field (bits 31:24) */
#define	IOAT_DESC_CTRL_CNTX_CHNG	0x1

/*
 * DCA (Direct Cache Access) context-change descriptor: same 64-byte slot
 * as ioat_chan_desc_t, with the first word carrying the DCA context.
 */
typedef struct ioat_chan_dca_desc_s {
	uint32_t	dd_cntx;	/* DCA context */
	uint32_t	dd_ctrl;	/* control bits (see above) */
	uint64_t	dd_res1;
	uint64_t	dd_res2;
	uint64_t	dd_next_desc;	/* link to next descriptor */
	uint64_t	dd_res4;
	uint64_t	dd_res5;
	uint64_t	dd_res6;
	uint64_t	dd_res7;
} ioat_chan_dca_desc_t;
143 
/* dma dd_ctrl bits */
#define	IOAT_DESC_CTRL_OP_DMA	(0x0 << 24)	/* DMA copy opcode (bits 31:24) */
#define	IOAT_DESC_DMACTRL_NULL	0x20	/* null (no-op) descriptor */
#define	IOAT_DESC_CTRL_FENCE	0x10
#define	IOAT_DESC_CTRL_CMPL	0x8	/* write completion status */
#define	IOAT_DESC_CTRL_INTR	0x1	/* interrupt on completion */

/*
 * DMA copy descriptor: same 64-byte slot as ioat_chan_desc_t, describing
 * a copy of dd_size bytes from dd_src_paddr to dd_dest_paddr.
 */
typedef struct ioat_chan_dma_desc_s {
	uint32_t	dd_size;		/* transfer size in bytes */
	uint32_t	dd_ctrl;		/* control bits (see above) */
	uint64_t	dd_src_paddr;		/* source physical address */
	uint64_t	dd_dest_paddr;		/* destination physical address */
	uint64_t	dd_next_desc;		/* link to next descriptor */
	uint64_t	dd_next_src_paddr;	/* v2 only */
	uint64_t	dd_next_dest_paddr;	/* v2 only */
	uint64_t	dd_res6;
	uint64_t	dd_res7;
} ioat_chan_dma_desc_t;
161 
162 
/* hardware generation, as determined from the IOAT_CBVER register */
typedef enum {
	IOAT_CBv1,
	IOAT_CBv2
} ioat_version_t;
167 
/* ioat private data per command */
typedef struct ioat_cmd_private_s {
	uint64_t	ip_generation;	/* ring generation the command was posted in */
	uint64_t	ip_index;	/* descriptor index of the command */
	dcopy_cmd_t	ip_next;	/* next command; presumably a free/active list link */
} ioat_cmd_private_t;
174 
/* descriptor ring state */
typedef struct ioat_channel_ring_s {
	/* protects cr_cmpl_gen & cr_cmpl_last */
	kmutex_t		cr_cmpl_mutex;

	/* desc ring generation for the last completion we saw */
	uint64_t		cr_cmpl_gen;

	/* last descriptor index we saw complete */
	uint64_t		cr_cmpl_last;

	/* protects cr_desc_* */
	kmutex_t		cr_desc_mutex;

	/*
	 * last descriptor posted. used to update its next pointer when we
	 * add a new desc. Also used to track the completion (See comment for
	 * cr_desc_gen_prev).
	 */
	uint64_t		cr_desc_prev;

	/* where to put the next descriptor */
	uint64_t		cr_desc_next;

	/* what the current desc ring generation is */
	uint64_t		cr_desc_gen;

	/*
	 * used during cmd_post to track the last desc posted. cr_desc_next
	 * and cr_desc_gen will be pointing to the next free desc after
	 * writing the descriptor to the ring. But we want to track the
	 * completion for the last descriptor posted.
	 */
	uint64_t		cr_desc_gen_prev;

	/* the last desc in the ring (for wrap) */
	uint64_t		cr_desc_last;

	/* pointer to the head of the ring */
	ioat_chan_desc_t	*cr_desc;

	/* physical address of the head of the ring */
	uint64_t		cr_phys_desc;

	/* back pointer to the channel state */
	struct ioat_channel_s	*cr_chan;

	/* for CB v2, number of desc posted (written to IOAT_V2_CHAN_CNT) */
	uint_t			cr_post_cnt;
} ioat_channel_ring_t;
225 
/* track channel state so we can handle a failure */
typedef enum {
	IOAT_CHANNEL_OK = 0,
	IOAT_CHANNEL_IN_FAILURE = 1
} ic_channel_state_t;
231 
typedef struct ioat_channel_s *ioat_channel_t;

/* per-channel state */
struct ioat_channel_s {
	/* channel's ring state */
	ioat_channel_ring_t	*ic_ring;

	/* IOAT_CBv1 || IOAT_CBv2 */
	ioat_version_t		ic_ver;

	/*
	 * state to determine if it's OK to post to the channel and if all
	 * future polls should return failure.
	 */
	ic_channel_state_t	ic_channel_state;

	/* channel command cache (*_cmd_alloc, *_cmd_free, etc) */
	kmem_cache_t		*ic_cmd_cache;

	/* dcopy state for dcopy_device_channel_notify() call */
	dcopy_handle_t		ic_dcopy_handle;

	/* location in memory where completions are DMA'ed into */
	volatile uint64_t	*ic_cmpl;

	/* channel specific registers */
	uint8_t			*ic_regs;

	/* if this channel is using DCA */
	boolean_t		ic_dca_active;

	/* DCA ID the channel is currently pointing to */
	uint32_t		ic_dca_current;

	/* device's channel number */
	uint_t			ic_chan_num;

	/* number of descriptors in ring */
	uint_t			ic_chan_desc_cnt;

	/* descriptor ring alloc state */
	ddi_dma_handle_t	ic_desc_dma_handle;
	size_t			ic_desc_alloc_size;
	ddi_acc_handle_t	ic_desc_handle;
	ddi_dma_cookie_t	ic_desc_cookies;

	/* completion buffer alloc state */
	ddi_dma_handle_t	ic_cmpl_dma_handle;
	size_t			ic_cmpl_alloc_size;
	ddi_acc_handle_t	ic_cmpl_handle;
	ddi_dma_cookie_t	ic_cmpl_cookie;
	uint64_t		ic_phys_cmpl;

	/* if inuse, we need to re-init the channel during resume */
	boolean_t		ic_inuse;

	/* backpointer to driver state */
	struct ioat_state_s	*ic_state;
};
289 
/* opaque handle for the resource/ID allocator (see ioat_rs_* below) */
typedef struct ioat_rs_s *ioat_rs_hdl_t;

/* driver state (one per device instance) */
typedef struct ioat_state_s {
	dev_info_t		*is_dip;	/* devinfo node */
	int			is_instance;	/* instance number */

	kmutex_t		is_mutex;

	/* register handle and pointer to registers */
	ddi_acc_handle_t	is_reg_handle;
	uint8_t			*is_genregs;	/* general register space (IOAT_* offsets) */

	/* IOAT_CBv1 || IOAT_CBv2 */
	ioat_version_t		is_ver;

	/* channel state */
	ioat_channel_t		is_channel;	/* array of per-channel state */
	size_t			is_chansize;	/* allocation size of is_channel */
	ioat_rs_hdl_t		is_channel_rs;	/* channel-number allocator */

	ddi_iblock_cookie_t	is_iblock_cookie;

	/* device info (values read from the general registers) */
	uint_t			is_chanoff;	/* per-channel register offset */
	uint_t			is_num_channels;
	uint_t			is_maxxfer;	/* max transfer size */
	uint_t			is_cbver;	/* raw IOAT_CBVER value */
	uint_t			is_intrdelay;
	uint_t			is_status;
	uint_t			is_capabilities;	/* IOAT_DMACAP_* flags */

	/* dcopy_device_register()/dcopy_device_unregister() state */
	dcopy_device_handle_t	is_device_handle;
	dcopy_device_info_t	is_deviceinfo;
} ioat_state_t;
326 
327 
/* ioctl entry point for the IOAT_IOCTL_* commands defined above */
int ioat_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *cred,
    int *rval);

/*
 * Simple ID allocator over the range [min_val, max_val]; used for
 * is_channel_rs (channel-number allocation).
 */
void ioat_rs_init(ioat_state_t *state, uint_t min_val, uint_t max_val,
    ioat_rs_hdl_t *handle);
void ioat_rs_fini(ioat_rs_hdl_t *handle);
int ioat_rs_alloc(ioat_rs_hdl_t handle, uint_t *rs);
void ioat_rs_free(ioat_rs_hdl_t handle, uint_t rs);

/* channel setup/teardown and suspend/resume (driver lifecycle) */
int ioat_channel_init(ioat_state_t *state);
void ioat_channel_fini(ioat_state_t *state);
void ioat_channel_suspend(ioat_state_t *state);
int ioat_channel_resume(ioat_state_t *state);

/* dcopy framework callbacks (see dcopy_device.h) and interrupt handler */
int ioat_channel_alloc(void *device_private, dcopy_handle_t handle, int flags,
    uint_t size, dcopy_query_channel_t *info, void *channel_private);
void ioat_channel_free(void *channel_private);
void ioat_channel_intr(ioat_channel_t channel);
int ioat_cmd_alloc(void *channel, int flags, dcopy_cmd_t *cmd);
void ioat_cmd_free(void *channel, dcopy_cmd_t *cmd);
int ioat_cmd_post(void *channel, dcopy_cmd_t cmd);
int ioat_cmd_poll(void *channel, dcopy_cmd_t cmd);
void ioat_unregister_complete(void *device_private, int status);
351 
352 
353 #endif /* _KERNEL */
354 
355 #ifdef __cplusplus
356 }
357 #endif
358 
359 #endif /* _SYS_IOAT_H */
360