xref: /titanic_50/usr/src/uts/i86pc/sys/ioat.h (revision 7c2fbfb345896881c631598ee3852ce9ce33fb07)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #ifndef _SYS_IOAT_H
28 #define	_SYS_IOAT_H
29 
30 #ifdef __cplusplus
31 extern "C" {
32 #endif
33 
34 #include <sys/types.h>
35 #include <sys/dcopy.h>
36 #include <sys/dcopy_device.h>
37 
38 
39 /* ioat ioctls */
40 #define	IOATIOC			('T'<< 8)
/*
 * Debug/test ioctl commands.  The register read/write commands carry an
 * ioat_ioctl_reg_t (size/addr/data) as their payload.
 */
41 typedef enum {
42 	IOAT_IOCTL_WRITE_REG	= (IOATIOC | 0x0),
43 	IOAT_IOCTL_READ_REG	= (IOATIOC | 0x1),
44 	IOAT_IOCTL_TEST		= (IOATIOC | 0x2)
45 } ioat_ioctl_enum_t;
46 
/*
 * Payload for IOAT_IOCTL_WRITE_REG/IOAT_IOCTL_READ_REG.
 */
47 typedef struct ioat_ioctl_reg_s {
48 	uint_t		size;	/* register width in bytes (presumably 1/2/4/8 to match the MMIO map below -- confirm in ioat_ioctl()) */
49 	uint_t		addr;	/* register offset within the device's MMIO space */
50 	uint64_t	data;	/* value to write, or value read back */
51 } ioat_ioctl_reg_t;
52 typedef ioat_ioctl_reg_t ioat_ioctl_wrreg_t;
53 typedef ioat_ioctl_reg_t ioat_ioctl_rdreg_t;
54 
55 #ifdef _KERNEL
56 /* *** Driver Private Below *** */
57 
58 /* IOAT_DMACAPABILITY flags */
59 #define	IOAT_DMACAP_PAGEBREAK	0x1
60 #define	IOAT_DMACAP_CRC		0x2
61 #define	IOAT_DMACAP_MARKERSKIP	0x4
62 #define	IOAT_DMACAP_XOR		0x8
63 #define	IOAT_DMACAP_DCA		0x10
64 
65 /* IOAT_INTRCTL bits */
66 #define	IOAT_INTRCTL_MASTER_EN	0x1
67 #define	IOAT_INTRCTL_INTR_STAT	0x2
68 
/* MMIO Registers (device-global; offsets from the start of BAR space) */
69 /* MMIO Registers */
70 #define	IOAT_CHANCNT		0x0	/* 8-bit */
71 #define	IOAT_XFERCAP		0x1	/* 8-bit */
72 #define	IOAT_GENCTRL		0x2	/* 8-bit */
73 #define	IOAT_INTRCTL		0x3	/* 8-bit */
74 #define	IOAT_ATTNSTATUS		0x4	/* 32-bit */
75 #define	IOAT_CBVER		0x8	/* 8-bit */
76 #define	IOAT_PERPORT_OFF	0xA	/* 16-bit */
77 #define	IOAT_INTRDELAY		0xC	/* 16-bit */
78 #define	IOAT_CSSTATUS		0xE	/* 16-bit */
79 #define	IOAT_DMACAPABILITY	0x10	/* 32-bit */
80 
/*
 * Offset of the first per-channel register block; presumably each channel's
 * block is IOAT_CHANNELREG_OFFSET * (chan_num + 1) from the base -- confirm
 * against ioat_channel.c.
 */
81 #define	IOAT_CHANNELREG_OFFSET	0x80
82 
/* Channel Registers (offsets relative to a channel's register block) */
83 /* Channel Registers */
84 #define	IOAT_CHAN_CTL		0x0	/* 16-bit */
85 #define	IOAT_CHAN_COMP		0x2	/* 16-bit */
86 #define	IOAT_CHAN_CMPL_LO	0x18	/* 32-bit */
87 #define	IOAT_CHAN_CMPL_HI	0x1C	/* 32-bit */
88 #define	IOAT_CHAN_ERR		0x28	/* 32-bit */
89 #define	IOAT_CHAN_ERRMASK	0x2C	/* 32-bit */
90 #define	IOAT_CHAN_DCACTRL	0x30	/* 32-bit */
91 
/* v1-only channel registers (CB version 1 register layout) */
92 #define	IOAT_V1_CHAN_STS_LO	0x4	/* 32-bit */
93 #define	IOAT_V1_CHAN_STS_HI	0x8	/* 32-bit */
94 #define	IOAT_V1_CHAN_ADDR_LO	0x0C	/* 32-bit */
95 #define	IOAT_V1_CHAN_ADDR_HI	0x10	/* 32-bit */
96 #define	IOAT_V1_CHAN_CMD	0x14	/* 8-bit */
97 
/* v2-only channel registers (CB version 2 register layout) */
98 #define	IOAT_V2_CHAN_CMD	0x4	/* 8-bit */
99 #define	IOAT_V2_CHAN_CNT	0x6	/* 16-bit */
100 #define	IOAT_V2_CHAN_STS_LO	0x8	/* 32-bit */
101 #define	IOAT_V2_CHAN_STS_HI	0xC	/* 32-bit */
102 #define	IOAT_V2_CHAN_ADDR_LO	0x10	/* 32-bit */
103 #define	IOAT_V2_CHAN_ADDR_HI	0x14	/* 32-bit */
104 
/*
 * The channel status word mixes a 64-byte-aligned descriptor address with
 * low-order status bits: bits 63:6 are the address of the last completed
 * descriptor, bits 5:0 are transfer/fail status.
 */
105 #define	IOAT_CHAN_STS_ADDR_MASK		0xFFFFFFFFFFFFFFC0
106 #define	IOAT_CHAN_STS_XFER_MASK		0x3F
107 #define	IOAT_CHAN_STS_FAIL_MASK		0x6
/*
 * Ring index of the last completed descriptor: (completed descriptor
 * physical address - ring base physical address) / 64, where 64 is
 * sizeof (ioat_chan_desc_t).
 *
 * NOTE(review): despite taking a 'channel' argument, this macro also
 * references a free variable 'ring' -- the caller MUST have a local named
 * 'ring' (the channel's ioat_channel_ring_t *) in scope.  'channel' is also
 * expanded unparenthesized, so only pass a simple lvalue.  Both hazards are
 * worth cleaning up if the macro is ever touched.
 */
108 #define	IOAT_CMPL_INDEX(channel)	\
109 	(((*channel->ic_cmpl & IOAT_CHAN_STS_ADDR_MASK) - \
110 	ring->cr_phys_desc) >> 6)
/* nonzero if the status word's fail bits are set ('channel' unparenthesized here too) */
111 #define	IOAT_CMPL_FAILED(channel)	\
112 	(*channel->ic_cmpl & IOAT_CHAN_STS_FAIL_MASK)
113 
114 
/*
 * Generic 64-byte hardware descriptor layout (4 + 4 + 7*8 bytes).  The DCA
 * and DMA descriptor variants below overlay this same 64-byte footprint;
 * only dd_ctrl and dd_next_desc are meaningful in every variant.  dd_res*
 * fields are reserved/unused in this generic view.
 */
115 typedef struct ioat_chan_desc_s {
116 	uint32_t	dd_res0;
117 	uint32_t	dd_ctrl;	/* operation/control bits (IOAT_DESC_CTRL_*) */
118 	uint64_t	dd_res1;
119 	uint64_t	dd_res2;
120 	uint64_t	dd_next_desc;	/* physical address of the next descriptor */
121 	uint64_t	dd_res4;
122 	uint64_t	dd_res5;
123 	uint64_t	dd_res6;
124 	uint64_t	dd_res7;
125 } ioat_chan_desc_t;
126 
127 /* dca dd_ctrl bits */
128 #define	IOAT_DESC_CTRL_OP_CNTX	((uint32_t)0xFF << 24)	/* opcode field: context-change operation */
129 #define	IOAT_DESC_CTRL_CNTX_CHNG	0x1
/*
 * DCA (Direct Cache Access) context-change descriptor.  Same 64-byte
 * footprint as ioat_chan_desc_t; the first word carries the DCA context
 * (presumably the target CPU/APIC id -- confirm against ioat_chan.c)
 * instead of a size.
 */
130 typedef struct ioat_chan_dca_desc_s {
131 	uint32_t	dd_cntx;	/* DCA context to switch to */
132 	uint32_t	dd_ctrl;	/* IOAT_DESC_CTRL_OP_CNTX | IOAT_DESC_CTRL_CNTX_CHNG */
133 	uint64_t	dd_res1;
134 	uint64_t	dd_res2;
135 	uint64_t	dd_next_desc;	/* physical address of the next descriptor */
136 	uint64_t	dd_res4;
137 	uint64_t	dd_res5;
138 	uint64_t	dd_res6;
139 	uint64_t	dd_res7;
140 } ioat_chan_dca_desc_t;
141 
142 /* dma dd_ctrl bits */
143 #define	IOAT_DESC_CTRL_OP_DMA	(0x0 << 24)	/* opcode field: plain memcpy DMA */
144 #define	IOAT_DESC_DMACTRL_NULL	0x20	/* null (no-op) descriptor */
145 #define	IOAT_DESC_CTRL_FENCE	0x10	/* do not start until prior descs complete */
146 #define	IOAT_DESC_CTRL_CMPL	0x8	/* write completion status on finish */
147 #define	IOAT_DESC_CTRL_INTR	0x1	/* raise an interrupt on finish */
/*
 * DMA copy descriptor.  Same 64-byte footprint as ioat_chan_desc_t.
 */
148 typedef struct ioat_chan_dma_desc_s {
149 	uint32_t	dd_size;	/* number of bytes to copy */
150 	uint32_t	dd_ctrl;	/* IOAT_DESC_CTRL_OP_DMA | flags above */
151 	uint64_t	dd_src_paddr;	/* source physical address */
152 	uint64_t	dd_dest_paddr;	/* destination physical address */
153 	uint64_t	dd_next_desc;	/* physical address of the next descriptor */
154 	uint64_t	dd_next_src_paddr;	/* v2 only */
155 	uint64_t	dd_next_dest_paddr;	/* v2 only */
156 	uint64_t	dd_res6;
157 	uint64_t	dd_res7;
158 } ioat_chan_dma_desc_t;
159 
160 
/*
 * Hardware revision of the DMA engine, presumably derived from the
 * IOAT_CBVER register at attach time.  v1 and v2 have different
 * per-channel register layouts (IOAT_V1_* vs IOAT_V2_* above).
 */
161 typedef enum {
162 	IOAT_CBv1,
163 	IOAT_CBv2
164 } ioat_version_t;
165 
166 /* ioat private data per command */
167 typedef struct ioat_cmd_private_s {
168 	uint64_t	ip_generation;	/* ring generation the cmd was posted in */
169 	uint64_t	ip_index;	/* ring index of the cmd's last descriptor */
170 	dcopy_cmd_t	ip_next;	/* next cmd; presumably links DCOPY_CMD_LOOP chains -- confirm in ioat_cmd_post() */
171 } ioat_cmd_private_t;
172 
173 /* descriptor ring state */
174 typedef struct ioat_channel_ring_s {
175 	/* protects cr_cmpl_gen & cr_cmpl_last */
176 	kmutex_t		cr_cmpl_mutex;
177 
178 	/* desc ring generation for the last completion we saw */
179 	uint64_t		cr_cmpl_gen;
180 
181 	/* last descriptor index we saw complete */
182 	uint64_t		cr_cmpl_last;
183 
184 	/* protects cr_desc_* */
185 	kmutex_t		cr_desc_mutex;
186 
187 	/*
188 	 * last descriptor posted. used to update its next pointer when we
189 	 * add a new desc. Also used to track the completion (See comment for
190 	 * cr_desc_gen_prev).
191 	 */
192 	uint64_t		cr_desc_prev;
193 
194 	/* where to put the next descriptor */
195 	uint64_t		cr_desc_next;
196 
197 	/* what the current desc ring generation is */
198 	uint64_t		cr_desc_gen;
199 
200 	/*
201 	 * used during cmd_post to track the last desc posted. cr_desc_next
202 	 * and cr_desc_gen will be pointing to the next free desc after
203 	 * writing the descriptor to the ring. But we want to track the
204 	 * completion for the last descriptor posted.
205 	 */
206 	uint64_t		cr_desc_gen_prev;
207 
208 	/* the last desc in the ring (for wrap) */
209 	uint64_t		cr_desc_last;
210 
211 	/* pointer to the head of the ring */
212 	ioat_chan_desc_t	*cr_desc;
213 
214 	/* physical address of the head of the ring */
215 	uint64_t		cr_phys_desc;
216 
217 	/* back pointer to the channel state */
218 	struct ioat_channel_s	*cr_chan;
219 
220 	/* for CB v2, number of desc posted (written to IOAT_V2_CHAN_CNT) */
221 	uint_t			cr_post_cnt;
222 } ioat_channel_ring_t;
223 
224 /* track channel state so we can handle a failure */
225 typedef enum {
226 	IOAT_CHANNEL_OK = 0,
227 	IOAT_CHANNEL_IN_FAILURE = 1	/* channel is sticky-failed; stop posting */
228 } ic_channel_state_t;
229 
/* per-channel state */
230 typedef struct ioat_channel_s *ioat_channel_t;
231 struct ioat_channel_s {
232 	/* channel's ring state */
233 	ioat_channel_ring_t	*ic_ring;
234 
235 	/* IOAT_CBv1 || IOAT_CBv2 */
236 	ioat_version_t		ic_ver;
237 
238 	/*
239 	 * state to determine if it's OK to post to the channel and if all
240 	 * future polls should return failure.
241 	 */
242 	ic_channel_state_t	ic_channel_state;
243 
244 	/* channel command cache (*_cmd_alloc, *_cmd_free, etc) */
245 	kmem_cache_t		*ic_cmd_cache;
246 
247 	/* dcopy state for dcopy_device_channel_notify() call */
248 	dcopy_handle_t		ic_dcopy_handle;
249 
250 	/* location in memory where completions are DMA'ed into */
251 	volatile uint64_t	*ic_cmpl;
252 
253 	/* channel specific registers */
254 	uint8_t			*ic_regs;
255 
256 	/* if this channel is using DCA */
257 	boolean_t		ic_dca_active;
258 
259 	/* DCA ID the channel is currently pointing to */
260 	uint32_t		ic_dca_current;
261 
262 	/* device's channel number */
263 	uint_t			ic_chan_num;
264 
265 	/* number of descriptors in ring */
266 	uint_t			ic_chan_desc_cnt;
267 
268 	/* descriptor ring alloc state */
269 	ddi_dma_handle_t	ic_desc_dma_handle;
270 	size_t			ic_desc_alloc_size;
271 	ddi_acc_handle_t	ic_desc_handle;
272 	ddi_dma_cookie_t	ic_desc_cookies;
273 
274 	/* completion buffer alloc state */
275 	ddi_dma_handle_t	ic_cmpl_dma_handle;
276 	size_t			ic_cmpl_alloc_size;
277 	ddi_acc_handle_t	ic_cmpl_handle;
278 	ddi_dma_cookie_t	ic_cmpl_cookie;
279 	uint64_t		ic_phys_cmpl;	/* physical address of *ic_cmpl */
280 
281 	/* if inuse, we need to re-init the channel during resume */
282 	boolean_t		ic_inuse;
283 
284 	/* backpointer to driver state */
285 	struct ioat_state_s	*ic_state;
286 };
287 
288 typedef struct ioat_rs_s *ioat_rs_hdl_t;
289 
290 /* driver state */
291 typedef struct ioat_state_s {
292 	dev_info_t		*is_dip;
293 	int			is_instance;
294 
295 	kmutex_t		is_mutex;
296 
297 	/* register handle and pointer to registers */
298 	ddi_acc_handle_t	is_reg_handle;
299 	uint8_t			*is_genregs;
300 
301 	/* IOAT_CBv1 || IOAT_CBv2 */
302 	ioat_version_t		is_ver;
303 
304 	/* channel state */
305 	ioat_channel_t		is_channel;	/* array of is_num_channels channels */
306 	size_t			is_chansize;
307 	ioat_rs_hdl_t		is_channel_rs;	/* allocator for free channel numbers */
308 
309 	ddi_iblock_cookie_t	is_iblock_cookie;
310 
311 	/*
312 	 * device info -- presumably cached copies of the MMIO registers of
313 	 * the same names (PERPORT_OFF, CHANCNT, XFERCAP, CBVER, INTRDELAY,
314 	 * CSSTATUS, DMACAPABILITY); confirm in ioat attach.
315 	 */
316 	uint_t			is_chanoff;
317 	uint_t			is_num_channels;
318 	uint_t			is_maxxfer;
319 	uint_t			is_cbver;
320 	uint_t			is_intrdelay;
321 	uint_t			is_status;
322 	uint_t			is_capabilities;
323 
324 	/* dcopy_device_register()/dcopy_device_unregister() state */
325 	dcopy_device_handle_t	is_device_handle;
326 	dcopy_device_info_t	is_deviceinfo;
327 } ioat_state_t;
324 
325 
/* debug/test ioctl entry point (IOAT_IOCTL_* above) */
326 int ioat_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *cred,
327     int *rval);
328 
/*
 * "rs" looks like a simple id-range allocator handing out values in
 * [min_val, max_val] -- used here for channel numbers; confirm in ioat_rs.c.
 */
329 void ioat_rs_init(ioat_state_t *state, uint_t min_val, uint_t max_val,
330     ioat_rs_hdl_t *handle);
331 void ioat_rs_fini(ioat_rs_hdl_t *handle);
332 int ioat_rs_alloc(ioat_rs_hdl_t handle, uint_t *rs);
333 void ioat_rs_free(ioat_rs_hdl_t handle, uint_t rs);
334 
/* channel lifecycle: attach/detach and power-management hooks */
335 int ioat_channel_init(ioat_state_t *state);
336 void ioat_channel_fini(ioat_state_t *state);
337 void ioat_channel_suspend(ioat_state_t *state);
338 int ioat_channel_resume(ioat_state_t *state);
339 void ioat_channel_quiesce(ioat_state_t *);
340 
/* dcopy framework callbacks (registered via dcopy_device_register()) */
341 int ioat_channel_alloc(void *device_private, dcopy_handle_t handle, int flags,
342     uint_t size, dcopy_query_channel_t *info, void *channel_private);
343 void ioat_channel_free(void *channel_private);
344 void ioat_channel_intr(ioat_channel_t channel);
345 int ioat_cmd_alloc(void *channel, int flags, dcopy_cmd_t *cmd);
346 void ioat_cmd_free(void *channel, dcopy_cmd_t *cmd);
347 int ioat_cmd_post(void *channel, dcopy_cmd_t cmd);
348 int ioat_cmd_poll(void *channel, dcopy_cmd_t cmd);
349 void ioat_unregister_complete(void *device_private, int status);
350 
351 
352 #endif /* _KERNEL */
353 
354 #ifdef __cplusplus
355 }
356 #endif
357 
358 #endif /* _SYS_IOAT_H */
359