/*-
 * Copyright (C) 2012 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

__FBSDID("$FreeBSD$");

#ifndef __IOAT_INTERNAL_H__
#define __IOAT_INTERNAL_H__

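/* Convert a newbus device_t into the driver's per-channel softc. */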
#define DEVICE2SOFTC(dev) ((struct ioat_softc *) device_get_softc(dev))

#define	ioat_read_chancnt(ioat) \
	ioat_read_1((ioat), IOAT_CHANCNT_OFFSET)

#define	ioat_read_xfercap(ioat) \
	ioat_read_1((ioat), IOAT_XFERCAP_OFFSET)

#define	ioat_write_intrctrl(ioat, value) \
	ioat_write_1((ioat), IOAT_INTRCTRL_OFFSET, (value))

#define	ioat_read_cbver(ioat) \
	(ioat_read_1((ioat), IOAT_CBVER_OFFSET) & 0xFF)

#define	ioat_read_dmacapability(ioat) \
	ioat_read_4((ioat), IOAT_DMACAPABILITY_OFFSET)

#define	ioat_write_chanctrl(ioat, value) \
	ioat_write_2((ioat), IOAT_CHANCTRL_OFFSET, (value))

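/*
 * 64-bit register access helpers.  On i386, where bus_space_read_8() and
 * bus_space_write_8() are unavailable, 64-bit registers are accessed as two
 * 32-bit halves, lower 4 bytes first, which is also the ordering some
 * registers (e.g. CHANSTS) require on older hardware.
 */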
static __inline uint64_t
ioat_bus_space_read_8_lower_first(bus_space_tag_t tag,
    bus_space_handle_t handle, bus_size_t offset)
{
	return (bus_space_read_4(tag, handle, offset) |
	    ((uint64_t)bus_space_read_4(tag, handle, offset + 4)) << 32);
}

static __inline void
ioat_bus_space_write_8_lower_first(bus_space_tag_t tag,
    bus_space_handle_t handle, bus_size_t offset, uint64_t val)
{
	bus_space_write_4(tag, handle, offset, val);
	bus_space_write_4(tag, handle, offset + 4, val >> 32);
}

#ifdef i386
#define ioat_bus_space_read_8 ioat_bus_space_read_8_lower_first
#define ioat_bus_space_write_8 ioat_bus_space_write_8_lower_first
#else
#define ioat_bus_space_read_8(tag, handle, offset) \
	bus_space_read_8((tag), (handle), (offset))
#define ioat_bus_space_write_8(tag, handle, offset, val) \
	bus_space_write_8((tag), (handle), (offset), (val))
#endif

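/*
 * Width-specific register accessors; all channel registers are reached
 * through the memory-mapped PCI resource recorded in pci_bus_tag and
 * pci_bus_handle.
 */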
#define ioat_read_1(ioat, offset) \
	bus_space_read_1((ioat)->pci_bus_tag, (ioat)->pci_bus_handle, \
	    (offset))

#define ioat_read_2(ioat, offset) \
	bus_space_read_2((ioat)->pci_bus_tag, (ioat)->pci_bus_handle, \
	    (offset))

#define ioat_read_4(ioat, offset) \
	bus_space_read_4((ioat)->pci_bus_tag, (ioat)->pci_bus_handle, \
	    (offset))

#define ioat_read_8(ioat, offset) \
	ioat_bus_space_read_8((ioat)->pci_bus_tag, (ioat)->pci_bus_handle, \
	    (offset))

#define ioat_read_double_4(ioat, offset) \
	ioat_bus_space_read_8_lower_first((ioat)->pci_bus_tag, \
	    (ioat)->pci_bus_handle, (offset))

#define ioat_write_1(ioat, offset, value) \
	bus_space_write_1((ioat)->pci_bus_tag, (ioat)->pci_bus_handle, \
	    (offset), (value))

#define ioat_write_2(ioat, offset, value) \
	bus_space_write_2((ioat)->pci_bus_tag, (ioat)->pci_bus_handle, \
	    (offset), (value))

#define ioat_write_4(ioat, offset, value) \
	bus_space_write_4((ioat)->pci_bus_tag, (ioat)->pci_bus_handle, \
	    (offset), (value))

#define ioat_write_8(ioat, offset, value) \
	ioat_bus_space_write_8((ioat)->pci_bus_tag, (ioat)->pci_bus_handle, \
	    (offset), (value))

#define ioat_write_double_4(ioat, offset, value) \
	ioat_bus_space_write_8_lower_first((ioat)->pci_bus_tag, \
	    (ioat)->pci_bus_handle, (offset), (value))

MALLOC_DECLARE(M_IOAT);

SYSCTL_DECL(_hw_ioat);

void ioat_log_message(int verbosity, char *fmt, ...);

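/*
 * Hardware descriptor layouts.  Each structure below mirrors one 64-byte
 * descriptor format fetched by the DMA engine (copy, block fill, XOR, PQ,
 * raw); the bitfield "control" struct is a convenience view of the 32-bit
 * control word, with the operation codes defined next to the field that
 * holds them.
 */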
struct ioat_dma_hw_descriptor {
	uint32_t size;
	union {
		uint32_t control_raw;
		struct {
			uint32_t int_enable:1;
			uint32_t src_snoop_disable:1;
			uint32_t dest_snoop_disable:1;
			uint32_t completion_update:1;
			uint32_t fence:1;
			uint32_t null:1;
			uint32_t src_page_break:1;
			uint32_t dest_page_break:1;
			uint32_t bundle:1;
			uint32_t dest_dca:1;
			uint32_t hint:1;
			uint32_t reserved:13;
			#define IOAT_OP_COPY 0x00
			uint32_t op:8;
		} control;
	} u;
	uint64_t src_addr;
	uint64_t dest_addr;
	uint64_t next;
	uint64_t reserved;
	uint64_t reserved2;
	uint64_t user1;
	uint64_t user2;
};
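/*
 * Minimal sketch of how the copy path is expected to fill in a DMA
 * descriptor (illustrative only; the authoritative logic lives in the
 * driver proper):
 *
 *	hw_desc->u.control_raw = 0;
 *	hw_desc->u.control.op = IOAT_OP_COPY;
 *	hw_desc->u.control.completion_update = 1;
 *	hw_desc->size = len;
 *	hw_desc->src_addr = src;
 *	hw_desc->dest_addr = dst;
 */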

struct ioat_fill_hw_descriptor {
	uint32_t size;
	union {
		uint32_t control_raw;
		struct {
			uint32_t int_enable:1;
			uint32_t reserved:1;
			uint32_t dest_snoop_disable:1;
			uint32_t completion_update:1;
			uint32_t fence:1;
			uint32_t reserved2:2;
			uint32_t dest_page_break:1;
			uint32_t bundle:1;
			uint32_t reserved3:15;
			#define IOAT_OP_FILL 0x01
			uint32_t op:8;
		} control;
	} u;
	uint64_t src_data;
	uint64_t dest_addr;
	uint64_t next;
	uint64_t reserved;
	uint64_t next_dest_addr;
	uint64_t user1;
	uint64_t user2;
};

struct ioat_xor_hw_descriptor {
	uint32_t size;
	union {
		uint32_t control_raw;
		struct {
			uint32_t int_enable:1;
			uint32_t src_snoop_disable:1;
			uint32_t dest_snoop_disable:1;
			uint32_t completion_update:1;
			uint32_t fence:1;
			uint32_t src_count:3;
			uint32_t bundle:1;
			uint32_t dest_dca:1;
			uint32_t hint:1;
			uint32_t reserved:13;
			#define IOAT_OP_XOR 0x87
			#define IOAT_OP_XOR_VAL 0x88
			uint32_t op:8;
		} control;
	} u;
	uint64_t src_addr;
	uint64_t dest_addr;
	uint64_t next;
	uint64_t src_addr2;
	uint64_t src_addr3;
	uint64_t src_addr4;
	uint64_t src_addr5;
};

struct ioat_xor_ext_hw_descriptor {
	uint64_t src_addr6;
	uint64_t src_addr7;
	uint64_t src_addr8;
	uint64_t next;
	uint64_t reserved[4];
};

struct ioat_pq_hw_descriptor {
	uint32_t size;
	union {
		uint32_t control_raw;
		struct {
			uint32_t int_enable:1;
			uint32_t src_snoop_disable:1;
			uint32_t dest_snoop_disable:1;
			uint32_t completion_update:1;
			uint32_t fence:1;
			uint32_t src_count:3;
			uint32_t bundle:1;
			uint32_t dest_dca:1;
			uint32_t hint:1;
			uint32_t p_disable:1;
			uint32_t q_disable:1;
			uint32_t reserved:11;
			#define IOAT_OP_PQ 0x89
			#define IOAT_OP_PQ_VAL 0x8a
			uint32_t op:8;
		} control;
	} u;
	uint64_t src_addr;
	uint64_t p_addr;
	uint64_t next;
	uint64_t src_addr2;
	uint64_t src_addr3;
	uint8_t  coef[8];
	uint64_t q_addr;
};

struct ioat_pq_ext_hw_descriptor {
	uint64_t src_addr4;
	uint64_t src_addr5;
	uint64_t src_addr6;
	uint64_t next;
	uint64_t src_addr7;
	uint64_t src_addr8;
	uint64_t reserved[2];
};

struct ioat_pq_update_hw_descriptor {
	uint32_t size;
	union {
		uint32_t control_raw;
		struct {
			uint32_t int_enable:1;
			uint32_t src_snoop_disable:1;
			uint32_t dest_snoop_disable:1;
			uint32_t completion_update:1;
			uint32_t fence:1;
			uint32_t src_cnt:3;
			uint32_t bundle:1;
			uint32_t dest_dca:1;
			uint32_t hint:1;
			uint32_t p_disable:1;
			uint32_t q_disable:1;
			uint32_t reserved:3;
			uint32_t coef:8;
			#define IOAT_OP_PQ_UP 0x8b
			uint32_t op:8;
		} control;
	} u;
	uint64_t src_addr;
	uint64_t p_addr;
	uint64_t next;
	uint64_t src_addr2;
	uint64_t p_src;
	uint64_t q_src;
	uint64_t q_addr;
};

struct ioat_raw_hw_descriptor {
	uint64_t field[8];
};

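/*
 * Client completion callback recorded with each queued operation; the
 * driver invokes it when the associated hardware descriptor completes.
 */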
struct bus_dmadesc {
	bus_dmaengine_callback_t callback_fn;
	void			 *callback_arg;
};

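/*
 * Software descriptor: pairs the client's callback with a pointer to, and
 * the bus address of, the hardware descriptor it wraps, plus its slot in
 * the ring.
 */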
struct ioat_descriptor {
	struct bus_dmadesc	bus_dmadesc;
	union {
		struct ioat_dma_hw_descriptor		*dma;
		struct ioat_fill_hw_descriptor		*fill;
		struct ioat_xor_hw_descriptor		*xor;
		struct ioat_xor_ext_hw_descriptor	*xor_ext;
		struct ioat_pq_hw_descriptor		*pq;
		struct ioat_pq_ext_hw_descriptor	*pq_ext;
		struct ioat_raw_hw_descriptor		*raw;
	} u;
	uint32_t		id;
	uint32_t		length;
	enum validate_flags	*validate_result;
	bus_addr_t		hw_desc_bus_addr;
};

/* One of these per allocated PCI device. */
struct ioat_softc {
	bus_dmaengine_t		dmaengine;
#define	to_ioat_softc(_dmaeng)						\
({									\
	bus_dmaengine_t *_p = (_dmaeng);				\
	(struct ioat_softc *)((char *)_p -				\
	    offsetof(struct ioat_softc, dmaengine));			\
})

	int			version;

	struct mtx		submit_lock;
	int			num_interrupts;
	device_t		device;
	bus_space_tag_t		pci_bus_tag;
	bus_space_handle_t	pci_bus_handle;
	int			pci_resource_id;
	struct resource		*pci_resource;
	uint32_t		max_xfer_size;

	struct resource		*res;
	int			rid;
	void			*tag;

	bus_dma_tag_t		hw_desc_tag;
	bus_dmamap_t		hw_desc_map;

	bus_dma_tag_t		comp_update_tag;
	bus_dmamap_t		comp_update_map;
	uint64_t		*comp_update;
	bus_addr_t		comp_update_bus_addr;

	struct callout		timer;

	boolean_t		is_resize_pending;
	boolean_t		is_completion_pending;
	boolean_t		is_reset_pending;
	boolean_t		is_channel_running;
	boolean_t		is_waiting_for_ack;

	uint32_t		xfercap_log;
	uint32_t		head;
	uint32_t		tail;
	uint16_t		reserved;
	uint32_t		ring_size_order;
	bus_addr_t		last_seen;

	struct ioat_descriptor	**ring;

	struct mtx		cleanup_lock;
};

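/*
 * Channel register wrappers that choose the access width by hardware
 * version: 3.3 and newer channels take a single 64-bit access, older ones
 * are accessed as two 32-bit halves, lower half first.
 */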
static inline uint64_t
ioat_get_chansts(struct ioat_softc *ioat)
{
	uint64_t status;

	if (ioat->version >= IOAT_VER_3_3)
		status = ioat_read_8(ioat, IOAT_CHANSTS_OFFSET);
	else
		/* Must read lower 4 bytes before upper 4 bytes. */
		status = ioat_read_double_4(ioat, IOAT_CHANSTS_OFFSET);
	return (status);
}

static inline void
ioat_write_chancmp(struct ioat_softc *ioat, uint64_t addr)
{

	if (ioat->version >= IOAT_VER_3_3)
		ioat_write_8(ioat, IOAT_CHANCMP_OFFSET_LOW, addr);
	else
		ioat_write_double_4(ioat, IOAT_CHANCMP_OFFSET_LOW, addr);
}

static inline void
ioat_write_chainaddr(struct ioat_softc *ioat, uint64_t addr)
{

	if (ioat->version >= IOAT_VER_3_3)
		ioat_write_8(ioat, IOAT_CHAINADDR_OFFSET_LOW, addr);
	else
		ioat_write_double_4(ioat, IOAT_CHAINADDR_OFFSET_LOW, addr);
}

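/*
 * Decode the transfer status field of CHANSTS, e.g.
 * is_ioat_halted(ioat_get_chansts(ioat)).
 */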
static inline boolean_t
is_ioat_active(uint64_t status)
{
	return ((status & IOAT_CHANSTS_STATUS) == IOAT_CHANSTS_ACTIVE);
}

static inline boolean_t
is_ioat_idle(uint64_t status)
{
	return ((status & IOAT_CHANSTS_STATUS) == IOAT_CHANSTS_IDLE);
}

static inline boolean_t
is_ioat_halted(uint64_t status)
{
	return ((status & IOAT_CHANSTS_STATUS) == IOAT_CHANSTS_HALTED);
}

static inline boolean_t
is_ioat_suspended(uint64_t status)
{
	return ((status & IOAT_CHANSTS_STATUS) == IOAT_CHANSTS_SUSPENDED);
}

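/* Channel command helpers: issue suspend or reset through CHANCMD. */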
static inline void
ioat_suspend(struct ioat_softc *ioat)
{
	ioat_write_1(ioat, IOAT_CHANCMD_OFFSET, IOAT_CHANCMD_SUSPEND);
}

static inline void
ioat_reset(struct ioat_softc *ioat)
{
	ioat_write_1(ioat, IOAT_CHANCMD_OFFSET, IOAT_CHANCMD_RESET);
}

static inline boolean_t
ioat_reset_pending(struct ioat_softc *ioat)
{
	uint8_t cmd;

	cmd = ioat_read_1(ioat, IOAT_CHANCMD_OFFSET);
	return ((cmd & IOAT_CHANCMD_RESET) != 0);
}

#endif /* __IOAT_INTERNAL_H__ */