xref: /freebsd/sys/dev/ioat/ioat_internal.h (revision 1ce1c6895245648ba022f7187df1626904dc1f89)
1 /*-
2  * Copyright (C) 2012 Intel Corporation
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24  * SUCH DAMAGE.
25  */
26 
27 __FBSDID("$FreeBSD$");
28 
#ifndef __IOAT_INTERNAL_H__
#define __IOAT_INTERNAL_H__

/* Convert a device_t into this driver's per-channel softc. */
#define	DEVICE2SOFTC(dev)	((struct ioat_softc *) device_get_softc(dev))
/* KTR trace class used by the ioat(4) driver. */
#define	KTR_IOAT		KTR_SPARE3

/*
 * Named accessors for individual device registers, built on the sized
 * ioat_read_*()/ioat_write_*() helpers defined below.
 */
#define	ioat_read_chancnt(ioat) \
	ioat_read_1((ioat), IOAT_CHANCNT_OFFSET)

/* Transfer-capability register, masked to its valid bits. */
#define	ioat_read_xfercap(ioat) \
	(ioat_read_1((ioat), IOAT_XFERCAP_OFFSET) & IOAT_XFERCAP_VALID_MASK)

#define	ioat_write_intrctrl(ioat, value) \
	ioat_write_1((ioat), IOAT_INTRCTRL_OFFSET, (value))

/* Hardware (CB) version byte. */
#define	ioat_read_cbver(ioat) \
	(ioat_read_1((ioat), IOAT_CBVER_OFFSET) & 0xFF)

#define	ioat_read_dmacapability(ioat) \
	ioat_read_4((ioat), IOAT_DMACAPABILITY_OFFSET)

#define	ioat_write_chanctrl(ioat, value) \
	ioat_write_2((ioat), IOAT_CHANCTRL_OFFSET, (value))
52 
53 static __inline uint64_t
54 ioat_bus_space_read_8_lower_first(bus_space_tag_t tag,
55     bus_space_handle_t handle, bus_size_t offset)
56 {
57 	return (bus_space_read_4(tag, handle, offset) |
58 	    ((uint64_t)bus_space_read_4(tag, handle, offset + 4)) << 32);
59 }
60 
61 static __inline void
62 ioat_bus_space_write_8_lower_first(bus_space_tag_t tag,
63     bus_space_handle_t handle, bus_size_t offset, uint64_t val)
64 {
65 	bus_space_write_4(tag, handle, offset, val);
66 	bus_space_write_4(tag, handle, offset + 4, val >> 32);
67 }
68 
/*
 * On i386, 64-bit registers are accessed as two 4-byte operations
 * (lower half first); elsewhere the native 8-byte bus_space accessors
 * are used directly.
 */
#ifdef __i386__
#define ioat_bus_space_read_8 ioat_bus_space_read_8_lower_first
#define ioat_bus_space_write_8 ioat_bus_space_write_8_lower_first
#else
#define ioat_bus_space_read_8(tag, handle, offset) \
	bus_space_read_8((tag), (handle), (offset))
#define ioat_bus_space_write_8(tag, handle, offset, val) \
	bus_space_write_8((tag), (handle), (offset), (val))
#endif

/*
 * Sized register accessors, relative to the channel's PCI BAR mapping
 * (pci_bus_tag/pci_bus_handle in struct ioat_softc).
 */
#define ioat_read_1(ioat, offset) \
	bus_space_read_1((ioat)->pci_bus_tag, (ioat)->pci_bus_handle, \
	    (offset))

#define ioat_read_2(ioat, offset) \
	bus_space_read_2((ioat)->pci_bus_tag, (ioat)->pci_bus_handle, \
	    (offset))

#define ioat_read_4(ioat, offset) \
	bus_space_read_4((ioat)->pci_bus_tag, (ioat)->pci_bus_handle, \
	    (offset))

#define ioat_read_8(ioat, offset) \
	ioat_bus_space_read_8((ioat)->pci_bus_tag, (ioat)->pci_bus_handle, \
	    (offset))

/*
 * Always-split 8-byte read (lower 4 bytes first) on every platform,
 * for registers that must be accessed 4 bytes at a time; see
 * ioat_get_chansts().
 */
#define ioat_read_double_4(ioat, offset) \
	ioat_bus_space_read_8_lower_first((ioat)->pci_bus_tag, \
	    (ioat)->pci_bus_handle, (offset))

#define ioat_write_1(ioat, offset, value) \
	bus_space_write_1((ioat)->pci_bus_tag, (ioat)->pci_bus_handle, \
	    (offset), (value))

#define ioat_write_2(ioat, offset, value) \
	bus_space_write_2((ioat)->pci_bus_tag, (ioat)->pci_bus_handle, \
	    (offset), (value))

#define ioat_write_4(ioat, offset, value) \
	bus_space_write_4((ioat)->pci_bus_tag, (ioat)->pci_bus_handle, \
	    (offset), (value))

#define ioat_write_8(ioat, offset, value) \
	ioat_bus_space_write_8((ioat)->pci_bus_tag, (ioat)->pci_bus_handle, \
	    (offset), (value))

/* Always-split 8-byte write (lower 4 bytes first) on every platform. */
#define ioat_write_double_4(ioat, offset, value) \
	ioat_bus_space_write_8_lower_first((ioat)->pci_bus_tag, \
	    (ioat)->pci_bus_handle, (offset), (value))
118 
/* malloc(9) type for the driver's allocations. */
MALLOC_DECLARE(M_IOAT);

/* Root of the hw.ioat sysctl tree. */
SYSCTL_DECL(_hw_ioat);

/* Driver-wide debug verbosity level. */
extern int g_ioat_debug_level;
124 
/*
 * Control-word bit layout shared by all hardware descriptor types; the
 * per-type control structs below refine the middle bits, while 'op'
 * always selects the operation (one of the IOAT_OP_* values).
 */
struct generic_dma_control {
	uint32_t int_enable:1;
	uint32_t src_snoop_disable:1;
	uint32_t dest_snoop_disable:1;
	uint32_t completion_update:1;
	uint32_t fence:1;
	uint32_t reserved1:1;
	uint32_t src_page_break:1;
	uint32_t dest_page_break:1;
	uint32_t bundle:1;
	uint32_t dest_dca:1;
	uint32_t hint:1;
	uint32_t reserved2:13;
	uint32_t op:8;
};
140 
/*
 * Operation-independent view of a 64-byte hardware descriptor: size,
 * control word, and source/destination/next-descriptor bus addresses.
 */
struct ioat_generic_hw_descriptor {
	uint32_t size;
	union {
		uint32_t control_raw;
		struct generic_dma_control control_generic;
	} u;
	uint64_t src_addr;
	uint64_t dest_addr;
	uint64_t next;		/* bus address of the next descriptor */
	uint64_t reserved[4];
};
152 
/* Hardware descriptor for a memory copy (IOAT_OP_COPY). */
struct ioat_dma_hw_descriptor {
	uint32_t size;
	union {
		uint32_t control_raw;
		struct generic_dma_control control_generic;
		struct {
			uint32_t int_enable:1;
			uint32_t src_snoop_disable:1;
			uint32_t dest_snoop_disable:1;
			uint32_t completion_update:1;
			uint32_t fence:1;
			uint32_t null:1;
			uint32_t src_page_break:1;
			uint32_t dest_page_break:1;
			uint32_t bundle:1;
			uint32_t dest_dca:1;
			uint32_t hint:1;
			uint32_t reserved:13;
			#define IOAT_OP_COPY 0x00
			uint32_t op:8;
		} control;
	} u;
	uint64_t src_addr;
	uint64_t dest_addr;
	uint64_t next;		/* bus address of the next descriptor */
	uint64_t next_src_addr;
	uint64_t next_dest_addr;
	uint64_t user1;
	uint64_t user2;
};
183 
/* Hardware descriptor for a block fill with src_data (IOAT_OP_FILL). */
struct ioat_fill_hw_descriptor {
	uint32_t size;
	union {
		uint32_t control_raw;
		struct generic_dma_control control_generic;
		struct {
			uint32_t int_enable:1;
			uint32_t reserved:1;
			uint32_t dest_snoop_disable:1;
			uint32_t completion_update:1;
			uint32_t fence:1;
			uint32_t reserved2:2;
			uint32_t dest_page_break:1;
			uint32_t bundle:1;
			uint32_t reserved3:15;
			#define IOAT_OP_FILL 0x01
			uint32_t op:8;
		} control;
	} u;
	uint64_t src_data;	/* fill pattern, not an address */
	uint64_t dest_addr;
	uint64_t next;		/* bus address of the next descriptor */
	uint64_t reserved;
	uint64_t next_dest_addr;
	uint64_t user1;
	uint64_t user2;
};
211 
/*
 * Hardware descriptor for XOR / XOR-validate (IOAT_OP_XOR,
 * IOAT_OP_XOR_VAL); holds up to five source addresses, with sources
 * 6-8 in a following ioat_xor_ext_hw_descriptor.
 */
struct ioat_xor_hw_descriptor {
	uint32_t size;
	union {
		uint32_t control_raw;
		struct generic_dma_control control_generic;
		struct {
			uint32_t int_enable:1;
			uint32_t src_snoop_disable:1;
			uint32_t dest_snoop_disable:1;
			uint32_t completion_update:1;
			uint32_t fence:1;
			uint32_t src_count:3;
			uint32_t bundle:1;
			uint32_t dest_dca:1;
			uint32_t hint:1;
			uint32_t reserved:13;
			#define IOAT_OP_XOR 0x87
			#define IOAT_OP_XOR_VAL 0x88
			uint32_t op:8;
		} control;
	} u;
	uint64_t src_addr;
	uint64_t dest_addr;
	uint64_t next;		/* bus address of the next descriptor */
	uint64_t src_addr2;
	uint64_t src_addr3;
	uint64_t src_addr4;
	uint64_t src_addr5;
};
241 
/* Continuation of ioat_xor_hw_descriptor carrying source addresses 6-8. */
struct ioat_xor_ext_hw_descriptor {
	uint64_t src_addr6;
	uint64_t src_addr7;
	uint64_t src_addr8;
	uint64_t next;		/* bus address of the next descriptor */
	uint64_t reserved[4];
};
249 
/*
 * Hardware descriptor for PQ generate / PQ validate (IOAT_OP_PQ,
 * IOAT_OP_PQ_VAL); sources 4-8 live in a following
 * ioat_pq_ext_hw_descriptor.
 */
struct ioat_pq_hw_descriptor {
	uint32_t size;
	union {
		uint32_t control_raw;
		struct generic_dma_control control_generic;
		struct {
			uint32_t int_enable:1;
			uint32_t src_snoop_disable:1;
			uint32_t dest_snoop_disable:1;
			uint32_t completion_update:1;
			uint32_t fence:1;
			uint32_t src_count:3;
			uint32_t bundle:1;
			uint32_t dest_dca:1;
			uint32_t hint:1;
			uint32_t p_disable:1;
			uint32_t q_disable:1;
			uint32_t reserved:11;
			#define IOAT_OP_PQ 0x89
			#define IOAT_OP_PQ_VAL 0x8a
			uint32_t op:8;
		} control;
	} u;
	uint64_t src_addr;
	uint64_t p_addr;
	uint64_t next;		/* bus address of the next descriptor */
	uint64_t src_addr2;
	uint64_t src_addr3;
	uint8_t  coef[8];	/* per-source coefficients */
	uint64_t q_addr;
};
281 
/* Continuation of ioat_pq_hw_descriptor carrying source addresses 4-8. */
struct ioat_pq_ext_hw_descriptor {
	uint64_t src_addr4;
	uint64_t src_addr5;
	uint64_t src_addr6;
	uint64_t next;		/* bus address of the next descriptor */
	uint64_t src_addr7;
	uint64_t src_addr8;
	uint64_t reserved[2];
};
291 
/*
 * Hardware descriptor for a PQ update (IOAT_OP_PQ_UP); unlike the
 * full PQ descriptor, the single 8-bit coefficient is embedded in the
 * control word.
 */
struct ioat_pq_update_hw_descriptor {
	uint32_t size;
	union {
		uint32_t control_raw;
		struct generic_dma_control control_generic;
		struct {
			uint32_t int_enable:1;
			uint32_t src_snoop_disable:1;
			uint32_t dest_snoop_disable:1;
			uint32_t completion_update:1;
			uint32_t fence:1;
			uint32_t src_cnt:3;
			uint32_t bundle:1;
			uint32_t dest_dca:1;
			uint32_t hint:1;
			uint32_t p_disable:1;
			uint32_t q_disable:1;
			uint32_t reserved:3;
			uint32_t coef:8;
			#define IOAT_OP_PQ_UP 0x8b
			uint32_t op:8;
		} control;
	} u;
	uint64_t src_addr;
	uint64_t p_addr;
	uint64_t next;		/* bus address of the next descriptor */
	uint64_t src_addr2;
	uint64_t p_src;
	uint64_t q_src;
	uint64_t q_addr;
};
323 
/* Untyped view of a hardware descriptor: eight 64-bit words. */
struct ioat_raw_hw_descriptor {
	uint64_t field[8];
};
327 
/*
 * Client callback bookkeeping carried with each descriptor —
 * presumably invoked when the operation completes; confirm in ioat.c.
 */
struct bus_dmadesc {
	bus_dmaengine_callback_t callback_fn;
	void			 *callback_arg;
};
332 
/*
 * Driver-side wrapper around one hardware descriptor: the client
 * callback, a typed pointer to the hardware descriptor memory (viewed
 * through whichever layout matches the operation), the descriptor's
 * ring id, and the bus address of the hardware descriptor.
 */
struct ioat_descriptor {
	struct bus_dmadesc	bus_dmadesc;
	union {
		struct ioat_generic_hw_descriptor	*generic;
		struct ioat_dma_hw_descriptor		*dma;
		struct ioat_fill_hw_descriptor		*fill;
		struct ioat_xor_hw_descriptor		*xor;
		struct ioat_xor_ext_hw_descriptor	*xor_ext;
		struct ioat_pq_hw_descriptor		*pq;
		struct ioat_pq_ext_hw_descriptor	*pq_ext;
		struct ioat_raw_hw_descriptor		*raw;
	} u;
	uint32_t		id;
	bus_addr_t		hw_desc_bus_addr;
};
348 
/*
 * Categories of references held against a softc; counted per-kind
 * under INVARIANTS (see refkinds[] in struct ioat_softc).
 */
enum ioat_ref_kind {
	IOAT_DMAENGINE_REF = 0,
	IOAT_ACTIVE_DESCR_REF,
	IOAT_NUM_REF_KINDS
};
354 
/* One of these per allocated PCI device. */
struct ioat_softc {
	bus_dmaengine_t		dmaengine;	/* public engine handle */
/*
 * Recover the containing softc from a pointer to its embedded
 * dmaengine member (via offsetof).
 */
#define	to_ioat_softc(_dmaeng)						\
({									\
	bus_dmaengine_t *_p = (_dmaeng);				\
	(struct ioat_softc *)((char *)_p -				\
	    offsetof(struct ioat_softc, dmaengine));			\
})

	int			version;	/* IOAT_VER_* of this channel */
	int			chan_idx;

	struct mtx		submit_lock;
	device_t		device;
	/* Mapping of the device's register BAR. */
	bus_space_tag_t		pci_bus_tag;
	bus_space_handle_t	pci_bus_handle;
	int			pci_resource_id;
	struct resource		*pci_resource;
	uint32_t		max_xfer_size;
	uint32_t		capabilities;	/* from DMACAPABILITY */

	/* NOTE(review): res/rid/tag look like the interrupt resource
	 * triple — confirm against ioat.c. */
	struct resource		*res;
	int			rid;
	void			*tag;

	/* DMA tag/map for the hardware descriptor memory. */
	bus_dma_tag_t		hw_desc_tag;
	bus_dmamap_t		hw_desc_map;

	/* DMA tag/map/address for the completion-update word. */
	bus_dma_tag_t		comp_update_tag;
	bus_dmamap_t		comp_update_map;
	uint64_t		*comp_update;
	bus_addr_t		comp_update_bus_addr;

	struct callout		timer;

	boolean_t		quiescing;
	boolean_t		is_resize_pending;
	boolean_t		is_completion_pending;
	boolean_t		is_reset_pending;
	boolean_t		is_channel_running;

	/* Descriptor ring indices (order suggests ring size is a power
	 * of two — confirm in ioat.c). */
	uint32_t		head;
	uint32_t		tail;
	uint32_t		hw_head;
	uint32_t		ring_size_order;
	bus_addr_t		last_seen;

	struct ioat_descriptor	**ring;

	struct mtx		cleanup_lock;
	volatile uint32_t	refcnt;
#ifdef INVARIANTS
	/* Per-kind breakdown of refcnt (enum ioat_ref_kind). */
	volatile uint32_t	refkinds[IOAT_NUM_REF_KINDS];
#endif
};
411 
/* Attach/detach entry points for the ioat test facility. */
void ioat_test_attach(void);
void ioat_test_detach(void);
414 
415 static inline uint64_t
416 ioat_get_chansts(struct ioat_softc *ioat)
417 {
418 	uint64_t status;
419 
420 	if (ioat->version >= IOAT_VER_3_3)
421 		status = ioat_read_8(ioat, IOAT_CHANSTS_OFFSET);
422 	else
423 		/* Must read lower 4 bytes before upper 4 bytes. */
424 		status = ioat_read_double_4(ioat, IOAT_CHANSTS_OFFSET);
425 	return (status);
426 }
427 
428 static inline void
429 ioat_write_chancmp(struct ioat_softc *ioat, uint64_t addr)
430 {
431 
432 	if (ioat->version >= IOAT_VER_3_3)
433 		ioat_write_8(ioat, IOAT_CHANCMP_OFFSET_LOW, addr);
434 	else
435 		ioat_write_double_4(ioat, IOAT_CHANCMP_OFFSET_LOW, addr);
436 }
437 
438 static inline void
439 ioat_write_chainaddr(struct ioat_softc *ioat, uint64_t addr)
440 {
441 
442 	if (ioat->version >= IOAT_VER_3_3)
443 		ioat_write_8(ioat, IOAT_CHAINADDR_OFFSET_LOW, addr);
444 	else
445 		ioat_write_double_4(ioat, IOAT_CHAINADDR_OFFSET_LOW, addr);
446 }
447 
448 static inline boolean_t
449 is_ioat_active(uint64_t status)
450 {
451 	return ((status & IOAT_CHANSTS_STATUS) == IOAT_CHANSTS_ACTIVE);
452 }
453 
454 static inline boolean_t
455 is_ioat_idle(uint64_t status)
456 {
457 	return ((status & IOAT_CHANSTS_STATUS) == IOAT_CHANSTS_IDLE);
458 }
459 
460 static inline boolean_t
461 is_ioat_halted(uint64_t status)
462 {
463 	return ((status & IOAT_CHANSTS_STATUS) == IOAT_CHANSTS_HALTED);
464 }
465 
466 static inline boolean_t
467 is_ioat_suspended(uint64_t status)
468 {
469 	return ((status & IOAT_CHANSTS_STATUS) == IOAT_CHANSTS_SUSPENDED);
470 }
471 
/* Request a channel suspend by writing SUSPEND to CHANCMD. */
static inline void
ioat_suspend(struct ioat_softc *ioat)
{
	ioat_write_1(ioat, IOAT_CHANCMD_OFFSET, IOAT_CHANCMD_SUSPEND);
}
477 
/* Request a channel reset by writing RESET to CHANCMD. */
static inline void
ioat_reset(struct ioat_softc *ioat)
{
	ioat_write_1(ioat, IOAT_CHANCMD_OFFSET, IOAT_CHANCMD_RESET);
}
483 
484 static inline boolean_t
485 ioat_reset_pending(struct ioat_softc *ioat)
486 {
487 	uint8_t cmd;
488 
489 	cmd = ioat_read_1(ioat, IOAT_CHANCMD_OFFSET);
490 	return ((cmd & IOAT_CHANCMD_RESET) != 0);
491 }
492 
493 #endif /* __IOAT_INTERNAL_H__ */
494