xref: /freebsd/sys/dev/sfxge/common/efsys.h (revision 1c05a6ea6b849ff95e539c31adea887c644a6a01)
1 /*-
2  * Copyright (c) 2010-2016 Solarflare Communications Inc.
3  * All rights reserved.
4  *
5  * This software was developed in part by Philip Paeps under contract for
6  * Solarflare Communications, Inc.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions are met:
10  *
11  * 1. Redistributions of source code must retain the above copyright notice,
12  *    this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright notice,
14  *    this list of conditions and the following disclaimer in the documentation
15  *    and/or other materials provided with the distribution.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
18  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
19  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
20  * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
21  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
22  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
23  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
24  * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
25  * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
26  * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
27  * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28  *
29  * The views and conclusions contained in the software and documentation are
30  * those of the authors and should not be interpreted as representing official
31  * policies, either expressed or implied, of the FreeBSD Project.
32  *
33  * $FreeBSD$
34  */
35 
36 #ifndef	_SYS_EFSYS_H
37 #define	_SYS_EFSYS_H
38 
39 #ifdef	__cplusplus
40 extern "C" {
41 #endif
42 
43 #include <sys/param.h>
44 #include <sys/bus.h>
45 #include <sys/endian.h>
46 #include <sys/lock.h>
47 #include <sys/malloc.h>
48 #include <sys/mbuf.h>
49 #include <sys/mutex.h>
50 #include <sys/rwlock.h>
51 #include <sys/sdt.h>
52 #include <sys/systm.h>
53 
54 #include <machine/bus.h>
55 #include <machine/endian.h>
56 
57 #define	EFSYS_HAS_UINT64 1
58 #if defined(__x86_64__)
59 #define	EFSYS_USE_UINT64 1
60 #else
61 #define	EFSYS_USE_UINT64 0
62 #endif
63 #define	EFSYS_HAS_SSE2_M128 0
64 #if _BYTE_ORDER == _BIG_ENDIAN
65 #define	EFSYS_IS_BIG_ENDIAN 1
66 #define	EFSYS_IS_LITTLE_ENDIAN 0
67 #elif _BYTE_ORDER == _LITTLE_ENDIAN
68 #define	EFSYS_IS_BIG_ENDIAN 0
69 #define	EFSYS_IS_LITTLE_ENDIAN 1
70 #endif
71 #include "efx_types.h"
72 
73 /* Common code requires memmove() */
74 #if __FreeBSD_version < 800068
75 #define	memmove(d, s, l) bcopy(s, d, l)
76 #endif
77 
78 /* FreeBSD equivalents of Solaris things */
79 #ifndef _NOTE
80 #define	_NOTE(s)
81 #endif
82 
83 #ifndef B_FALSE
84 #define	B_FALSE	FALSE
85 #endif
86 #ifndef B_TRUE
87 #define	B_TRUE	TRUE
88 #endif
89 
90 #ifndef IS_P2ALIGNED
91 #define	IS_P2ALIGNED(v, a)	((((uintptr_t)(v)) & ((uintptr_t)(a) - 1)) == 0)
92 #endif
93 
94 #ifndef P2ROUNDUP
95 #define	P2ROUNDUP(x, align)	(-(-(x) & -(align)))
96 #endif
97 
98 #ifndef P2ALIGN
99 #define	P2ALIGN(_x, _a)		((_x) & -(_a))
100 #endif
101 
102 #ifndef ISP2
103 #define	ISP2(x)			(((x) & ((x) - 1)) == 0)
104 #endif
105 
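/*
 * Illustrative arithmetic for the power-of-two helpers above (values
 * are for explanation only and assume the alignment argument is itself
 * a power of two):
 *
 *	IS_P2ALIGNED(24, 8)	-> 1	(24 is a multiple of 8)
 *	IS_P2ALIGNED(10, 8)	-> 0
 *	P2ROUNDUP(10, 8)	-> 16	(round up to a multiple of 8)
 *	P2ALIGN(10, 8)		-> 8	(round down to a multiple of 8)
 *	ISP2(64)		-> 1	(64 is a power of two)
 *	ISP2(48)		-> 0
 */
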
106 #if defined(__x86_64__) && __FreeBSD_version >= 1000000
107 
108 #define	SFXGE_USE_BUS_SPACE_8		1
109 
110 #if !defined(bus_space_read_stream_8)
111 
112 #define	bus_space_read_stream_8(t, h, o)				\
113 	bus_space_read_8((t), (h), (o))
114 
115 #define	bus_space_write_stream_8(t, h, o, v)				\
116 	bus_space_write_8((t), (h), (o), (v))
117 
118 #endif
119 
120 #endif
121 
122 #define	ENOTACTIVE EINVAL
123 
124 /* Memory type to use on FreeBSD */
125 MALLOC_DECLARE(M_SFXGE);
126 
127 /* Machine-dependent prefetch wrappers */
128 #if defined(__i386__) || defined(__amd64__)
129 static __inline void
130 prefetch_read_many(void *addr)
131 {
132 
133 	__asm__(
134 	    "prefetcht0 (%0)"
135 	    :
136 	    : "r" (addr));
137 }
138 
139 static __inline void
140 prefetch_read_once(void *addr)
141 {
142 
143 	__asm__(
144 	    "prefetchnta (%0)"
145 	    :
146 	    : "r" (addr));
147 }
148 #elif defined(__sparc64__)
149 static __inline void
150 prefetch_read_many(void *addr)
151 {
152 
153 	__asm__(
154 	    "prefetch [%0], 0"
155 	    :
156 	    : "r" (addr));
157 }
158 
159 static __inline void
160 prefetch_read_once(void *addr)
161 {
162 
163 	__asm__(
164 	    "prefetch [%0], 1"
165 	    :
166 	    : "r" (addr));
167 }
168 #else
169 static __inline void
170 prefetch_read_many(void *addr)
171 {
172 
173 }
174 
175 static __inline void
176 prefetch_read_once(void *addr)
177 {
178 
179 }
180 #endif
181 
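/*
 * Illustrative use of the prefetch wrappers above (a sketch only; the
 * names "next_desc", "current_desc" and "process" are hypothetical).
 * Prefetching the next descriptor while the current one is processed
 * hides part of the memory latency on architectures that implement the
 * hint; elsewhere the wrappers compile to nothing.
 */
#if 0	/* example only, not compiled */
	while (more_work) {
		prefetch_read_many(next_desc);	/* will be read again soon */
		process(current_desc);
		current_desc = next_desc;
		next_desc++;
	}
#endif
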
182 #if defined(__i386__) || defined(__amd64__)
183 #include <vm/vm.h>
184 #include <vm/pmap.h>
185 #endif
186 static __inline void
187 sfxge_map_mbuf_fast(bus_dma_tag_t tag, bus_dmamap_t map,
188 		    struct mbuf *m, bus_dma_segment_t *seg)
189 {
190 #if defined(__i386__) || defined(__amd64__)
191 	seg->ds_addr = pmap_kextract(mtod(m, vm_offset_t));
192 	seg->ds_len = m->m_len;
193 #else
194 	int nsegstmp;
195 
196 	bus_dmamap_load_mbuf_sg(tag, map, m, seg, &nsegstmp, 0);
197 #endif
198 }
199 
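/*
 * Illustrative call of sfxge_map_mbuf_fast() (a sketch only; "txq",
 * "stmp" and the array sizes are hypothetical).  On i386/amd64 the
 * helper avoids a full bus_dmamap_load_mbuf_sg() for the common case
 * of a single contiguous mbuf by translating the virtual address
 * directly with pmap_kextract(); on other architectures it falls back
 * to the generic busdma path.
 */
#if 0	/* example only, not compiled */
	bus_dma_segment_t seg;
	bus_dma_segment_t segs[16];
	int nsegs;

	if (m->m_next == NULL) {
		/* Single buffer: take the fast path. */
		sfxge_map_mbuf_fast(txq->packet_dma_tag, stmp->map, m, &seg);
	} else {
		/* Chained mbufs still need the generic mapping path. */
		bus_dmamap_load_mbuf_sg(txq->packet_dma_tag, stmp->map,
		    m, segs, &nsegs, 0);
	}
#endif
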
200 /* Source annotation (SAL) modifiers used for Windows builds */
201 #define	__in
202 #define	__in_opt
203 #define	__in_ecount(_n)
204 #define	__in_ecount_opt(_n)
205 #define	__in_bcount(_n)
206 #define	__in_bcount_opt(_n)
207 
208 #define	__out
209 #define	__out_opt
210 #define	__out_ecount(_n)
211 #define	__out_ecount_opt(_n)
212 #define	__out_bcount(_n)
213 #define	__out_bcount_opt(_n)
214 #define	__out_bcount_part(_n, _l)
215 #define	__out_bcount_part_opt(_n, _l)
216 
217 #define	__deref_out
218 
219 #define	__inout
220 #define	__inout_opt
221 #define	__inout_ecount(_n)
222 #define	__inout_ecount_opt(_n)
223 #define	__inout_bcount(_n)
224 #define	__inout_bcount_opt(_n)
225 #define	__inout_bcount_full_opt(_n)
226 
227 #define	__deref_out_bcount_opt(n)
228 
229 #define	__checkReturn
230 #define	__success(_x)
231 
232 #define	__drv_when(_p, _c)
233 
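/*
 * The empty definitions above let the common code keep its Microsoft
 * SAL style parameter annotations while they compile to nothing on
 * FreeBSD.  A hypothetical annotated prototype might read:
 */
#if 0	/* example only, not compiled */
extern	__checkReturn	int
example_fn(
	__in			uint32_t index,
	__out_bcount(size)	caddr_t buffer,
	__in			size_t size);
#endif
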
234 /* Code inclusion options */
235 
236 
237 #define	EFSYS_OPT_NAMES 1
238 
239 #define	EFSYS_OPT_SIENA 1
240 #define	EFSYS_OPT_HUNTINGTON 1
241 #define	EFSYS_OPT_MEDFORD 1
242 #ifdef DEBUG
243 #define	EFSYS_OPT_CHECK_REG 1
244 #else
245 #define	EFSYS_OPT_CHECK_REG 0
246 #endif
247 
248 #define	EFSYS_OPT_MCDI 1
249 #define	EFSYS_OPT_MCDI_LOGGING 0
250 #define	EFSYS_OPT_MCDI_PROXY_AUTH 0
251 
252 #define	EFSYS_OPT_MAC_STATS 1
253 
254 #define	EFSYS_OPT_LOOPBACK 0
255 
256 #define	EFSYS_OPT_MON_MCDI 0
257 #define	EFSYS_OPT_MON_STATS 0
258 
259 #define	EFSYS_OPT_PHY_STATS 1
260 #define	EFSYS_OPT_BIST 1
261 #define	EFSYS_OPT_PHY_LED_CONTROL 1
262 #define	EFSYS_OPT_PHY_FLAGS 0
263 
264 #define	EFSYS_OPT_VPD 1
265 #define	EFSYS_OPT_NVRAM 1
266 #define	EFSYS_OPT_BOOTCFG 0
267 
268 #define	EFSYS_OPT_DIAG 0
269 #define	EFSYS_OPT_RX_SCALE 1
270 #define	EFSYS_OPT_QSTATS 1
271 #define	EFSYS_OPT_FILTER 1
272 #define	EFSYS_OPT_RX_SCATTER 0
273 
274 #define	EFSYS_OPT_EV_PREFETCH 0
275 
276 #define	EFSYS_OPT_DECODE_INTR_FATAL 1
277 
278 #define	EFSYS_OPT_LICENSING 0
279 
280 #define	EFSYS_OPT_ALLOW_UNCONFIGURED_NIC 0
281 
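/*
 * The EFSYS_OPT_* values above are consumed by preprocessor checks in
 * the efx common code, so a feature set to 0 here is compiled out
 * entirely rather than merely disabled at run time; an illustrative
 * (hypothetical) use would be
 *
 *	#if EFSYS_OPT_MCDI_LOGGING
 *	...MCDI logging support...
 *	#endif
 */
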
282 /* ID */
283 
284 typedef struct __efsys_identifier_s	efsys_identifier_t;
285 
286 /* PROBE */
287 
288 #ifndef DTRACE_PROBE
289 
290 #define	EFSYS_PROBE(_name)
291 
292 #define	EFSYS_PROBE1(_name, _type1, _arg1)
293 
294 #define	EFSYS_PROBE2(_name, _type1, _arg1, _type2, _arg2)
295 
296 #define	EFSYS_PROBE3(_name, _type1, _arg1, _type2, _arg2,		\
297 	    _type3, _arg3)
298 
299 #define	EFSYS_PROBE4(_name, _type1, _arg1, _type2, _arg2,		\
300 	    _type3, _arg3, _type4, _arg4)
301 
302 #define	EFSYS_PROBE5(_name, _type1, _arg1, _type2, _arg2,		\
303 	    _type3, _arg3, _type4, _arg4, _type5, _arg5)
304 
305 #define	EFSYS_PROBE6(_name, _type1, _arg1, _type2, _arg2,		\
306 	    _type3, _arg3, _type4, _arg4, _type5, _arg5,		\
307 	    _type6, _arg6)
308 
309 #define	EFSYS_PROBE7(_name, _type1, _arg1, _type2, _arg2,		\
310 	    _type3, _arg3, _type4, _arg4, _type5, _arg5,		\
311 	    _type6, _arg6, _type7, _arg7)
312 
313 #else /* DTRACE_PROBE */
314 
315 #define	EFSYS_PROBE(_name)						\
316 	DTRACE_PROBE(_name)
317 
318 #define	EFSYS_PROBE1(_name, _type1, _arg1)				\
319 	DTRACE_PROBE1(_name, _type1, _arg1)
320 
321 #define	EFSYS_PROBE2(_name, _type1, _arg1, _type2, _arg2)		\
322 	DTRACE_PROBE2(_name, _type1, _arg1, _type2, _arg2)
323 
324 #define	EFSYS_PROBE3(_name, _type1, _arg1, _type2, _arg2,		\
325 	    _type3, _arg3)						\
326 	DTRACE_PROBE3(_name, _type1, _arg1, _type2, _arg2,		\
327 	    _type3, _arg3)
328 
329 #define	EFSYS_PROBE4(_name, _type1, _arg1, _type2, _arg2,		\
330 	    _type3, _arg3, _type4, _arg4)				\
331 	DTRACE_PROBE4(_name, _type1, _arg1, _type2, _arg2,		\
332 	    _type3, _arg3, _type4, _arg4)
333 
334 #ifdef DTRACE_PROBE5
335 #define	EFSYS_PROBE5(_name, _type1, _arg1, _type2, _arg2,		\
336 	    _type3, _arg3, _type4, _arg4, _type5, _arg5)		\
337 	DTRACE_PROBE5(_name, _type1, _arg1, _type2, _arg2,		\
338 	    _type3, _arg3, _type4, _arg4, _type5, _arg5)
339 #else
340 #define	EFSYS_PROBE5(_name, _type1, _arg1, _type2, _arg2,		\
341 	    _type3, _arg3, _type4, _arg4, _type5, _arg5)		\
342 	DTRACE_PROBE4(_name, _type1, _arg1, _type2, _arg2,		\
343 	    _type3, _arg3, _type4, _arg4)
344 #endif
345 
346 #ifdef DTRACE_PROBE6
347 #define	EFSYS_PROBE6(_name, _type1, _arg1, _type2, _arg2,		\
348 	    _type3, _arg3, _type4, _arg4, _type5, _arg5,		\
349 	    _type6, _arg6)						\
350 	DTRACE_PROBE6(_name, _type1, _arg1, _type2, _arg2,		\
351 	    _type3, _arg3, _type4, _arg4, _type5, _arg5,		\
352 	    _type6, _arg6)
353 #else
354 #define	EFSYS_PROBE6(_name, _type1, _arg1, _type2, _arg2,		\
355 	    _type3, _arg3, _type4, _arg4, _type5, _arg5,		\
356 	    _type6, _arg6)						\
357 	EFSYS_PROBE5(_name, _type1, _arg1, _type2, _arg2,		\
358 	    _type3, _arg3, _type4, _arg4, _type5, _arg5)
359 #endif
360 
361 #ifdef DTRACE_PROBE7
362 #define	EFSYS_PROBE7(_name, _type1, _arg1, _type2, _arg2,		\
363 	    _type3, _arg3, _type4, _arg4, _type5, _arg5,		\
364 	    _type6, _arg6, _type7, _arg7)				\
365 	DTRACE_PROBE7(_name, _type1, _arg1, _type2, _arg2,		\
366 	    _type3, _arg3, _type4, _arg4, _type5, _arg5,		\
367 	    _type6, _arg6, _type7, _arg7)
368 #else
369 #define	EFSYS_PROBE7(_name, _type1, _arg1, _type2, _arg2,		\
370 	    _type3, _arg3, _type4, _arg4, _type5, _arg5,		\
371 	    _type6, _arg6, _type7, _arg7)				\
372 	EFSYS_PROBE6(_name, _type1, _arg1, _type2, _arg2,		\
373 	    _type3, _arg3, _type4, _arg4, _type5, _arg5,		\
374 	    _type6, _arg6)
375 #endif
376 
377 #endif /* DTRACE_PROBE */
378 
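/*
 * On a DTrace-enabled kernel the EFSYS_PROBEn() macros expand to
 * DTRACE_PROBEn() and the resulting probes can be inspected with
 * dtrace(1) (for example, listed with "dtrace -l"); otherwise they
 * expand to nothing.  An illustrative (hypothetical) probe site:
 */
#if 0	/* example only, not compiled */
	EFSYS_PROBE2(example_probe, unsigned int, offset,
	    uint32_t, value);
#endif
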
379 /* DMA */
380 
381 typedef uint64_t		efsys_dma_addr_t;
382 
383 typedef struct efsys_mem_s {
384 	bus_dma_tag_t		esm_tag;
385 	bus_dmamap_t		esm_map;
386 	caddr_t			esm_base;
387 	efsys_dma_addr_t	esm_addr;
388 } efsys_mem_t;
389 
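/*
 * esm_base is the kernel virtual address of the DMA buffer and
 * esm_addr is the corresponding device-visible bus address.  Both are
 * filled in by the driver when it allocates the memory and loads the
 * DMA map; the sketch below is illustrative only ("example_dma_alloc"
 * is hypothetical and error handling of the map load is omitted).
 */
#if 0	/* example only, not compiled */
static int
example_dma_alloc(efsys_mem_t *esmp, bus_addr_t paddr)
{
	int rc;

	rc = bus_dmamem_alloc(esmp->esm_tag, (void **)&esmp->esm_base,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO, &esmp->esm_map);
	if (rc != 0)
		return (rc);

	/* paddr would come from the bus_dmamap_load() callback. */
	esmp->esm_addr = paddr;
	return (0);
}
#endif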
390 
391 #define	EFSYS_MEM_ZERO(_esmp, _size)					\
392 	do {								\
393 		(void) memset((_esmp)->esm_base, 0, (_size));		\
394 									\
395 	_NOTE(CONSTANTCONDITION)					\
396 	} while (B_FALSE)
397 
398 #define	EFSYS_MEM_READD(_esmp, _offset, _edp)				\
399 	do {								\
400 		uint32_t *addr;						\
401 									\
402 		_NOTE(CONSTANTCONDITION)				\
403 		KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_dword_t)),	\
404 		    ("not power of 2 aligned"));			\
405 									\
406 		addr = (void *)((_esmp)->esm_base + (_offset));		\
407 									\
408 		(_edp)->ed_u32[0] = *addr;				\
409 									\
410 		EFSYS_PROBE2(mem_readd, unsigned int, (_offset),	\
411 		    uint32_t, (_edp)->ed_u32[0]);			\
412 									\
413 	_NOTE(CONSTANTCONDITION)					\
414 	} while (B_FALSE)
415 
416 #if defined(__x86_64__)
417 #define	EFSYS_MEM_READQ(_esmp, _offset, _eqp)				\
418 	do {								\
419 		uint64_t *addr;						\
420 									\
421 		_NOTE(CONSTANTCONDITION)				\
422 		KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_qword_t)),	\
423 		    ("not power of 2 aligned"));			\
424 									\
425 		addr = (void *)((_esmp)->esm_base + (_offset));		\
426 									\
427 		(_eqp)->eq_u64[0] = *addr;				\
428 									\
429 		EFSYS_PROBE3(mem_readq, unsigned int, (_offset),	\
430 		    uint32_t, (_eqp)->eq_u32[1],			\
431 		    uint32_t, (_eqp)->eq_u32[0]);			\
432 									\
433 	_NOTE(CONSTANTCONDITION)					\
434 	} while (B_FALSE)
435 #else
436 #define	EFSYS_MEM_READQ(_esmp, _offset, _eqp)				\
437 	do {								\
438 		uint32_t *addr;						\
439 									\
440 		_NOTE(CONSTANTCONDITION)				\
441 		KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_qword_t)),	\
442 		    ("not power of 2 aligned"));			\
443 									\
444 		addr = (void *)((_esmp)->esm_base + (_offset));		\
445 									\
446 		(_eqp)->eq_u32[0] = *addr++;				\
447 		(_eqp)->eq_u32[1] = *addr;				\
448 									\
449 		EFSYS_PROBE3(mem_readq, unsigned int, (_offset),	\
450 		    uint32_t, (_eqp)->eq_u32[1],			\
451 		    uint32_t, (_eqp)->eq_u32[0]);			\
452 									\
453 	_NOTE(CONSTANTCONDITION)					\
454 	} while (B_FALSE)
455 #endif
456 
457 #if defined(__x86_64__)
458 #define	EFSYS_MEM_READO(_esmp, _offset, _eop)				\
459 	do {								\
460 		uint64_t *addr;						\
461 									\
462 		_NOTE(CONSTANTCONDITION)				\
463 		KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_oword_t)),	\
464 		    ("not power of 2 aligned"));			\
465 									\
466 		addr = (void *)((_esmp)->esm_base + (_offset));		\
467 									\
468 		(_eop)->eo_u64[0] = *addr++;				\
469 		(_eop)->eo_u64[1] = *addr;				\
470 									\
471 		EFSYS_PROBE5(mem_reado, unsigned int, (_offset),	\
472 		    uint32_t, (_eop)->eo_u32[3],			\
473 		    uint32_t, (_eop)->eo_u32[2],			\
474 		    uint32_t, (_eop)->eo_u32[1],			\
475 		    uint32_t, (_eop)->eo_u32[0]);			\
476 									\
477 	_NOTE(CONSTANTCONDITION)					\
478 	} while (B_FALSE)
479 #else
480 #define	EFSYS_MEM_READO(_esmp, _offset, _eop)				\
481 	do {								\
482 		uint32_t *addr;						\
483 									\
484 		_NOTE(CONSTANTCONDITION)				\
485 		KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_oword_t)),	\
486 		    ("not power of 2 aligned"));			\
487 									\
488 		addr = (void *)((_esmp)->esm_base + (_offset));		\
489 									\
490 		(_eop)->eo_u32[0] = *addr++;				\
491 		(_eop)->eo_u32[1] = *addr++;				\
492 		(_eop)->eo_u32[2] = *addr++;				\
493 		(_eop)->eo_u32[3] = *addr;				\
494 									\
495 		EFSYS_PROBE5(mem_reado, unsigned int, (_offset),	\
496 		    uint32_t, (_eop)->eo_u32[3],			\
497 		    uint32_t, (_eop)->eo_u32[2],			\
498 		    uint32_t, (_eop)->eo_u32[1],			\
499 		    uint32_t, (_eop)->eo_u32[0]);			\
500 									\
501 	_NOTE(CONSTANTCONDITION)					\
502 	} while (B_FALSE)
503 #endif
504 
505 #define	EFSYS_MEM_WRITED(_esmp, _offset, _edp)				\
506 	do {								\
507 		uint32_t *addr;						\
508 									\
509 		_NOTE(CONSTANTCONDITION)				\
510 		KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_dword_t)),	\
511 		    ("not power of 2 aligned"));			\
512 									\
513 		EFSYS_PROBE2(mem_writed, unsigned int, (_offset),	\
514 		    uint32_t, (_edp)->ed_u32[0]);			\
515 									\
516 		addr = (void *)((_esmp)->esm_base + (_offset));		\
517 									\
518 		*addr = (_edp)->ed_u32[0];				\
519 									\
520 	_NOTE(CONSTANTCONDITION)					\
521 	} while (B_FALSE)
522 
523 #if defined(__x86_64__)
524 #define	EFSYS_MEM_WRITEQ(_esmp, _offset, _eqp)				\
525 	do {								\
526 		uint64_t *addr;						\
527 									\
528 		_NOTE(CONSTANTCONDITION)				\
529 		KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_qword_t)),	\
530 		    ("not power of 2 aligned"));			\
531 									\
532 		EFSYS_PROBE3(mem_writeq, unsigned int, (_offset),	\
533 		    uint32_t, (_eqp)->eq_u32[1],			\
534 		    uint32_t, (_eqp)->eq_u32[0]);			\
535 									\
536 		addr = (void *)((_esmp)->esm_base + (_offset));		\
537 									\
538 		*addr   = (_eqp)->eq_u64[0];				\
539 									\
540 	_NOTE(CONSTANTCONDITION)					\
541 	} while (B_FALSE)
542 
543 #else
544 #define	EFSYS_MEM_WRITEQ(_esmp, _offset, _eqp)				\
545 	do {								\
546 		uint32_t *addr;						\
547 									\
548 		_NOTE(CONSTANTCONDITION)				\
549 		KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_qword_t)),	\
550 		    ("not power of 2 aligned"));			\
551 									\
552 		EFSYS_PROBE3(mem_writeq, unsigned int, (_offset),	\
553 		    uint32_t, (_eqp)->eq_u32[1],			\
554 		    uint32_t, (_eqp)->eq_u32[0]);			\
555 									\
556 		addr = (void *)((_esmp)->esm_base + (_offset));		\
557 									\
558 		*addr++ = (_eqp)->eq_u32[0];				\
559 		*addr   = (_eqp)->eq_u32[1];				\
560 									\
561 	_NOTE(CONSTANTCONDITION)					\
562 	} while (B_FALSE)
563 #endif
564 
565 #if defined(__x86_64__)
566 #define	EFSYS_MEM_WRITEO(_esmp, _offset, _eop)				\
567 	do {								\
568 		uint64_t *addr;						\
569 									\
570 		_NOTE(CONSTANTCONDITION)				\
571 		KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_oword_t)),	\
572 		    ("not power of 2 aligned"));			\
573 									\
574 		EFSYS_PROBE5(mem_writeo, unsigned int, (_offset),	\
575 		    uint32_t, (_eop)->eo_u32[3],			\
576 		    uint32_t, (_eop)->eo_u32[2],			\
577 		    uint32_t, (_eop)->eo_u32[1],			\
578 		    uint32_t, (_eop)->eo_u32[0]);			\
579 									\
580 		addr = (void *)((_esmp)->esm_base + (_offset));		\
581 									\
582 		*addr++ = (_eop)->eo_u64[0];				\
583 		*addr   = (_eop)->eo_u64[1];				\
584 									\
585 	_NOTE(CONSTANTCONDITION)					\
586 	} while (B_FALSE)
587 #else
588 #define	EFSYS_MEM_WRITEO(_esmp, _offset, _eop)				\
589 	do {								\
590 		uint32_t *addr;						\
591 									\
592 		_NOTE(CONSTANTCONDITION)				\
593 		KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_oword_t)),	\
594 		    ("not power of 2 aligned"));			\
595 									\
596 		EFSYS_PROBE5(mem_writeo, unsigned int, (_offset),	\
597 		    uint32_t, (_eop)->eo_u32[3],			\
598 		    uint32_t, (_eop)->eo_u32[2],			\
599 		    uint32_t, (_eop)->eo_u32[1],			\
600 		    uint32_t, (_eop)->eo_u32[0]);			\
601 									\
602 		addr = (void *)((_esmp)->esm_base + (_offset));		\
603 									\
604 		*addr++ = (_eop)->eo_u32[0];				\
605 		*addr++ = (_eop)->eo_u32[1];				\
606 		*addr++ = (_eop)->eo_u32[2];				\
607 		*addr   = (_eop)->eo_u32[3];				\
608 									\
609 	_NOTE(CONSTANTCONDITION)					\
610 	} while (B_FALSE)
611 #endif
612 
613 #define	EFSYS_MEM_ADDR(_esmp)						\
614 	((_esmp)->esm_addr)
615 
616 #define	EFSYS_MEM_IS_NULL(_esmp)					\
617 	((_esmp)->esm_base == NULL)
618 
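/*
 * Illustrative use of the EFSYS_MEM_*() accessors above (a sketch
 * only; "esmp" and the offset are hypothetical, and the EFX_*_DWORD
 * helpers come from efx_types.h).  The accessors touch host memory via
 * esm_base, so the EFSYS_DMA_SYNC_*() macros defined later in this
 * file are generally still needed around device accesses.
 */
#if 0	/* example only, not compiled */
	efx_dword_t dword;

	EFX_POPULATE_DWORD_1(dword, EFX_DWORD_0, 0xdeadbeef);
	EFSYS_MEM_WRITED(esmp, 0, &dword);
	EFSYS_DMA_SYNC_FOR_DEVICE(esmp, 0, sizeof (dword));

	EFSYS_DMA_SYNC_FOR_KERNEL(esmp, 0, sizeof (dword));
	EFSYS_MEM_READD(esmp, 0, &dword);
#endif
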
619 /* BAR */
620 
621 #define	SFXGE_LOCK_NAME_MAX	16
622 
623 typedef struct efsys_bar_s {
624 	struct mtx		esb_lock;
625 	char			esb_lock_name[SFXGE_LOCK_NAME_MAX];
626 	bus_space_tag_t		esb_tag;
627 	bus_space_handle_t	esb_handle;
628 	int			esb_rid;
629 	struct resource		*esb_res;
630 } efsys_bar_t;
631 
632 #define	SFXGE_BAR_LOCK_INIT(_esbp, _ifname)				\
633 	do {								\
634 		snprintf((_esbp)->esb_lock_name,			\
635 			 sizeof((_esbp)->esb_lock_name),		\
636 			 "%s:bar", (_ifname));				\
637 		mtx_init(&(_esbp)->esb_lock, (_esbp)->esb_lock_name,	\
638 			 NULL, MTX_DEF);				\
639 	_NOTE(CONSTANTCONDITION)					\
640 	} while (B_FALSE)
641 #define	SFXGE_BAR_LOCK_DESTROY(_esbp)					\
642 	mtx_destroy(&(_esbp)->esb_lock)
643 #define	SFXGE_BAR_LOCK(_esbp)						\
644 	mtx_lock(&(_esbp)->esb_lock)
645 #define	SFXGE_BAR_UNLOCK(_esbp)						\
646 	mtx_unlock(&(_esbp)->esb_lock)
647 
648 #define	EFSYS_BAR_READD(_esbp, _offset, _edp, _lock)			\
649 	do {								\
650 		_NOTE(CONSTANTCONDITION)				\
651 		KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_dword_t)),	\
652 		    ("not power of 2 aligned"));			\
653 									\
654 		_NOTE(CONSTANTCONDITION)				\
655 		if (_lock)						\
656 			SFXGE_BAR_LOCK(_esbp);				\
657 									\
658 		(_edp)->ed_u32[0] = bus_space_read_stream_4(		\
659 		    (_esbp)->esb_tag, (_esbp)->esb_handle,		\
660 		    (_offset));						\
661 									\
662 		EFSYS_PROBE2(bar_readd, unsigned int, (_offset),	\
663 		    uint32_t, (_edp)->ed_u32[0]);			\
664 									\
665 		_NOTE(CONSTANTCONDITION)				\
666 		if (_lock)						\
667 			SFXGE_BAR_UNLOCK(_esbp);			\
668 	_NOTE(CONSTANTCONDITION)					\
669 	} while (B_FALSE)
670 
671 #if defined(SFXGE_USE_BUS_SPACE_8)
672 #define	EFSYS_BAR_READQ(_esbp, _offset, _eqp)				\
673 	do {								\
674 		_NOTE(CONSTANTCONDITION)				\
675 		KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_qword_t)),	\
676 		    ("not power of 2 aligned"));			\
677 									\
678 		SFXGE_BAR_LOCK(_esbp);					\
679 									\
680 		(_eqp)->eq_u64[0] = bus_space_read_stream_8(		\
681 		    (_esbp)->esb_tag, (_esbp)->esb_handle,		\
682 		    (_offset));						\
683 									\
684 		EFSYS_PROBE3(bar_readq, unsigned int, (_offset),	\
685 		    uint32_t, (_eqp)->eq_u32[1],			\
686 		    uint32_t, (_eqp)->eq_u32[0]);			\
687 									\
688 		SFXGE_BAR_UNLOCK(_esbp);				\
689 	_NOTE(CONSTANTCONDITION)					\
690 	} while (B_FALSE)
691 
692 #define	EFSYS_BAR_READO(_esbp, _offset, _eop, _lock)			\
693 	do {								\
694 		_NOTE(CONSTANTCONDITION)				\
695 		KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_oword_t)),	\
696 		    ("not power of 2 aligned"));			\
697 									\
698 		_NOTE(CONSTANTCONDITION)				\
699 		if (_lock)						\
700 			SFXGE_BAR_LOCK(_esbp);				\
701 									\
702 		(_eop)->eo_u64[0] = bus_space_read_stream_8(		\
703 		    (_esbp)->esb_tag, (_esbp)->esb_handle,		\
704 		    (_offset));						\
705 		(_eop)->eo_u64[1] = bus_space_read_stream_8(		\
706 		    (_esbp)->esb_tag, (_esbp)->esb_handle,		\
707 		    (_offset) + 8);					\
708 									\
709 		EFSYS_PROBE5(bar_reado, unsigned int, (_offset),	\
710 		    uint32_t, (_eop)->eo_u32[3],			\
711 		    uint32_t, (_eop)->eo_u32[2],			\
712 		    uint32_t, (_eop)->eo_u32[1],			\
713 		    uint32_t, (_eop)->eo_u32[0]);			\
714 									\
715 		_NOTE(CONSTANTCONDITION)				\
716 		if (_lock)						\
717 			SFXGE_BAR_UNLOCK(_esbp);			\
718 	_NOTE(CONSTANTCONDITION)					\
719 	} while (B_FALSE)
720 
721 #else
722 #define	EFSYS_BAR_READQ(_esbp, _offset, _eqp)				\
723 	do {								\
724 		_NOTE(CONSTANTCONDITION)				\
725 		KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_qword_t)),	\
726 		    ("not power of 2 aligned"));			\
727 									\
728 		SFXGE_BAR_LOCK(_esbp);					\
729 									\
730 		(_eqp)->eq_u32[0] = bus_space_read_stream_4(		\
731 		    (_esbp)->esb_tag, (_esbp)->esb_handle,		\
732 		    (_offset));						\
733 		(_eqp)->eq_u32[1] = bus_space_read_stream_4(		\
734 		    (_esbp)->esb_tag, (_esbp)->esb_handle,		\
735 		    (_offset) + 4);					\
736 									\
737 		EFSYS_PROBE3(bar_readq, unsigned int, (_offset),	\
738 		    uint32_t, (_eqp)->eq_u32[1],			\
739 		    uint32_t, (_eqp)->eq_u32[0]);			\
740 									\
741 		SFXGE_BAR_UNLOCK(_esbp);				\
742 	_NOTE(CONSTANTCONDITION)					\
743 	} while (B_FALSE)
744 
745 #define	EFSYS_BAR_READO(_esbp, _offset, _eop, _lock)			\
746 	do {								\
747 		_NOTE(CONSTANTCONDITION)				\
748 		KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_oword_t)),	\
749 		    ("not power of 2 aligned"));			\
750 									\
751 		_NOTE(CONSTANTCONDITION)				\
752 		if (_lock)						\
753 			SFXGE_BAR_LOCK(_esbp);				\
754 									\
755 		(_eop)->eo_u32[0] = bus_space_read_stream_4(		\
756 		    (_esbp)->esb_tag, (_esbp)->esb_handle,		\
757 		    (_offset));						\
758 		(_eop)->eo_u32[1] = bus_space_read_stream_4(		\
759 		    (_esbp)->esb_tag, (_esbp)->esb_handle,		\
760 		    (_offset) + 4);					\
761 		(_eop)->eo_u32[2] = bus_space_read_stream_4(		\
762 		    (_esbp)->esb_tag, (_esbp)->esb_handle,		\
763 		    (_offset) + 8);					\
764 		(_eop)->eo_u32[3] = bus_space_read_stream_4(		\
765 		    (_esbp)->esb_tag, (_esbp)->esb_handle,		\
766 		    (_offset) + 12);					\
767 									\
768 		EFSYS_PROBE5(bar_reado, unsigned int, (_offset),	\
769 		    uint32_t, (_eop)->eo_u32[3],			\
770 		    uint32_t, (_eop)->eo_u32[2],			\
771 		    uint32_t, (_eop)->eo_u32[1],			\
772 		    uint32_t, (_eop)->eo_u32[0]);			\
773 									\
774 		_NOTE(CONSTANTCONDITION)				\
775 		if (_lock)						\
776 			SFXGE_BAR_UNLOCK(_esbp);			\
777 	_NOTE(CONSTANTCONDITION)					\
778 	} while (B_FALSE)
779 #endif
780 
781 #define	EFSYS_BAR_WRITED(_esbp, _offset, _edp, _lock)			\
782 	do {								\
783 		_NOTE(CONSTANTCONDITION)				\
784 		KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_dword_t)),	\
785 		    ("not power of 2 aligned"));			\
786 									\
787 		_NOTE(CONSTANTCONDITION)				\
788 		if (_lock)						\
789 			SFXGE_BAR_LOCK(_esbp);				\
790 									\
791 		EFSYS_PROBE2(bar_writed, unsigned int, (_offset),	\
792 		    uint32_t, (_edp)->ed_u32[0]);			\
793 									\
794 		/*							\
795 		 * Make sure that any previous writes to the dword	\
796 		 * have completed; this should be cheaper than a	\
797 		 * barrier placed just after the write below.		\
798 		 */							\
799 		bus_space_barrier((_esbp)->esb_tag, (_esbp)->esb_handle,\
800 		    (_offset), sizeof (efx_dword_t),			\
801 		    BUS_SPACE_BARRIER_WRITE);				\
802 		bus_space_write_stream_4((_esbp)->esb_tag,		\
803 		    (_esbp)->esb_handle,				\
804 		    (_offset), (_edp)->ed_u32[0]);			\
805 									\
806 		_NOTE(CONSTANTCONDITION)				\
807 		if (_lock)						\
808 			SFXGE_BAR_UNLOCK(_esbp);			\
809 	_NOTE(CONSTANTCONDITION)					\
810 	} while (B_FALSE)
811 
812 #if defined(SFXGE_USE_BUS_SPACE_8)
813 #define	EFSYS_BAR_WRITEQ(_esbp, _offset, _eqp)				\
814 	do {								\
815 		_NOTE(CONSTANTCONDITION)				\
816 		KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_qword_t)),	\
817 		    ("not power of 2 aligned"));			\
818 									\
819 		SFXGE_BAR_LOCK(_esbp);					\
820 									\
821 		EFSYS_PROBE3(bar_writeq, unsigned int, (_offset),	\
822 		    uint32_t, (_eqp)->eq_u32[1],			\
823 		    uint32_t, (_eqp)->eq_u32[0]);			\
824 									\
825 		/*							\
826 		 * Make sure that any previous writes to the qword	\
827 		 * have completed; this should be cheaper than a	\
828 		 * barrier placed just after the write below.		\
829 		 */							\
830 		bus_space_barrier((_esbp)->esb_tag, (_esbp)->esb_handle,\
831 		    (_offset), sizeof (efx_qword_t),			\
832 		    BUS_SPACE_BARRIER_WRITE);				\
833 		bus_space_write_stream_8((_esbp)->esb_tag,		\
834 		    (_esbp)->esb_handle,				\
835 		    (_offset), (_eqp)->eq_u64[0]);			\
836 									\
837 		SFXGE_BAR_UNLOCK(_esbp);				\
838 	_NOTE(CONSTANTCONDITION)					\
839 	} while (B_FALSE)
840 #else
841 #define	EFSYS_BAR_WRITEQ(_esbp, _offset, _eqp)				\
842 	do {								\
843 		_NOTE(CONSTANTCONDITION)				\
844 		KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_qword_t)),	\
845 		    ("not power of 2 aligned"));			\
846 									\
847 		SFXGE_BAR_LOCK(_esbp);					\
848 									\
849 		EFSYS_PROBE3(bar_writeq, unsigned int, (_offset),	\
850 		    uint32_t, (_eqp)->eq_u32[1],			\
851 		    uint32_t, (_eqp)->eq_u32[0]);			\
852 									\
853 		/*							\
854 		 * Make sure that any previous writes to the qword	\
855 		 * have completed; this should be cheaper than a	\
856 		 * barrier placed just after the last write below.	\
857 		 */							\
858 		bus_space_barrier((_esbp)->esb_tag, (_esbp)->esb_handle,\
859 		    (_offset), sizeof (efx_qword_t),			\
860 		    BUS_SPACE_BARRIER_WRITE);				\
861 		bus_space_write_stream_4((_esbp)->esb_tag,		\
862 		    (_esbp)->esb_handle,				\
863 		    (_offset), (_eqp)->eq_u32[0]);			\
864 		/*							\
865 		 * The last dword must be written last, so barrier	\
866 		 * the entire qword to ensure that the preceding and	\
867 		 * following writes are not reordered.			\
868 		 */							\
869 		bus_space_barrier((_esbp)->esb_tag, (_esbp)->esb_handle,\
870 		    (_offset), sizeof (efx_qword_t),			\
871 		    BUS_SPACE_BARRIER_WRITE);				\
872 		bus_space_write_stream_4((_esbp)->esb_tag,		\
873 		    (_esbp)->esb_handle,				\
874 		    (_offset) + 4, (_eqp)->eq_u32[1]);			\
875 									\
876 		SFXGE_BAR_UNLOCK(_esbp);				\
877 	_NOTE(CONSTANTCONDITION)					\
878 	} while (B_FALSE)
879 #endif
880 
881 /*
882  * Guarantees 64-bit aligned 64-bit writes to a write-combined BAR mapping
883  * (required by the PIO hardware).
884  */
885 #define	EFSYS_BAR_WC_WRITEQ(_esbp, _offset, _eqp)			\
886 	do {								\
887 		_NOTE(CONSTANTCONDITION)				\
888 		KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_qword_t)),	\
889 		    ("not power of 2 aligned"));			\
890 									\
891 		(void) (_esbp);						\
892 									\
893 		/* FIXME: Perform a 64-bit write */			\
894 		KASSERT(0, ("not implemented"));			\
895 									\
896 	_NOTE(CONSTANTCONDITION)					\
897 	} while (B_FALSE)
898 
899 #if defined(SFXGE_USE_BUS_SPACE_8)
900 #define	EFSYS_BAR_WRITEO(_esbp, _offset, _eop, _lock)			\
901 	do {								\
902 		_NOTE(CONSTANTCONDITION)				\
903 		KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_oword_t)),	\
904 		    ("not power of 2 aligned"));			\
905 									\
906 		_NOTE(CONSTANTCONDITION)				\
907 		if (_lock)						\
908 			SFXGE_BAR_LOCK(_esbp);				\
909 									\
910 		EFSYS_PROBE5(bar_writeo, unsigned int, (_offset),	\
911 		    uint32_t, (_eop)->eo_u32[3],			\
912 		    uint32_t, (_eop)->eo_u32[2],			\
913 		    uint32_t, (_eop)->eo_u32[1],			\
914 		    uint32_t, (_eop)->eo_u32[0]);			\
915 									\
916 		/*							\
917 		 * Make sure that any previous writes to the oword	\
918 		 * have completed; this should be cheaper than a	\
919 		 * barrier placed just after the last write below.	\
920 		 */							\
921 		bus_space_barrier((_esbp)->esb_tag, (_esbp)->esb_handle,\
922 		    (_offset), sizeof (efx_oword_t),			\
923 		    BUS_SPACE_BARRIER_WRITE);				\
924 		bus_space_write_stream_8((_esbp)->esb_tag,		\
925 		    (_esbp)->esb_handle,				\
926 		    (_offset), (_eop)->eo_u64[0]);			\
927 		/*							\
928 		 * The last qword must be written last, so barrier	\
929 		 * the entire oword to ensure that the preceding and	\
930 		 * following writes are not reordered.			\
931 		 */							\
932 		bus_space_barrier((_esbp)->esb_tag, (_esbp)->esb_handle,\
933 		    (_offset), sizeof (efx_oword_t),			\
934 		    BUS_SPACE_BARRIER_WRITE);				\
935 		bus_space_write_stream_8((_esbp)->esb_tag,		\
936 		    (_esbp)->esb_handle,				\
937 		    (_offset) + 8, (_eop)->eo_u64[1]);			\
938 									\
939 		_NOTE(CONSTANTCONDITION)				\
940 		if (_lock)						\
941 			SFXGE_BAR_UNLOCK(_esbp);			\
942 	_NOTE(CONSTANTCONDITION)					\
943 	} while (B_FALSE)
944 
945 #else
946 #define	EFSYS_BAR_WRITEO(_esbp, _offset, _eop, _lock)			\
947 	do {								\
948 		_NOTE(CONSTANTCONDITION)				\
949 		KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_oword_t)),	\
950 		    ("not power of 2 aligned"));			\
951 									\
952 		_NOTE(CONSTANTCONDITION)				\
953 		if (_lock)						\
954 			SFXGE_BAR_LOCK(_esbp);				\
955 									\
956 		EFSYS_PROBE5(bar_writeo, unsigned int, (_offset),	\
957 		    uint32_t, (_eop)->eo_u32[3],			\
958 		    uint32_t, (_eop)->eo_u32[2],			\
959 		    uint32_t, (_eop)->eo_u32[1],			\
960 		    uint32_t, (_eop)->eo_u32[0]);			\
961 									\
962 		/*							\
963 		 * Make sure that any previous writes to the oword	\
964 		 * have completed; this should be cheaper than a	\
965 		 * barrier placed just after the last write below.	\
966 		 */							\
967 		bus_space_barrier((_esbp)->esb_tag, (_esbp)->esb_handle,\
968 		    (_offset), sizeof (efx_oword_t),			\
969 		    BUS_SPACE_BARRIER_WRITE);				\
970 		bus_space_write_stream_4((_esbp)->esb_tag,		\
971 		    (_esbp)->esb_handle,				\
972 		    (_offset), (_eop)->eo_u32[0]);			\
973 		bus_space_write_stream_4((_esbp)->esb_tag,		\
974 		    (_esbp)->esb_handle,				\
975 		    (_offset) + 4, (_eop)->eo_u32[1]);			\
976 		bus_space_write_stream_4((_esbp)->esb_tag,		\
977 		    (_esbp)->esb_handle,				\
978 		    (_offset) + 8, (_eop)->eo_u32[2]);			\
979 		/*							\
980 		 * The last dword must be written last, so barrier	\
981 		 * the entire oword to ensure that the preceding and	\
982 		 * following writes are not reordered.			\
983 		 */							\
984 		bus_space_barrier((_esbp)->esb_tag, (_esbp)->esb_handle,\
985 		    (_offset), sizeof (efx_oword_t),			\
986 		    BUS_SPACE_BARRIER_WRITE);				\
987 		bus_space_write_stream_4((_esbp)->esb_tag,		\
988 		    (_esbp)->esb_handle,				\
989 		    (_offset) + 12, (_eop)->eo_u32[3]);			\
990 									\
991 		_NOTE(CONSTANTCONDITION)				\
992 		if (_lock)						\
993 			SFXGE_BAR_UNLOCK(_esbp);			\
994 	_NOTE(CONSTANTCONDITION)					\
995 	} while (B_FALSE)
996 #endif
997 
998 /* Use the standard octo-word write for doorbell writes */
999 #define	EFSYS_BAR_DOORBELL_WRITEO(_esbp, _offset, _eop)			\
1000 	do {								\
1001 		EFSYS_BAR_WRITEO((_esbp), (_offset), (_eop), B_FALSE);	\
1002 	_NOTE(CONSTANTCONDITION)					\
1003 	} while (B_FALSE)
1004 
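/*
 * Illustrative register access through the EFSYS_BAR_*() macros above
 * (a sketch only; "esbp" and "example_reg_offset" are hypothetical).
 * Passing B_TRUE asks the macro to take the per-BAR lock itself;
 * callers that already hold the lock pass B_FALSE.
 */
#if 0	/* example only, not compiled */
	efx_oword_t oword;

	EFSYS_BAR_READO(esbp, example_reg_offset, &oword, B_TRUE);
	EFSYS_BAR_WRITEO(esbp, example_reg_offset, &oword, B_TRUE);
#endif
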
1005 /* SPIN */
1006 
1007 #define	EFSYS_SPIN(_us)							\
1008 	do {								\
1009 		DELAY(_us);						\
1010 	_NOTE(CONSTANTCONDITION)					\
1011 	} while (B_FALSE)
1012 
1013 #define	EFSYS_SLEEP	EFSYS_SPIN
1014 
1015 /* BARRIERS */
1016 
1017 #define	EFSYS_MEM_READ_BARRIER()	rmb()
1018 #define	EFSYS_PIO_WRITE_BARRIER()
1019 
1020 /* DMA SYNC */
1021 #define	EFSYS_DMA_SYNC_FOR_KERNEL(_esmp, _offset, _size)		\
1022 	do {								\
1023 		bus_dmamap_sync((_esmp)->esm_tag,			\
1024 		    (_esmp)->esm_map,					\
1025 		    BUS_DMASYNC_POSTREAD);				\
1026 	_NOTE(CONSTANTCONDITION)					\
1027 	} while (B_FALSE)
1028 
1029 #define	EFSYS_DMA_SYNC_FOR_DEVICE(_esmp, _offset, _size)		\
1030 	do {								\
1031 		bus_dmamap_sync((_esmp)->esm_tag,			\
1032 		    (_esmp)->esm_map,					\
1033 		    BUS_DMASYNC_PREWRITE);				\
1034 	_NOTE(CONSTANTCONDITION)					\
1035 	} while (B_FALSE)
1036 
1037 /* TIMESTAMP */
1038 
1039 typedef	clock_t	efsys_timestamp_t;
1040 
1041 #define	EFSYS_TIMESTAMP(_usp)						\
1042 	do {								\
1043 		clock_t now;						\
1044 									\
1045 		now = ticks;						\
1046 		*(_usp) = now * (1000000 / hz);				\
1047 	_NOTE(CONSTANTCONDITION)					\
1048 	} while (B_FALSE)
1049 
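/*
 * EFSYS_TIMESTAMP() and EFSYS_SPIN() are typically combined by the
 * common code into polling loops with a microsecond deadline, along
 * the lines of the sketch below ("example_condition" and
 * "EXAMPLE_TIMEOUT_US" are hypothetical).
 */
#if 0	/* example only, not compiled */
	efsys_timestamp_t start, now;

	EFSYS_TIMESTAMP(&start);
	do {
		if (example_condition())
			break;
		EFSYS_SPIN(10);
		EFSYS_TIMESTAMP(&now);
	} while (now - start < EXAMPLE_TIMEOUT_US);
#endif
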
1050 /* KMEM */
1051 
1052 #define	EFSYS_KMEM_ALLOC(_esip, _size, _p)				\
1053 	do {								\
1054 		(void) (_esip);						\
1055 		/*							\
1056 		 * The macro is used in non-sleepable contexts, for	\
1057 		 * example, while holding a mutex.			\
1058 		 */							\
1059 		(_p) = malloc((_size), M_SFXGE, M_NOWAIT|M_ZERO);	\
1060 	_NOTE(CONSTANTCONDITION)					\
1061 	} while (B_FALSE)
1062 
1063 #define	EFSYS_KMEM_FREE(_esip, _size, _p)				\
1064 	do {								\
1065 		(void) (_esip);						\
1066 		(void) (_size);						\
1067 		free((_p), M_SFXGE);					\
1068 	_NOTE(CONSTANTCONDITION)					\
1069 	} while (B_FALSE)
1070 
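/*
 * Illustrative use of the KMEM wrappers above (a sketch only).
 * Because the allocation uses M_NOWAIT it can fail, so callers must
 * check for NULL.
 */
#if 0	/* example only, not compiled */
	void *p;

	EFSYS_KMEM_ALLOC(esip, size, p);
	if (p == NULL)
		return (ENOMEM);
	/* ... use the buffer ... */
	EFSYS_KMEM_FREE(esip, size, p);
#endif
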
1071 /* LOCK */
1072 
1073 typedef struct efsys_lock_s {
1074 	struct mtx	lock;
1075 	char		lock_name[SFXGE_LOCK_NAME_MAX];
1076 } efsys_lock_t;
1077 
1078 #define	SFXGE_EFSYS_LOCK_INIT(_eslp, _ifname, _label)			\
1079 	do {								\
1080 		efsys_lock_t *__eslp = (_eslp);				\
1081 									\
1082 		snprintf((__eslp)->lock_name,				\
1083 			 sizeof((__eslp)->lock_name),			\
1084 			 "%s:%s", (_ifname), (_label));			\
1085 		mtx_init(&(__eslp)->lock, (__eslp)->lock_name,		\
1086 			 NULL, MTX_DEF);				\
1087 	} while (B_FALSE)
1088 #define	SFXGE_EFSYS_LOCK_DESTROY(_eslp)					\
1089 	mtx_destroy(&(_eslp)->lock)
1090 #define	SFXGE_EFSYS_LOCK(_eslp)						\
1091 	mtx_lock(&(_eslp)->lock)
1092 #define	SFXGE_EFSYS_UNLOCK(_eslp)					\
1093 	mtx_unlock(&(_eslp)->lock)
1094 #define	SFXGE_EFSYS_LOCK_ASSERT_OWNED(_eslp)				\
1095 	mtx_assert(&(_eslp)->lock, MA_OWNED)
1096 
1097 typedef int efsys_lock_state_t;
1098 
1099 #define	EFSYS_LOCK_MAGIC	0x000010c4
1100 
1101 #define	EFSYS_LOCK(_lockp, _state)					\
1102 	do {								\
1103 		SFXGE_EFSYS_LOCK(_lockp);				\
1104 		(_state) = EFSYS_LOCK_MAGIC;				\
1105 	_NOTE(CONSTANTCONDITION)					\
1106 	} while (B_FALSE)
1107 
1108 #define	EFSYS_UNLOCK(_lockp, _state)					\
1109 	do {								\
1110 		if ((_state) != EFSYS_LOCK_MAGIC)			\
1111 			KASSERT(B_FALSE, ("not locked"));		\
1112 		SFXGE_EFSYS_UNLOCK(_lockp);				\
1113 	_NOTE(CONSTANTCONDITION)					\
1114 	} while (B_FALSE)
1115 
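/*
 * Illustrative use of the EFSYS_LOCK()/EFSYS_UNLOCK() pair above
 * (a sketch only; "lockp" is hypothetical).  The _state argument
 * exists for ports that must save interrupt or priority state; on
 * FreeBSD it only carries a magic value that EFSYS_UNLOCK() checks.
 */
#if 0	/* example only, not compiled */
	efsys_lock_state_t state;

	EFSYS_LOCK(lockp, state);
	/* ... access data protected by the lock ... */
	EFSYS_UNLOCK(lockp, state);
#endif
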
1116 /* STAT */
1117 
1118 typedef uint64_t		efsys_stat_t;
1119 
1120 #define	EFSYS_STAT_INCR(_knp, _delta) 					\
1121 	do {								\
1122 		*(_knp) += (_delta);					\
1123 	_NOTE(CONSTANTCONDITION)					\
1124 	} while (B_FALSE)
1125 
1126 #define	EFSYS_STAT_DECR(_knp, _delta) 					\
1127 	do {								\
1128 		*(_knp) -= (_delta);					\
1129 	_NOTE(CONSTANTCONDITION)					\
1130 	} while (B_FALSE)
1131 
1132 #define	EFSYS_STAT_SET(_knp, _val)					\
1133 	do {								\
1134 		*(_knp) = (_val);					\
1135 	_NOTE(CONSTANTCONDITION)					\
1136 	} while (B_FALSE)
1137 
1138 #define	EFSYS_STAT_SET_QWORD(_knp, _valp)				\
1139 	do {								\
1140 		*(_knp) = le64toh((_valp)->eq_u64[0]);			\
1141 	_NOTE(CONSTANTCONDITION)					\
1142 	} while (B_FALSE)
1143 
1144 #define	EFSYS_STAT_SET_DWORD(_knp, _valp)				\
1145 	do {								\
1146 		*(_knp) = le32toh((_valp)->ed_u32[0]);			\
1147 	_NOTE(CONSTANTCONDITION)					\
1148 	} while (B_FALSE)
1149 
1150 #define	EFSYS_STAT_INCR_QWORD(_knp, _valp)				\
1151 	do {								\
1152 		*(_knp) += le64toh((_valp)->eq_u64[0]);			\
1153 	_NOTE(CONSTANTCONDITION)					\
1154 	} while (B_FALSE)
1155 
1156 #define	EFSYS_STAT_SUBR_QWORD(_knp, _valp)				\
1157 	do {								\
1158 		*(_knp) -= le64toh((_valp)->eq_u64[0]);			\
1159 	_NOTE(CONSTANTCONDITION)					\
1160 	} while (B_FALSE)
1161 
1162 /* ERR */
1163 
1164 extern void	sfxge_err(efsys_identifier_t *, unsigned int,
1165 		    uint32_t, uint32_t);
1166 
1167 #if EFSYS_OPT_DECODE_INTR_FATAL
1168 #define	EFSYS_ERR(_esip, _code, _dword0, _dword1)			\
1169 	do {								\
1170 		sfxge_err((_esip), (_code), (_dword0), (_dword1));	\
1171 	_NOTE(CONSTANTCONDITION)					\
1172 	} while (B_FALSE)
1173 #endif
1174 
1175 /* ASSERT */
1176 
1177 #define	EFSYS_ASSERT(_exp) do {						\
1178 	if (!(_exp))							\
1179 		panic("%s", #_exp);					\
1180 	} while (0)
1181 
1182 #define	EFSYS_ASSERT3(_x, _op, _y, _t) do {				\
1183 	const _t __x = (_t)(_x);					\
1184 	const _t __y = (_t)(_y);					\
1185 	if (!(__x _op __y))						\
1186 		panic("assertion failed at %s:%u", __FILE__, __LINE__);	\
1187 	} while (0)
1188 
1189 #define	EFSYS_ASSERT3U(_x, _op, _y)	EFSYS_ASSERT3(_x, _op, _y, uint64_t)
1190 #define	EFSYS_ASSERT3S(_x, _op, _y)	EFSYS_ASSERT3(_x, _op, _y, int64_t)
1191 #define	EFSYS_ASSERT3P(_x, _op, _y)	EFSYS_ASSERT3(_x, _op, _y, uintptr_t)
1192 
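/*
 * Illustrative use of the assertion macros above (a sketch only;
 * "esmp", "size" and "EXAMPLE_LIMIT" are hypothetical).  The typed
 * three-argument forms cast both operands to a common type before
 * comparing, which avoids signed/unsigned surprises.
 */
#if 0	/* example only, not compiled */
	EFSYS_ASSERT(esmp != NULL);
	EFSYS_ASSERT3U(size, <=, EXAMPLE_LIMIT);
	EFSYS_ASSERT3P(esmp->esm_base, !=, NULL);
#endif
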
1193 /* ROTATE */
1194 
1195 #define	EFSYS_HAS_ROTL_DWORD 0
1196 
1197 #ifdef	__cplusplus
1198 }
1199 #endif
1200 
1201 #endif	/* _SYS_EFSYS_H */
1202