xref: /freebsd/sys/dev/sfxge/common/efsys.h (revision 18849b5da0c5eaa88500b457be05b038813b51b1)
1 /*-
2  * Copyright (c) 2010-2015 Solarflare Communications Inc.
3  * All rights reserved.
4  *
5  * This software was developed in part by Philip Paeps under contract for
6  * Solarflare Communications, Inc.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions are met:
10  *
11  * 1. Redistributions of source code must retain the above copyright notice,
12  *    this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright notice,
14  *    this list of conditions and the following disclaimer in the documentation
15  *    and/or other materials provided with the distribution.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
18  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
19  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
20  * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
21  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
22  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
23  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
24  * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
25  * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
26  * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
27  * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28  *
29  * The views and conclusions contained in the software and documentation are
30  * those of the authors and should not be interpreted as representing official
31  * policies, either expressed or implied, of the FreeBSD Project.
32  *
33  * $FreeBSD$
34  */
35 
36 #ifndef	_SYS_EFSYS_H
37 #define	_SYS_EFSYS_H
38 
39 #ifdef	__cplusplus
40 extern "C" {
41 #endif
42 
43 #include <sys/param.h>
44 #include <sys/bus.h>
45 #include <sys/endian.h>
46 #include <sys/lock.h>
47 #include <sys/malloc.h>
48 #include <sys/mbuf.h>
49 #include <sys/mutex.h>
50 #include <sys/rwlock.h>
51 #include <sys/sdt.h>
52 #include <sys/systm.h>
53 
54 #include <machine/bus.h>
55 #include <machine/endian.h>
56 
57 #define	EFSYS_HAS_UINT64 1
58 #if defined(__x86_64__)
59 #define	EFSYS_USE_UINT64 1
60 #else
61 #define	EFSYS_USE_UINT64 0
62 #endif
63 #define	EFSYS_HAS_SSE2_M128 0
64 #if _BYTE_ORDER == _BIG_ENDIAN
65 #define	EFSYS_IS_BIG_ENDIAN 1
66 #define	EFSYS_IS_LITTLE_ENDIAN 0
67 #elif _BYTE_ORDER == _LITTLE_ENDIAN
68 #define	EFSYS_IS_BIG_ENDIAN 0
69 #define	EFSYS_IS_LITTLE_ENDIAN 1
70 #endif
71 #include "efx_types.h"
72 
73 /* Common code requires this */
74 #if __FreeBSD_version < 800068
75 #define	memmove(d, s, l) bcopy(s, d, l)
76 #endif
77 
78 /* FreeBSD equivalents of Solaris things */
79 #ifndef _NOTE
80 #define	_NOTE(s)
81 #endif
82 
83 #ifndef B_FALSE
84 #define	B_FALSE	FALSE
85 #endif
86 #ifndef B_TRUE
87 #define	B_TRUE	TRUE
88 #endif
89 
90 #ifndef IS_P2ALIGNED
91 #define	IS_P2ALIGNED(v, a)	((((uintptr_t)(v)) & ((uintptr_t)(a) - 1)) == 0)
92 #endif
93 
94 #ifndef P2ROUNDUP
95 #define	P2ROUNDUP(x, align)	(-(-(x) & -(align)))
96 #endif
97 
98 #ifndef P2ALIGN
99 #define	P2ALIGN(_x, _a)		((_x) & -(_a))
100 #endif
101 
102 #ifndef	ISP2
103 #define	ISP2(x)			(((x) & ((x) - 1)) == 0)
104 #endif
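
/*
 * Worked examples for the helpers above (illustrative only; each assumes
 * the alignment argument is a power of two):
 *
 *	IS_P2ALIGNED(0x1008, 8)	!= 0	(0x1008 is 8-byte aligned)
 *	P2ROUNDUP(13, 8)	== 16	(round 13 up to a multiple of 8)
 *	P2ALIGN(13, 8)		== 8	(round 13 down to a multiple of 8)
 *	ISP2(64) != 0, ISP2(48) == 0	(64 is a power of two, 48 is not)
 */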
105 
106 #if defined(__x86_64__) && __FreeBSD_version >= 1000000
107 
108 #define	SFXGE_USE_BUS_SPACE_8		1
109 
110 #if !defined(bus_space_read_stream_8)
111 
112 #define	bus_space_read_stream_8(t, h, o)				\
113 	bus_space_read_8((t), (h), (o))
114 
115 #define	bus_space_write_stream_8(t, h, o, v)				\
116 	bus_space_write_8((t), (h), (o), (v))
117 
118 #endif
119 
120 #endif
121 
122 #define	ENOTACTIVE EINVAL
123 
124 /* Memory type to use on FreeBSD */
125 MALLOC_DECLARE(M_SFXGE);
126 
127 /* Machine dependent prefetch wrappers */
128 #if defined(__i386__) || defined(__amd64__)
129 static __inline void
130 prefetch_read_many(void *addr)
131 {
132 
133 	__asm__(
134 	    "prefetcht0 (%0)"
135 	    :
136 	    : "r" (addr));
137 }
138 
139 static __inline void
140 prefetch_read_once(void *addr)
141 {
142 
143 	__asm__(
144 	    "prefetchnta (%0)"
145 	    :
146 	    : "r" (addr));
147 }
148 #elif defined(__sparc64__)
149 static __inline void
150 prefetch_read_many(void *addr)
151 {
152 
153 	__asm__(
154 	    "prefetch [%0], 0"
155 	    :
156 	    : "r" (addr));
157 }
158 
159 static __inline void
160 prefetch_read_once(void *addr)
161 {
162 
163 	__asm__(
164 	    "prefetch [%0], 1"
165 	    :
166 	    : "r" (addr));
167 }
168 #else
169 static __inline void
170 prefetch_read_many(void *addr)
171 {
172 
173 }
174 
175 static __inline void
176 prefetch_read_once(void *addr)
177 {
178 
179 }
180 #endif
181 
182 #if defined(__i386__) || defined(__amd64__)
183 #include <vm/vm.h>
184 #include <vm/pmap.h>
185 #endif
186 static __inline void
187 sfxge_map_mbuf_fast(bus_dma_tag_t tag, bus_dmamap_t map,
188 		    struct mbuf *m, bus_dma_segment_t *seg)
189 {
190 #if defined(__i386__) || defined(__amd64__)
191 	seg->ds_addr = pmap_kextract(mtod(m, vm_offset_t));
192 	seg->ds_len = m->m_len;
193 #else
194 	int nsegstmp;
195 
196 	bus_dmamap_load_mbuf_sg(tag, map, m, seg, &nsegstmp, 0);
197 #endif
198 }
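
/*
 * Illustrative sketch (names are placeholders, not part of this header):
 * on i386/amd64 the helper above avoids a full bus_dmamap_load_mbuf_sg()
 * call when the mbuf data is known to fit in a single DMA segment, e.g.
 * in a transmit fast path:
 *
 *	bus_dma_segment_t seg;
 *
 *	sfxge_map_mbuf_fast(dma_tag, dma_map, m, &seg);
 *	desc_addr = seg.ds_addr;
 *	desc_len = seg.ds_len;
 */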
199 
200 /* Modifiers used for Windows builds */
201 #define	__in
202 #define	__in_opt
203 #define	__in_ecount(_n)
204 #define	__in_ecount_opt(_n)
205 #define	__in_bcount(_n)
206 #define	__in_bcount_opt(_n)
207 
208 #define	__out
209 #define	__out_opt
210 #define	__out_ecount(_n)
211 #define	__out_ecount_opt(_n)
212 #define	__out_bcount(_n)
213 #define	__out_bcount_opt(_n)
214 #define	__out_bcount_part(_n, _l)
215 #define	__out_bcount_part_opt(_n, _l)
216 
217 #define	__deref_out
218 
219 #define	__inout
220 #define	__inout_opt
221 #define	__inout_ecount(_n)
222 #define	__inout_ecount_opt(_n)
223 #define	__inout_bcount(_n)
224 #define	__inout_bcount_opt(_n)
225 #define	__inout_bcount_full_opt(_n)
226 
227 #define	__deref_out_bcount_opt(n)
228 
229 #define	__checkReturn
230 #define	__success(_x)
231 
232 #define	__drv_when(_p, _c)
233 
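/*
 * With the empty definitions above, the Microsoft SAL annotations used by
 * the shared efx code become no-ops on FreeBSD.  A hypothetical annotated
 * prototype (example_fn is a placeholder) then compiles unchanged:
 *
 *	extern	__checkReturn	int
 *	example_fn(
 *	    __in			efsys_identifier_t *esip,
 *	    __out_bcount(size)		caddr_t base,
 *	    __in			size_t size);
 */
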
234 /* Code inclusion options */
235 
236 
237 #define	EFSYS_OPT_NAMES 1
238 
239 #define	EFSYS_OPT_SIENA 1
240 #define	EFSYS_OPT_HUNTINGTON 1
241 #define	EFSYS_OPT_MEDFORD 0
242 #ifdef DEBUG
243 #define	EFSYS_OPT_CHECK_REG 1
244 #else
245 #define	EFSYS_OPT_CHECK_REG 0
246 #endif
247 
248 #define	EFSYS_OPT_MCDI 1
249 #define	EFSYS_OPT_MCDI_LOGGING 0
250 #define	EFSYS_OPT_MCDI_PROXY_AUTH 0
251 
252 #define	EFSYS_OPT_MAC_STATS 1
253 
254 #define	EFSYS_OPT_LOOPBACK 0
255 
256 #define	EFSYS_OPT_MON_MCDI 0
257 #define	EFSYS_OPT_MON_STATS 0
258 
259 #define	EFSYS_OPT_PHY_STATS 1
260 #define	EFSYS_OPT_BIST 1
261 #define	EFSYS_OPT_PHY_LED_CONTROL 1
262 #define	EFSYS_OPT_PHY_FLAGS 0
263 
264 #define	EFSYS_OPT_VPD 1
265 #define	EFSYS_OPT_NVRAM 1
266 #define	EFSYS_OPT_BOOTCFG 0
267 
268 #define	EFSYS_OPT_DIAG 0
269 #define	EFSYS_OPT_WOL 1
270 #define	EFSYS_OPT_RX_SCALE 1
271 #define	EFSYS_OPT_QSTATS 1
272 #define	EFSYS_OPT_FILTER 1
273 #define	EFSYS_OPT_RX_SCATTER 0
274 
275 #define	EFSYS_OPT_EV_PREFETCH 0
276 
277 #define	EFSYS_OPT_DECODE_INTR_FATAL 1
278 
279 #define	EFSYS_OPT_LICENSING 0
280 
281 /* ID */
282 
283 typedef struct __efsys_identifier_s	efsys_identifier_t;
284 
285 /* PROBE */
286 
287 #ifndef DTRACE_PROBE
288 
289 #define	EFSYS_PROBE(_name)
290 
291 #define	EFSYS_PROBE1(_name, _type1, _arg1)
292 
293 #define	EFSYS_PROBE2(_name, _type1, _arg1, _type2, _arg2)
294 
295 #define	EFSYS_PROBE3(_name, _type1, _arg1, _type2, _arg2,		\
296 	    _type3, _arg3)
297 
298 #define	EFSYS_PROBE4(_name, _type1, _arg1, _type2, _arg2,		\
299 	    _type3, _arg3, _type4, _arg4)
300 
301 #define	EFSYS_PROBE5(_name, _type1, _arg1, _type2, _arg2,		\
302 	    _type3, _arg3, _type4, _arg4, _type5, _arg5)
303 
304 #define	EFSYS_PROBE6(_name, _type1, _arg1, _type2, _arg2,		\
305 	    _type3, _arg3, _type4, _arg4, _type5, _arg5,		\
306 	    _type6, _arg6)
307 
308 #define	EFSYS_PROBE7(_name, _type1, _arg1, _type2, _arg2,		\
309 	    _type3, _arg3, _type4, _arg4, _type5, _arg5,		\
310 	    _type6, _arg6, _type7, _arg7)
311 
312 #else /* DTRACE_PROBE */
313 
314 #define	EFSYS_PROBE(_name)						\
315 	DTRACE_PROBE(_name)
316 
317 #define	EFSYS_PROBE1(_name, _type1, _arg1)				\
318 	DTRACE_PROBE1(_name, _type1, _arg1)
319 
320 #define	EFSYS_PROBE2(_name, _type1, _arg1, _type2, _arg2)		\
321 	DTRACE_PROBE2(_name, _type1, _arg1, _type2, _arg2)
322 
323 #define	EFSYS_PROBE3(_name, _type1, _arg1, _type2, _arg2,		\
324 	    _type3, _arg3)						\
325 	DTRACE_PROBE3(_name, _type1, _arg1, _type2, _arg2,		\
326 	    _type3, _arg3)
327 
328 #define	EFSYS_PROBE4(_name, _type1, _arg1, _type2, _arg2,		\
329 	    _type3, _arg3, _type4, _arg4)				\
330 	DTRACE_PROBE4(_name, _type1, _arg1, _type2, _arg2,		\
331 	    _type3, _arg3, _type4, _arg4)
332 
333 #ifdef DTRACE_PROBE5
334 #define	EFSYS_PROBE5(_name, _type1, _arg1, _type2, _arg2,		\
335 	    _type3, _arg3, _type4, _arg4, _type5, _arg5)		\
336 	DTRACE_PROBE5(_name, _type1, _arg1, _type2, _arg2,		\
337 	    _type3, _arg3, _type4, _arg4, _type5, _arg5)
338 #else
339 #define	EFSYS_PROBE5(_name, _type1, _arg1, _type2, _arg2,		\
340 	    _type3, _arg3, _type4, _arg4, _type5, _arg5)		\
341 	DTRACE_PROBE4(_name, _type1, _arg1, _type2, _arg2,		\
342 	    _type3, _arg3, _type4, _arg4)
343 #endif
344 
345 #ifdef DTRACE_PROBE6
346 #define	EFSYS_PROBE6(_name, _type1, _arg1, _type2, _arg2,		\
347 	    _type3, _arg3, _type4, _arg4, _type5, _arg5,		\
348 	    _type6, _arg6)						\
349 	DTRACE_PROBE6(_name, _type1, _arg1, _type2, _arg2,		\
350 	    _type3, _arg3, _type4, _arg4, _type5, _arg5,		\
351 	    _type6, _arg6)
352 #else
353 #define	EFSYS_PROBE6(_name, _type1, _arg1, _type2, _arg2,		\
354 	    _type3, _arg3, _type4, _arg4, _type5, _arg5,		\
355 	    _type6, _arg6)						\
356 	EFSYS_PROBE5(_name, _type1, _arg1, _type2, _arg2,		\
357 	    _type3, _arg3, _type4, _arg4, _type5, _arg5)
358 #endif
359 
360 #ifdef DTRACE_PROBE7
361 #define	EFSYS_PROBE7(_name, _type1, _arg1, _type2, _arg2,		\
362 	    _type3, _arg3, _type4, _arg4, _type5, _arg5,		\
363 	    _type6, _arg6, _type7, _arg7)				\
364 	DTRACE_PROBE7(_name, _type1, _arg1, _type2, _arg2,		\
365 	    _type3, _arg3, _type4, _arg4, _type5, _arg5,		\
366 	    _type6, _arg6, _type7, _arg7)
367 #else
368 #define	EFSYS_PROBE7(_name, _type1, _arg1, _type2, _arg2,		\
369 	    _type3, _arg3, _type4, _arg4, _type5, _arg5,		\
370 	    _type6, _arg6, _type7, _arg7)				\
371 	EFSYS_PROBE6(_name, _type1, _arg1, _type2, _arg2,		\
372 	    _type3, _arg3, _type4, _arg4, _type5, _arg5,		\
373 	    _type6, _arg6)
374 #endif
375 
376 #endif /* DTRACE_PROBE */
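
/*
 * The probe macros above expand to DTrace SDT probes when <sys/sdt.h>
 * defines DTRACE_PROBEn, and to nothing otherwise.  A sketch of how the
 * common code fires one (mirroring the mem_readd probe used further down):
 *
 *	EFSYS_PROBE2(mem_readd, unsigned int, offset, uint32_t, value);
 *
 * where offset and value stand for whatever the caller has in scope.
 */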
377 
378 /* DMA */
379 
380 typedef uint64_t		efsys_dma_addr_t;
381 
382 typedef struct efsys_mem_s {
383 	bus_dma_tag_t		esm_tag;
384 	bus_dmamap_t		esm_map;
385 	caddr_t			esm_base;
386 	efsys_dma_addr_t	esm_addr;
387 } efsys_mem_t;
388 
389 
390 #define	EFSYS_MEM_ZERO(_esmp, _size)					\
391 	do {								\
392 		(void) memset((_esmp)->esm_base, 0, (_size));		\
393 									\
394 	_NOTE(CONSTANTCONDITION)					\
395 	} while (B_FALSE)
396 
397 #define	EFSYS_MEM_READD(_esmp, _offset, _edp)				\
398 	do {								\
399 		uint32_t *addr;						\
400 									\
401 		_NOTE(CONSTANTCONDITION)				\
402 		KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_dword_t)),	\
403 		    ("not power of 2 aligned"));			\
404 									\
405 		addr = (void *)((_esmp)->esm_base + (_offset));		\
406 									\
407 		(_edp)->ed_u32[0] = *addr;				\
408 									\
409 		EFSYS_PROBE2(mem_readd, unsigned int, (_offset),	\
410 		    uint32_t, (_edp)->ed_u32[0]);			\
411 									\
412 	_NOTE(CONSTANTCONDITION)					\
413 	} while (B_FALSE)
414 
415 #if defined(__x86_64__)
416 #define	EFSYS_MEM_READQ(_esmp, _offset, _eqp)				\
417 	do {								\
418 		uint64_t *addr;						\
419 									\
420 		_NOTE(CONSTANTCONDITION)				\
421 		KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_qword_t)),	\
422 		    ("not power of 2 aligned"));			\
423 									\
424 		addr = (void *)((_esmp)->esm_base + (_offset));		\
425 									\
426 		(_eqp)->eq_u64[0] = *addr;				\
427 									\
428 		EFSYS_PROBE3(mem_readq, unsigned int, (_offset),	\
429 		    uint32_t, (_eqp)->eq_u32[1],			\
430 		    uint32_t, (_eqp)->eq_u32[0]);			\
431 									\
432 	_NOTE(CONSTANTCONDITION)					\
433 	} while (B_FALSE)
434 #else
435 #define	EFSYS_MEM_READQ(_esmp, _offset, _eqp)				\
436 	do {								\
437 		uint32_t *addr;						\
438 									\
439 		_NOTE(CONSTANTCONDITION)				\
440 		KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_qword_t)),	\
441 		    ("not power of 2 aligned"));			\
442 									\
443 		addr = (void *)((_esmp)->esm_base + (_offset));		\
444 									\
445 		(_eqp)->eq_u32[0] = *addr++;				\
446 		(_eqp)->eq_u32[1] = *addr;				\
447 									\
448 		EFSYS_PROBE3(mem_readq, unsigned int, (_offset),	\
449 		    uint32_t, (_eqp)->eq_u32[1],			\
450 		    uint32_t, (_eqp)->eq_u32[0]);			\
451 									\
452 	_NOTE(CONSTANTCONDITION)					\
453 	} while (B_FALSE)
454 #endif
455 
456 #if defined(__x86_64__)
457 #define	EFSYS_MEM_READO(_esmp, _offset, _eop)				\
458 	do {								\
459 		uint64_t *addr;						\
460 									\
461 		_NOTE(CONSTANTCONDITION)				\
462 		KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_oword_t)),	\
463 		    ("not power of 2 aligned"));			\
464 									\
465 		addr = (void *)((_esmp)->esm_base + (_offset));		\
466 									\
467 		(_eop)->eo_u64[0] = *addr++;				\
468 		(_eop)->eo_u64[1] = *addr;				\
469 									\
470 		EFSYS_PROBE5(mem_reado, unsigned int, (_offset),	\
471 		    uint32_t, (_eop)->eo_u32[3],			\
472 		    uint32_t, (_eop)->eo_u32[2],			\
473 		    uint32_t, (_eop)->eo_u32[1],			\
474 		    uint32_t, (_eop)->eo_u32[0]);			\
475 									\
476 	_NOTE(CONSTANTCONDITION)					\
477 	} while (B_FALSE)
478 #else
479 #define	EFSYS_MEM_READO(_esmp, _offset, _eop)				\
480 	do {								\
481 		uint32_t *addr;						\
482 									\
483 		_NOTE(CONSTANTCONDITION)				\
484 		KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_oword_t)),	\
485 		    ("not power of 2 aligned"));			\
486 									\
487 		addr = (void *)((_esmp)->esm_base + (_offset));		\
488 									\
489 		(_eop)->eo_u32[0] = *addr++;				\
490 		(_eop)->eo_u32[1] = *addr++;				\
491 		(_eop)->eo_u32[2] = *addr++;				\
492 		(_eop)->eo_u32[3] = *addr;				\
493 									\
494 		EFSYS_PROBE5(mem_reado, unsigned int, (_offset),	\
495 		    uint32_t, (_eop)->eo_u32[3],			\
496 		    uint32_t, (_eop)->eo_u32[2],			\
497 		    uint32_t, (_eop)->eo_u32[1],			\
498 		    uint32_t, (_eop)->eo_u32[0]);			\
499 									\
500 	_NOTE(CONSTANTCONDITION)					\
501 	} while (B_FALSE)
502 #endif
503 
504 #define	EFSYS_MEM_WRITED(_esmp, _offset, _edp)				\
505 	do {								\
506 		uint32_t *addr;						\
507 									\
508 		_NOTE(CONSTANTCONDITION)				\
509 		KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_dword_t)),	\
510 		    ("not power of 2 aligned"));			\
511 									\
512 		EFSYS_PROBE2(mem_writed, unsigned int, (_offset),	\
513 		    uint32_t, (_edp)->ed_u32[0]);			\
514 									\
515 		addr = (void *)((_esmp)->esm_base + (_offset));		\
516 									\
517 		*addr = (_edp)->ed_u32[0];				\
518 									\
519 	_NOTE(CONSTANTCONDITION)					\
520 	} while (B_FALSE)
521 
522 #if defined(__x86_64__)
523 #define	EFSYS_MEM_WRITEQ(_esmp, _offset, _eqp)				\
524 	do {								\
525 		uint64_t *addr;						\
526 									\
527 		_NOTE(CONSTANTCONDITION)				\
528 		KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_qword_t)),	\
529 		    ("not power of 2 aligned"));			\
530 									\
531 		EFSYS_PROBE3(mem_writeq, unsigned int, (_offset),	\
532 		    uint32_t, (_eqp)->eq_u32[1],			\
533 		    uint32_t, (_eqp)->eq_u32[0]);			\
534 									\
535 		addr = (void *)((_esmp)->esm_base + (_offset));		\
536 									\
537 		*addr   = (_eqp)->eq_u64[0];				\
538 									\
539 	_NOTE(CONSTANTCONDITION)					\
540 	} while (B_FALSE)
541 
542 #else
543 #define	EFSYS_MEM_WRITEQ(_esmp, _offset, _eqp)				\
544 	do {								\
545 		uint32_t *addr;						\
546 									\
547 		_NOTE(CONSTANTCONDITION)				\
548 		KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_qword_t)),	\
549 		    ("not power of 2 aligned"));			\
550 									\
551 		EFSYS_PROBE3(mem_writeq, unsigned int, (_offset),	\
552 		    uint32_t, (_eqp)->eq_u32[1],			\
553 		    uint32_t, (_eqp)->eq_u32[0]);			\
554 									\
555 		addr = (void *)((_esmp)->esm_base + (_offset));		\
556 									\
557 		*addr++ = (_eqp)->eq_u32[0];				\
558 		*addr   = (_eqp)->eq_u32[1];				\
559 									\
560 	_NOTE(CONSTANTCONDITION)					\
561 	} while (B_FALSE)
562 #endif
563 
564 #if defined(__x86_64__)
565 #define	EFSYS_MEM_WRITEO(_esmp, _offset, _eop)				\
566 	do {								\
567 		uint64_t *addr;						\
568 									\
569 		_NOTE(CONSTANTCONDITION)				\
570 		KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_oword_t)),	\
571 		    ("not power of 2 aligned"));			\
572 									\
573 		EFSYS_PROBE5(mem_writeo, unsigned int, (_offset),	\
574 		    uint32_t, (_eop)->eo_u32[3],			\
575 		    uint32_t, (_eop)->eo_u32[2],			\
576 		    uint32_t, (_eop)->eo_u32[1],			\
577 		    uint32_t, (_eop)->eo_u32[0]);			\
578 									\
579 		addr = (void *)((_esmp)->esm_base + (_offset));		\
580 									\
581 		*addr++ = (_eop)->eo_u64[0];				\
582 		*addr   = (_eop)->eo_u64[1];				\
583 									\
584 	_NOTE(CONSTANTCONDITION)					\
585 	} while (B_FALSE)
586 #else
587 #define	EFSYS_MEM_WRITEO(_esmp, _offset, _eop)				\
588 	do {								\
589 		uint32_t *addr;						\
590 									\
591 		_NOTE(CONSTANTCONDITION)				\
592 		KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_oword_t)),	\
593 		    ("not power of 2 aligned"));			\
594 									\
595 		EFSYS_PROBE5(mem_writeo, unsigned int, (_offset),	\
596 		    uint32_t, (_eop)->eo_u32[3],			\
597 		    uint32_t, (_eop)->eo_u32[2],			\
598 		    uint32_t, (_eop)->eo_u32[1],			\
599 		    uint32_t, (_eop)->eo_u32[0]);			\
600 									\
601 		addr = (void *)((_esmp)->esm_base + (_offset));		\
602 									\
603 		*addr++ = (_eop)->eo_u32[0];				\
604 		*addr++ = (_eop)->eo_u32[1];				\
605 		*addr++ = (_eop)->eo_u32[2];				\
606 		*addr   = (_eop)->eo_u32[3];				\
607 									\
608 	_NOTE(CONSTANTCONDITION)					\
609 	} while (B_FALSE)
610 #endif
611 
612 #define	EFSYS_MEM_ADDR(_esmp)						\
613 	((_esmp)->esm_addr)
614 
615 #define	EFSYS_MEM_IS_NULL(_esmp)					\
616 	((_esmp)->esm_base == NULL)
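
/*
 * Illustrative (hypothetical) use of the accessors above on DMA-coherent
 * memory described by an efsys_mem_t (esmp is a placeholder pointer):
 *
 *	efx_qword_t qword;
 *
 *	EFSYS_MEM_ZERO(esmp, sizeof (efx_qword_t));
 *	EFSYS_MEM_READQ(esmp, 0, &qword);
 *	EFSYS_MEM_WRITEQ(esmp, 0, &qword);
 */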
617 
618 /* BAR */
619 
620 #define	SFXGE_LOCK_NAME_MAX	16
621 
622 typedef struct efsys_bar_s {
623 	struct mtx		esb_lock;
624 	char			esb_lock_name[SFXGE_LOCK_NAME_MAX];
625 	bus_space_tag_t		esb_tag;
626 	bus_space_handle_t	esb_handle;
627 	int			esb_rid;
628 	struct resource		*esb_res;
629 } efsys_bar_t;
630 
631 #define	SFXGE_BAR_LOCK_INIT(_esbp, _ifname)				\
632 	do {								\
633 		snprintf((_esbp)->esb_lock_name,			\
634 			 sizeof((_esbp)->esb_lock_name),		\
635 			 "%s:bar", (_ifname));				\
636 		mtx_init(&(_esbp)->esb_lock, (_esbp)->esb_lock_name,	\
637 			 NULL, MTX_DEF);				\
638 	_NOTE(CONSTANTCONDITION)					\
639 	} while (B_FALSE)
640 #define	SFXGE_BAR_LOCK_DESTROY(_esbp)					\
641 	mtx_destroy(&(_esbp)->esb_lock)
642 #define	SFXGE_BAR_LOCK(_esbp)						\
643 	mtx_lock(&(_esbp)->esb_lock)
644 #define	SFXGE_BAR_UNLOCK(_esbp)						\
645 	mtx_unlock(&(_esbp)->esb_lock)
646 
647 #define	EFSYS_BAR_READD(_esbp, _offset, _edp, _lock)			\
648 	do {								\
649 		_NOTE(CONSTANTCONDITION)				\
650 		KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_dword_t)),	\
651 		    ("not power of 2 aligned"));			\
652 									\
653 		_NOTE(CONSTANTCONDITION)				\
654 		if (_lock)						\
655 			SFXGE_BAR_LOCK(_esbp);				\
656 									\
657 		(_edp)->ed_u32[0] = bus_space_read_stream_4(		\
658 		    (_esbp)->esb_tag, (_esbp)->esb_handle,		\
659 		    (_offset));						\
660 									\
661 		EFSYS_PROBE2(bar_readd, unsigned int, (_offset),	\
662 		    uint32_t, (_edp)->ed_u32[0]);			\
663 									\
664 		_NOTE(CONSTANTCONDITION)				\
665 		if (_lock)						\
666 			SFXGE_BAR_UNLOCK(_esbp);			\
667 	_NOTE(CONSTANTCONDITION)					\
668 	} while (B_FALSE)
669 
670 #if defined(SFXGE_USE_BUS_SPACE_8)
671 #define	EFSYS_BAR_READQ(_esbp, _offset, _eqp)				\
672 	do {								\
673 		_NOTE(CONSTANTCONDITION)				\
674 		KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_qword_t)),	\
675 		    ("not power of 2 aligned"));			\
676 									\
677 		SFXGE_BAR_LOCK(_esbp);					\
678 									\
679 		(_eqp)->eq_u64[0] = bus_space_read_stream_8(		\
680 		    (_esbp)->esb_tag, (_esbp)->esb_handle,		\
681 		    (_offset));						\
682 									\
683 		EFSYS_PROBE3(bar_readq, unsigned int, (_offset),	\
684 		    uint32_t, (_eqp)->eq_u32[1],			\
685 		    uint32_t, (_eqp)->eq_u32[0]);			\
686 									\
687 		SFXGE_BAR_UNLOCK(_esbp);				\
688 	_NOTE(CONSTANTCONDITION)					\
689 	} while (B_FALSE)
690 
691 #define	EFSYS_BAR_READO(_esbp, _offset, _eop, _lock)			\
692 	do {								\
693 		_NOTE(CONSTANTCONDITION)				\
694 		KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_oword_t)),	\
695 		    ("not power of 2 aligned"));			\
696 									\
697 		_NOTE(CONSTANTCONDITION)				\
698 		if (_lock)						\
699 			SFXGE_BAR_LOCK(_esbp);				\
700 									\
701 		(_eop)->eo_u64[0] = bus_space_read_stream_8(		\
702 		    (_esbp)->esb_tag, (_esbp)->esb_handle,		\
703 		    (_offset));						\
704 		(_eop)->eo_u64[1] = bus_space_read_stream_8(		\
705 		    (_esbp)->esb_tag, (_esbp)->esb_handle,		\
706 		    (_offset) + 8);					\
707 									\
708 		EFSYS_PROBE5(bar_reado, unsigned int, (_offset),	\
709 		    uint32_t, (_eop)->eo_u32[3],			\
710 		    uint32_t, (_eop)->eo_u32[2],			\
711 		    uint32_t, (_eop)->eo_u32[1],			\
712 		    uint32_t, (_eop)->eo_u32[0]);			\
713 									\
714 		_NOTE(CONSTANTCONDITION)				\
715 		if (_lock)						\
716 			SFXGE_BAR_UNLOCK(_esbp);			\
717 	_NOTE(CONSTANTCONDITION)					\
718 	} while (B_FALSE)
719 
720 #else
721 #define	EFSYS_BAR_READQ(_esbp, _offset, _eqp)				\
722 	do {								\
723 		_NOTE(CONSTANTCONDITION)				\
724 		KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_qword_t)),	\
725 		    ("not power of 2 aligned"));			\
726 									\
727 		SFXGE_BAR_LOCK(_esbp);					\
728 									\
729 		(_eqp)->eq_u32[0] = bus_space_read_stream_4(		\
730 		    (_esbp)->esb_tag, (_esbp)->esb_handle,		\
731 		    (_offset));						\
732 		(_eqp)->eq_u32[1] = bus_space_read_stream_4(		\
733 		    (_esbp)->esb_tag, (_esbp)->esb_handle,		\
734 		    (_offset) + 4);					\
735 									\
736 		EFSYS_PROBE3(bar_readq, unsigned int, (_offset),	\
737 		    uint32_t, (_eqp)->eq_u32[1],			\
738 		    uint32_t, (_eqp)->eq_u32[0]);			\
739 									\
740 		SFXGE_BAR_UNLOCK(_esbp);				\
741 	_NOTE(CONSTANTCONDITION)					\
742 	} while (B_FALSE)
743 
744 #define	EFSYS_BAR_READO(_esbp, _offset, _eop, _lock)			\
745 	do {								\
746 		_NOTE(CONSTANTCONDITION)				\
747 		KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_oword_t)),	\
748 		    ("not power of 2 aligned"));			\
749 									\
750 		_NOTE(CONSTANTCONDITION)				\
751 		if (_lock)						\
752 			SFXGE_BAR_LOCK(_esbp);				\
753 									\
754 		(_eop)->eo_u32[0] = bus_space_read_stream_4(		\
755 		    (_esbp)->esb_tag, (_esbp)->esb_handle,		\
756 		    (_offset));						\
757 		(_eop)->eo_u32[1] = bus_space_read_stream_4(		\
758 		    (_esbp)->esb_tag, (_esbp)->esb_handle,		\
759 		    (_offset) + 4);					\
760 		(_eop)->eo_u32[2] = bus_space_read_stream_4(		\
761 		    (_esbp)->esb_tag, (_esbp)->esb_handle,		\
762 		    (_offset) + 8);					\
763 		(_eop)->eo_u32[3] = bus_space_read_stream_4(		\
764 		    (_esbp)->esb_tag, (_esbp)->esb_handle,		\
765 		    (_offset) + 12);					\
766 									\
767 		EFSYS_PROBE5(bar_reado, unsigned int, (_offset),	\
768 		    uint32_t, (_eop)->eo_u32[3],			\
769 		    uint32_t, (_eop)->eo_u32[2],			\
770 		    uint32_t, (_eop)->eo_u32[1],			\
771 		    uint32_t, (_eop)->eo_u32[0]);			\
772 									\
773 		_NOTE(CONSTANTCONDITION)				\
774 		if (_lock)						\
775 			SFXGE_BAR_UNLOCK(_esbp);			\
776 	_NOTE(CONSTANTCONDITION)					\
777 	} while (B_FALSE)
778 #endif
779 
780 #define	EFSYS_BAR_WRITED(_esbp, _offset, _edp, _lock)			\
781 	do {								\
782 		_NOTE(CONSTANTCONDITION)				\
783 		KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_dword_t)),	\
784 		    ("not power of 2 aligned"));			\
785 									\
786 		_NOTE(CONSTANTCONDITION)				\
787 		if (_lock)						\
788 			SFXGE_BAR_LOCK(_esbp);				\
789 									\
790 		EFSYS_PROBE2(bar_writed, unsigned int, (_offset),	\
791 		    uint32_t, (_edp)->ed_u32[0]);			\
792 									\
793 		/*							\
794 		 * Make sure that any previous writes to the dword	\
795 		 * have completed; this should be cheaper than a	\
796 		 * barrier just after the write below.			\
797 		 */							\
798 		bus_space_barrier((_esbp)->esb_tag, (_esbp)->esb_handle,\
799 		    (_offset), sizeof (efx_dword_t),			\
800 		    BUS_SPACE_BARRIER_WRITE);				\
801 		bus_space_write_stream_4((_esbp)->esb_tag,		\
802 		    (_esbp)->esb_handle,				\
803 		    (_offset), (_edp)->ed_u32[0]);			\
804 									\
805 		_NOTE(CONSTANTCONDITION)				\
806 		if (_lock)						\
807 			SFXGE_BAR_UNLOCK(_esbp);			\
808 	_NOTE(CONSTANTCONDITION)					\
809 	} while (B_FALSE)
810 
811 #if defined(SFXGE_USE_BUS_SPACE_8)
812 #define	EFSYS_BAR_WRITEQ(_esbp, _offset, _eqp)				\
813 	do {								\
814 		_NOTE(CONSTANTCONDITION)				\
815 		KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_qword_t)),	\
816 		    ("not power of 2 aligned"));			\
817 									\
818 		SFXGE_BAR_LOCK(_esbp);					\
819 									\
820 		EFSYS_PROBE3(bar_writeq, unsigned int, (_offset),	\
821 		    uint32_t, (_eqp)->eq_u32[1],			\
822 		    uint32_t, (_eqp)->eq_u32[0]);			\
823 									\
824 		/*							\
825 		 * Make sure that any previous writes to the qword	\
826 		 * have completed; this should be cheaper than a	\
827 		 * barrier just after the write below.			\
828 		 */							\
829 		bus_space_barrier((_esbp)->esb_tag, (_esbp)->esb_handle,\
830 		    (_offset), sizeof (efx_qword_t),			\
831 		    BUS_SPACE_BARRIER_WRITE);				\
832 		bus_space_write_stream_8((_esbp)->esb_tag,		\
833 		    (_esbp)->esb_handle,				\
834 		    (_offset), (_eqp)->eq_u64[0]);			\
835 									\
836 		SFXGE_BAR_UNLOCK(_esbp);				\
837 	_NOTE(CONSTANTCONDITION)					\
838 	} while (B_FALSE)
839 #else
840 #define	EFSYS_BAR_WRITEQ(_esbp, _offset, _eqp)				\
841 	do {								\
842 		_NOTE(CONSTANTCONDITION)				\
843 		KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_qword_t)),	\
844 		    ("not power of 2 aligned"));			\
845 									\
846 		SFXGE_BAR_LOCK(_esbp);					\
847 									\
848 		EFSYS_PROBE3(bar_writeq, unsigned int, (_offset),	\
849 		    uint32_t, (_eqp)->eq_u32[1],			\
850 		    uint32_t, (_eqp)->eq_u32[0]);			\
851 									\
852 		/*							\
853 		 * Make sure that any previous writes to the qword	\
854 		 * have completed; this should be cheaper than a	\
855 		 * barrier just after the last write below.		\
856 		 */							\
857 		bus_space_barrier((_esbp)->esb_tag, (_esbp)->esb_handle,\
858 		    (_offset), sizeof (efx_qword_t),			\
859 		    BUS_SPACE_BARRIER_WRITE);				\
860 		bus_space_write_stream_4((_esbp)->esb_tag,		\
861 		    (_esbp)->esb_handle,				\
862 		    (_offset), (_eqp)->eq_u32[0]);			\
863 		/*							\
864 		 * The last dword must be written last, so barrier	\
865 		 * the entire qword to make sure that neither the	\
866 		 * preceding nor the following write is reordered.	\
867 		 */							\
868 		bus_space_barrier((_esbp)->esb_tag, (_esbp)->esb_handle,\
869 		    (_offset), sizeof (efx_qword_t),			\
870 		    BUS_SPACE_BARRIER_WRITE);				\
871 		bus_space_write_stream_4((_esbp)->esb_tag,		\
872 		    (_esbp)->esb_handle,				\
873 		    (_offset) + 4, (_eqp)->eq_u32[1]);			\
874 									\
875 		SFXGE_BAR_UNLOCK(_esbp);				\
876 	_NOTE(CONSTANTCONDITION)					\
877 	} while (B_FALSE)
878 #endif
879 
880 /*
881  * Guarantees 64-bit aligned, 64-bit writes to a write-combined BAR mapping
882  * (required by the PIO hardware).
883  */
884 #define	EFSYS_BAR_WC_WRITEQ(_esbp, _offset, _eqp)			\
885 	do {								\
886 		_NOTE(CONSTANTCONDITION)				\
887 		KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_qword_t)),	\
888 		    ("not power of 2 aligned"));			\
889 									\
890 		(void) (_esbp);						\
891 									\
892 		/* FIXME: Perform a 64-bit write */			\
893 		KASSERT(0, ("not implemented"));			\
894 									\
895 	_NOTE(CONSTANTCONDITION)					\
896 	} while (B_FALSE)
897 
898 #if defined(SFXGE_USE_BUS_SPACE_8)
899 #define	EFSYS_BAR_WRITEO(_esbp, _offset, _eop, _lock)			\
900 	do {								\
901 		_NOTE(CONSTANTCONDITION)				\
902 		KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_oword_t)),	\
903 		    ("not power of 2 aligned"));			\
904 									\
905 		_NOTE(CONSTANTCONDITION)				\
906 		if (_lock)						\
907 			SFXGE_BAR_LOCK(_esbp);				\
908 									\
909 		EFSYS_PROBE5(bar_writeo, unsigned int, (_offset),	\
910 		    uint32_t, (_eop)->eo_u32[3],			\
911 		    uint32_t, (_eop)->eo_u32[2],			\
912 		    uint32_t, (_eop)->eo_u32[1],			\
913 		    uint32_t, (_eop)->eo_u32[0]);			\
914 									\
915 		/*							\
916 		 * Make sure that any previous writes to the oword	\
917 		 * have completed; this should be cheaper than a	\
918 		 * barrier just after the last write below.		\
919 		 */							\
920 		bus_space_barrier((_esbp)->esb_tag, (_esbp)->esb_handle,\
921 		    (_offset), sizeof (efx_oword_t),			\
922 		    BUS_SPACE_BARRIER_WRITE);				\
923 		bus_space_write_stream_8((_esbp)->esb_tag,		\
924 		    (_esbp)->esb_handle,				\
925 		    (_offset), (_eop)->eo_u64[0]);			\
926 		/*							\
927 		 * The last qword must be written last, so barrier	\
928 		 * the entire oword to make sure that neither the	\
929 		 * preceding nor the following write is reordered.	\
930 		 */							\
931 		bus_space_barrier((_esbp)->esb_tag, (_esbp)->esb_handle,\
932 		    (_offset), sizeof (efx_oword_t),			\
933 		    BUS_SPACE_BARRIER_WRITE);				\
934 		bus_space_write_stream_8((_esbp)->esb_tag,		\
935 		    (_esbp)->esb_handle,				\
936 		    (_offset) + 8, (_eop)->eo_u64[1]);			\
937 									\
938 		_NOTE(CONSTANTCONDITION)				\
939 		if (_lock)						\
940 			SFXGE_BAR_UNLOCK(_esbp);			\
941 	_NOTE(CONSTANTCONDITION)					\
942 	} while (B_FALSE)
943 
944 #else
945 #define	EFSYS_BAR_WRITEO(_esbp, _offset, _eop, _lock)			\
946 	do {								\
947 		_NOTE(CONSTANTCONDITION)				\
948 		KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_oword_t)),	\
949 		    ("not power of 2 aligned"));			\
950 									\
951 		_NOTE(CONSTANTCONDITION)				\
952 		if (_lock)						\
953 			SFXGE_BAR_LOCK(_esbp);				\
954 									\
955 		EFSYS_PROBE5(bar_writeo, unsigned int, (_offset),	\
956 		    uint32_t, (_eop)->eo_u32[3],			\
957 		    uint32_t, (_eop)->eo_u32[2],			\
958 		    uint32_t, (_eop)->eo_u32[1],			\
959 		    uint32_t, (_eop)->eo_u32[0]);			\
960 									\
961 		/*							\
962 		 * Make sure that any previous writes to the oword	\
963 		 * have completed; this should be cheaper than a	\
964 		 * barrier just after the last write below.		\
965 		 */							\
966 		bus_space_barrier((_esbp)->esb_tag, (_esbp)->esb_handle,\
967 		    (_offset), sizeof (efx_oword_t),			\
968 		    BUS_SPACE_BARRIER_WRITE);				\
969 		bus_space_write_stream_4((_esbp)->esb_tag,		\
970 		    (_esbp)->esb_handle,				\
971 		    (_offset), (_eop)->eo_u32[0]);			\
972 		bus_space_write_stream_4((_esbp)->esb_tag,		\
973 		    (_esbp)->esb_handle,				\
974 		    (_offset) + 4, (_eop)->eo_u32[1]);			\
975 		bus_space_write_stream_4((_esbp)->esb_tag,		\
976 		    (_esbp)->esb_handle,				\
977 		    (_offset) + 8, (_eop)->eo_u32[2]);			\
978 		/*							\
979 		 * The last dword must be written last, so barrier	\
980 		 * the entire oword to make sure that neither the	\
981 		 * preceding nor the following write is reordered.	\
982 		 */							\
983 		bus_space_barrier((_esbp)->esb_tag, (_esbp)->esb_handle,\
984 		    (_offset), sizeof (efx_oword_t),			\
985 		    BUS_SPACE_BARRIER_WRITE);				\
986 		bus_space_write_stream_4((_esbp)->esb_tag,		\
987 		    (_esbp)->esb_handle,				\
988 		    (_offset) + 12, (_eop)->eo_u32[3]);			\
989 									\
990 		_NOTE(CONSTANTCONDITION)				\
991 		if (_lock)						\
992 			SFXGE_BAR_UNLOCK(_esbp);			\
993 	_NOTE(CONSTANTCONDITION)					\
994 	} while (B_FALSE)
995 #endif
996 
997 /* Use the standard octo-word write for doorbell writes */
998 #define	EFSYS_BAR_DOORBELL_WRITEO(_esbp, _offset, _eop)			\
999 	do {								\
1000 		EFSYS_BAR_WRITEO((_esbp), (_offset), (_eop), B_FALSE);	\
1001 	_NOTE(CONSTANTCONDITION)					\
1002 	} while (B_FALSE)
1003 
1004 /* SPIN */
1005 
1006 #define	EFSYS_SPIN(_us)							\
1007 	do {								\
1008 		DELAY(_us);						\
1009 	_NOTE(CONSTANTCONDITION)					\
1010 	} while (B_FALSE)
1011 
1012 #define	EFSYS_SLEEP	EFSYS_SPIN
1013 
1014 /* BARRIERS */
1015 
1016 #define	EFSYS_MEM_READ_BARRIER()	rmb()
1017 #define	EFSYS_PIO_WRITE_BARRIER()
1018 
1019 /* DMA SYNC */
1020 #define	EFSYS_DMA_SYNC_FOR_KERNEL(_esmp, _offset, _size)		\
1021 	do {								\
1022 		bus_dmamap_sync((_esmp)->esm_tag,			\
1023 		    (_esmp)->esm_map,					\
1024 		    BUS_DMASYNC_POSTREAD);				\
1025 	_NOTE(CONSTANTCONDITION)					\
1026 	} while (B_FALSE)
1027 
1028 #define	EFSYS_DMA_SYNC_FOR_DEVICE(_esmp, _offset, _size)		\
1029 	do {								\
1030 		bus_dmamap_sync((_esmp)->esm_tag,			\
1031 		    (_esmp)->esm_map,					\
1032 		    BUS_DMASYNC_PREWRITE);				\
1033 	_NOTE(CONSTANTCONDITION)					\
1034 	} while (B_FALSE)
1035 
1036 /* TIMESTAMP */
1037 
1038 typedef	clock_t	efsys_timestamp_t;
1039 
1040 #define	EFSYS_TIMESTAMP(_usp)						\
1041 	do {								\
1042 		clock_t now;						\
1043 									\
1044 		now = ticks;						\
1045 		*(_usp) = (clock_t)((uint64_t)now * 1000000 / hz);	\
1046 	_NOTE(CONSTANTCONDITION)					\
1047 	} while (B_FALSE)
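
/*
 * The timestamp is expressed in microseconds: ticks are scaled by
 * 1000000 / hz.  For example, with hz = 1000 and now = 500 (half a second
 * of uptime), the result is 500 * 1000000 / 1000 = 500000.
 */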
1048 
1049 /* KMEM */
1050 
1051 #define	EFSYS_KMEM_ALLOC(_esip, _size, _p)				\
1052 	do {								\
1053 		(_esip) = (_esip);					\
1054 		/*							\
1055 		 * The macro is used in non-sleepable contexts, for	\
1056 		 * example, holding a mutex.				\
1057 		 */							\
1058 		(_p) = malloc((_size), M_SFXGE, M_NOWAIT|M_ZERO);	\
1059 	_NOTE(CONSTANTCONDITION)					\
1060 	} while (B_FALSE)
1061 
1062 #define	EFSYS_KMEM_FREE(_esip, _size, _p)				\
1063 	do {								\
1064 		(void) (_esip);						\
1065 		(void) (_size);						\
1066 		free((_p), M_SFXGE);					\
1067 	_NOTE(CONSTANTCONDITION)					\
1068 	} while (B_FALSE)
1069 
1070 /* LOCK */
1071 
1072 typedef struct efsys_lock_s {
1073 	struct mtx	lock;
1074 	char		lock_name[SFXGE_LOCK_NAME_MAX];
1075 } efsys_lock_t;
1076 
1077 #define	SFXGE_EFSYS_LOCK_INIT(_eslp, _ifname, _label)			\
1078 	do {								\
1079 		efsys_lock_t *__eslp = (_eslp);				\
1080 									\
1081 		snprintf((__eslp)->lock_name,				\
1082 			 sizeof((__eslp)->lock_name),			\
1083 			 "%s:%s", (_ifname), (_label));			\
1084 		mtx_init(&(__eslp)->lock, (__eslp)->lock_name,		\
1085 			 NULL, MTX_DEF);				\
1086 	} while (B_FALSE)
1087 #define	SFXGE_EFSYS_LOCK_DESTROY(_eslp)					\
1088 	mtx_destroy(&(_eslp)->lock)
1089 #define	SFXGE_EFSYS_LOCK(_eslp)						\
1090 	mtx_lock(&(_eslp)->lock)
1091 #define	SFXGE_EFSYS_UNLOCK(_eslp)					\
1092 	mtx_unlock(&(_eslp)->lock)
1093 #define	SFXGE_EFSYS_LOCK_ASSERT_OWNED(_eslp)				\
1094 	mtx_assert(&(_eslp)->lock, MA_OWNED)
1095 
1096 #define	EFSYS_LOCK_MAGIC	0x000010c4
1097 
1098 #define	EFSYS_LOCK(_lockp, _state)					\
1099 	do {								\
1100 		SFXGE_EFSYS_LOCK(_lockp);				\
1101 		(_state) = EFSYS_LOCK_MAGIC;				\
1102 	_NOTE(CONSTANTCONDITION)					\
1103 	} while (B_FALSE)
1104 
1105 #define	EFSYS_UNLOCK(_lockp, _state)					\
1106 	do {								\
1107 		if ((_state) != EFSYS_LOCK_MAGIC)			\
1108 			KASSERT(B_FALSE, ("not locked"));		\
1109 		SFXGE_EFSYS_UNLOCK(_lockp);				\
1110 	_NOTE(CONSTANTCONDITION)					\
1111 	} while (B_FALSE)
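
/*
 * Sketch of the expected pairing (eslp and the critical section are
 * placeholders; the state variable simply carries EFSYS_LOCK_MAGIC):
 *
 *	int state;
 *
 *	EFSYS_LOCK(eslp, state);
 *	... critical section ...
 *	EFSYS_UNLOCK(eslp, state);
 */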
1112 
1113 /* PREEMPT */
1114 
1115 #define	EFSYS_PREEMPT_DISABLE(_state)					\
1116 	do {								\
1117 		(_state) = (_state);					\
1118 		critical_enter();					\
1119 	_NOTE(CONSTANTCONDITION)					\
1120 	} while (B_FALSE)
1121 
1122 #define	EFSYS_PREEMPT_ENABLE(_state)					\
1123 	do {								\
1124 		(_state) = (_state);					\
1125 		critical_exit();					\
1126 	_NOTE(CONSTANTCONDITION)					\
1127 	} while (B_FALSE)
1128 
1129 /* STAT */
1130 
1131 typedef uint64_t		efsys_stat_t;
1132 
1133 #define	EFSYS_STAT_INCR(_knp, _delta) 					\
1134 	do {								\
1135 		*(_knp) += (_delta);					\
1136 	_NOTE(CONSTANTCONDITION)					\
1137 	} while (B_FALSE)
1138 
1139 #define	EFSYS_STAT_DECR(_knp, _delta) 					\
1140 	do {								\
1141 		*(_knp) -= (_delta);					\
1142 	_NOTE(CONSTANTCONDITION)					\
1143 	} while (B_FALSE)
1144 
1145 #define	EFSYS_STAT_SET(_knp, _val)					\
1146 	do {								\
1147 		*(_knp) = (_val);					\
1148 	_NOTE(CONSTANTCONDITION)					\
1149 	} while (B_FALSE)
1150 
1151 #define	EFSYS_STAT_SET_QWORD(_knp, _valp)				\
1152 	do {								\
1153 		*(_knp) = le64toh((_valp)->eq_u64[0]);			\
1154 	_NOTE(CONSTANTCONDITION)					\
1155 	} while (B_FALSE)
1156 
1157 #define	EFSYS_STAT_SET_DWORD(_knp, _valp)				\
1158 	do {								\
1159 		*(_knp) = le32toh((_valp)->ed_u32[0]);			\
1160 	_NOTE(CONSTANTCONDITION)					\
1161 	} while (B_FALSE)
1162 
1163 #define	EFSYS_STAT_INCR_QWORD(_knp, _valp)				\
1164 	do {								\
1165 		*(_knp) += le64toh((_valp)->eq_u64[0]);			\
1166 	_NOTE(CONSTANTCONDITION)					\
1167 	} while (B_FALSE)
1168 
1169 #define	EFSYS_STAT_SUBR_QWORD(_knp, _valp)				\
1170 	do {								\
1171 		*(_knp) -= le64toh((_valp)->eq_u64[0]);			\
1172 	_NOTE(CONSTANTCONDITION)					\
1173 	} while (B_FALSE)
1174 
1175 /* ERR */
1176 
1177 extern void	sfxge_err(efsys_identifier_t *, unsigned int,
1178 		    uint32_t, uint32_t);
1179 
1180 #if EFSYS_OPT_DECODE_INTR_FATAL
1181 #define	EFSYS_ERR(_esip, _code, _dword0, _dword1)			\
1182 	do {								\
1183 		sfxge_err((_esip), (_code), (_dword0), (_dword1));	\
1184 	_NOTE(CONSTANTCONDITION)					\
1185 	} while (B_FALSE)
1186 #endif
1187 
1188 /* ASSERT */
1189 
1190 #define	EFSYS_ASSERT(_exp) do {						\
1191 	if (!(_exp))							\
1192 		panic("%s", #_exp);					\
1193 	} while (0)
1194 
1195 #define	EFSYS_ASSERT3(_x, _op, _y, _t) do {				\
1196 	const _t __x = (_t)(_x);					\
1197 	const _t __y = (_t)(_y);					\
1198 	if (!(__x _op __y))						\
1199 		panic("assertion failed at %s:%u", __FILE__, __LINE__);	\
1200 	} while (0)
1201 
1202 #define	EFSYS_ASSERT3U(_x, _op, _y)	EFSYS_ASSERT3(_x, _op, _y, uint64_t)
1203 #define	EFSYS_ASSERT3S(_x, _op, _y)	EFSYS_ASSERT3(_x, _op, _y, int64_t)
1204 #define	EFSYS_ASSERT3P(_x, _op, _y)	EFSYS_ASSERT3(_x, _op, _y, uintptr_t)
1205 
1206 /* ROTATE */
1207 
1208 #define	EFSYS_HAS_ROTL_DWORD 0
1209 
1210 #ifdef	__cplusplus
1211 }
1212 #endif
1213 
1214 #endif	/* _SYS_EFSYS_H */
1215