xref: /freebsd/sys/dev/sfxge/common/efsys.h (revision 5dae51da3da0cc94d17bd67b308fad304ebec7e0)
1 /*-
2  * Copyright (c) 2010-2016 Solarflare Communications Inc.
3  * All rights reserved.
4  *
5  * This software was developed in part by Philip Paeps under contract for
6  * Solarflare Communications, Inc.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions are met:
10  *
11  * 1. Redistributions of source code must retain the above copyright notice,
12  *    this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright notice,
14  *    this list of conditions and the following disclaimer in the documentation
15  *    and/or other materials provided with the distribution.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
18  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
19  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
20  * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
21  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
22  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
23  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
24  * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
25  * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
26  * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
27  * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28  *
29  * The views and conclusions contained in the software and documentation are
30  * those of the authors and should not be interpreted as representing official
31  * policies, either expressed or implied, of the FreeBSD Project.
32  *
33  * $FreeBSD$
34  */
35 
36 #ifndef	_SYS_EFSYS_H
37 #define	_SYS_EFSYS_H
38 
39 #ifdef	__cplusplus
40 extern "C" {
41 #endif
42 
43 #include <sys/param.h>
44 #include <sys/bus.h>
45 #include <sys/endian.h>
46 #include <sys/lock.h>
47 #include <sys/malloc.h>
48 #include <sys/mbuf.h>
49 #include <sys/mutex.h>
50 #include <sys/rwlock.h>
51 #include <sys/sdt.h>
52 #include <sys/systm.h>
53 
54 #include <machine/bus.h>
55 #include <machine/endian.h>
56 
57 #define	EFSYS_HAS_UINT64 1
58 #if defined(__x86_64__)
59 #define	EFSYS_USE_UINT64 1
60 #else
61 #define	EFSYS_USE_UINT64 0
62 #endif
63 #define	EFSYS_HAS_SSE2_M128 0
64 #if _BYTE_ORDER == _BIG_ENDIAN
65 #define	EFSYS_IS_BIG_ENDIAN 1
66 #define	EFSYS_IS_LITTLE_ENDIAN 0
67 #elif _BYTE_ORDER == _LITTLE_ENDIAN
68 #define	EFSYS_IS_BIG_ENDIAN 0
69 #define	EFSYS_IS_LITTLE_ENDIAN 1
70 #endif
71 #include "efx_types.h"
72 
73 /* Common code requires this */
74 #if __FreeBSD_version < 800068
75 #define	memmove(d, s, l) bcopy(s, d, l)
76 #endif
77 
78 /* FreeBSD equivalents of Solaris things */
79 #ifndef _NOTE
80 #define	_NOTE(s)
81 #endif
82 
83 #ifndef B_FALSE
84 #define	B_FALSE	FALSE
85 #endif
86 #ifndef B_TRUE
87 #define	B_TRUE	TRUE
88 #endif
89 
90 #ifndef IS_P2ALIGNED
91 #define	IS_P2ALIGNED(v, a)	((((uintptr_t)(v)) & ((uintptr_t)(a) - 1)) == 0)
92 #endif
93 
94 #ifndef P2ROUNDUP
95 #define	P2ROUNDUP(x, align)	(-(-(x) & -(align)))
96 #endif
97 
98 #ifndef P2ALIGN
99 #define	P2ALIGN(_x, _a)		((_x) & -(_a))
100 #endif
101 
102 #ifndef ISP2
103 #define	ISP2(x)			(((x) & ((x) - 1)) == 0)
104 #endif
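
/*
 * Illustrative sketch of the power-of-2 helpers above, assuming an
 * 8-byte alignment:
 *
 *	IS_P2ALIGNED(24, 8)	== 1	(24 is a multiple of 8)
 *	P2ROUNDUP(13, 8)	== 16	(round up to the next multiple)
 *	P2ALIGN(13, 8)		== 8	(round down to the previous multiple)
 *	ISP2(64)		== 1	(64 is a power of two)
 */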
105 
106 #if defined(__x86_64__) && __FreeBSD_version >= 1000000
107 
108 #define	SFXGE_USE_BUS_SPACE_8		1
109 
110 #if !defined(bus_space_read_stream_8)
111 
112 #define	bus_space_read_stream_8(t, h, o)				\
113 	bus_space_read_8((t), (h), (o))
114 
115 #define	bus_space_write_stream_8(t, h, o, v)				\
116 	bus_space_write_8((t), (h), (o), (v))
117 
118 #endif
119 
120 #endif
121 
122 #define	ENOTACTIVE EINVAL
123 
124 /* Memory type to use on FreeBSD */
125 MALLOC_DECLARE(M_SFXGE);
126 
127 /* Machine-dependent prefetch wrappers */
128 #if defined(__i386__) || defined(__amd64__)
129 static __inline void
130 prefetch_read_many(void *addr)
131 {
132 
133 	__asm__(
134 	    "prefetcht0 (%0)"
135 	    :
136 	    : "r" (addr));
137 }
138 
139 static __inline void
140 prefetch_read_once(void *addr)
141 {
142 
143 	__asm__(
144 	    "prefetchnta (%0)"
145 	    :
146 	    : "r" (addr));
147 }
148 #elif defined(__sparc64__)
149 static __inline void
150 prefetch_read_many(void *addr)
151 {
152 
153 	__asm__(
154 	    "prefetch [%0], 0"
155 	    :
156 	    : "r" (addr));
157 }
158 
159 static __inline void
160 prefetch_read_once(void *addr)
161 {
162 
163 	__asm__(
164 	    "prefetch [%0], 1"
165 	    :
166 	    : "r" (addr));
167 }
168 #else
169 static __inline void
170 prefetch_read_many(void *addr)
171 {
172 
173 }
174 
175 static __inline void
176 prefetch_read_once(void *addr)
177 {
178 
179 }
180 #endif
181 
182 #if defined(__i386__) || defined(__amd64__)
183 #include <vm/vm.h>
184 #include <vm/pmap.h>
185 #endif
186 static __inline void
187 sfxge_map_mbuf_fast(bus_dma_tag_t tag, bus_dmamap_t map,
188 		    struct mbuf *m, bus_dma_segment_t *seg)
189 {
190 #if defined(__i386__) || defined(__amd64__)
191 	seg->ds_addr = pmap_kextract(mtod(m, vm_offset_t));
192 	seg->ds_len = m->m_len;
193 #else
194 	int nsegstmp;
195 
196 	bus_dmamap_load_mbuf_sg(tag, map, m, seg, &nsegstmp, 0);
197 #endif
198 }
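
/*
 * Illustrative sketch: a transmit path that already owns a DMA tag and
 * map could fill a single segment for an mbuf as follows ("tag", "map"
 * and "m" are hypothetical locals).
 *
 *	bus_dma_segment_t seg;
 *
 *	sfxge_map_mbuf_fast(tag, map, m, &seg);
 *	(seg.ds_addr and seg.ds_len now describe the mbuf data buffer)
 */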
199 
200 /* Modifiers used for Windows builds */
201 #define	__in
202 #define	__in_opt
203 #define	__in_ecount(_n)
204 #define	__in_ecount_opt(_n)
205 #define	__in_bcount(_n)
206 #define	__in_bcount_opt(_n)
207 
208 #define	__out
209 #define	__out_opt
210 #define	__out_ecount(_n)
211 #define	__out_ecount_opt(_n)
212 #define	__out_bcount(_n)
213 #define	__out_bcount_opt(_n)
214 #define	__out_bcount_part(_n, _l)
215 #define	__out_bcount_part_opt(_n, _l)
216 
217 #define	__deref_out
218 
219 #define	__inout
220 #define	__inout_opt
221 #define	__inout_ecount(_n)
222 #define	__inout_ecount_opt(_n)
223 #define	__inout_bcount(_n)
224 #define	__inout_bcount_opt(_n)
225 #define	__inout_bcount_full_opt(_n)
226 
227 #define	__deref_out_bcount_opt(n)
228 
229 #define	__checkReturn
230 #define	__success(_x)
231 
232 #define	__drv_when(_p, _c)
233 
234 /* Code inclusion options */
235 
236 
237 #define	EFSYS_OPT_NAMES 1
238 
239 #define	EFSYS_OPT_SIENA 1
240 #define	EFSYS_OPT_HUNTINGTON 1
241 #define	EFSYS_OPT_MEDFORD 1
242 #ifdef DEBUG
243 #define	EFSYS_OPT_CHECK_REG 1
244 #else
245 #define	EFSYS_OPT_CHECK_REG 0
246 #endif
247 
248 #define	EFSYS_OPT_MCDI 1
249 #define	EFSYS_OPT_MCDI_LOGGING 0
250 #define	EFSYS_OPT_MCDI_PROXY_AUTH 0
251 
252 #define	EFSYS_OPT_MAC_STATS 1
253 
254 #define	EFSYS_OPT_LOOPBACK 0
255 
256 #define	EFSYS_OPT_MON_MCDI 0
257 #define	EFSYS_OPT_MON_STATS 0
258 
259 #define	EFSYS_OPT_PHY_STATS 1
260 #define	EFSYS_OPT_BIST 1
261 #define	EFSYS_OPT_PHY_LED_CONTROL 1
262 #define	EFSYS_OPT_PHY_FLAGS 0
263 
264 #define	EFSYS_OPT_VPD 1
265 #define	EFSYS_OPT_NVRAM 1
266 #define	EFSYS_OPT_BOOTCFG 0
267 
268 #define	EFSYS_OPT_DIAG 0
269 #define	EFSYS_OPT_WOL 1
270 #define	EFSYS_OPT_RX_SCALE 1
271 #define	EFSYS_OPT_QSTATS 1
272 #define	EFSYS_OPT_FILTER 1
273 #define	EFSYS_OPT_RX_SCATTER 0
274 
275 #define	EFSYS_OPT_EV_PREFETCH 0
276 
277 #define	EFSYS_OPT_DECODE_INTR_FATAL 1
278 
279 #define	EFSYS_OPT_LICENSING 0
280 
281 #define	EFSYS_OPT_ALLOW_UNCONFIGURED_NIC 0
282 
283 /* ID */
284 
285 typedef struct __efsys_identifier_s	efsys_identifier_t;
286 
287 /* PROBE */
288 
289 #ifndef DTRACE_PROBE
290 
291 #define	EFSYS_PROBE(_name)
292 
293 #define	EFSYS_PROBE1(_name, _type1, _arg1)
294 
295 #define	EFSYS_PROBE2(_name, _type1, _arg1, _type2, _arg2)
296 
297 #define	EFSYS_PROBE3(_name, _type1, _arg1, _type2, _arg2,		\
298 	    _type3, _arg3)
299 
300 #define	EFSYS_PROBE4(_name, _type1, _arg1, _type2, _arg2,		\
301 	    _type3, _arg3, _type4, _arg4)
302 
303 #define	EFSYS_PROBE5(_name, _type1, _arg1, _type2, _arg2,		\
304 	    _type3, _arg3, _type4, _arg4, _type5, _arg5)
305 
306 #define	EFSYS_PROBE6(_name, _type1, _arg1, _type2, _arg2,		\
307 	    _type3, _arg3, _type4, _arg4, _type5, _arg5,		\
308 	    _type6, _arg6)
309 
310 #define	EFSYS_PROBE7(_name, _type1, _arg1, _type2, _arg2,		\
311 	    _type3, _arg3, _type4, _arg4, _type5, _arg5,		\
312 	    _type6, _arg6, _type7, _arg7)
313 
314 #else /* DTRACE_PROBE */
315 
316 #define	EFSYS_PROBE(_name)						\
317 	DTRACE_PROBE(_name)
318 
319 #define	EFSYS_PROBE1(_name, _type1, _arg1)				\
320 	DTRACE_PROBE1(_name, _type1, _arg1)
321 
322 #define	EFSYS_PROBE2(_name, _type1, _arg1, _type2, _arg2)		\
323 	DTRACE_PROBE2(_name, _type1, _arg1, _type2, _arg2)
324 
325 #define	EFSYS_PROBE3(_name, _type1, _arg1, _type2, _arg2,		\
326 	    _type3, _arg3)						\
327 	DTRACE_PROBE3(_name, _type1, _arg1, _type2, _arg2,		\
328 	    _type3, _arg3)
329 
330 #define	EFSYS_PROBE4(_name, _type1, _arg1, _type2, _arg2,		\
331 	    _type3, _arg3, _type4, _arg4)				\
332 	DTRACE_PROBE4(_name, _type1, _arg1, _type2, _arg2,		\
333 	    _type3, _arg3, _type4, _arg4)
334 
335 #ifdef DTRACE_PROBE5
336 #define	EFSYS_PROBE5(_name, _type1, _arg1, _type2, _arg2,		\
337 	    _type3, _arg3, _type4, _arg4, _type5, _arg5)		\
338 	DTRACE_PROBE5(_name, _type1, _arg1, _type2, _arg2,		\
339 	    _type3, _arg3, _type4, _arg4, _type5, _arg5)
340 #else
341 #define	EFSYS_PROBE5(_name, _type1, _arg1, _type2, _arg2,		\
342 	    _type3, _arg3, _type4, _arg4, _type5, _arg5)		\
343 	DTRACE_PROBE4(_name, _type1, _arg1, _type2, _arg2,		\
344 	    _type3, _arg3, _type4, _arg4)
345 #endif
346 
347 #ifdef DTRACE_PROBE6
348 #define	EFSYS_PROBE6(_name, _type1, _arg1, _type2, _arg2,		\
349 	    _type3, _arg3, _type4, _arg4, _type5, _arg5,		\
350 	    _type6, _arg6)						\
351 	DTRACE_PROBE6(_name, _type1, _arg1, _type2, _arg2,		\
352 	    _type3, _arg3, _type4, _arg4, _type5, _arg5,		\
353 	    _type6, _arg6)
354 #else
355 #define	EFSYS_PROBE6(_name, _type1, _arg1, _type2, _arg2,		\
356 	    _type3, _arg3, _type4, _arg4, _type5, _arg5,		\
357 	    _type6, _arg6)						\
358 	EFSYS_PROBE5(_name, _type1, _arg1, _type2, _arg2,		\
359 	    _type3, _arg3, _type4, _arg4, _type5, _arg5)
360 #endif
361 
362 #ifdef DTRACE_PROBE7
363 #define	EFSYS_PROBE7(_name, _type1, _arg1, _type2, _arg2,		\
364 	    _type3, _arg3, _type4, _arg4, _type5, _arg5,		\
365 	    _type6, _arg6, _type7, _arg7)				\
366 	DTRACE_PROBE7(_name, _type1, _arg1, _type2, _arg2,		\
367 	    _type3, _arg3, _type4, _arg4, _type5, _arg5,		\
368 	    _type6, _arg6, _type7, _arg7)
369 #else
370 #define	EFSYS_PROBE7(_name, _type1, _arg1, _type2, _arg2,		\
371 	    _type3, _arg3, _type4, _arg4, _type5, _arg5,		\
372 	    _type6, _arg6, _type7, _arg7)				\
373 	EFSYS_PROBE6(_name, _type1, _arg1, _type2, _arg2,		\
374 	    _type3, _arg3, _type4, _arg4, _type5, _arg5,		\
375 	    _type6, _arg6)
376 #endif
377 
378 #endif /* DTRACE_PROBE */
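
/*
 * Illustrative sketch: common code emits probes through the wrappers
 * above, e.g. (probe and argument names are hypothetical)
 *
 *	EFSYS_PROBE2(bad_magic, unsigned int, index, uint32_t, magic);
 *
 * which expands to the matching DTRACE_PROBEn() SDT probe when
 * <sys/sdt.h> provides one, and to nothing otherwise (higher-arity
 * probes fall back to lower-arity ones when unavailable).
 */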
379 
380 /* DMA */
381 
382 typedef uint64_t		efsys_dma_addr_t;
383 
384 typedef struct efsys_mem_s {
385 	bus_dma_tag_t		esm_tag;
386 	bus_dmamap_t		esm_map;
387 	caddr_t			esm_base;
388 	efsys_dma_addr_t	esm_addr;
389 } efsys_mem_t;
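
/*
 * Illustrative sketch: once the driver has loaded a DMA buffer into an
 * efsys_mem_t, common code accesses it through the EFSYS_MEM_*()
 * accessors defined below ("esmp" and "offset" are hypothetical).
 *
 *	efx_qword_t qword;
 *
 *	EFSYS_MEM_READQ(esmp, offset, &qword);
 *	EFSYS_MEM_WRITEQ(esmp, offset, &qword);
 */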
390 
391 
392 #define	EFSYS_MEM_ZERO(_esmp, _size)					\
393 	do {								\
394 		(void) memset((_esmp)->esm_base, 0, (_size));		\
395 									\
396 	_NOTE(CONSTANTCONDITION)					\
397 	} while (B_FALSE)
398 
399 #define	EFSYS_MEM_READD(_esmp, _offset, _edp)				\
400 	do {								\
401 		uint32_t *addr;						\
402 									\
403 		_NOTE(CONSTANTCONDITION)				\
404 		KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_dword_t)),	\
405 		    ("not power of 2 aligned"));			\
406 									\
407 		addr = (void *)((_esmp)->esm_base + (_offset));		\
408 									\
409 		(_edp)->ed_u32[0] = *addr;				\
410 									\
411 		EFSYS_PROBE2(mem_readd, unsigned int, (_offset),	\
412 		    uint32_t, (_edp)->ed_u32[0]);			\
413 									\
414 	_NOTE(CONSTANTCONDITION)					\
415 	} while (B_FALSE)
416 
417 #if defined(__x86_64__)
418 #define	EFSYS_MEM_READQ(_esmp, _offset, _eqp)				\
419 	do {								\
420 		uint64_t *addr;						\
421 									\
422 		_NOTE(CONSTANTCONDITION)				\
423 		KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_qword_t)),	\
424 		    ("not power of 2 aligned"));			\
425 									\
426 		addr = (void *)((_esmp)->esm_base + (_offset));		\
427 									\
428 		(_eqp)->eq_u64[0] = *addr;				\
429 									\
430 		EFSYS_PROBE3(mem_readq, unsigned int, (_offset),	\
431 		    uint32_t, (_eqp)->eq_u32[1],			\
432 		    uint32_t, (_eqp)->eq_u32[0]);			\
433 									\
434 	_NOTE(CONSTANTCONDITION)					\
435 	} while (B_FALSE)
436 #else
437 #define	EFSYS_MEM_READQ(_esmp, _offset, _eqp)				\
438 	do {								\
439 		uint32_t *addr;						\
440 									\
441 		_NOTE(CONSTANTCONDITION)				\
442 		KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_qword_t)),	\
443 		    ("not power of 2 aligned"));			\
444 									\
445 		addr = (void *)((_esmp)->esm_base + (_offset));		\
446 									\
447 		(_eqp)->eq_u32[0] = *addr++;				\
448 		(_eqp)->eq_u32[1] = *addr;				\
449 									\
450 		EFSYS_PROBE3(mem_readq, unsigned int, (_offset),	\
451 		    uint32_t, (_eqp)->eq_u32[1],			\
452 		    uint32_t, (_eqp)->eq_u32[0]);			\
453 									\
454 	_NOTE(CONSTANTCONDITION)					\
455 	} while (B_FALSE)
456 #endif
457 
458 #if defined(__x86_64__)
459 #define	EFSYS_MEM_READO(_esmp, _offset, _eop)				\
460 	do {								\
461 		uint64_t *addr;						\
462 									\
463 		_NOTE(CONSTANTCONDITION)				\
464 		KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_oword_t)),	\
465 		    ("not power of 2 aligned"));			\
466 									\
467 		addr = (void *)((_esmp)->esm_base + (_offset));		\
468 									\
469 		(_eop)->eo_u64[0] = *addr++;				\
470 		(_eop)->eo_u64[1] = *addr;				\
471 									\
472 		EFSYS_PROBE5(mem_reado, unsigned int, (_offset),	\
473 		    uint32_t, (_eop)->eo_u32[3],			\
474 		    uint32_t, (_eop)->eo_u32[2],			\
475 		    uint32_t, (_eop)->eo_u32[1],			\
476 		    uint32_t, (_eop)->eo_u32[0]);			\
477 									\
478 	_NOTE(CONSTANTCONDITION)					\
479 	} while (B_FALSE)
480 #else
481 #define	EFSYS_MEM_READO(_esmp, _offset, _eop)				\
482 	do {								\
483 		uint32_t *addr;						\
484 									\
485 		_NOTE(CONSTANTCONDITION)				\
486 		KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_oword_t)),	\
487 		    ("not power of 2 aligned"));			\
488 									\
489 		addr = (void *)((_esmp)->esm_base + (_offset));		\
490 									\
491 		(_eop)->eo_u32[0] = *addr++;				\
492 		(_eop)->eo_u32[1] = *addr++;				\
493 		(_eop)->eo_u32[2] = *addr++;				\
494 		(_eop)->eo_u32[3] = *addr;				\
495 									\
496 		EFSYS_PROBE5(mem_reado, unsigned int, (_offset),	\
497 		    uint32_t, (_eop)->eo_u32[3],			\
498 		    uint32_t, (_eop)->eo_u32[2],			\
499 		    uint32_t, (_eop)->eo_u32[1],			\
500 		    uint32_t, (_eop)->eo_u32[0]);			\
501 									\
502 	_NOTE(CONSTANTCONDITION)					\
503 	} while (B_FALSE)
504 #endif
505 
506 #define	EFSYS_MEM_WRITED(_esmp, _offset, _edp)				\
507 	do {								\
508 		uint32_t *addr;						\
509 									\
510 		_NOTE(CONSTANTCONDITION)				\
511 		KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_dword_t)),	\
512 		    ("not power of 2 aligned"));			\
513 									\
514 		EFSYS_PROBE2(mem_writed, unsigned int, (_offset),	\
515 		    uint32_t, (_edp)->ed_u32[0]);			\
516 									\
517 		addr = (void *)((_esmp)->esm_base + (_offset));		\
518 									\
519 		*addr = (_edp)->ed_u32[0];				\
520 									\
521 	_NOTE(CONSTANTCONDITION)					\
522 	} while (B_FALSE)
523 
524 #if defined(__x86_64__)
525 #define	EFSYS_MEM_WRITEQ(_esmp, _offset, _eqp)				\
526 	do {								\
527 		uint64_t *addr;						\
528 									\
529 		_NOTE(CONSTANTCONDITION)				\
530 		KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_qword_t)),	\
531 		    ("not power of 2 aligned"));			\
532 									\
533 		EFSYS_PROBE3(mem_writeq, unsigned int, (_offset),	\
534 		    uint32_t, (_eqp)->eq_u32[1],			\
535 		    uint32_t, (_eqp)->eq_u32[0]);			\
536 									\
537 		addr = (void *)((_esmp)->esm_base + (_offset));		\
538 									\
539 		*addr   = (_eqp)->eq_u64[0];				\
540 									\
541 	_NOTE(CONSTANTCONDITION)					\
542 	} while (B_FALSE)
543 
544 #else
545 #define	EFSYS_MEM_WRITEQ(_esmp, _offset, _eqp)				\
546 	do {								\
547 		uint32_t *addr;						\
548 									\
549 		_NOTE(CONSTANTCONDITION)				\
550 		KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_qword_t)),	\
551 		    ("not power of 2 aligned"));			\
552 									\
553 		EFSYS_PROBE3(mem_writeq, unsigned int, (_offset),	\
554 		    uint32_t, (_eqp)->eq_u32[1],			\
555 		    uint32_t, (_eqp)->eq_u32[0]);			\
556 									\
557 		addr = (void *)((_esmp)->esm_base + (_offset));		\
558 									\
559 		*addr++ = (_eqp)->eq_u32[0];				\
560 		*addr   = (_eqp)->eq_u32[1];				\
561 									\
562 	_NOTE(CONSTANTCONDITION)					\
563 	} while (B_FALSE)
564 #endif
565 
566 #if defined(__x86_64__)
567 #define	EFSYS_MEM_WRITEO(_esmp, _offset, _eop)				\
568 	do {								\
569 		uint64_t *addr;						\
570 									\
571 		_NOTE(CONSTANTCONDITION)				\
572 		KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_oword_t)),	\
573 		    ("not power of 2 aligned"));			\
574 									\
575 		EFSYS_PROBE5(mem_writeo, unsigned int, (_offset),	\
576 		    uint32_t, (_eop)->eo_u32[3],			\
577 		    uint32_t, (_eop)->eo_u32[2],			\
578 		    uint32_t, (_eop)->eo_u32[1],			\
579 		    uint32_t, (_eop)->eo_u32[0]);			\
580 									\
581 		addr = (void *)((_esmp)->esm_base + (_offset));		\
582 									\
583 		*addr++ = (_eop)->eo_u64[0];				\
584 		*addr   = (_eop)->eo_u64[1];				\
585 									\
586 	_NOTE(CONSTANTCONDITION)					\
587 	} while (B_FALSE)
588 #else
589 #define	EFSYS_MEM_WRITEO(_esmp, _offset, _eop)				\
590 	do {								\
591 		uint32_t *addr;						\
592 									\
593 		_NOTE(CONSTANTCONDITION)				\
594 		KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_oword_t)),	\
595 		    ("not power of 2 aligned"));			\
596 									\
597 		EFSYS_PROBE5(mem_writeo, unsigned int, (_offset),	\
598 		    uint32_t, (_eop)->eo_u32[3],			\
599 		    uint32_t, (_eop)->eo_u32[2],			\
600 		    uint32_t, (_eop)->eo_u32[1],			\
601 		    uint32_t, (_eop)->eo_u32[0]);			\
602 									\
603 		addr = (void *)((_esmp)->esm_base + (_offset));		\
604 									\
605 		*addr++ = (_eop)->eo_u32[0];				\
606 		*addr++ = (_eop)->eo_u32[1];				\
607 		*addr++ = (_eop)->eo_u32[2];				\
608 		*addr   = (_eop)->eo_u32[3];				\
609 									\
610 	_NOTE(CONSTANTCONDITION)					\
611 	} while (B_FALSE)
612 #endif
613 
614 #define	EFSYS_MEM_ADDR(_esmp)						\
615 	((_esmp)->esm_addr)
616 
617 #define	EFSYS_MEM_IS_NULL(_esmp)					\
618 	((_esmp)->esm_base == NULL)
619 
620 /* BAR */
621 
622 #define	SFXGE_LOCK_NAME_MAX	16
623 
624 typedef struct efsys_bar_s {
625 	struct mtx		esb_lock;
626 	char			esb_lock_name[SFXGE_LOCK_NAME_MAX];
627 	bus_space_tag_t		esb_tag;
628 	bus_space_handle_t	esb_handle;
629 	int			esb_rid;
630 	struct resource		*esb_res;
631 } efsys_bar_t;
632 
633 #define	SFXGE_BAR_LOCK_INIT(_esbp, _ifname)				\
634 	do {								\
635 		snprintf((_esbp)->esb_lock_name,			\
636 			 sizeof((_esbp)->esb_lock_name),		\
637 			 "%s:bar", (_ifname));				\
638 		mtx_init(&(_esbp)->esb_lock, (_esbp)->esb_lock_name,	\
639 			 NULL, MTX_DEF);				\
640 	_NOTE(CONSTANTCONDITION)					\
641 	} while (B_FALSE)
642 #define	SFXGE_BAR_LOCK_DESTROY(_esbp)					\
643 	mtx_destroy(&(_esbp)->esb_lock)
644 #define	SFXGE_BAR_LOCK(_esbp)						\
645 	mtx_lock(&(_esbp)->esb_lock)
646 #define	SFXGE_BAR_UNLOCK(_esbp)						\
647 	mtx_unlock(&(_esbp)->esb_lock)
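
/*
 * Illustrative sketch: a register read through the BAR accessors below,
 * taking the BAR lock for the duration of the access ("esbp" and
 * "reg_offset" are hypothetical).
 *
 *	efx_dword_t dword;
 *
 *	EFSYS_BAR_READD(esbp, reg_offset, &dword, B_TRUE);
 */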
648 
649 #define	EFSYS_BAR_READD(_esbp, _offset, _edp, _lock)			\
650 	do {								\
651 		_NOTE(CONSTANTCONDITION)				\
652 		KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_dword_t)),	\
653 		    ("not power of 2 aligned"));			\
654 									\
655 		_NOTE(CONSTANTCONDITION)				\
656 		if (_lock)						\
657 			SFXGE_BAR_LOCK(_esbp);				\
658 									\
659 		(_edp)->ed_u32[0] = bus_space_read_stream_4(		\
660 		    (_esbp)->esb_tag, (_esbp)->esb_handle,		\
661 		    (_offset));						\
662 									\
663 		EFSYS_PROBE2(bar_readd, unsigned int, (_offset),	\
664 		    uint32_t, (_edp)->ed_u32[0]);			\
665 									\
666 		_NOTE(CONSTANTCONDITION)				\
667 		if (_lock)						\
668 			SFXGE_BAR_UNLOCK(_esbp);			\
669 	_NOTE(CONSTANTCONDITION)					\
670 	} while (B_FALSE)
671 
672 #if defined(SFXGE_USE_BUS_SPACE_8)
673 #define	EFSYS_BAR_READQ(_esbp, _offset, _eqp)				\
674 	do {								\
675 		_NOTE(CONSTANTCONDITION)				\
676 		KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_qword_t)),	\
677 		    ("not power of 2 aligned"));			\
678 									\
679 		SFXGE_BAR_LOCK(_esbp);					\
680 									\
681 		(_eqp)->eq_u64[0] = bus_space_read_stream_8(		\
682 		    (_esbp)->esb_tag, (_esbp)->esb_handle,		\
683 		    (_offset));						\
684 									\
685 		EFSYS_PROBE3(bar_readq, unsigned int, (_offset),	\
686 		    uint32_t, (_eqp)->eq_u32[1],			\
687 		    uint32_t, (_eqp)->eq_u32[0]);			\
688 									\
689 		SFXGE_BAR_UNLOCK(_esbp);				\
690 	_NOTE(CONSTANTCONDITION)					\
691 	} while (B_FALSE)
692 
693 #define	EFSYS_BAR_READO(_esbp, _offset, _eop, _lock)			\
694 	do {								\
695 		_NOTE(CONSTANTCONDITION)				\
696 		KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_oword_t)),	\
697 		    ("not power of 2 aligned"));			\
698 									\
699 		_NOTE(CONSTANTCONDITION)				\
700 		if (_lock)						\
701 			SFXGE_BAR_LOCK(_esbp);				\
702 									\
703 		(_eop)->eo_u64[0] = bus_space_read_stream_8(		\
704 		    (_esbp)->esb_tag, (_esbp)->esb_handle,		\
705 		    (_offset));						\
706 		(_eop)->eo_u64[1] = bus_space_read_stream_8(		\
707 		    (_esbp)->esb_tag, (_esbp)->esb_handle,		\
708 		    (_offset) + 8);					\
709 									\
710 		EFSYS_PROBE5(bar_reado, unsigned int, (_offset),	\
711 		    uint32_t, (_eop)->eo_u32[3],			\
712 		    uint32_t, (_eop)->eo_u32[2],			\
713 		    uint32_t, (_eop)->eo_u32[1],			\
714 		    uint32_t, (_eop)->eo_u32[0]);			\
715 									\
716 		_NOTE(CONSTANTCONDITION)				\
717 		if (_lock)						\
718 			SFXGE_BAR_UNLOCK(_esbp);			\
719 	_NOTE(CONSTANTCONDITION)					\
720 	} while (B_FALSE)
721 
722 #else
723 #define	EFSYS_BAR_READQ(_esbp, _offset, _eqp)				\
724 	do {								\
725 		_NOTE(CONSTANTCONDITION)				\
726 		KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_qword_t)),	\
727 		    ("not power of 2 aligned"));			\
728 									\
729 		SFXGE_BAR_LOCK(_esbp);					\
730 									\
731 		(_eqp)->eq_u32[0] = bus_space_read_stream_4(		\
732 		    (_esbp)->esb_tag, (_esbp)->esb_handle,		\
733 		    (_offset));						\
734 		(_eqp)->eq_u32[1] = bus_space_read_stream_4(		\
735 		    (_esbp)->esb_tag, (_esbp)->esb_handle,		\
736 		    (_offset) + 4);					\
737 									\
738 		EFSYS_PROBE3(bar_readq, unsigned int, (_offset),	\
739 		    uint32_t, (_eqp)->eq_u32[1],			\
740 		    uint32_t, (_eqp)->eq_u32[0]);			\
741 									\
742 		SFXGE_BAR_UNLOCK(_esbp);				\
743 	_NOTE(CONSTANTCONDITION)					\
744 	} while (B_FALSE)
745 
746 #define	EFSYS_BAR_READO(_esbp, _offset, _eop, _lock)			\
747 	do {								\
748 		_NOTE(CONSTANTCONDITION)				\
749 		KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_oword_t)),	\
750 		    ("not power of 2 aligned"));			\
751 									\
752 		_NOTE(CONSTANTCONDITION)				\
753 		if (_lock)						\
754 			SFXGE_BAR_LOCK(_esbp);				\
755 									\
756 		(_eop)->eo_u32[0] = bus_space_read_stream_4(		\
757 		    (_esbp)->esb_tag, (_esbp)->esb_handle,		\
758 		    (_offset));						\
759 		(_eop)->eo_u32[1] = bus_space_read_stream_4(		\
760 		    (_esbp)->esb_tag, (_esbp)->esb_handle,		\
761 		    (_offset) + 4);					\
762 		(_eop)->eo_u32[2] = bus_space_read_stream_4(		\
763 		    (_esbp)->esb_tag, (_esbp)->esb_handle,		\
764 		    (_offset) + 8);					\
765 		(_eop)->eo_u32[3] = bus_space_read_stream_4(		\
766 		    (_esbp)->esb_tag, (_esbp)->esb_handle,		\
767 		    (_offset) + 12);					\
768 									\
769 		EFSYS_PROBE5(bar_reado, unsigned int, (_offset),	\
770 		    uint32_t, (_eop)->eo_u32[3],			\
771 		    uint32_t, (_eop)->eo_u32[2],			\
772 		    uint32_t, (_eop)->eo_u32[1],			\
773 		    uint32_t, (_eop)->eo_u32[0]);			\
774 									\
775 		_NOTE(CONSTANTCONDITION)				\
776 		if (_lock)						\
777 			SFXGE_BAR_UNLOCK(_esbp);			\
778 	_NOTE(CONSTANTCONDITION)					\
779 	} while (B_FALSE)
780 #endif
781 
782 #define	EFSYS_BAR_WRITED(_esbp, _offset, _edp, _lock)			\
783 	do {								\
784 		_NOTE(CONSTANTCONDITION)				\
785 		KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_dword_t)),	\
786 		    ("not power of 2 aligned"));			\
787 									\
788 		_NOTE(CONSTANTCONDITION)				\
789 		if (_lock)						\
790 			SFXGE_BAR_LOCK(_esbp);				\
791 									\
792 		EFSYS_PROBE2(bar_writed, unsigned int, (_offset),	\
793 		    uint32_t, (_edp)->ed_u32[0]);			\
794 									\
795 		/*							\
796 		 * Make sure that previous writes to the dword have	\
797 		 * been done. This should be cheaper than a barrier just	\
798 		 * after the write below.				\
799 		 */							\
800 		bus_space_barrier((_esbp)->esb_tag, (_esbp)->esb_handle,\
801 		    (_offset), sizeof (efx_dword_t),			\
802 		    BUS_SPACE_BARRIER_WRITE);				\
803 		bus_space_write_stream_4((_esbp)->esb_tag,		\
804 		    (_esbp)->esb_handle,				\
805 		    (_offset), (_edp)->ed_u32[0]);			\
806 									\
807 		_NOTE(CONSTANTCONDITION)				\
808 		if (_lock)						\
809 			SFXGE_BAR_UNLOCK(_esbp);			\
810 	_NOTE(CONSTANTCONDITION)					\
811 	} while (B_FALSE)
812 
813 #if defined(SFXGE_USE_BUS_SPACE_8)
814 #define	EFSYS_BAR_WRITEQ(_esbp, _offset, _eqp)				\
815 	do {								\
816 		_NOTE(CONSTANTCONDITION)				\
817 		KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_qword_t)),	\
818 		    ("not power of 2 aligned"));			\
819 									\
820 		SFXGE_BAR_LOCK(_esbp);					\
821 									\
822 		EFSYS_PROBE3(bar_writeq, unsigned int, (_offset),	\
823 		    uint32_t, (_eqp)->eq_u32[1],			\
824 		    uint32_t, (_eqp)->eq_u32[0]);			\
825 									\
826 		/*							\
827 		 * Make sure that previous writes to the qword have	\
828 		 * been done. This should be cheaper than a barrier just	\
829 		 * after the write below.				\
830 		 */							\
831 		bus_space_barrier((_esbp)->esb_tag, (_esbp)->esb_handle,\
832 		    (_offset), sizeof (efx_qword_t),			\
833 		    BUS_SPACE_BARRIER_WRITE);				\
834 		bus_space_write_stream_8((_esbp)->esb_tag,		\
835 		    (_esbp)->esb_handle,				\
836 		    (_offset), (_eqp)->eq_u64[0]);			\
837 									\
838 		SFXGE_BAR_UNLOCK(_esbp);				\
839 	_NOTE(CONSTANTCONDITION)					\
840 	} while (B_FALSE)
841 #else
842 #define	EFSYS_BAR_WRITEQ(_esbp, _offset, _eqp)				\
843 	do {								\
844 		_NOTE(CONSTANTCONDITION)				\
845 		KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_qword_t)),	\
846 		    ("not power of 2 aligned"));			\
847 									\
848 		SFXGE_BAR_LOCK(_esbp);					\
849 									\
850 		EFSYS_PROBE3(bar_writeq, unsigned int, (_offset),	\
851 		    uint32_t, (_eqp)->eq_u32[1],			\
852 		    uint32_t, (_eqp)->eq_u32[0]);			\
853 									\
854 		/*							\
855 		 * Make sure that previous writes to the qword have	\
856 		 * been done. This should be cheaper than a barrier just	\
857 		 * after the last write below.				\
858 		 */							\
859 		bus_space_barrier((_esbp)->esb_tag, (_esbp)->esb_handle,\
860 		    (_offset), sizeof (efx_qword_t),			\
861 		    BUS_SPACE_BARRIER_WRITE);				\
862 		bus_space_write_stream_4((_esbp)->esb_tag,		\
863 		    (_esbp)->esb_handle,				\
864 		    (_offset), (_eqp)->eq_u32[0]);			\
865 		/*							\
866 		 * The last dword must be written last, so barrier the	\
867 		 * entire qword to make sure that neither the above	\
868 		 * nor the below writes are reordered.			\
869 		 */							\
870 		bus_space_barrier((_esbp)->esb_tag, (_esbp)->esb_handle,\
871 		    (_offset), sizeof (efx_qword_t),			\
872 		    BUS_SPACE_BARRIER_WRITE);				\
873 		bus_space_write_stream_4((_esbp)->esb_tag,		\
874 		    (_esbp)->esb_handle,				\
875 		    (_offset) + 4, (_eqp)->eq_u32[1]);			\
876 									\
877 		SFXGE_BAR_UNLOCK(_esbp);				\
878 	_NOTE(CONSTANTCONDITION)					\
879 	} while (B_FALSE)
880 #endif
881 
882 /*
883  * Guarantees 64-bit aligned 64-bit writes to a write-combined BAR mapping
884  * (required by PIO hardware)
885  */
886 #define	EFSYS_BAR_WC_WRITEQ(_esbp, _offset, _eqp)			\
887 	do {								\
888 		_NOTE(CONSTANTCONDITION)				\
889 		KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_qword_t)),	\
890 		    ("not power of 2 aligned"));			\
891 									\
892 		(void) (_esbp);						\
893 									\
894 		/* FIXME: Perform a 64-bit write */			\
895 		KASSERT(0, ("not implemented"));			\
896 									\
897 	_NOTE(CONSTANTCONDITION)					\
898 	} while (B_FALSE)
899 
900 #if defined(SFXGE_USE_BUS_SPACE_8)
901 #define	EFSYS_BAR_WRITEO(_esbp, _offset, _eop, _lock)			\
902 	do {								\
903 		_NOTE(CONSTANTCONDITION)				\
904 		KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_oword_t)),	\
905 		    ("not power of 2 aligned"));			\
906 									\
907 		_NOTE(CONSTANTCONDITION)				\
908 		if (_lock)						\
909 			SFXGE_BAR_LOCK(_esbp);				\
910 									\
911 		EFSYS_PROBE5(bar_writeo, unsigned int, (_offset),	\
912 		    uint32_t, (_eop)->eo_u32[3],			\
913 		    uint32_t, (_eop)->eo_u32[2],			\
914 		    uint32_t, (_eop)->eo_u32[1],			\
915 		    uint32_t, (_eop)->eo_u32[0]);			\
916 									\
917 		/*							\
918 		 * Make sure that previous writes to the oword have	\
919 		 * been done. This should be cheaper than a barrier just	\
920 		 * after the last write below.				\
921 		 */							\
922 		bus_space_barrier((_esbp)->esb_tag, (_esbp)->esb_handle,\
923 		    (_offset), sizeof (efx_oword_t),			\
924 		    BUS_SPACE_BARRIER_WRITE);				\
925 		bus_space_write_stream_8((_esbp)->esb_tag,		\
926 		    (_esbp)->esb_handle,				\
927 		    (_offset), (_eop)->eo_u64[0]);			\
928 		/*							\
929 		 * The last qword must be written last, so barrier the	\
930 		 * entire oword to make sure that neither the above	\
931 		 * nor the below writes are reordered.			\
932 		 */							\
933 		bus_space_barrier((_esbp)->esb_tag, (_esbp)->esb_handle,\
934 		    (_offset), sizeof (efx_oword_t),			\
935 		    BUS_SPACE_BARRIER_WRITE);				\
936 		bus_space_write_stream_8((_esbp)->esb_tag,		\
937 		    (_esbp)->esb_handle,				\
938 		    (_offset) + 8, (_eop)->eo_u64[1]);			\
939 									\
940 		_NOTE(CONSTANTCONDITION)				\
941 		if (_lock)						\
942 			SFXGE_BAR_UNLOCK(_esbp);			\
943 	_NOTE(CONSTANTCONDITION)					\
944 	} while (B_FALSE)
945 
946 #else
947 #define	EFSYS_BAR_WRITEO(_esbp, _offset, _eop, _lock)			\
948 	do {								\
949 		_NOTE(CONSTANTCONDITION)				\
950 		KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_oword_t)),	\
951 		    ("not power of 2 aligned"));			\
952 									\
953 		_NOTE(CONSTANTCONDITION)				\
954 		if (_lock)						\
955 			SFXGE_BAR_LOCK(_esbp);				\
956 									\
957 		EFSYS_PROBE5(bar_writeo, unsigned int, (_offset),	\
958 		    uint32_t, (_eop)->eo_u32[3],			\
959 		    uint32_t, (_eop)->eo_u32[2],			\
960 		    uint32_t, (_eop)->eo_u32[1],			\
961 		    uint32_t, (_eop)->eo_u32[0]);			\
962 									\
963 		/*							\
964 		 * Make sure that previous writes to the oword have	\
965 		 * been done. This should be cheaper than a barrier just	\
966 		 * after the last write below.				\
967 		 */							\
968 		bus_space_barrier((_esbp)->esb_tag, (_esbp)->esb_handle,\
969 		    (_offset), sizeof (efx_oword_t),			\
970 		    BUS_SPACE_BARRIER_WRITE);				\
971 		bus_space_write_stream_4((_esbp)->esb_tag,		\
972 		    (_esbp)->esb_handle,				\
973 		    (_offset), (_eop)->eo_u32[0]);			\
974 		bus_space_write_stream_4((_esbp)->esb_tag,		\
975 		    (_esbp)->esb_handle,				\
976 		    (_offset) + 4, (_eop)->eo_u32[1]);			\
977 		bus_space_write_stream_4((_esbp)->esb_tag,		\
978 		    (_esbp)->esb_handle,				\
979 		    (_offset) + 8, (_eop)->eo_u32[2]);			\
980 		/*							\
981 		 * The last dword must be written last, so barrier the	\
982 		 * entire oword to make sure that neither the above	\
983 		 * nor the below writes are reordered.			\
984 		 */							\
985 		bus_space_barrier((_esbp)->esb_tag, (_esbp)->esb_handle,\
986 		    (_offset), sizeof (efx_oword_t),			\
987 		    BUS_SPACE_BARRIER_WRITE);				\
988 		bus_space_write_stream_4((_esbp)->esb_tag,		\
989 		    (_esbp)->esb_handle,				\
990 		    (_offset) + 12, (_eop)->eo_u32[3]);			\
991 									\
992 		_NOTE(CONSTANTCONDITION)				\
993 		if (_lock)						\
994 			SFXGE_BAR_UNLOCK(_esbp);			\
995 	_NOTE(CONSTANTCONDITION)					\
996 	} while (B_FALSE)
997 #endif
998 
999 /* Use the standard octo-word write for doorbell writes */
1000 #define	EFSYS_BAR_DOORBELL_WRITEO(_esbp, _offset, _eop)			\
1001 	do {								\
1002 		EFSYS_BAR_WRITEO((_esbp), (_offset), (_eop), B_FALSE);	\
1003 	_NOTE(CONSTANTCONDITION)					\
1004 	} while (B_FALSE)
1005 
1006 /* SPIN */
1007 
1008 #define	EFSYS_SPIN(_us)							\
1009 	do {								\
1010 		DELAY(_us);						\
1011 	_NOTE(CONSTANTCONDITION)					\
1012 	} while (B_FALSE)
1013 
1014 #define	EFSYS_SLEEP	EFSYS_SPIN
1015 
1016 /* BARRIERS */
1017 
1018 #define	EFSYS_MEM_READ_BARRIER()	rmb()
1019 #define	EFSYS_PIO_WRITE_BARRIER()
1020 
1021 /* DMA SYNC */
1022 #define	EFSYS_DMA_SYNC_FOR_KERNEL(_esmp, _offset, _size)		\
1023 	do {								\
1024 		bus_dmamap_sync((_esmp)->esm_tag,			\
1025 		    (_esmp)->esm_map,					\
1026 		    BUS_DMASYNC_POSTREAD);				\
1027 	_NOTE(CONSTANTCONDITION)					\
1028 	} while (B_FALSE)
1029 
1030 #define	EFSYS_DMA_SYNC_FOR_DEVICE(_esmp, _offset, _size)		\
1031 	do {								\
1032 		bus_dmamap_sync((_esmp)->esm_tag,			\
1033 		    (_esmp)->esm_map,					\
1034 		    BUS_DMASYNC_PREWRITE);				\
1035 	_NOTE(CONSTANTCONDITION)					\
1036 	} while (B_FALSE)
1037 
1038 /* TIMESTAMP */
1039 
1040 typedef	clock_t	efsys_timestamp_t;
1041 
1042 #define	EFSYS_TIMESTAMP(_usp)						\
1043 	do {								\
1044 		clock_t now;						\
1045 									\
1046 		now = ticks;						\
1047 		*(_usp) = now * hz / 1000000;				\
1048 	_NOTE(CONSTANTCONDITION)					\
1049 	} while (B_FALSE)
1050 
1051 /* KMEM */
1052 
1053 #define	EFSYS_KMEM_ALLOC(_esip, _size, _p)				\
1054 	do {								\
1055 		(_esip) = (_esip);					\
1056 		/*							\
1057 		 * The macro is used in non-sleepable contexts, for	\
1058 		 * example, while holding a mutex.			\
1059 		 */							\
1060 		(_p) = malloc((_size), M_SFXGE, M_NOWAIT|M_ZERO);	\
1061 	_NOTE(CONSTANTCONDITION)					\
1062 	} while (B_FALSE)
1063 
1064 #define	EFSYS_KMEM_FREE(_esip, _size, _p)				\
1065 	do {								\
1066 		(void) (_esip);						\
1067 		(void) (_size);						\
1068 		free((_p), M_SFXGE);					\
1069 	_NOTE(CONSTANTCONDITION)					\
1070 	} while (B_FALSE)
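
/*
 * Illustrative sketch: allocations made with EFSYS_KMEM_ALLOC() are
 * released with EFSYS_KMEM_FREE() using the same size ("esip", "size"
 * and "datap" are hypothetical; the allocation may fail since it uses
 * M_NOWAIT).
 *
 *	void *datap;
 *
 *	EFSYS_KMEM_ALLOC(esip, size, datap);
 *	if (datap == NULL)
 *		return (ENOMEM);
 *	...
 *	EFSYS_KMEM_FREE(esip, size, datap);
 */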
1071 
1072 /* LOCK */
1073 
1074 typedef struct efsys_lock_s {
1075 	struct mtx	lock;
1076 	char		lock_name[SFXGE_LOCK_NAME_MAX];
1077 } efsys_lock_t;
1078 
1079 #define	SFXGE_EFSYS_LOCK_INIT(_eslp, _ifname, _label)			\
1080 	do {								\
1081 		efsys_lock_t *__eslp = (_eslp);				\
1082 									\
1083 		snprintf((__eslp)->lock_name,				\
1084 			 sizeof((__eslp)->lock_name),			\
1085 			 "%s:%s", (_ifname), (_label));			\
1086 		mtx_init(&(__eslp)->lock, (__eslp)->lock_name,		\
1087 			 NULL, MTX_DEF);				\
1088 	} while (B_FALSE)
1089 #define	SFXGE_EFSYS_LOCK_DESTROY(_eslp)					\
1090 	mtx_destroy(&(_eslp)->lock)
1091 #define	SFXGE_EFSYS_LOCK(_eslp)						\
1092 	mtx_lock(&(_eslp)->lock)
1093 #define	SFXGE_EFSYS_UNLOCK(_eslp)					\
1094 	mtx_unlock(&(_eslp)->lock)
1095 #define	SFXGE_EFSYS_LOCK_ASSERT_OWNED(_eslp)				\
1096 	mtx_assert(&(_eslp)->lock, MA_OWNED)
1097 
1098 #define	EFSYS_LOCK_MAGIC	0x000010c4
1099 
1100 #define	EFSYS_LOCK(_lockp, _state)					\
1101 	do {								\
1102 		SFXGE_EFSYS_LOCK(_lockp);				\
1103 		(_state) = EFSYS_LOCK_MAGIC;				\
1104 	_NOTE(CONSTANTCONDITION)					\
1105 	} while (B_FALSE)
1106 
1107 #define	EFSYS_UNLOCK(_lockp, _state)					\
1108 	do {								\
1109 		if ((_state) != EFSYS_LOCK_MAGIC)			\
1110 			KASSERT(B_FALSE, ("not locked"));		\
1111 		SFXGE_EFSYS_UNLOCK(_lockp);				\
1112 	_NOTE(CONSTANTCONDITION)					\
1113 	} while (B_FALSE)
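
/*
 * Illustrative sketch: common code brackets critical sections with
 * EFSYS_LOCK()/EFSYS_UNLOCK(), passing the same state variable to both
 * ("eslp" is hypothetical; the state is only checked against
 * EFSYS_LOCK_MAGIC here).
 *
 *	unsigned int state;
 *
 *	EFSYS_LOCK(eslp, state);
 *	(touch data protected by the lock)
 *	EFSYS_UNLOCK(eslp, state);
 */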
1114 
1115 /* STAT */
1116 
1117 typedef uint64_t		efsys_stat_t;
1118 
1119 #define	EFSYS_STAT_INCR(_knp, _delta) 					\
1120 	do {								\
1121 		*(_knp) += (_delta);					\
1122 	_NOTE(CONSTANTCONDITION)					\
1123 	} while (B_FALSE)
1124 
1125 #define	EFSYS_STAT_DECR(_knp, _delta) 					\
1126 	do {								\
1127 		*(_knp) -= (_delta);					\
1128 	_NOTE(CONSTANTCONDITION)					\
1129 	} while (B_FALSE)
1130 
1131 #define	EFSYS_STAT_SET(_knp, _val)					\
1132 	do {								\
1133 		*(_knp) = (_val);					\
1134 	_NOTE(CONSTANTCONDITION)					\
1135 	} while (B_FALSE)
1136 
1137 #define	EFSYS_STAT_SET_QWORD(_knp, _valp)				\
1138 	do {								\
1139 		*(_knp) = le64toh((_valp)->eq_u64[0]);			\
1140 	_NOTE(CONSTANTCONDITION)					\
1141 	} while (B_FALSE)
1142 
1143 #define	EFSYS_STAT_SET_DWORD(_knp, _valp)				\
1144 	do {								\
1145 		*(_knp) = le32toh((_valp)->ed_u32[0]);			\
1146 	_NOTE(CONSTANTCONDITION)					\
1147 	} while (B_FALSE)
1148 
1149 #define	EFSYS_STAT_INCR_QWORD(_knp, _valp)				\
1150 	do {								\
1151 		*(_knp) += le64toh((_valp)->eq_u64[0]);			\
1152 	_NOTE(CONSTANTCONDITION)					\
1153 	} while (B_FALSE)
1154 
1155 #define	EFSYS_STAT_SUBR_QWORD(_knp, _valp)				\
1156 	do {								\
1157 		*(_knp) -= le64toh((_valp)->eq_u64[0]);			\
1158 	_NOTE(CONSTANTCONDITION)					\
1159 	} while (B_FALSE)
1160 
1161 /* ERR */
1162 
1163 extern void	sfxge_err(efsys_identifier_t *, unsigned int,
1164 		    uint32_t, uint32_t);
1165 
1166 #if EFSYS_OPT_DECODE_INTR_FATAL
1167 #define	EFSYS_ERR(_esip, _code, _dword0, _dword1)			\
1168 	do {								\
1169 		sfxge_err((_esip), (_code), (_dword0), (_dword1));	\
1170 	_NOTE(CONSTANTCONDITION)					\
1171 	} while (B_FALSE)
1172 #endif
1173 
1174 /* ASSERT */
1175 
1176 #define	EFSYS_ASSERT(_exp) do {						\
1177 	if (!(_exp))							\
1178 		panic("%s", #_exp);					\
1179 	} while (0)
1180 
1181 #define	EFSYS_ASSERT3(_x, _op, _y, _t) do {				\
1182 	const _t __x = (_t)(_x);					\
1183 	const _t __y = (_t)(_y);					\
1184 	if (!(__x _op __y))						\
1185 		panic("assertion failed at %s:%u", __FILE__, __LINE__);	\
1186 	} while (0)
1187 
1188 #define	EFSYS_ASSERT3U(_x, _op, _y)	EFSYS_ASSERT3(_x, _op, _y, uint64_t)
1189 #define	EFSYS_ASSERT3S(_x, _op, _y)	EFSYS_ASSERT3(_x, _op, _y, int64_t)
1190 #define	EFSYS_ASSERT3P(_x, _op, _y)	EFSYS_ASSERT3(_x, _op, _y, uintptr_t)
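
/*
 * Illustrative sketch: the three-way assertions compare two values
 * under an explicit type and panic with the failing file and line if
 * the relation does not hold ("index" and "esmp" are hypothetical).
 *
 *	EFSYS_ASSERT3U(index, <, 16);
 *	EFSYS_ASSERT3P(esmp, !=, NULL);
 */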
1191 
1192 /* ROTATE */
1193 
1194 #define	EFSYS_HAS_ROTL_DWORD 0
1195 
1196 #ifdef	__cplusplus
1197 }
1198 #endif
1199 
1200 #endif	/* _SYS_EFSYS_H */
1201