/*-
 * Copyright (c) 2010-2015 Solarflare Communications Inc.
 * All rights reserved.
 *
 * This software was developed in part by Philip Paeps under contract for
 * Solarflare Communications, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * The views and conclusions contained in the software and documentation are
 * those of the authors and should not be interpreted as representing official
 * policies, either expressed or implied, of the FreeBSD Project.
 *
 * $FreeBSD$
 */

#ifndef	_SYS_EFSYS_H
#define	_SYS_EFSYS_H

#ifdef	__cplusplus
extern "C" {
#endif

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>
#include <sys/sdt.h>
#include <sys/systm.h>

#include <machine/bus.h>
#include <machine/endian.h>

#define	EFSYS_HAS_UINT64 1
#if defined(__x86_64__)
#define	EFSYS_USE_UINT64 1
#else
#define	EFSYS_USE_UINT64 0
#endif
#define	EFSYS_HAS_SSE2_M128 0
#if _BYTE_ORDER == _BIG_ENDIAN
#define	EFSYS_IS_BIG_ENDIAN 1
#define	EFSYS_IS_LITTLE_ENDIAN 0
#elif _BYTE_ORDER == _LITTLE_ENDIAN
#define	EFSYS_IS_BIG_ENDIAN 0
#define	EFSYS_IS_LITTLE_ENDIAN 1
#endif
#include "efx_types.h"

/* Common code requires this */
#if __FreeBSD_version < 800068
#define	memmove(d, s, l) bcopy(s, d, l)
#endif

/* FreeBSD equivalents of Solaris things */
#ifndef _NOTE
#define	_NOTE(s)
#endif

#ifndef B_FALSE
#define	B_FALSE	FALSE
#endif
#ifndef B_TRUE
#define	B_TRUE	TRUE
#endif

#ifndef IS_P2ALIGNED
#define	IS_P2ALIGNED(v, a)	((((uintptr_t)(v)) & ((uintptr_t)(a) - 1)) == 0)
#endif

#ifndef P2ROUNDUP
#define	P2ROUNDUP(x, align)	(-(-(x) & -(align)))
#endif

#ifndef P2ALIGN
#define	P2ALIGN(_x, _a)		((_x) & -(_a))
#endif

#ifndef ISP2
#define	ISP2(x)			(((x) & ((x) - 1)) == 0)
#endif
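
/*
 * For example, with a power-of-two alignment argument:
 *
 *	IS_P2ALIGNED(0x1008, 8)	is true  (0x1008 & 0x7 == 0)
 *	P2ROUNDUP(0x1001, 8)	== 0x1008
 *	P2ALIGN(0x100f, 8)	== 0x1008
 *	ISP2(24) is false, ISP2(32) is true
 *
 * None of these helpers verify that the alignment really is a power of two.
 */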

#if defined(__x86_64__) && __FreeBSD_version >= 1000000

#define	SFXGE_USE_BUS_SPACE_8		1

#if !defined(bus_space_read_stream_8)

#define	bus_space_read_stream_8(t, h, o)				\
	bus_space_read_8((t), (h), (o))

#define	bus_space_write_stream_8(t, h, o, v)				\
	bus_space_write_8((t), (h), (o), (v))

#endif

#endif

#define	ENOTACTIVE EINVAL

/* Memory type to use on FreeBSD */
MALLOC_DECLARE(M_SFXGE);

/* Machine-dependent prefetch wrappers */
#if defined(__i386__) || defined(__amd64__)
static __inline void
prefetch_read_many(void *addr)
{

	__asm__(
	    "prefetcht0 (%0)"
	    :
	    : "r" (addr));
}

static __inline void
prefetch_read_once(void *addr)
{

	__asm__(
	    "prefetchnta (%0)"
	    :
	    : "r" (addr));
}
#elif defined(__sparc64__)
static __inline void
prefetch_read_many(void *addr)
{

	__asm__(
	    "prefetch [%0], 0"
	    :
	    : "r" (addr));
}

static __inline void
prefetch_read_once(void *addr)
{

	__asm__(
	    "prefetch [%0], 1"
	    :
	    : "r" (addr));
}
#else
static __inline void
prefetch_read_many(void *addr)
{

}

static __inline void
prefetch_read_once(void *addr)
{

}
#endif
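
/*
 * A hypothetical use, ahead of touching a descriptor ring entry (the
 * variable names are illustrative only): prefetch_read_many(next_desc) for
 * data that will be read repeatedly, prefetch_read_once(next_hdr) for data
 * read a single time.  On architectures without a suitable instruction both
 * wrappers compile to empty functions.
 */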

#if defined(__i386__) || defined(__amd64__)
#include <vm/vm.h>
#include <vm/pmap.h>
#endif
static __inline void
sfxge_map_mbuf_fast(bus_dma_tag_t tag, bus_dmamap_t map,
		    struct mbuf *m, bus_dma_segment_t *seg)
{
#if defined(__i386__) || defined(__amd64__)
	seg->ds_addr = pmap_kextract(mtod(m, vm_offset_t));
	seg->ds_len = m->m_len;
#else
	int nsegstmp;

	bus_dmamap_load_mbuf_sg(tag, map, m, seg, &nsegstmp, 0);
#endif
}
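
/*
 * On i386/amd64 the fast path above assumes the mbuf data area is virtually
 * contiguous and already mapped, so a single segment can be derived with
 * pmap_kextract() instead of a full busdma load.  Elsewhere it falls back to
 * bus_dmamap_load_mbuf_sg() and is expected to yield a single segment.  A
 * sketch of a caller (names are illustrative):
 *
 *	bus_dma_segment_t seg;
 *
 *	sfxge_map_mbuf_fast(txq_tag, txq_map, m, &seg);
 *	(seg.ds_addr and seg.ds_len then describe the mbuf data)
 */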

/* Modifiers used for Windows builds */
#define	__in
#define	__in_opt
#define	__in_ecount(_n)
#define	__in_ecount_opt(_n)
#define	__in_bcount(_n)
#define	__in_bcount_opt(_n)

#define	__out
#define	__out_opt
#define	__out_ecount(_n)
#define	__out_ecount_opt(_n)
#define	__out_bcount(_n)
#define	__out_bcount_opt(_n)

#define	__deref_out

#define	__inout
#define	__inout_opt
#define	__inout_ecount(_n)
#define	__inout_ecount_opt(_n)
#define	__inout_bcount(_n)
#define	__inout_bcount_opt(_n)
#define	__inout_bcount_full_opt(_n)

#define	__deref_out_bcount_opt(n)

#define	__checkReturn

#define	__drv_when(_p, _c)

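/*
 * The annotations expand to nothing here; they only document parameter
 * direction in the common-code prototypes, for example (a hypothetical
 * prototype, shown for illustration only):
 *
 *	extern	__checkReturn	int
 *	example_fn(
 *		__in		efsys_identifier_t *esip,
 *		__out		uint32_t *valuep);
 */
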
/* Code inclusion options */


#define	EFSYS_OPT_NAMES 1

#define	EFSYS_OPT_FALCON 0
#define	EFSYS_OPT_FALCON_NIC_CFG_OVERRIDE 0
#define	EFSYS_OPT_SIENA 1
#define	EFSYS_OPT_HUNTINGTON 1
#ifdef DEBUG
#define	EFSYS_OPT_CHECK_REG 1
#else
#define	EFSYS_OPT_CHECK_REG 0
#endif

#define	EFSYS_OPT_MCDI 1

#define	EFSYS_OPT_MAC_FALCON_GMAC 0
#define	EFSYS_OPT_MAC_FALCON_XMAC 0
#define	EFSYS_OPT_MAC_STATS 1

#define	EFSYS_OPT_LOOPBACK 0

#define	EFSYS_OPT_MON_NULL 0
#define	EFSYS_OPT_MON_LM87 0
#define	EFSYS_OPT_MON_MAX6647 0
#define	EFSYS_OPT_MON_MCDI 0
#define	EFSYS_OPT_MON_STATS 0

#define	EFSYS_OPT_PHY_NULL 0
#define	EFSYS_OPT_PHY_QT2022C2 0
#define	EFSYS_OPT_PHY_SFX7101 0
#define	EFSYS_OPT_PHY_TXC43128 0
#define	EFSYS_OPT_PHY_SFT9001 0
#define	EFSYS_OPT_PHY_QT2025C 0
#define	EFSYS_OPT_PHY_STATS 1
#define	EFSYS_OPT_PHY_PROPS 0
#define	EFSYS_OPT_PHY_BIST 0
#define	EFSYS_OPT_BIST 1
#define	EFSYS_OPT_PHY_LED_CONTROL 1
#define	EFSYS_OPT_PHY_FLAGS 0

#define	EFSYS_OPT_VPD 1
#define	EFSYS_OPT_NVRAM 1
#define	EFSYS_OPT_NVRAM_FALCON_BOOTROM 0
#define	EFSYS_OPT_NVRAM_SFT9001	0
#define	EFSYS_OPT_NVRAM_SFX7101	0
#define	EFSYS_OPT_BOOTCFG 0

#define	EFSYS_OPT_PCIE_TUNE 0
#define	EFSYS_OPT_DIAG 0
#define	EFSYS_OPT_WOL 1
#define	EFSYS_OPT_RX_SCALE 1
#define	EFSYS_OPT_QSTATS 1
#define	EFSYS_OPT_FILTER 1
#define	EFSYS_OPT_MCAST_FILTER_LIST 1
#define	EFSYS_OPT_RX_SCATTER 0
#define	EFSYS_OPT_RX_HDR_SPLIT 0

#define	EFSYS_OPT_EV_PREFETCH 0

#define	EFSYS_OPT_DECODE_INTR_FATAL 1

/* ID */

typedef struct __efsys_identifier_s	efsys_identifier_t;

/* PROBE */

#ifndef DTRACE_PROBE

#define	EFSYS_PROBE(_name)

#define	EFSYS_PROBE1(_name, _type1, _arg1)

#define	EFSYS_PROBE2(_name, _type1, _arg1, _type2, _arg2)

#define	EFSYS_PROBE3(_name, _type1, _arg1, _type2, _arg2,		\
	    _type3, _arg3)

#define	EFSYS_PROBE4(_name, _type1, _arg1, _type2, _arg2,		\
	    _type3, _arg3, _type4, _arg4)

#define	EFSYS_PROBE5(_name, _type1, _arg1, _type2, _arg2,		\
	    _type3, _arg3, _type4, _arg4, _type5, _arg5)

#define	EFSYS_PROBE6(_name, _type1, _arg1, _type2, _arg2,		\
	    _type3, _arg3, _type4, _arg4, _type5, _arg5,		\
	    _type6, _arg6)

#define	EFSYS_PROBE7(_name, _type1, _arg1, _type2, _arg2,		\
	    _type3, _arg3, _type4, _arg4, _type5, _arg5,		\
	    _type6, _arg6, _type7, _arg7)

#else /* DTRACE_PROBE */

#define	EFSYS_PROBE(_name)						\
	DTRACE_PROBE(_name)

#define	EFSYS_PROBE1(_name, _type1, _arg1)				\
	DTRACE_PROBE1(_name, _type1, _arg1)

#define	EFSYS_PROBE2(_name, _type1, _arg1, _type2, _arg2)		\
	DTRACE_PROBE2(_name, _type1, _arg1, _type2, _arg2)

#define	EFSYS_PROBE3(_name, _type1, _arg1, _type2, _arg2,		\
	    _type3, _arg3)						\
	DTRACE_PROBE3(_name, _type1, _arg1, _type2, _arg2,		\
	    _type3, _arg3)

#define	EFSYS_PROBE4(_name, _type1, _arg1, _type2, _arg2,		\
	    _type3, _arg3, _type4, _arg4)				\
	DTRACE_PROBE4(_name, _type1, _arg1, _type2, _arg2,		\
	    _type3, _arg3, _type4, _arg4)

#ifdef DTRACE_PROBE5
#define	EFSYS_PROBE5(_name, _type1, _arg1, _type2, _arg2,		\
	    _type3, _arg3, _type4, _arg4, _type5, _arg5)		\
	DTRACE_PROBE5(_name, _type1, _arg1, _type2, _arg2,		\
	    _type3, _arg3, _type4, _arg4, _type5, _arg5)
#else
#define	EFSYS_PROBE5(_name, _type1, _arg1, _type2, _arg2,		\
	    _type3, _arg3, _type4, _arg4, _type5, _arg5)		\
	DTRACE_PROBE4(_name, _type1, _arg1, _type2, _arg2,		\
	    _type3, _arg3, _type4, _arg4)
#endif

#ifdef DTRACE_PROBE6
#define	EFSYS_PROBE6(_name, _type1, _arg1, _type2, _arg2,		\
	    _type3, _arg3, _type4, _arg4, _type5, _arg5,		\
	    _type6, _arg6)						\
	DTRACE_PROBE6(_name, _type1, _arg1, _type2, _arg2,		\
	    _type3, _arg3, _type4, _arg4, _type5, _arg5,		\
	    _type6, _arg6)
#else
#define	EFSYS_PROBE6(_name, _type1, _arg1, _type2, _arg2,		\
	    _type3, _arg3, _type4, _arg4, _type5, _arg5,		\
	    _type6, _arg6)						\
	EFSYS_PROBE5(_name, _type1, _arg1, _type2, _arg2,		\
	    _type3, _arg3, _type4, _arg4, _type5, _arg5)
#endif

#ifdef DTRACE_PROBE7
#define	EFSYS_PROBE7(_name, _type1, _arg1, _type2, _arg2,		\
	    _type3, _arg3, _type4, _arg4, _type5, _arg5,		\
	    _type6, _arg6, _type7, _arg7)				\
	DTRACE_PROBE7(_name, _type1, _arg1, _type2, _arg2,		\
	    _type3, _arg3, _type4, _arg4, _type5, _arg5,		\
	    _type6, _arg6, _type7, _arg7)
#else
#define	EFSYS_PROBE7(_name, _type1, _arg1, _type2, _arg2,		\
	    _type3, _arg3, _type4, _arg4, _type5, _arg5,		\
	    _type6, _arg6, _type7, _arg7)				\
	EFSYS_PROBE6(_name, _type1, _arg1, _type2, _arg2,		\
	    _type3, _arg3, _type4, _arg4, _type5, _arg5,		\
	    _type6, _arg6)
#endif

#endif /* DTRACE_PROBE */
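
/*
 * When the kernel provides DTRACE_PROBEn(), every EFSYS_PROBEn() above
 * becomes an SDT probe with the same name and arguments; otherwise it
 * compiles away entirely.  A usage sketch (the probe name and arguments are
 * illustrative only):
 *
 *	EFSYS_PROBE2(example_probe, unsigned int, index, uint32_t, value);
 */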

/* DMA */

typedef uint64_t		efsys_dma_addr_t;

typedef struct efsys_mem_s {
	bus_dma_tag_t		esm_tag;
	bus_dmamap_t		esm_map;
	caddr_t			esm_base;
	efsys_dma_addr_t	esm_addr;
} efsys_mem_t;
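
/*
 * esm_base is the kernel virtual address of the DMA buffer and esm_addr the
 * bus address the driver obtained from busdma; the EFSYS_MEM_*() accessors
 * below operate on esm_base, while EFSYS_MEM_ADDR() hands esm_addr to the
 * common code when it programs buffer addresses into the NIC.
 */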


#define	EFSYS_MEM_ZERO(_esmp, _size)					\
	do {								\
		(void) memset((_esmp)->esm_base, 0, (_size));		\
									\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)

#define	EFSYS_MEM_READD(_esmp, _offset, _edp)				\
	do {								\
		uint32_t *addr;						\
									\
		_NOTE(CONSTANTCONDITION)				\
		KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_dword_t)),	\
		    ("not power of 2 aligned"));			\
									\
		addr = (void *)((_esmp)->esm_base + (_offset));		\
									\
		(_edp)->ed_u32[0] = *addr;				\
									\
		EFSYS_PROBE2(mem_readd, unsigned int, (_offset),	\
		    uint32_t, (_edp)->ed_u32[0]);			\
									\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)

#if defined(__x86_64__)
#define	EFSYS_MEM_READQ(_esmp, _offset, _eqp)				\
	do {								\
		uint64_t *addr;						\
									\
		_NOTE(CONSTANTCONDITION)				\
		KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_qword_t)),	\
		    ("not power of 2 aligned"));			\
									\
		addr = (void *)((_esmp)->esm_base + (_offset));		\
									\
		(_eqp)->eq_u64[0] = *addr;				\
									\
		EFSYS_PROBE3(mem_readq, unsigned int, (_offset),	\
		    uint32_t, (_eqp)->eq_u32[1],			\
		    uint32_t, (_eqp)->eq_u32[0]);			\
									\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)
#else
#define	EFSYS_MEM_READQ(_esmp, _offset, _eqp)				\
	do {								\
		uint32_t *addr;						\
									\
		_NOTE(CONSTANTCONDITION)				\
		KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_qword_t)),	\
		    ("not power of 2 aligned"));			\
									\
		addr = (void *)((_esmp)->esm_base + (_offset));		\
									\
		(_eqp)->eq_u32[0] = *addr++;				\
		(_eqp)->eq_u32[1] = *addr;				\
									\
		EFSYS_PROBE3(mem_readq, unsigned int, (_offset),	\
		    uint32_t, (_eqp)->eq_u32[1],			\
		    uint32_t, (_eqp)->eq_u32[0]);			\
									\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)
#endif

#if defined(__x86_64__)
#define	EFSYS_MEM_READO(_esmp, _offset, _eop)				\
	do {								\
		uint64_t *addr;						\
									\
		_NOTE(CONSTANTCONDITION)				\
		KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_oword_t)),	\
		    ("not power of 2 aligned"));			\
									\
		addr = (void *)((_esmp)->esm_base + (_offset));		\
									\
		(_eop)->eo_u64[0] = *addr++;				\
		(_eop)->eo_u64[1] = *addr;				\
									\
		EFSYS_PROBE5(mem_reado, unsigned int, (_offset),	\
		    uint32_t, (_eop)->eo_u32[3],			\
		    uint32_t, (_eop)->eo_u32[2],			\
		    uint32_t, (_eop)->eo_u32[1],			\
		    uint32_t, (_eop)->eo_u32[0]);			\
									\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)
#else
#define	EFSYS_MEM_READO(_esmp, _offset, _eop)				\
	do {								\
		uint32_t *addr;						\
									\
		_NOTE(CONSTANTCONDITION)				\
		KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_oword_t)),	\
		    ("not power of 2 aligned"));			\
									\
		addr = (void *)((_esmp)->esm_base + (_offset));		\
									\
		(_eop)->eo_u32[0] = *addr++;				\
		(_eop)->eo_u32[1] = *addr++;				\
		(_eop)->eo_u32[2] = *addr++;				\
		(_eop)->eo_u32[3] = *addr;				\
									\
		EFSYS_PROBE5(mem_reado, unsigned int, (_offset),	\
		    uint32_t, (_eop)->eo_u32[3],			\
		    uint32_t, (_eop)->eo_u32[2],			\
		    uint32_t, (_eop)->eo_u32[1],			\
		    uint32_t, (_eop)->eo_u32[0]);			\
									\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)
#endif

#define	EFSYS_MEM_WRITED(_esmp, _offset, _edp)				\
	do {								\
		uint32_t *addr;						\
									\
		_NOTE(CONSTANTCONDITION)				\
		KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_dword_t)),	\
		    ("not power of 2 aligned"));			\
									\
		EFSYS_PROBE2(mem_writed, unsigned int, (_offset),	\
		    uint32_t, (_edp)->ed_u32[0]);			\
									\
		addr = (void *)((_esmp)->esm_base + (_offset));		\
									\
		*addr = (_edp)->ed_u32[0];				\
									\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)

#if defined(__x86_64__)
#define	EFSYS_MEM_WRITEQ(_esmp, _offset, _eqp)				\
	do {								\
		uint64_t *addr;						\
									\
		_NOTE(CONSTANTCONDITION)				\
		KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_qword_t)),	\
		    ("not power of 2 aligned"));			\
									\
		EFSYS_PROBE3(mem_writeq, unsigned int, (_offset),	\
		    uint32_t, (_eqp)->eq_u32[1],			\
		    uint32_t, (_eqp)->eq_u32[0]);			\
									\
		addr = (void *)((_esmp)->esm_base + (_offset));		\
									\
		*addr   = (_eqp)->eq_u64[0];				\
									\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)

#else
#define	EFSYS_MEM_WRITEQ(_esmp, _offset, _eqp)				\
	do {								\
		uint32_t *addr;						\
									\
		_NOTE(CONSTANTCONDITION)				\
		KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_qword_t)),	\
		    ("not power of 2 aligned"));			\
									\
		EFSYS_PROBE3(mem_writeq, unsigned int, (_offset),	\
		    uint32_t, (_eqp)->eq_u32[1],			\
		    uint32_t, (_eqp)->eq_u32[0]);			\
									\
		addr = (void *)((_esmp)->esm_base + (_offset));		\
									\
		*addr++ = (_eqp)->eq_u32[0];				\
		*addr   = (_eqp)->eq_u32[1];				\
									\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)
#endif

#if defined(__x86_64__)
#define	EFSYS_MEM_WRITEO(_esmp, _offset, _eop)				\
	do {								\
		uint64_t *addr;						\
									\
		_NOTE(CONSTANTCONDITION)				\
		KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_oword_t)),	\
		    ("not power of 2 aligned"));			\
									\
		EFSYS_PROBE5(mem_writeo, unsigned int, (_offset),	\
		    uint32_t, (_eop)->eo_u32[3],			\
		    uint32_t, (_eop)->eo_u32[2],			\
		    uint32_t, (_eop)->eo_u32[1],			\
		    uint32_t, (_eop)->eo_u32[0]);			\
									\
		addr = (void *)((_esmp)->esm_base + (_offset));		\
									\
		*addr++ = (_eop)->eo_u64[0];				\
		*addr   = (_eop)->eo_u64[1];				\
									\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)
#else
#define	EFSYS_MEM_WRITEO(_esmp, _offset, _eop)				\
	do {								\
		uint32_t *addr;						\
									\
		_NOTE(CONSTANTCONDITION)				\
		KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_oword_t)),	\
		    ("not power of 2 aligned"));			\
									\
		EFSYS_PROBE5(mem_writeo, unsigned int, (_offset),	\
		    uint32_t, (_eop)->eo_u32[3],			\
		    uint32_t, (_eop)->eo_u32[2],			\
		    uint32_t, (_eop)->eo_u32[1],			\
		    uint32_t, (_eop)->eo_u32[0]);			\
									\
		addr = (void *)((_esmp)->esm_base + (_offset));		\
									\
		*addr++ = (_eop)->eo_u32[0];				\
		*addr++ = (_eop)->eo_u32[1];				\
		*addr++ = (_eop)->eo_u32[2];				\
		*addr   = (_eop)->eo_u32[3];				\
									\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)
#endif

#define	EFSYS_MEM_ADDR(_esmp)						\
	((_esmp)->esm_addr)

#define	EFSYS_MEM_IS_NULL(_esmp)					\
	((_esmp)->esm_base == NULL)
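
/*
 * A minimal usage sketch (variable names are illustrative): read a dword of
 * shared DMA memory and write it back:
 *
 *	efx_dword_t dword;
 *
 *	EFSYS_MEM_READD(esmp, offset, &dword);
 *	EFSYS_MEM_WRITED(esmp, offset, &dword);
 *
 * Offsets must be naturally aligned for the access size, as the KASSERTs
 * above enforce.
 */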

/* BAR */

#define	SFXGE_LOCK_NAME_MAX	16

typedef struct efsys_bar_s {
	struct mtx		esb_lock;
	char			esb_lock_name[SFXGE_LOCK_NAME_MAX];
	bus_space_tag_t		esb_tag;
	bus_space_handle_t	esb_handle;
	int			esb_rid;
	struct resource		*esb_res;
} efsys_bar_t;

#define	SFXGE_BAR_LOCK_INIT(_esbp, _ifname)				\
	do {								\
		snprintf((_esbp)->esb_lock_name,			\
			 sizeof((_esbp)->esb_lock_name),		\
			 "%s:bar", (_ifname));				\
		mtx_init(&(_esbp)->esb_lock, (_esbp)->esb_lock_name,	\
			 NULL, MTX_DEF);				\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)
#define	SFXGE_BAR_LOCK_DESTROY(_esbp)					\
	mtx_destroy(&(_esbp)->esb_lock)
#define	SFXGE_BAR_LOCK(_esbp)						\
	mtx_lock(&(_esbp)->esb_lock)
#define	SFXGE_BAR_UNLOCK(_esbp)						\
	mtx_unlock(&(_esbp)->esb_lock)

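/*
 * The _lock argument of the dword/oword accessors below lets a caller that
 * already holds the BAR lock (via SFXGE_BAR_LOCK()) skip the per-access
 * locking; the qword accessors always take the lock themselves, since a
 * qword access may be split into two dword transactions.
 */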
#define	EFSYS_BAR_READD(_esbp, _offset, _edp, _lock)			\
	do {								\
		_NOTE(CONSTANTCONDITION)				\
		KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_dword_t)),	\
		    ("not power of 2 aligned"));			\
									\
		_NOTE(CONSTANTCONDITION)				\
		if (_lock)						\
			SFXGE_BAR_LOCK(_esbp);				\
									\
		(_edp)->ed_u32[0] = bus_space_read_stream_4(		\
		    (_esbp)->esb_tag, (_esbp)->esb_handle,		\
		    (_offset));						\
									\
		EFSYS_PROBE2(bar_readd, unsigned int, (_offset),	\
		    uint32_t, (_edp)->ed_u32[0]);			\
									\
		_NOTE(CONSTANTCONDITION)				\
		if (_lock)						\
			SFXGE_BAR_UNLOCK(_esbp);			\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)

#if defined(SFXGE_USE_BUS_SPACE_8)
#define	EFSYS_BAR_READQ(_esbp, _offset, _eqp)				\
	do {								\
		_NOTE(CONSTANTCONDITION)				\
		KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_qword_t)),	\
		    ("not power of 2 aligned"));			\
									\
		SFXGE_BAR_LOCK(_esbp);					\
									\
		(_eqp)->eq_u64[0] = bus_space_read_stream_8(		\
		    (_esbp)->esb_tag, (_esbp)->esb_handle,		\
		    (_offset));						\
									\
		EFSYS_PROBE3(bar_readq, unsigned int, (_offset),	\
		    uint32_t, (_eqp)->eq_u32[1],			\
		    uint32_t, (_eqp)->eq_u32[0]);			\
									\
		SFXGE_BAR_UNLOCK(_esbp);				\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)

#define	EFSYS_BAR_READO(_esbp, _offset, _eop, _lock)			\
	do {								\
		_NOTE(CONSTANTCONDITION)				\
		KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_oword_t)),	\
		    ("not power of 2 aligned"));			\
									\
		_NOTE(CONSTANTCONDITION)				\
		if (_lock)						\
			SFXGE_BAR_LOCK(_esbp);				\
									\
		(_eop)->eo_u64[0] = bus_space_read_stream_8(		\
		    (_esbp)->esb_tag, (_esbp)->esb_handle,		\
		    (_offset));						\
		(_eop)->eo_u64[1] = bus_space_read_stream_8(		\
		    (_esbp)->esb_tag, (_esbp)->esb_handle,		\
		    (_offset) + 8);					\
									\
		EFSYS_PROBE5(bar_reado, unsigned int, (_offset),	\
		    uint32_t, (_eop)->eo_u32[3],			\
		    uint32_t, (_eop)->eo_u32[2],			\
		    uint32_t, (_eop)->eo_u32[1],			\
		    uint32_t, (_eop)->eo_u32[0]);			\
									\
		_NOTE(CONSTANTCONDITION)				\
		if (_lock)						\
			SFXGE_BAR_UNLOCK(_esbp);			\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)

#else
#define	EFSYS_BAR_READQ(_esbp, _offset, _eqp)				\
	do {								\
		_NOTE(CONSTANTCONDITION)				\
		KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_qword_t)),	\
		    ("not power of 2 aligned"));			\
									\
		SFXGE_BAR_LOCK(_esbp);					\
									\
		(_eqp)->eq_u32[0] = bus_space_read_stream_4(		\
		    (_esbp)->esb_tag, (_esbp)->esb_handle,		\
		    (_offset));						\
		(_eqp)->eq_u32[1] = bus_space_read_stream_4(		\
		    (_esbp)->esb_tag, (_esbp)->esb_handle,		\
		    (_offset) + 4);					\
									\
		EFSYS_PROBE3(bar_readq, unsigned int, (_offset),	\
		    uint32_t, (_eqp)->eq_u32[1],			\
		    uint32_t, (_eqp)->eq_u32[0]);			\
									\
		SFXGE_BAR_UNLOCK(_esbp);				\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)

#define	EFSYS_BAR_READO(_esbp, _offset, _eop, _lock)			\
	do {								\
		_NOTE(CONSTANTCONDITION)				\
		KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_oword_t)),	\
		    ("not power of 2 aligned"));			\
									\
		_NOTE(CONSTANTCONDITION)				\
		if (_lock)						\
			SFXGE_BAR_LOCK(_esbp);				\
									\
		(_eop)->eo_u32[0] = bus_space_read_stream_4(		\
		    (_esbp)->esb_tag, (_esbp)->esb_handle,		\
		    (_offset));						\
		(_eop)->eo_u32[1] = bus_space_read_stream_4(		\
		    (_esbp)->esb_tag, (_esbp)->esb_handle,		\
		    (_offset) + 4);					\
		(_eop)->eo_u32[2] = bus_space_read_stream_4(		\
		    (_esbp)->esb_tag, (_esbp)->esb_handle,		\
		    (_offset) + 8);					\
		(_eop)->eo_u32[3] = bus_space_read_stream_4(		\
		    (_esbp)->esb_tag, (_esbp)->esb_handle,		\
		    (_offset) + 12);					\
									\
		EFSYS_PROBE5(bar_reado, unsigned int, (_offset),	\
		    uint32_t, (_eop)->eo_u32[3],			\
		    uint32_t, (_eop)->eo_u32[2],			\
		    uint32_t, (_eop)->eo_u32[1],			\
		    uint32_t, (_eop)->eo_u32[0]);			\
									\
		_NOTE(CONSTANTCONDITION)				\
		if (_lock)						\
			SFXGE_BAR_UNLOCK(_esbp);			\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)
#endif

#define	EFSYS_BAR_WRITED(_esbp, _offset, _edp, _lock)			\
	do {								\
		_NOTE(CONSTANTCONDITION)				\
		KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_dword_t)),	\
		    ("not power of 2 aligned"));			\
									\
		_NOTE(CONSTANTCONDITION)				\
		if (_lock)						\
			SFXGE_BAR_LOCK(_esbp);				\
									\
		EFSYS_PROBE2(bar_writed, unsigned int, (_offset),	\
		    uint32_t, (_edp)->ed_u32[0]);			\
									\
		/*							\
		 * Make sure that previous writes to the dword have	\
		 * completed. This should be cheaper than a barrier	\
		 * just after the write below.				\
		 */							\
		bus_space_barrier((_esbp)->esb_tag, (_esbp)->esb_handle,\
		    (_offset), sizeof (efx_dword_t),			\
		    BUS_SPACE_BARRIER_WRITE);				\
		bus_space_write_stream_4((_esbp)->esb_tag,		\
		    (_esbp)->esb_handle,				\
		    (_offset), (_edp)->ed_u32[0]);			\
									\
		_NOTE(CONSTANTCONDITION)				\
		if (_lock)						\
			SFXGE_BAR_UNLOCK(_esbp);			\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)

#if defined(SFXGE_USE_BUS_SPACE_8)
#define	EFSYS_BAR_WRITEQ(_esbp, _offset, _eqp)				\
	do {								\
		_NOTE(CONSTANTCONDITION)				\
		KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_qword_t)),	\
		    ("not power of 2 aligned"));			\
									\
		SFXGE_BAR_LOCK(_esbp);					\
									\
		EFSYS_PROBE3(bar_writeq, unsigned int, (_offset),	\
		    uint32_t, (_eqp)->eq_u32[1],			\
		    uint32_t, (_eqp)->eq_u32[0]);			\
									\
		/*							\
		 * Make sure that previous writes to the qword have	\
		 * completed. This should be cheaper than a barrier	\
		 * just after the write below.				\
		 */							\
		bus_space_barrier((_esbp)->esb_tag, (_esbp)->esb_handle,\
		    (_offset), sizeof (efx_qword_t),			\
		    BUS_SPACE_BARRIER_WRITE);				\
		bus_space_write_stream_8((_esbp)->esb_tag,		\
		    (_esbp)->esb_handle,				\
		    (_offset), (_eqp)->eq_u64[0]);			\
									\
		SFXGE_BAR_UNLOCK(_esbp);				\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)
#else
#define	EFSYS_BAR_WRITEQ(_esbp, _offset, _eqp)				\
	do {								\
		_NOTE(CONSTANTCONDITION)				\
		KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_qword_t)),	\
		    ("not power of 2 aligned"));			\
									\
		SFXGE_BAR_LOCK(_esbp);					\
									\
		EFSYS_PROBE3(bar_writeq, unsigned int, (_offset),	\
		    uint32_t, (_eqp)->eq_u32[1],			\
		    uint32_t, (_eqp)->eq_u32[0]);			\
									\
		/*							\
		 * Make sure that previous writes to the qword have	\
		 * completed. This should be cheaper than a barrier	\
		 * just after the last write below.			\
		 */							\
		bus_space_barrier((_esbp)->esb_tag, (_esbp)->esb_handle,\
		    (_offset), sizeof (efx_qword_t),			\
		    BUS_SPACE_BARRIER_WRITE);				\
		bus_space_write_stream_4((_esbp)->esb_tag,		\
		    (_esbp)->esb_handle,				\
		    (_offset), (_eqp)->eq_u32[0]);			\
		/*							\
		 * The last dword must be written last, so barrier	\
		 * the entire qword to make sure that neither the	\
		 * above nor the below writes are reordered.		\
		 */							\
		bus_space_barrier((_esbp)->esb_tag, (_esbp)->esb_handle,\
		    (_offset), sizeof (efx_qword_t),			\
		    BUS_SPACE_BARRIER_WRITE);				\
		bus_space_write_stream_4((_esbp)->esb_tag,		\
		    (_esbp)->esb_handle,				\
		    (_offset) + 4, (_eqp)->eq_u32[1]);			\
									\
		SFXGE_BAR_UNLOCK(_esbp);				\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)
#endif

/*
 * Guarantees 64bit aligned 64bit writes to write combined BAR mapping
 * (required by PIO hardware)
 */
#define	EFSYS_BAR_WC_WRITEQ(_esbp, _offset, _eqp)			\
	do {								\
		_NOTE(CONSTANTCONDITION)				\
		KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_qword_t)),	\
		    ("not power of 2 aligned"));			\
									\
		(void) (_esbp);						\
									\
		/* FIXME: Perform a 64-bit write */			\
		KASSERT(0, ("not implemented"));			\
									\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)

#if defined(SFXGE_USE_BUS_SPACE_8)
#define	EFSYS_BAR_WRITEO(_esbp, _offset, _eop, _lock)			\
	do {								\
		_NOTE(CONSTANTCONDITION)				\
		KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_oword_t)),	\
		    ("not power of 2 aligned"));			\
									\
		_NOTE(CONSTANTCONDITION)				\
		if (_lock)						\
			SFXGE_BAR_LOCK(_esbp);				\
									\
		EFSYS_PROBE5(bar_writeo, unsigned int, (_offset),	\
		    uint32_t, (_eop)->eo_u32[3],			\
		    uint32_t, (_eop)->eo_u32[2],			\
		    uint32_t, (_eop)->eo_u32[1],			\
		    uint32_t, (_eop)->eo_u32[0]);			\
									\
		/*							\
		 * Make sure that previous writes to the oword have	\
		 * completed. This should be cheaper than a barrier	\
		 * just after the last write below.			\
		 */							\
		bus_space_barrier((_esbp)->esb_tag, (_esbp)->esb_handle,\
		    (_offset), sizeof (efx_oword_t),			\
		    BUS_SPACE_BARRIER_WRITE);				\
		bus_space_write_stream_8((_esbp)->esb_tag,		\
		    (_esbp)->esb_handle,				\
		    (_offset), (_eop)->eo_u64[0]);			\
		/*							\
		 * The last qword must be written last, so barrier	\
		 * the entire oword to make sure that neither the	\
		 * above nor the below writes are reordered.		\
		 */							\
		bus_space_barrier((_esbp)->esb_tag, (_esbp)->esb_handle,\
		    (_offset), sizeof (efx_oword_t),			\
		    BUS_SPACE_BARRIER_WRITE);				\
		bus_space_write_stream_8((_esbp)->esb_tag,		\
		    (_esbp)->esb_handle,				\
		    (_offset) + 8, (_eop)->eo_u64[1]);			\
									\
		_NOTE(CONSTANTCONDITION)				\
		if (_lock)						\
			SFXGE_BAR_UNLOCK(_esbp);			\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)

#else
#define	EFSYS_BAR_WRITEO(_esbp, _offset, _eop, _lock)			\
	do {								\
		_NOTE(CONSTANTCONDITION)				\
		KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_oword_t)),	\
		    ("not power of 2 aligned"));			\
									\
		_NOTE(CONSTANTCONDITION)				\
		if (_lock)						\
			SFXGE_BAR_LOCK(_esbp);				\
									\
		EFSYS_PROBE5(bar_writeo, unsigned int, (_offset),	\
		    uint32_t, (_eop)->eo_u32[3],			\
		    uint32_t, (_eop)->eo_u32[2],			\
		    uint32_t, (_eop)->eo_u32[1],			\
		    uint32_t, (_eop)->eo_u32[0]);			\
									\
		/*							\
		 * Make sure that previous writes to the oword have	\
		 * completed. This should be cheaper than a barrier	\
		 * just after the last write below.			\
		 */							\
		bus_space_barrier((_esbp)->esb_tag, (_esbp)->esb_handle,\
		    (_offset), sizeof (efx_oword_t),			\
		    BUS_SPACE_BARRIER_WRITE);				\
		bus_space_write_stream_4((_esbp)->esb_tag,		\
		    (_esbp)->esb_handle,				\
		    (_offset), (_eop)->eo_u32[0]);			\
		bus_space_write_stream_4((_esbp)->esb_tag,		\
		    (_esbp)->esb_handle,				\
		    (_offset) + 4, (_eop)->eo_u32[1]);			\
		bus_space_write_stream_4((_esbp)->esb_tag,		\
		    (_esbp)->esb_handle,				\
		    (_offset) + 8, (_eop)->eo_u32[2]);			\
		/*							\
		 * The last dword must be written last, so barrier	\
		 * the entire oword to make sure that neither the	\
		 * above nor the below writes are reordered.		\
		 */							\
		bus_space_barrier((_esbp)->esb_tag, (_esbp)->esb_handle,\
		    (_offset), sizeof (efx_oword_t),			\
		    BUS_SPACE_BARRIER_WRITE);				\
		bus_space_write_stream_4((_esbp)->esb_tag,		\
		    (_esbp)->esb_handle,				\
		    (_offset) + 12, (_eop)->eo_u32[3]);			\
									\
		_NOTE(CONSTANTCONDITION)				\
		if (_lock)						\
			SFXGE_BAR_UNLOCK(_esbp);			\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)
#endif

/* Use the standard octo-word write for doorbell writes */
#define	EFSYS_BAR_DOORBELL_WRITEO(_esbp, _offset, _eop)			\
	do {								\
		EFSYS_BAR_WRITEO((_esbp), (_offset), (_eop), B_FALSE);	\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)

/* SPIN */

#define	EFSYS_SPIN(_us)							\
	do {								\
		DELAY(_us);						\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)

#define	EFSYS_SLEEP	EFSYS_SPIN

/* BARRIERS */

#define	EFSYS_MEM_READ_BARRIER()	rmb()
#define	EFSYS_PIO_WRITE_BARRIER()

/* DMA SYNC */
#define	EFSYS_DMA_SYNC_FOR_KERNEL(_esmp, _offset, _size)		\
	do {								\
		bus_dmamap_sync((_esmp)->esm_tag,			\
		    (_esmp)->esm_map,					\
		    BUS_DMASYNC_POSTREAD);				\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)

#define	EFSYS_DMA_SYNC_FOR_DEVICE(_esmp, _offset, _size)		\
	do {								\
		bus_dmamap_sync((_esmp)->esm_tag,			\
		    (_esmp)->esm_map,					\
		    BUS_DMASYNC_PREWRITE);				\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)
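
/*
 * Callers are expected to sync with EFSYS_DMA_SYNC_FOR_KERNEL() before
 * reading DMA memory the NIC has written and with EFSYS_DMA_SYNC_FOR_DEVICE()
 * after writing memory the NIC will read.  The _offset and _size arguments
 * are accepted for portability only; the whole map is synced here.
 */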

/* TIMESTAMP */

typedef	clock_t	efsys_timestamp_t;

#define	EFSYS_TIMESTAMP(_usp)						\
	do {								\
		clock_t now;						\
									\
		now = ticks;						\
		*(_usp) = now * hz / 1000000;				\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)

/* KMEM */

#define	EFSYS_KMEM_ALLOC(_esip, _size, _p)				\
	do {								\
		(_esip) = (_esip);					\
		/*							\
		 * The macro is used in non-sleepable contexts, for	\
		 * example, holding a mutex.				\
		 */							\
		(_p) = malloc((_size), M_SFXGE, M_NOWAIT|M_ZERO);	\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)

#define	EFSYS_KMEM_FREE(_esip, _size, _p)				\
	do {								\
		(void) (_esip);						\
		(void) (_size);						\
		free((_p), M_SFXGE);					\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)
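
/*
 * A minimal usage sketch (names are illustrative):
 *
 *	void *datap;
 *
 *	EFSYS_KMEM_ALLOC(esip, len, datap);
 *	if (datap == NULL)
 *		return (ENOMEM);
 *	...
 *	EFSYS_KMEM_FREE(esip, len, datap);
 *
 * Allocation uses M_NOWAIT, so a NULL result must always be handled.
 */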

/* LOCK */

typedef struct efsys_lock_s {
	struct mtx	lock;
	char		lock_name[SFXGE_LOCK_NAME_MAX];
} efsys_lock_t;

#define	SFXGE_EFSYS_LOCK_INIT(_eslp, _ifname, _label)			\
	do {								\
		efsys_lock_t *__eslp = (_eslp);				\
									\
		snprintf((__eslp)->lock_name,				\
			 sizeof((__eslp)->lock_name),			\
			 "%s:%s", (_ifname), (_label));			\
		mtx_init(&(__eslp)->lock, (__eslp)->lock_name,		\
			 NULL, MTX_DEF);				\
	} while (B_FALSE)
#define	SFXGE_EFSYS_LOCK_DESTROY(_eslp)					\
	mtx_destroy(&(_eslp)->lock)
#define	SFXGE_EFSYS_LOCK(_eslp)						\
	mtx_lock(&(_eslp)->lock)
#define	SFXGE_EFSYS_UNLOCK(_eslp)					\
	mtx_unlock(&(_eslp)->lock)
#define	SFXGE_EFSYS_LOCK_ASSERT_OWNED(_eslp)				\
	mtx_assert(&(_eslp)->lock, MA_OWNED)

#define	EFSYS_LOCK_MAGIC	0x000010c4

#define	EFSYS_LOCK(_lockp, _state)					\
	do {								\
		SFXGE_EFSYS_LOCK(_lockp);				\
		(_state) = EFSYS_LOCK_MAGIC;				\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)

#define	EFSYS_UNLOCK(_lockp, _state)					\
	do {								\
		if ((_state) != EFSYS_LOCK_MAGIC)			\
			KASSERT(B_FALSE, ("not locked"));		\
		SFXGE_EFSYS_UNLOCK(_lockp);				\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)
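
/*
 * A minimal usage sketch: the _state token stands in for the Solaris-style
 * saved interrupt level; here it only records that the lock was taken
 * (declaring it as a plain integer below is illustrative):
 *
 *	int state;
 *
 *	EFSYS_LOCK(lockp, state);
 *	...critical section...
 *	EFSYS_UNLOCK(lockp, state);
 */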

/* PREEMPT */

#define	EFSYS_PREEMPT_DISABLE(_state)					\
	do {								\
		(_state) = (_state);					\
		critical_enter();					\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)

#define	EFSYS_PREEMPT_ENABLE(_state)					\
	do {								\
		(_state) = (_state);					\
		critical_exit();					\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)

/* STAT */

typedef uint64_t		efsys_stat_t;

#define	EFSYS_STAT_INCR(_knp, _delta)					\
	do {								\
		*(_knp) += (_delta);					\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)

#define	EFSYS_STAT_DECR(_knp, _delta)					\
	do {								\
		*(_knp) -= (_delta);					\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)

#define	EFSYS_STAT_SET(_knp, _val)					\
	do {								\
		*(_knp) = (_val);					\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)

#define	EFSYS_STAT_SET_QWORD(_knp, _valp)				\
	do {								\
		*(_knp) = le64toh((_valp)->eq_u64[0]);			\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)

#define	EFSYS_STAT_SET_DWORD(_knp, _valp)				\
	do {								\
		*(_knp) = le32toh((_valp)->ed_u32[0]);			\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)

#define	EFSYS_STAT_INCR_QWORD(_knp, _valp)				\
	do {								\
		*(_knp) += le64toh((_valp)->eq_u64[0]);			\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)

#define	EFSYS_STAT_SUBR_QWORD(_knp, _valp)				\
	do {								\
		*(_knp) -= le64toh((_valp)->eq_u64[0]);			\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)
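
/*
 * efsys_stat_t counters are plain 64-bit values; the *_QWORD and *_DWORD
 * forms additionally convert from the little-endian layout of the statistics
 * buffers filled in by the NIC, e.g. (illustrative names)
 * EFSYS_STAT_SET_QWORD(&stats[id], &qword).
 */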

/* ERR */

extern void	sfxge_err(efsys_identifier_t *, unsigned int,
		    uint32_t, uint32_t);

#if EFSYS_OPT_DECODE_INTR_FATAL
#define	EFSYS_ERR(_esip, _code, _dword0, _dword1)			\
	do {								\
		sfxge_err((_esip), (_code), (_dword0), (_dword1));	\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)
#endif

/* ASSERT */

#define	EFSYS_ASSERT(_exp) do {						\
	if (!(_exp))							\
		panic("%s", #_exp);					\
	} while (0)

#define	EFSYS_ASSERT3(_x, _op, _y, _t) do {				\
	const _t __x = (_t)(_x);					\
	const _t __y = (_t)(_y);					\
	if (!(__x _op __y))						\
		panic("assertion failed at %s:%u", __FILE__, __LINE__);	\
	} while(0)

#define	EFSYS_ASSERT3U(_x, _op, _y)	EFSYS_ASSERT3(_x, _op, _y, uint64_t)
#define	EFSYS_ASSERT3S(_x, _op, _y)	EFSYS_ASSERT3(_x, _op, _y, int64_t)
#define	EFSYS_ASSERT3P(_x, _op, _y)	EFSYS_ASSERT3(_x, _op, _y, uintptr_t)
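
/*
 * For example, EFSYS_ASSERT3U(index, <, limit) (illustrative arguments)
 * panics with the file and line of the failed comparison; both operands are
 * first converted to the named type, so signed/unsigned mismatches are
 * resolved before comparing.
 */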

/* ROTATE */

#define	EFSYS_HAS_ROTL_DWORD 0

#ifdef	__cplusplus
}
#endif

#endif	/* _SYS_EFSYS_H */