/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2010-2016 Solarflare Communications Inc.
 * All rights reserved.
 *
 * This software was developed in part by Philip Paeps under contract for
 * Solarflare Communications, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * The views and conclusions contained in the software and documentation are
 * those of the authors and should not be interpreted as representing official
 * policies, either expressed or implied, of the FreeBSD Project.
 *
 * $FreeBSD$
 */

#ifndef	_SYS_EFSYS_H
#define	_SYS_EFSYS_H

#ifdef	__cplusplus
extern "C" {
#endif

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>
#include <sys/sdt.h>
#include <sys/systm.h>

#include <machine/bus.h>
#include <machine/endian.h>

#define	EFSYS_HAS_UINT64 1
#if defined(__x86_64__)
#define	EFSYS_USE_UINT64 1
#else
#define	EFSYS_USE_UINT64 0
#endif
#define	EFSYS_HAS_SSE2_M128 0
#if _BYTE_ORDER == _BIG_ENDIAN
#define	EFSYS_IS_BIG_ENDIAN 1
#define	EFSYS_IS_LITTLE_ENDIAN 0
#elif _BYTE_ORDER == _LITTLE_ENDIAN
#define	EFSYS_IS_BIG_ENDIAN 0
#define	EFSYS_IS_LITTLE_ENDIAN 1
#endif
#include "efx_types.h"

/* Common code requires this */
#if __FreeBSD_version < 800068
#define	memmove(d, s, l) bcopy(s, d, l)
#endif

/* FreeBSD equivalents of Solaris things */
#ifndef _NOTE
#define	_NOTE(s)
#endif

#ifndef B_FALSE
#define	B_FALSE	FALSE
#endif
#ifndef B_TRUE
#define	B_TRUE	TRUE
#endif

#ifndef IS_P2ALIGNED
#define	IS_P2ALIGNED(v, a)	((((uintptr_t)(v)) & ((uintptr_t)(a) - 1)) == 0)
#endif

#ifndef P2ROUNDUP
#define	P2ROUNDUP(x, align)	(-(-(x) & -(align)))
#endif

#ifndef P2ALIGN
#define	P2ALIGN(_x, _a)		((_x) & -(_a))
#endif

#ifndef ISP2
#define	ISP2(x)			(((x) & ((x) - 1)) == 0)
#endif
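
/*
 * Usage sketch (illustrative only, never compiled): how the power-of-two
 * helpers above evaluate for a few sample values.  The example function
 * name is hypothetical.
 */
#if 0
static __inline void
efsys_p2_example(void)
{
	KASSERT(P2ROUNDUP(13, 8) == 16, ("13 rounded up to an 8 byte boundary"));
	KASSERT(P2ALIGN(13, 8) == 8, ("13 rounded down to an 8 byte boundary"));
	KASSERT(ISP2(16), ("16 is a power of two"));
	KASSERT(IS_P2ALIGNED(20, 4), ("20 is 4 byte aligned"));
}
#endif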

#if defined(__x86_64__) && __FreeBSD_version >= 1000000

#define	SFXGE_USE_BUS_SPACE_8		1

#if !defined(bus_space_read_stream_8)

#define	bus_space_read_stream_8(t, h, o)				\
	bus_space_read_8((t), (h), (o))

#define	bus_space_write_stream_8(t, h, o, v)				\
	bus_space_write_8((t), (h), (o), (v))

#endif

#endif

#define	ENOTACTIVE EINVAL

/* Memory type to use on FreeBSD */
MALLOC_DECLARE(M_SFXGE);

/* Machine-dependent prefetch wrappers */
#if defined(__i386__) || defined(__amd64__)
static __inline void
prefetch_read_many(void *addr)
{

	__asm__(
	    "prefetcht0 (%0)"
	    :
	    : "r" (addr));
}

static __inline void
prefetch_read_once(void *addr)
{

	__asm__(
	    "prefetchnta (%0)"
	    :
	    : "r" (addr));
}
#elif defined(__sparc64__)
static __inline void
prefetch_read_many(void *addr)
{

	__asm__(
	    "prefetch [%0], 0"
	    :
	    : "r" (addr));
}

static __inline void
prefetch_read_once(void *addr)
{

	__asm__(
	    "prefetch [%0], 1"
	    :
	    : "r" (addr));
}
#else
static __inline void
prefetch_read_many(void *addr)
{

}

static __inline void
prefetch_read_once(void *addr)
{

}
#endif

#if defined(__i386__) || defined(__amd64__)
#include <vm/vm.h>
#include <vm/pmap.h>
#endif
static __inline void
sfxge_map_mbuf_fast(bus_dma_tag_t tag, bus_dmamap_t map,
		    struct mbuf *m, bus_dma_segment_t *seg)
{
#if defined(__i386__) || defined(__amd64__)
	seg->ds_addr = pmap_kextract(mtod(m, vm_offset_t));
	seg->ds_len = m->m_len;
#else
	int nsegstmp;

	bus_dmamap_load_mbuf_sg(tag, map, m, seg, &nsegstmp, 0);
#endif
}
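
/*
 * Usage sketch (illustrative only, never compiled): mapping a single-segment
 * mbuf for DMA with the fast path above.  The tag, map and mbuf would come
 * from the caller's transmit path; the example function name is hypothetical.
 */
#if 0
static __inline bus_addr_t
sfxge_map_mbuf_fast_example(bus_dma_tag_t tag, bus_dmamap_t map,
			    struct mbuf *m)
{
	bus_dma_segment_t seg;

	sfxge_map_mbuf_fast(tag, map, m, &seg);
	return (seg.ds_addr);
}
#endif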

/* Modifiers used for Windows builds */
#define	__in
#define	__in_opt
#define	__in_ecount(_n)
#define	__in_ecount_opt(_n)
#define	__in_bcount(_n)
#define	__in_bcount_opt(_n)

#define	__out
#define	__out_opt
#define	__out_ecount(_n)
#define	__out_ecount_opt(_n)
#define	__out_bcount(_n)
#define	__out_bcount_opt(_n)
#define	__out_bcount_part(_n, _l)
#define	__out_bcount_part_opt(_n, _l)

#define	__deref_out

#define	__inout
#define	__inout_opt
#define	__inout_ecount(_n)
#define	__inout_ecount_opt(_n)
#define	__inout_bcount(_n)
#define	__inout_bcount_opt(_n)
#define	__inout_bcount_full_opt(_n)

#define	__deref_out_bcount_opt(n)

#define	__checkReturn
#define	__success(_x)

#define	__drv_when(_p, _c)

/* Code inclusion options */


#define	EFSYS_OPT_NAMES 1

#define	EFSYS_OPT_SIENA 1
#define	EFSYS_OPT_HUNTINGTON 1
#define	EFSYS_OPT_MEDFORD 1
#ifdef DEBUG
#define	EFSYS_OPT_CHECK_REG 1
#else
#define	EFSYS_OPT_CHECK_REG 0
#endif

#define	EFSYS_OPT_MCDI 1
#define	EFSYS_OPT_MCDI_LOGGING 0
#define	EFSYS_OPT_MCDI_PROXY_AUTH 0

#define	EFSYS_OPT_MAC_STATS 1

#define	EFSYS_OPT_LOOPBACK 0

#define	EFSYS_OPT_MON_MCDI 0
#define	EFSYS_OPT_MON_STATS 0

#define	EFSYS_OPT_PHY_STATS 1
#define	EFSYS_OPT_BIST 1
#define	EFSYS_OPT_PHY_LED_CONTROL 1
#define	EFSYS_OPT_PHY_FLAGS 0

#define	EFSYS_OPT_VPD 1
#define	EFSYS_OPT_NVRAM 1
#define	EFSYS_OPT_BOOTCFG 0

#define	EFSYS_OPT_DIAG 0
#define	EFSYS_OPT_RX_SCALE 1
#define	EFSYS_OPT_QSTATS 1
#define	EFSYS_OPT_FILTER 1
#define	EFSYS_OPT_RX_SCATTER 0

#define	EFSYS_OPT_EV_PREFETCH 0

#define	EFSYS_OPT_DECODE_INTR_FATAL 1

#define	EFSYS_OPT_LICENSING 0

#define	EFSYS_OPT_ALLOW_UNCONFIGURED_NIC 0

#define	EFSYS_OPT_RX_PACKED_STREAM 0

/* ID */

typedef struct __efsys_identifier_s	efsys_identifier_t;

/* PROBE */

#ifndef DTRACE_PROBE

#define	EFSYS_PROBE(_name)

#define	EFSYS_PROBE1(_name, _type1, _arg1)

#define	EFSYS_PROBE2(_name, _type1, _arg1, _type2, _arg2)

#define	EFSYS_PROBE3(_name, _type1, _arg1, _type2, _arg2,		\
	    _type3, _arg3)

#define	EFSYS_PROBE4(_name, _type1, _arg1, _type2, _arg2,		\
	    _type3, _arg3, _type4, _arg4)

#define	EFSYS_PROBE5(_name, _type1, _arg1, _type2, _arg2,		\
	    _type3, _arg3, _type4, _arg4, _type5, _arg5)

#define	EFSYS_PROBE6(_name, _type1, _arg1, _type2, _arg2,		\
	    _type3, _arg3, _type4, _arg4, _type5, _arg5,		\
	    _type6, _arg6)

#define	EFSYS_PROBE7(_name, _type1, _arg1, _type2, _arg2,		\
	    _type3, _arg3, _type4, _arg4, _type5, _arg5,		\
	    _type6, _arg6, _type7, _arg7)

#else /* DTRACE_PROBE */

#define	EFSYS_PROBE(_name)						\
	DTRACE_PROBE(_name)

#define	EFSYS_PROBE1(_name, _type1, _arg1)				\
	DTRACE_PROBE1(_name, _type1, _arg1)

#define	EFSYS_PROBE2(_name, _type1, _arg1, _type2, _arg2)		\
	DTRACE_PROBE2(_name, _type1, _arg1, _type2, _arg2)

#define	EFSYS_PROBE3(_name, _type1, _arg1, _type2, _arg2,		\
	    _type3, _arg3)						\
	DTRACE_PROBE3(_name, _type1, _arg1, _type2, _arg2,		\
	    _type3, _arg3)

#define	EFSYS_PROBE4(_name, _type1, _arg1, _type2, _arg2,		\
	    _type3, _arg3, _type4, _arg4)				\
	DTRACE_PROBE4(_name, _type1, _arg1, _type2, _arg2,		\
	    _type3, _arg3, _type4, _arg4)

#ifdef DTRACE_PROBE5
#define	EFSYS_PROBE5(_name, _type1, _arg1, _type2, _arg2,		\
	    _type3, _arg3, _type4, _arg4, _type5, _arg5)		\
	DTRACE_PROBE5(_name, _type1, _arg1, _type2, _arg2,		\
	    _type3, _arg3, _type4, _arg4, _type5, _arg5)
#else
#define	EFSYS_PROBE5(_name, _type1, _arg1, _type2, _arg2,		\
	    _type3, _arg3, _type4, _arg4, _type5, _arg5)		\
	DTRACE_PROBE4(_name, _type1, _arg1, _type2, _arg2,		\
	    _type3, _arg3, _type4, _arg4)
#endif

#ifdef DTRACE_PROBE6
#define	EFSYS_PROBE6(_name, _type1, _arg1, _type2, _arg2,		\
	    _type3, _arg3, _type4, _arg4, _type5, _arg5,		\
	    _type6, _arg6)						\
	DTRACE_PROBE6(_name, _type1, _arg1, _type2, _arg2,		\
	    _type3, _arg3, _type4, _arg4, _type5, _arg5,		\
	    _type6, _arg6)
#else
#define	EFSYS_PROBE6(_name, _type1, _arg1, _type2, _arg2,		\
	    _type3, _arg3, _type4, _arg4, _type5, _arg5,		\
	    _type6, _arg6)						\
	EFSYS_PROBE5(_name, _type1, _arg1, _type2, _arg2,		\
	    _type3, _arg3, _type4, _arg4, _type5, _arg5)
#endif

#ifdef DTRACE_PROBE7
#define	EFSYS_PROBE7(_name, _type1, _arg1, _type2, _arg2,		\
	    _type3, _arg3, _type4, _arg4, _type5, _arg5,		\
	    _type6, _arg6, _type7, _arg7)				\
	DTRACE_PROBE7(_name, _type1, _arg1, _type2, _arg2,		\
	    _type3, _arg3, _type4, _arg4, _type5, _arg5,		\
	    _type6, _arg6, _type7, _arg7)
#else
#define	EFSYS_PROBE7(_name, _type1, _arg1, _type2, _arg2,		\
	    _type3, _arg3, _type4, _arg4, _type5, _arg5,		\
	    _type6, _arg6, _type7, _arg7)				\
	EFSYS_PROBE6(_name, _type1, _arg1, _type2, _arg2,		\
	    _type3, _arg3, _type4, _arg4, _type5, _arg5,		\
	    _type6, _arg6)
#endif

#endif /* DTRACE_PROBE */
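
/*
 * Usage sketch (illustrative only, never compiled): a two-argument probe
 * point.  When DTRACE_PROBE is available this expands to DTRACE_PROBE2 and
 * shows up as an SDT probe; otherwise it compiles away to nothing.  The
 * probe name, function name and arguments are hypothetical.
 */
#if 0
static __inline void
efsys_probe_example(unsigned int index, uint32_t value)
{
	EFSYS_PROBE2(example_probe, unsigned int, index, uint32_t, value);
}
#endif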

/* DMA */

typedef uint64_t		efsys_dma_addr_t;

typedef struct efsys_mem_s {
	bus_dma_tag_t		esm_tag;
	bus_dmamap_t		esm_map;
	caddr_t			esm_base;
	efsys_dma_addr_t	esm_addr;
} efsys_mem_t;

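/*
 * Usage sketch (illustrative only, never compiled): the common code accesses
 * descriptor rings through an efsys_mem_t using the macros below; esm_base
 * is the kernel virtual address and esm_addr the DMA address handed to the
 * NIC.  The function and variable names are hypothetical.
 */
#if 0
static __inline void
efsys_mem_example(efsys_mem_t *esmp)
{
	efx_qword_t qword;

	qword.eq_u64[0] = 0;			/* value to hand to the NIC */
	EFSYS_MEM_WRITEQ(esmp, 0, &qword);	/* first qword of the region */
	EFSYS_MEM_READQ(esmp, 0, &qword);	/* read it back */
}
#endif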

#define	EFSYS_MEM_ZERO(_esmp, _size)					\
	do {								\
		(void) memset((_esmp)->esm_base, 0, (_size));		\
									\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)

#define	EFSYS_MEM_READD(_esmp, _offset, _edp)				\
	do {								\
		uint32_t *addr;						\
									\
		_NOTE(CONSTANTCONDITION)				\
		KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_dword_t)),	\
		    ("not power of 2 aligned"));			\
									\
		addr = (void *)((_esmp)->esm_base + (_offset));		\
									\
		(_edp)->ed_u32[0] = *addr;				\
									\
		EFSYS_PROBE2(mem_readd, unsigned int, (_offset),	\
		    uint32_t, (_edp)->ed_u32[0]);			\
									\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)

#if defined(__x86_64__)
#define	EFSYS_MEM_READQ(_esmp, _offset, _eqp)				\
	do {								\
		uint64_t *addr;						\
									\
		_NOTE(CONSTANTCONDITION)				\
		KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_qword_t)),	\
		    ("not power of 2 aligned"));			\
									\
		addr = (void *)((_esmp)->esm_base + (_offset));		\
									\
		(_eqp)->eq_u64[0] = *addr;				\
									\
		EFSYS_PROBE3(mem_readq, unsigned int, (_offset),	\
		    uint32_t, (_eqp)->eq_u32[1],			\
		    uint32_t, (_eqp)->eq_u32[0]);			\
									\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)
#else
#define	EFSYS_MEM_READQ(_esmp, _offset, _eqp)				\
	do {								\
		uint32_t *addr;						\
									\
		_NOTE(CONSTANTCONDITION)				\
		KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_qword_t)),	\
		    ("not power of 2 aligned"));			\
									\
		addr = (void *)((_esmp)->esm_base + (_offset));		\
									\
		(_eqp)->eq_u32[0] = *addr++;				\
		(_eqp)->eq_u32[1] = *addr;				\
									\
		EFSYS_PROBE3(mem_readq, unsigned int, (_offset),	\
		    uint32_t, (_eqp)->eq_u32[1],			\
		    uint32_t, (_eqp)->eq_u32[0]);			\
									\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)
#endif

#if defined(__x86_64__)
#define	EFSYS_MEM_READO(_esmp, _offset, _eop)				\
	do {								\
		uint64_t *addr;						\
									\
		_NOTE(CONSTANTCONDITION)				\
		KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_oword_t)),	\
		    ("not power of 2 aligned"));			\
									\
		addr = (void *)((_esmp)->esm_base + (_offset));		\
									\
		(_eop)->eo_u64[0] = *addr++;				\
		(_eop)->eo_u64[1] = *addr;				\
									\
		EFSYS_PROBE5(mem_reado, unsigned int, (_offset),	\
		    uint32_t, (_eop)->eo_u32[3],			\
		    uint32_t, (_eop)->eo_u32[2],			\
		    uint32_t, (_eop)->eo_u32[1],			\
		    uint32_t, (_eop)->eo_u32[0]);			\
									\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)
#else
#define	EFSYS_MEM_READO(_esmp, _offset, _eop)				\
	do {								\
		uint32_t *addr;						\
									\
		_NOTE(CONSTANTCONDITION)				\
		KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_oword_t)),	\
		    ("not power of 2 aligned"));			\
									\
		addr = (void *)((_esmp)->esm_base + (_offset));		\
									\
		(_eop)->eo_u32[0] = *addr++;				\
		(_eop)->eo_u32[1] = *addr++;				\
		(_eop)->eo_u32[2] = *addr++;				\
		(_eop)->eo_u32[3] = *addr;				\
									\
		EFSYS_PROBE5(mem_reado, unsigned int, (_offset),	\
		    uint32_t, (_eop)->eo_u32[3],			\
		    uint32_t, (_eop)->eo_u32[2],			\
		    uint32_t, (_eop)->eo_u32[1],			\
		    uint32_t, (_eop)->eo_u32[0]);			\
									\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)
#endif

#define	EFSYS_MEM_WRITED(_esmp, _offset, _edp)				\
	do {								\
		uint32_t *addr;						\
									\
		_NOTE(CONSTANTCONDITION)				\
		KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_dword_t)),	\
		    ("not power of 2 aligned"));			\
									\
		EFSYS_PROBE2(mem_writed, unsigned int, (_offset),	\
		    uint32_t, (_edp)->ed_u32[0]);			\
									\
		addr = (void *)((_esmp)->esm_base + (_offset));		\
									\
		*addr = (_edp)->ed_u32[0];				\
									\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)

#if defined(__x86_64__)
#define	EFSYS_MEM_WRITEQ(_esmp, _offset, _eqp)				\
	do {								\
		uint64_t *addr;						\
									\
		_NOTE(CONSTANTCONDITION)				\
		KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_qword_t)),	\
		    ("not power of 2 aligned"));			\
									\
		EFSYS_PROBE3(mem_writeq, unsigned int, (_offset),	\
		    uint32_t, (_eqp)->eq_u32[1],			\
		    uint32_t, (_eqp)->eq_u32[0]);			\
									\
		addr = (void *)((_esmp)->esm_base + (_offset));		\
									\
		*addr   = (_eqp)->eq_u64[0];				\
									\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)

#else
#define	EFSYS_MEM_WRITEQ(_esmp, _offset, _eqp)				\
	do {								\
		uint32_t *addr;						\
									\
		_NOTE(CONSTANTCONDITION)				\
		KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_qword_t)),	\
		    ("not power of 2 aligned"));			\
									\
		EFSYS_PROBE3(mem_writeq, unsigned int, (_offset),	\
		    uint32_t, (_eqp)->eq_u32[1],			\
		    uint32_t, (_eqp)->eq_u32[0]);			\
									\
		addr = (void *)((_esmp)->esm_base + (_offset));		\
									\
		*addr++ = (_eqp)->eq_u32[0];				\
		*addr   = (_eqp)->eq_u32[1];				\
									\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)
#endif

#if defined(__x86_64__)
#define	EFSYS_MEM_WRITEO(_esmp, _offset, _eop)				\
	do {								\
		uint64_t *addr;						\
									\
		_NOTE(CONSTANTCONDITION)				\
		KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_oword_t)),	\
		    ("not power of 2 aligned"));			\
									\
		EFSYS_PROBE5(mem_writeo, unsigned int, (_offset),	\
		    uint32_t, (_eop)->eo_u32[3],			\
		    uint32_t, (_eop)->eo_u32[2],			\
		    uint32_t, (_eop)->eo_u32[1],			\
		    uint32_t, (_eop)->eo_u32[0]);			\
									\
		addr = (void *)((_esmp)->esm_base + (_offset));		\
									\
		*addr++ = (_eop)->eo_u64[0];				\
		*addr   = (_eop)->eo_u64[1];				\
									\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)
#else
#define	EFSYS_MEM_WRITEO(_esmp, _offset, _eop)				\
	do {								\
		uint32_t *addr;						\
									\
		_NOTE(CONSTANTCONDITION)				\
		KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_oword_t)),	\
		    ("not power of 2 aligned"));			\
									\
		EFSYS_PROBE5(mem_writeo, unsigned int, (_offset),	\
		    uint32_t, (_eop)->eo_u32[3],			\
		    uint32_t, (_eop)->eo_u32[2],			\
		    uint32_t, (_eop)->eo_u32[1],			\
		    uint32_t, (_eop)->eo_u32[0]);			\
									\
		addr = (void *)((_esmp)->esm_base + (_offset));		\
									\
		*addr++ = (_eop)->eo_u32[0];				\
		*addr++ = (_eop)->eo_u32[1];				\
		*addr++ = (_eop)->eo_u32[2];				\
		*addr   = (_eop)->eo_u32[3];				\
									\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)
#endif

#define	EFSYS_MEM_ADDR(_esmp)						\
	((_esmp)->esm_addr)

#define	EFSYS_MEM_IS_NULL(_esmp)					\
	((_esmp)->esm_base == NULL)

/* BAR */

#define	SFXGE_LOCK_NAME_MAX	16

typedef struct efsys_bar_s {
	struct mtx		esb_lock;
	char			esb_lock_name[SFXGE_LOCK_NAME_MAX];
	bus_space_tag_t		esb_tag;
	bus_space_handle_t	esb_handle;
	int			esb_rid;
	struct resource		*esb_res;
} efsys_bar_t;

#define	SFXGE_BAR_LOCK_INIT(_esbp, _ifname)				\
	do {								\
		snprintf((_esbp)->esb_lock_name,			\
			 sizeof((_esbp)->esb_lock_name),		\
			 "%s:bar", (_ifname));				\
		mtx_init(&(_esbp)->esb_lock, (_esbp)->esb_lock_name,	\
			 NULL, MTX_DEF);				\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)
#define	SFXGE_BAR_LOCK_DESTROY(_esbp)					\
	mtx_destroy(&(_esbp)->esb_lock)
#define	SFXGE_BAR_LOCK(_esbp)						\
	mtx_lock(&(_esbp)->esb_lock)
#define	SFXGE_BAR_UNLOCK(_esbp)						\
	mtx_unlock(&(_esbp)->esb_lock)

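/*
 * Usage sketch (illustrative only, never compiled): initialising the BAR
 * lock during attach and tearing it down on detach.  The function name and
 * interface name are hypothetical.
 */
#if 0
static __inline void
sfxge_bar_lock_example(efsys_bar_t *esbp)
{
	SFXGE_BAR_LOCK_INIT(esbp, "sfxge0");
	/* ... register access through the EFSYS_BAR_* macros ... */
	SFXGE_BAR_LOCK_DESTROY(esbp);
}
#endif
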
#define	EFSYS_BAR_READD(_esbp, _offset, _edp, _lock)			\
	do {								\
		_NOTE(CONSTANTCONDITION)				\
		KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_dword_t)),	\
		    ("not power of 2 aligned"));			\
									\
		_NOTE(CONSTANTCONDITION)				\
		if (_lock)						\
			SFXGE_BAR_LOCK(_esbp);				\
									\
		(_edp)->ed_u32[0] = bus_space_read_stream_4(		\
		    (_esbp)->esb_tag, (_esbp)->esb_handle,		\
		    (_offset));						\
									\
		EFSYS_PROBE2(bar_readd, unsigned int, (_offset),	\
		    uint32_t, (_edp)->ed_u32[0]);			\
									\
		_NOTE(CONSTANTCONDITION)				\
		if (_lock)						\
			SFXGE_BAR_UNLOCK(_esbp);			\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)

#if defined(SFXGE_USE_BUS_SPACE_8)
#define	EFSYS_BAR_READQ(_esbp, _offset, _eqp)				\
	do {								\
		_NOTE(CONSTANTCONDITION)				\
		KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_qword_t)),	\
		    ("not power of 2 aligned"));			\
									\
		SFXGE_BAR_LOCK(_esbp);					\
									\
		(_eqp)->eq_u64[0] = bus_space_read_stream_8(		\
		    (_esbp)->esb_tag, (_esbp)->esb_handle,		\
		    (_offset));						\
									\
		EFSYS_PROBE3(bar_readq, unsigned int, (_offset),	\
		    uint32_t, (_eqp)->eq_u32[1],			\
		    uint32_t, (_eqp)->eq_u32[0]);			\
									\
		SFXGE_BAR_UNLOCK(_esbp);				\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)

#define	EFSYS_BAR_READO(_esbp, _offset, _eop, _lock)			\
	do {								\
		_NOTE(CONSTANTCONDITION)				\
		KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_oword_t)),	\
		    ("not power of 2 aligned"));			\
									\
		_NOTE(CONSTANTCONDITION)				\
		if (_lock)						\
			SFXGE_BAR_LOCK(_esbp);				\
									\
		(_eop)->eo_u64[0] = bus_space_read_stream_8(		\
		    (_esbp)->esb_tag, (_esbp)->esb_handle,		\
		    (_offset));						\
		(_eop)->eo_u64[1] = bus_space_read_stream_8(		\
		    (_esbp)->esb_tag, (_esbp)->esb_handle,		\
		    (_offset) + 8);					\
									\
		EFSYS_PROBE5(bar_reado, unsigned int, (_offset),	\
		    uint32_t, (_eop)->eo_u32[3],			\
		    uint32_t, (_eop)->eo_u32[2],			\
		    uint32_t, (_eop)->eo_u32[1],			\
		    uint32_t, (_eop)->eo_u32[0]);			\
									\
		_NOTE(CONSTANTCONDITION)				\
		if (_lock)						\
			SFXGE_BAR_UNLOCK(_esbp);			\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)

#else
#define	EFSYS_BAR_READQ(_esbp, _offset, _eqp)				\
	do {								\
		_NOTE(CONSTANTCONDITION)				\
		KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_qword_t)),	\
		    ("not power of 2 aligned"));			\
									\
		SFXGE_BAR_LOCK(_esbp);					\
									\
		(_eqp)->eq_u32[0] = bus_space_read_stream_4(		\
		    (_esbp)->esb_tag, (_esbp)->esb_handle,		\
		    (_offset));						\
		(_eqp)->eq_u32[1] = bus_space_read_stream_4(		\
		    (_esbp)->esb_tag, (_esbp)->esb_handle,		\
		    (_offset) + 4);					\
									\
		EFSYS_PROBE3(bar_readq, unsigned int, (_offset),	\
		    uint32_t, (_eqp)->eq_u32[1],			\
		    uint32_t, (_eqp)->eq_u32[0]);			\
									\
		SFXGE_BAR_UNLOCK(_esbp);				\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)

#define	EFSYS_BAR_READO(_esbp, _offset, _eop, _lock)			\
	do {								\
		_NOTE(CONSTANTCONDITION)				\
		KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_oword_t)),	\
		    ("not power of 2 aligned"));			\
									\
		_NOTE(CONSTANTCONDITION)				\
		if (_lock)						\
			SFXGE_BAR_LOCK(_esbp);				\
									\
		(_eop)->eo_u32[0] = bus_space_read_stream_4(		\
		    (_esbp)->esb_tag, (_esbp)->esb_handle,		\
		    (_offset));						\
		(_eop)->eo_u32[1] = bus_space_read_stream_4(		\
		    (_esbp)->esb_tag, (_esbp)->esb_handle,		\
		    (_offset) + 4);					\
		(_eop)->eo_u32[2] = bus_space_read_stream_4(		\
		    (_esbp)->esb_tag, (_esbp)->esb_handle,		\
		    (_offset) + 8);					\
		(_eop)->eo_u32[3] = bus_space_read_stream_4(		\
		    (_esbp)->esb_tag, (_esbp)->esb_handle,		\
		    (_offset) + 12);					\
									\
		EFSYS_PROBE5(bar_reado, unsigned int, (_offset),	\
		    uint32_t, (_eop)->eo_u32[3],			\
		    uint32_t, (_eop)->eo_u32[2],			\
		    uint32_t, (_eop)->eo_u32[1],			\
		    uint32_t, (_eop)->eo_u32[0]);			\
									\
		_NOTE(CONSTANTCONDITION)				\
		if (_lock)						\
			SFXGE_BAR_UNLOCK(_esbp);			\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)
#endif

#define	EFSYS_BAR_WRITED(_esbp, _offset, _edp, _lock)			\
	do {								\
		_NOTE(CONSTANTCONDITION)				\
		KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_dword_t)),	\
		    ("not power of 2 aligned"));			\
									\
		_NOTE(CONSTANTCONDITION)				\
		if (_lock)						\
			SFXGE_BAR_LOCK(_esbp);				\
									\
		EFSYS_PROBE2(bar_writed, unsigned int, (_offset),	\
		    uint32_t, (_edp)->ed_u32[0]);			\
									\
		/*							\
		 * Make sure that previous writes to the dword have	\
		 * completed.  This should be cheaper than a barrier	\
		 * just after the write below.				\
		 */							\
		bus_space_barrier((_esbp)->esb_tag, (_esbp)->esb_handle,\
		    (_offset), sizeof (efx_dword_t),			\
		    BUS_SPACE_BARRIER_WRITE);				\
		bus_space_write_stream_4((_esbp)->esb_tag,		\
		    (_esbp)->esb_handle,				\
		    (_offset), (_edp)->ed_u32[0]);			\
									\
		_NOTE(CONSTANTCONDITION)				\
		if (_lock)						\
			SFXGE_BAR_UNLOCK(_esbp);			\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)

#if defined(SFXGE_USE_BUS_SPACE_8)
#define	EFSYS_BAR_WRITEQ(_esbp, _offset, _eqp)				\
	do {								\
		_NOTE(CONSTANTCONDITION)				\
		KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_qword_t)),	\
		    ("not power of 2 aligned"));			\
									\
		SFXGE_BAR_LOCK(_esbp);					\
									\
		EFSYS_PROBE3(bar_writeq, unsigned int, (_offset),	\
		    uint32_t, (_eqp)->eq_u32[1],			\
		    uint32_t, (_eqp)->eq_u32[0]);			\
									\
		/*							\
		 * Make sure that previous writes to the qword have	\
		 * completed.  This should be cheaper than a barrier	\
		 * just after the write below.				\
		 */							\
		bus_space_barrier((_esbp)->esb_tag, (_esbp)->esb_handle,\
		    (_offset), sizeof (efx_qword_t),			\
		    BUS_SPACE_BARRIER_WRITE);				\
		bus_space_write_stream_8((_esbp)->esb_tag,		\
		    (_esbp)->esb_handle,				\
		    (_offset), (_eqp)->eq_u64[0]);			\
									\
		SFXGE_BAR_UNLOCK(_esbp);				\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)
#else
#define	EFSYS_BAR_WRITEQ(_esbp, _offset, _eqp)				\
	do {								\
		_NOTE(CONSTANTCONDITION)				\
		KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_qword_t)),	\
		    ("not power of 2 aligned"));			\
									\
		SFXGE_BAR_LOCK(_esbp);					\
									\
		EFSYS_PROBE3(bar_writeq, unsigned int, (_offset),	\
		    uint32_t, (_eqp)->eq_u32[1],			\
		    uint32_t, (_eqp)->eq_u32[0]);			\
									\
		/*							\
		 * Make sure that previous writes to the qword have	\
		 * completed.  This should be cheaper than a barrier	\
		 * just after the last write below.			\
		 */							\
		bus_space_barrier((_esbp)->esb_tag, (_esbp)->esb_handle,\
		    (_offset), sizeof (efx_qword_t),			\
		    BUS_SPACE_BARRIER_WRITE);				\
		bus_space_write_stream_4((_esbp)->esb_tag,		\
		    (_esbp)->esb_handle,				\
		    (_offset), (_eqp)->eq_u32[0]);			\
		/*							\
		 * The last dword must be written last, so barrier	\
		 * the entire qword to ensure that neither the		\
		 * preceding nor the following writes are reordered.	\
		 */							\
		bus_space_barrier((_esbp)->esb_tag, (_esbp)->esb_handle,\
		    (_offset), sizeof (efx_qword_t),			\
		    BUS_SPACE_BARRIER_WRITE);				\
		bus_space_write_stream_4((_esbp)->esb_tag,		\
		    (_esbp)->esb_handle,				\
		    (_offset) + 4, (_eqp)->eq_u32[1]);			\
									\
		SFXGE_BAR_UNLOCK(_esbp);				\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)
#endif

/*
 * Guarantees 64-bit aligned 64-bit writes to a write-combined BAR mapping
 * (required by PIO hardware).
 */
#define	EFSYS_BAR_WC_WRITEQ(_esbp, _offset, _eqp)			\
	do {								\
		_NOTE(CONSTANTCONDITION)				\
		KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_qword_t)),	\
		    ("not power of 2 aligned"));			\
									\
		(void) (_esbp);						\
									\
		/* FIXME: Perform a 64-bit write */			\
		KASSERT(0, ("not implemented"));			\
									\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)

#if defined(SFXGE_USE_BUS_SPACE_8)
#define	EFSYS_BAR_WRITEO(_esbp, _offset, _eop, _lock)			\
	do {								\
		_NOTE(CONSTANTCONDITION)				\
		KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_oword_t)),	\
		    ("not power of 2 aligned"));			\
									\
		_NOTE(CONSTANTCONDITION)				\
		if (_lock)						\
			SFXGE_BAR_LOCK(_esbp);				\
									\
		EFSYS_PROBE5(bar_writeo, unsigned int, (_offset),	\
		    uint32_t, (_eop)->eo_u32[3],			\
		    uint32_t, (_eop)->eo_u32[2],			\
		    uint32_t, (_eop)->eo_u32[1],			\
		    uint32_t, (_eop)->eo_u32[0]);			\
									\
		/*							\
		 * Make sure that previous writes to the oword have	\
		 * completed.  This should be cheaper than a barrier	\
		 * just after the last write below.			\
		 */							\
		bus_space_barrier((_esbp)->esb_tag, (_esbp)->esb_handle,\
		    (_offset), sizeof (efx_oword_t),			\
		    BUS_SPACE_BARRIER_WRITE);				\
		bus_space_write_stream_8((_esbp)->esb_tag,		\
		    (_esbp)->esb_handle,				\
		    (_offset), (_eop)->eo_u64[0]);			\
		/*							\
		 * The last qword must be written last, so barrier	\
		 * the entire oword to ensure that neither the		\
		 * preceding nor the following writes are reordered.	\
		 */							\
		bus_space_barrier((_esbp)->esb_tag, (_esbp)->esb_handle,\
		    (_offset), sizeof (efx_oword_t),			\
		    BUS_SPACE_BARRIER_WRITE);				\
		bus_space_write_stream_8((_esbp)->esb_tag,		\
		    (_esbp)->esb_handle,				\
		    (_offset) + 8, (_eop)->eo_u64[1]);			\
									\
		_NOTE(CONSTANTCONDITION)				\
		if (_lock)						\
			SFXGE_BAR_UNLOCK(_esbp);			\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)

#else
#define	EFSYS_BAR_WRITEO(_esbp, _offset, _eop, _lock)			\
	do {								\
		_NOTE(CONSTANTCONDITION)				\
		KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_oword_t)),	\
		    ("not power of 2 aligned"));			\
									\
		_NOTE(CONSTANTCONDITION)				\
		if (_lock)						\
			SFXGE_BAR_LOCK(_esbp);				\
									\
		EFSYS_PROBE5(bar_writeo, unsigned int, (_offset),	\
		    uint32_t, (_eop)->eo_u32[3],			\
		    uint32_t, (_eop)->eo_u32[2],			\
		    uint32_t, (_eop)->eo_u32[1],			\
		    uint32_t, (_eop)->eo_u32[0]);			\
									\
		/*							\
		 * Make sure that previous writes to the oword have	\
		 * completed.  This should be cheaper than a barrier	\
		 * just after the last write below.			\
		 */							\
		bus_space_barrier((_esbp)->esb_tag, (_esbp)->esb_handle,\
		    (_offset), sizeof (efx_oword_t),			\
		    BUS_SPACE_BARRIER_WRITE);				\
		bus_space_write_stream_4((_esbp)->esb_tag,		\
		    (_esbp)->esb_handle,				\
		    (_offset), (_eop)->eo_u32[0]);			\
		bus_space_write_stream_4((_esbp)->esb_tag,		\
		    (_esbp)->esb_handle,				\
		    (_offset) + 4, (_eop)->eo_u32[1]);			\
		bus_space_write_stream_4((_esbp)->esb_tag,		\
		    (_esbp)->esb_handle,				\
		    (_offset) + 8, (_eop)->eo_u32[2]);			\
		/*							\
		 * The last dword must be written last, so barrier	\
		 * the entire oword to ensure that neither the		\
		 * preceding nor the following writes are reordered.	\
		 */							\
		bus_space_barrier((_esbp)->esb_tag, (_esbp)->esb_handle,\
		    (_offset), sizeof (efx_oword_t),			\
		    BUS_SPACE_BARRIER_WRITE);				\
		bus_space_write_stream_4((_esbp)->esb_tag,		\
		    (_esbp)->esb_handle,				\
		    (_offset) + 12, (_eop)->eo_u32[3]);			\
									\
		_NOTE(CONSTANTCONDITION)				\
		if (_lock)						\
			SFXGE_BAR_UNLOCK(_esbp);			\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)
#endif

/* Use the standard octo-word write for doorbell writes */
#define	EFSYS_BAR_DOORBELL_WRITEO(_esbp, _offset, _eop)			\
	do {								\
		EFSYS_BAR_WRITEO((_esbp), (_offset), (_eop), B_FALSE);	\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)

/* SPIN */

#define	EFSYS_SPIN(_us)							\
	do {								\
		DELAY(_us);						\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)

#define	EFSYS_SLEEP	EFSYS_SPIN

/* BARRIERS */

#define	EFSYS_MEM_READ_BARRIER()	rmb()
#define	EFSYS_PIO_WRITE_BARRIER()

/* DMA SYNC */
#define	EFSYS_DMA_SYNC_FOR_KERNEL(_esmp, _offset, _size)		\
	do {								\
		bus_dmamap_sync((_esmp)->esm_tag,			\
		    (_esmp)->esm_map,					\
		    BUS_DMASYNC_POSTREAD);				\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)

#define	EFSYS_DMA_SYNC_FOR_DEVICE(_esmp, _offset, _size)		\
	do {								\
		bus_dmamap_sync((_esmp)->esm_tag,			\
		    (_esmp)->esm_map,					\
		    BUS_DMASYNC_PREWRITE);				\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)

/* TIMESTAMP */

typedef	clock_t	efsys_timestamp_t;

#define	EFSYS_TIMESTAMP(_usp)						\
	do {								\
		clock_t now;						\
									\
		now = ticks;						\
		*(_usp) = now * hz / 1000000;				\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)

/* KMEM */

#define	EFSYS_KMEM_ALLOC(_esip, _size, _p)				\
	do {								\
		(_esip) = (_esip);					\
		/*							\
		 * The macro is used in non-sleepable contexts, for	\
		 * example, while holding a mutex.			\
		 */							\
		(_p) = malloc((_size), M_SFXGE, M_NOWAIT|M_ZERO);	\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)

#define	EFSYS_KMEM_FREE(_esip, _size, _p)				\
	do {								\
		(void) (_esip);						\
		(void) (_size);						\
		free((_p), M_SFXGE);					\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)
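
/*
 * Usage sketch (illustrative only, never compiled): allocating and freeing
 * a scratch buffer through the KMEM wrappers.  Because the allocation uses
 * M_NOWAIT, the returned pointer must be checked for NULL.  The function
 * name and buffer size are hypothetical.
 */
#if 0
static __inline int
efsys_kmem_example(efsys_identifier_t *esip)
{
	void *buf;

	EFSYS_KMEM_ALLOC(esip, 128, buf);
	if (buf == NULL)
		return (ENOMEM);
	/* ... use the buffer ... */
	EFSYS_KMEM_FREE(esip, 128, buf);
	return (0);
}
#endif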

/* LOCK */

typedef struct efsys_lock_s {
	struct mtx	lock;
	char		lock_name[SFXGE_LOCK_NAME_MAX];
} efsys_lock_t;

#define	SFXGE_EFSYS_LOCK_INIT(_eslp, _ifname, _label)			\
	do {								\
		efsys_lock_t *__eslp = (_eslp);				\
									\
		snprintf((__eslp)->lock_name,				\
			 sizeof((__eslp)->lock_name),			\
			 "%s:%s", (_ifname), (_label));			\
		mtx_init(&(__eslp)->lock, (__eslp)->lock_name,		\
			 NULL, MTX_DEF);				\
	} while (B_FALSE)
#define	SFXGE_EFSYS_LOCK_DESTROY(_eslp)					\
	mtx_destroy(&(_eslp)->lock)
#define	SFXGE_EFSYS_LOCK(_eslp)						\
	mtx_lock(&(_eslp)->lock)
#define	SFXGE_EFSYS_UNLOCK(_eslp)					\
	mtx_unlock(&(_eslp)->lock)
#define	SFXGE_EFSYS_LOCK_ASSERT_OWNED(_eslp)				\
	mtx_assert(&(_eslp)->lock, MA_OWNED)

typedef int efsys_lock_state_t;

#define	EFSYS_LOCK_MAGIC	0x000010c4

#define	EFSYS_LOCK(_lockp, _state)					\
	do {								\
		SFXGE_EFSYS_LOCK(_lockp);				\
		(_state) = EFSYS_LOCK_MAGIC;				\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)

#define	EFSYS_UNLOCK(_lockp, _state)					\
	do {								\
		if ((_state) != EFSYS_LOCK_MAGIC)			\
			KASSERT(B_FALSE, ("not locked"));		\
		SFXGE_EFSYS_UNLOCK(_lockp);				\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)
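
/*
 * Usage sketch (illustrative only, never compiled): the common code brackets
 * critical sections with EFSYS_LOCK/EFSYS_UNLOCK; the state variable simply
 * records that the lock was taken through this pair of macros.  The function
 * name is hypothetical.
 */
#if 0
static __inline void
efsys_lock_example(efsys_lock_t *lockp)
{
	efsys_lock_state_t state;

	EFSYS_LOCK(lockp, state);
	/* ... touch state protected by the lock ... */
	EFSYS_UNLOCK(lockp, state);
}
#endif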

/* STAT */

typedef uint64_t		efsys_stat_t;

#define	EFSYS_STAT_INCR(_knp, _delta)					\
	do {								\
		*(_knp) += (_delta);					\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)

#define	EFSYS_STAT_DECR(_knp, _delta)					\
	do {								\
		*(_knp) -= (_delta);					\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)

#define	EFSYS_STAT_SET(_knp, _val)					\
	do {								\
		*(_knp) = (_val);					\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)

#define	EFSYS_STAT_SET_QWORD(_knp, _valp)				\
	do {								\
		*(_knp) = le64toh((_valp)->eq_u64[0]);			\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)

#define	EFSYS_STAT_SET_DWORD(_knp, _valp)				\
	do {								\
		*(_knp) = le32toh((_valp)->ed_u32[0]);			\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)

#define	EFSYS_STAT_INCR_QWORD(_knp, _valp)				\
	do {								\
		*(_knp) += le64toh((_valp)->eq_u64[0]);			\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)

#define	EFSYS_STAT_SUBR_QWORD(_knp, _valp)				\
	do {								\
		*(_knp) -= le64toh((_valp)->eq_u64[0]);			\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)

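/*
 * Usage sketch (illustrative only, never compiled): folding a little-endian
 * qword counter reported by the NIC into a driver statistic.  The function
 * and variable names are hypothetical.
 */
#if 0
static __inline void
efsys_stat_example(efsys_stat_t *stat, const efx_qword_t *valp)
{
	EFSYS_STAT_SET_QWORD(stat, valp);	/* latch the raw counter */
	EFSYS_STAT_INCR(stat, 1);		/* adjust it by a delta */
}
#endif
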
/* ERR */

extern void	sfxge_err(efsys_identifier_t *, unsigned int,
		    uint32_t, uint32_t);

#if EFSYS_OPT_DECODE_INTR_FATAL
#define	EFSYS_ERR(_esip, _code, _dword0, _dword1)			\
	do {								\
		sfxge_err((_esip), (_code), (_dword0), (_dword1));	\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)
#endif

/* ASSERT */

#define	EFSYS_ASSERT(_exp) do {						\
	if (!(_exp))							\
		panic("%s", #_exp);					\
	} while (0)

#define	EFSYS_ASSERT3(_x, _op, _y, _t) do {				\
	const _t __x = (_t)(_x);					\
	const _t __y = (_t)(_y);					\
	if (!(__x _op __y))						\
		panic("assertion failed at %s:%u", __FILE__, __LINE__);	\
	} while (0)

#define	EFSYS_ASSERT3U(_x, _op, _y)	EFSYS_ASSERT3(_x, _op, _y, uint64_t)
#define	EFSYS_ASSERT3S(_x, _op, _y)	EFSYS_ASSERT3(_x, _op, _y, int64_t)
#define	EFSYS_ASSERT3P(_x, _op, _y)	EFSYS_ASSERT3(_x, _op, _y, uintptr_t)
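
/*
 * Usage sketch (illustrative only, never compiled): the three-argument
 * asserts report only file and line, not the operand values, so keep the
 * compared expressions simple.  The function name and arguments are
 * hypothetical.
 */
#if 0
static __inline void
efsys_assert_example(const efsys_mem_t *esmp)
{
	EFSYS_ASSERT3U(sizeof (efx_oword_t), ==, 16);
	EFSYS_ASSERT3P(esmp->esm_base, !=, NULL);
}
#endif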

/* ROTATE */

#define	EFSYS_HAS_ROTL_DWORD 0

#ifdef	__cplusplus
}
#endif

#endif	/* _SYS_EFSYS_H */