/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2010-2016 Solarflare Communications Inc.
 * All rights reserved.
 *
 * This software was developed in part by Philip Paeps under contract for
 * Solarflare Communications, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * The views and conclusions contained in the software and documentation are
 * those of the authors and should not be interpreted as representing official
 * policies, either expressed or implied, of the FreeBSD Project.
 *
 * $FreeBSD$
 */

#ifndef _SYS_EFSYS_H
#define	_SYS_EFSYS_H

#ifdef __cplusplus
extern "C" {
#endif

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>
#include <sys/sdt.h>
#include <sys/systm.h>

#include <machine/bus.h>
#include <machine/endian.h>

#define	EFSYS_HAS_UINT64 1
#if defined(__x86_64__)
#define	EFSYS_USE_UINT64 1
#else
#define	EFSYS_USE_UINT64 0
#endif
#define	EFSYS_HAS_SSE2_M128 0
#if _BYTE_ORDER == _BIG_ENDIAN
#define	EFSYS_IS_BIG_ENDIAN 1
#define	EFSYS_IS_LITTLE_ENDIAN 0
#elif _BYTE_ORDER == _LITTLE_ENDIAN
#define	EFSYS_IS_BIG_ENDIAN 0
#define	EFSYS_IS_LITTLE_ENDIAN 1
#endif
#include "efx_types.h"

/* Common code requires this */
#if __FreeBSD_version < 800068
#define	memmove(d, s, l) bcopy(s, d, l)
#endif

#ifndef B_FALSE
#define	B_FALSE	FALSE
#endif
#ifndef B_TRUE
#define	B_TRUE	TRUE
#endif

#ifndef IS_P2ALIGNED
#define	IS_P2ALIGNED(v, a)	((((uintptr_t)(v)) & ((uintptr_t)(a) - 1)) == 0)
#endif

#ifndef P2ROUNDUP
#define	P2ROUNDUP(x, align)	(-(-(x) & -(align)))
#endif

#ifndef P2ALIGN
#define	P2ALIGN(_x, _a)		((_x) & -(_a))
#endif

#ifndef ISP2
#define	ISP2(x)			(((x) & ((x) - 1)) == 0)
#endif
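
/*
 * Editor's illustration (not used by the driver): worked examples of the
 * power-of-two helpers above, for hypothetical values.
 *
 *	IS_P2ALIGNED(0x1000, 64)	evaluates to 1 (64-byte aligned)
 *	IS_P2ALIGNED(0x1004, 64)	evaluates to 0
 *	P2ROUNDUP(100, 64)		evaluates to 128 (round up to alignment)
 *	P2ALIGN(100, 64)		evaluates to 64 (round down to alignment)
 *	ISP2(64)			evaluates to 1; ISP2(100) evaluates to 0
 */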

#if defined(__x86_64__) && __FreeBSD_version >= 1000000

#define	SFXGE_USE_BUS_SPACE_8		1

#if !defined(bus_space_read_stream_8)

#define	bus_space_read_stream_8(t, h, o)	\
	bus_space_read_8((t), (h), (o))

#define	bus_space_write_stream_8(t, h, o, v)	\
	bus_space_write_8((t), (h), (o), (v))

#endif

#endif

#define	ENOTACTIVE EINVAL

/* Memory type to use on FreeBSD */
MALLOC_DECLARE(M_SFXGE);

/* Machine dependent prefetch wrappers */
#if defined(__i386__) || defined(__amd64__)
static __inline void
prefetch_read_many(void *addr)
{

	__asm__(
	    "prefetcht0 (%0)"
	    :
	    : "r" (addr));
}

static __inline void
prefetch_read_once(void *addr)
{

	__asm__(
	    "prefetchnta (%0)"
	    :
	    : "r" (addr));
}
#elif defined(__sparc64__)
static __inline void
prefetch_read_many(void *addr)
{

	__asm__(
	    "prefetch [%0], 0"
	    :
	    : "r" (addr));
}

static __inline void
prefetch_read_once(void *addr)
{

	__asm__(
	    "prefetch [%0], 1"
	    :
	    : "r" (addr));
}
#else
static __inline void
prefetch_read_many(void *addr)
{

}

static __inline void
prefetch_read_once(void *addr)
{

}
#endif

#if defined(__i386__) || defined(__amd64__)
#include <vm/vm.h>
#include <vm/pmap.h>
#endif
static __inline void
sfxge_map_mbuf_fast(bus_dma_tag_t tag, bus_dmamap_t map,
    struct mbuf *m, bus_dma_segment_t *seg)
{
#if defined(__i386__) || defined(__amd64__)
	seg->ds_addr = pmap_kextract(mtod(m, vm_offset_t));
	seg->ds_len = m->m_len;
#else
	int nsegstmp;

	bus_dmamap_load_mbuf_sg(tag, map, m, seg, &nsegstmp, 0);
#endif
}
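
/*
 * Usage sketch (editor's illustration, not part of the driver): map a
 * single-fragment mbuf for DMA on a fast path.  The tag, map and mbuf
 * variables are hypothetical; the caller is assumed to know the chain
 * consists of one fragment.
 *
 *	bus_dma_segment_t seg;
 *
 *	sfxge_map_mbuf_fast(tag, map, m, &seg);
 *	(seg.ds_addr and seg.ds_len now describe the fragment)
 */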

/* Code inclusion options */

#define	EFSYS_OPT_NAMES 1

#define	EFSYS_OPT_SIENA 1
#define	EFSYS_OPT_HUNTINGTON 1
#define	EFSYS_OPT_MEDFORD 1
#define	EFSYS_OPT_MEDFORD2 1
#ifdef DEBUG
#define	EFSYS_OPT_CHECK_REG 1
#else
#define	EFSYS_OPT_CHECK_REG 0
#endif

#define	EFSYS_OPT_MCDI 1
#define	EFSYS_OPT_MCDI_LOGGING 0
#define	EFSYS_OPT_MCDI_PROXY_AUTH 0

#define	EFSYS_OPT_MAC_STATS 1

#define	EFSYS_OPT_LOOPBACK 0

#define	EFSYS_OPT_MON_MCDI 0
#define	EFSYS_OPT_MON_STATS 0

#define	EFSYS_OPT_PHY_STATS 1
#define	EFSYS_OPT_BIST 1
#define	EFSYS_OPT_PHY_LED_CONTROL 1
#define	EFSYS_OPT_PHY_FLAGS 0

#define	EFSYS_OPT_VPD 1
#define	EFSYS_OPT_NVRAM 1
#define	EFSYS_OPT_BOOTCFG 0
#define	EFSYS_OPT_IMAGE_LAYOUT 0

#define	EFSYS_OPT_DIAG 0
#define	EFSYS_OPT_RX_SCALE 1
#define	EFSYS_OPT_QSTATS 1
#define	EFSYS_OPT_FILTER 1
#define	EFSYS_OPT_RX_SCATTER 0

#define	EFSYS_OPT_EV_PREFETCH 0

#define	EFSYS_OPT_DECODE_INTR_FATAL 1

#define	EFSYS_OPT_LICENSING 0

#define	EFSYS_OPT_ALLOW_UNCONFIGURED_NIC 0

#define	EFSYS_OPT_RX_PACKED_STREAM 0

#define	EFSYS_OPT_RX_ES_SUPER_BUFFER 0

#define	EFSYS_OPT_TUNNEL 0

#define	EFSYS_OPT_FW_SUBVARIANT_AWARE 0

/* ID */

typedef struct __efsys_identifier_s	efsys_identifier_t;

/* PROBE */

#ifndef DTRACE_PROBE

#define	EFSYS_PROBE(_name)

#define	EFSYS_PROBE1(_name, _type1, _arg1)

#define	EFSYS_PROBE2(_name, _type1, _arg1, _type2, _arg2)

#define	EFSYS_PROBE3(_name, _type1, _arg1, _type2, _arg2,	\
	    _type3, _arg3)

#define	EFSYS_PROBE4(_name, _type1, _arg1, _type2, _arg2,	\
	    _type3, _arg3, _type4, _arg4)

#define	EFSYS_PROBE5(_name, _type1, _arg1, _type2, _arg2,	\
	    _type3, _arg3, _type4, _arg4, _type5, _arg5)

#define	EFSYS_PROBE6(_name, _type1, _arg1, _type2, _arg2,	\
	    _type3, _arg3, _type4, _arg4, _type5, _arg5,	\
	    _type6, _arg6)

#define	EFSYS_PROBE7(_name, _type1, _arg1, _type2, _arg2,	\
	    _type3, _arg3, _type4, _arg4, _type5, _arg5,	\
	    _type6, _arg6, _type7, _arg7)

#else /* DTRACE_PROBE */

#define	EFSYS_PROBE(_name)	\
	DTRACE_PROBE(_name)

#define	EFSYS_PROBE1(_name, _type1, _arg1)	\
	DTRACE_PROBE1(_name, _type1, _arg1)

#define	EFSYS_PROBE2(_name, _type1, _arg1, _type2, _arg2)	\
	DTRACE_PROBE2(_name, _type1, _arg1, _type2, _arg2)

#define	EFSYS_PROBE3(_name, _type1, _arg1, _type2, _arg2,	\
	    _type3, _arg3)	\
	DTRACE_PROBE3(_name, _type1, _arg1, _type2, _arg2,	\
	    _type3, _arg3)

#define	EFSYS_PROBE4(_name, _type1, _arg1, _type2, _arg2,	\
	    _type3, _arg3, _type4, _arg4)	\
	DTRACE_PROBE4(_name, _type1, _arg1, _type2, _arg2,	\
	    _type3, _arg3, _type4, _arg4)

#ifdef DTRACE_PROBE5
#define	EFSYS_PROBE5(_name, _type1, _arg1, _type2, _arg2,	\
	    _type3, _arg3, _type4, _arg4, _type5, _arg5)	\
	DTRACE_PROBE5(_name, _type1, _arg1, _type2, _arg2,	\
	    _type3, _arg3, _type4, _arg4, _type5, _arg5)
#else
#define	EFSYS_PROBE5(_name, _type1, _arg1, _type2, _arg2,	\
	    _type3, _arg3, _type4, _arg4, _type5, _arg5)	\
	DTRACE_PROBE4(_name, _type1, _arg1, _type2, _arg2,	\
	    _type3, _arg3, _type4, _arg4)
#endif

#ifdef DTRACE_PROBE6
#define	EFSYS_PROBE6(_name, _type1, _arg1, _type2, _arg2,	\
	    _type3, _arg3, _type4, _arg4, _type5, _arg5,	\
	    _type6, _arg6)	\
	DTRACE_PROBE6(_name, _type1, _arg1, _type2, _arg2,	\
	    _type3, _arg3, _type4, _arg4, _type5, _arg5,	\
	    _type6, _arg6)
#else
#define	EFSYS_PROBE6(_name, _type1, _arg1, _type2, _arg2,	\
	    _type3, _arg3, _type4, _arg4, _type5, _arg5,	\
	    _type6, _arg6)	\
	EFSYS_PROBE5(_name, _type1, _arg1, _type2, _arg2,	\
	    _type3, _arg3, _type4, _arg4, _type5, _arg5)
#endif

#ifdef DTRACE_PROBE7
#define	EFSYS_PROBE7(_name, _type1, _arg1, _type2, _arg2,	\
	    _type3, _arg3, _type4, _arg4, _type5, _arg5,	\
	    _type6, _arg6, _type7, _arg7)	\
	DTRACE_PROBE7(_name, _type1, _arg1, _type2, _arg2,	\
	    _type3, _arg3, _type4, _arg4, _type5, _arg5,	\
	    _type6, _arg6, _type7, _arg7)
#else
#define	EFSYS_PROBE7(_name, _type1, _arg1, _type2, _arg2,	\
	    _type3, _arg3, _type4, _arg4, _type5, _arg5,	\
	    _type6, _arg6, _type7, _arg7)	\
	EFSYS_PROBE6(_name, _type1, _arg1, _type2, _arg2,	\
	    _type3, _arg3, _type4, _arg4, _type5, _arg5,	\
	    _type6, _arg6)
#endif

#endif /* DTRACE_PROBE */
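
/*
 * Example (editor's sketch): when DTRACE_PROBE is available, a call such as
 *
 *	EFSYS_PROBE2(bar_writed, unsigned int, offset, uint32_t, val);
 *
 * expands to DTRACE_PROBE2(bar_writed, ...), which registers a static (SDT)
 * probe that dtrace(1) can attach to; when DTRACE_PROBE is not defined, the
 * call compiles away to nothing.  "offset" and "val" are hypothetical.
 */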

/* DMA */

typedef uint64_t	efsys_dma_addr_t;

typedef struct efsys_mem_s {
	bus_dma_tag_t		esm_tag;
	bus_dmamap_t		esm_map;
	caddr_t			esm_base;
	efsys_dma_addr_t	esm_addr;
	size_t			esm_size;
} efsys_mem_t;

#define	EFSYS_MEM_SIZE(_esmp)	\
	((_esmp)->esm_size)

#define	EFSYS_MEM_ADDR(_esmp)	\
	((_esmp)->esm_addr)

#define	EFSYS_MEM_IS_NULL(_esmp)	\
	((_esmp)->esm_base == NULL)

#define	EFSYS_MEM_ZERO(_esmp, _size)	\
	do {	\
		(void) memset((_esmp)->esm_base, 0, (_size));	\
		\
	_NOTE(CONSTANTCONDITION)	\
	} while (B_FALSE)

#define	EFSYS_MEM_READD(_esmp, _offset, _edp)	\
	do {	\
		uint32_t *addr;	\
		\
		_NOTE(CONSTANTCONDITION)	\
		KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_dword_t)),	\
		    ("not power of 2 aligned"));	\
		\
		addr = (void *)((_esmp)->esm_base + (_offset));	\
		\
		(_edp)->ed_u32[0] = *addr;	\
		\
		EFSYS_PROBE2(mem_readd, unsigned int, (_offset),	\
		    uint32_t, (_edp)->ed_u32[0]);	\
		\
	_NOTE(CONSTANTCONDITION)	\
	} while (B_FALSE)

#if defined(__x86_64__)
#define	EFSYS_MEM_READQ(_esmp, _offset, _eqp)	\
	do {	\
		uint64_t *addr;	\
		\
		_NOTE(CONSTANTCONDITION)	\
		KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_qword_t)),	\
		    ("not power of 2 aligned"));	\
		\
		addr = (void *)((_esmp)->esm_base + (_offset));	\
		\
		(_eqp)->eq_u64[0] = *addr;	\
		\
		EFSYS_PROBE3(mem_readq, unsigned int, (_offset),	\
		    uint32_t, (_eqp)->eq_u32[1],	\
		    uint32_t, (_eqp)->eq_u32[0]);	\
		\
	_NOTE(CONSTANTCONDITION)	\
	} while (B_FALSE)
#else
#define	EFSYS_MEM_READQ(_esmp, _offset, _eqp)	\
	do {	\
		uint32_t *addr;	\
		\
		_NOTE(CONSTANTCONDITION)	\
		KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_qword_t)),	\
		    ("not power of 2 aligned"));	\
		\
		addr = (void *)((_esmp)->esm_base + (_offset));	\
		\
		(_eqp)->eq_u32[0] = *addr++;	\
		(_eqp)->eq_u32[1] = *addr;	\
		\
		EFSYS_PROBE3(mem_readq, unsigned int, (_offset),	\
		    uint32_t, (_eqp)->eq_u32[1],	\
		    uint32_t, (_eqp)->eq_u32[0]);	\
		\
	_NOTE(CONSTANTCONDITION)	\
	} while (B_FALSE)
#endif

#if defined(__x86_64__)
#define	EFSYS_MEM_READO(_esmp, _offset, _eop)	\
	do {	\
		uint64_t *addr;	\
		\
		_NOTE(CONSTANTCONDITION)	\
		KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_oword_t)),	\
		    ("not power of 2 aligned"));	\
		\
		addr = (void *)((_esmp)->esm_base + (_offset));	\
		\
		(_eop)->eo_u64[0] = *addr++;	\
		(_eop)->eo_u64[1] = *addr;	\
		\
		EFSYS_PROBE5(mem_reado, unsigned int, (_offset),	\
		    uint32_t, (_eop)->eo_u32[3],	\
		    uint32_t, (_eop)->eo_u32[2],	\
		    uint32_t, (_eop)->eo_u32[1],	\
		    uint32_t, (_eop)->eo_u32[0]);	\
		\
	_NOTE(CONSTANTCONDITION)	\
	} while (B_FALSE)
#else
#define	EFSYS_MEM_READO(_esmp, _offset, _eop)	\
	do {	\
		uint32_t *addr;	\
		\
		_NOTE(CONSTANTCONDITION)	\
		KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_oword_t)),	\
		    ("not power of 2 aligned"));	\
		\
		addr = (void *)((_esmp)->esm_base + (_offset));	\
		\
		(_eop)->eo_u32[0] = *addr++;	\
		(_eop)->eo_u32[1] = *addr++;	\
		(_eop)->eo_u32[2] = *addr++;	\
		(_eop)->eo_u32[3] = *addr;	\
		\
		EFSYS_PROBE5(mem_reado, unsigned int, (_offset),	\
		    uint32_t, (_eop)->eo_u32[3],	\
		    uint32_t, (_eop)->eo_u32[2],	\
		    uint32_t, (_eop)->eo_u32[1],	\
		    uint32_t, (_eop)->eo_u32[0]);	\
		\
	_NOTE(CONSTANTCONDITION)	\
	} while (B_FALSE)
#endif

#define	EFSYS_MEM_WRITED(_esmp, _offset, _edp)	\
	do {	\
		uint32_t *addr;	\
		\
		_NOTE(CONSTANTCONDITION)	\
		KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_dword_t)),	\
		    ("not power of 2 aligned"));	\
		\
		EFSYS_PROBE2(mem_writed, unsigned int, (_offset),	\
		    uint32_t, (_edp)->ed_u32[0]);	\
		\
		addr = (void *)((_esmp)->esm_base + (_offset));	\
		\
		*addr = (_edp)->ed_u32[0];	\
		\
	_NOTE(CONSTANTCONDITION)	\
	} while (B_FALSE)

#if defined(__x86_64__)
#define	EFSYS_MEM_WRITEQ(_esmp, _offset, _eqp)	\
	do {	\
		uint64_t *addr;	\
		\
		_NOTE(CONSTANTCONDITION)	\
		KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_qword_t)),	\
		    ("not power of 2 aligned"));	\
		\
		EFSYS_PROBE3(mem_writeq, unsigned int, (_offset),	\
		    uint32_t, (_eqp)->eq_u32[1],	\
		    uint32_t, (_eqp)->eq_u32[0]);	\
		\
		addr = (void *)((_esmp)->esm_base + (_offset));	\
		\
		*addr = (_eqp)->eq_u64[0];	\
		\
	_NOTE(CONSTANTCONDITION)	\
	} while (B_FALSE)

#else
#define	EFSYS_MEM_WRITEQ(_esmp, _offset, _eqp)	\
	do {	\
		uint32_t *addr;	\
		\
		_NOTE(CONSTANTCONDITION)	\
		KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_qword_t)),	\
		    ("not power of 2 aligned"));	\
		\
		EFSYS_PROBE3(mem_writeq, unsigned int, (_offset),	\
		    uint32_t, (_eqp)->eq_u32[1],	\
		    uint32_t, (_eqp)->eq_u32[0]);	\
		\
		addr = (void *)((_esmp)->esm_base + (_offset));	\
		\
		*addr++ = (_eqp)->eq_u32[0];	\
		*addr = (_eqp)->eq_u32[1];	\
		\
	_NOTE(CONSTANTCONDITION)	\
	} while (B_FALSE)
#endif

#if defined(__x86_64__)
#define	EFSYS_MEM_WRITEO(_esmp, _offset, _eop)	\
	do {	\
		uint64_t *addr;	\
		\
		_NOTE(CONSTANTCONDITION)	\
		KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_oword_t)),	\
		    ("not power of 2 aligned"));	\
		\
		EFSYS_PROBE5(mem_writeo, unsigned int, (_offset),	\
		    uint32_t, (_eop)->eo_u32[3],	\
		    uint32_t, (_eop)->eo_u32[2],	\
		    uint32_t, (_eop)->eo_u32[1],	\
		    uint32_t, (_eop)->eo_u32[0]);	\
		\
		addr = (void *)((_esmp)->esm_base + (_offset));	\
		\
		*addr++ = (_eop)->eo_u64[0];	\
		*addr = (_eop)->eo_u64[1];	\
		\
	_NOTE(CONSTANTCONDITION)	\
	} while (B_FALSE)
#else
#define	EFSYS_MEM_WRITEO(_esmp, _offset, _eop)	\
	do {	\
		uint32_t *addr;	\
		\
		_NOTE(CONSTANTCONDITION)	\
		KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_oword_t)),	\
		    ("not power of 2 aligned"));	\
		\
		EFSYS_PROBE5(mem_writeo, unsigned int, (_offset),	\
		    uint32_t, (_eop)->eo_u32[3],	\
		    uint32_t, (_eop)->eo_u32[2],	\
		    uint32_t, (_eop)->eo_u32[1],	\
		    uint32_t, (_eop)->eo_u32[0]);	\
		\
		addr = (void *)((_esmp)->esm_base + (_offset));	\
		\
		*addr++ = (_eop)->eo_u32[0];	\
		*addr++ = (_eop)->eo_u32[1];	\
		*addr++ = (_eop)->eo_u32[2];	\
		*addr = (_eop)->eo_u32[3];	\
		\
	_NOTE(CONSTANTCONDITION)	\
	} while (B_FALSE)
#endif
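
/*
 * Usage sketch (editor's illustration): read and update a dword in a DMA
 * buffer described by an efsys_mem_t, bracketed by the DMA sync macros
 * defined further down.  "esmp" and "offset" are hypothetical.
 *
 *	efx_dword_t dword;
 *
 *	EFSYS_DMA_SYNC_FOR_KERNEL(esmp, offset, sizeof (dword));
 *	EFSYS_MEM_READD(esmp, offset, &dword);
 *	EFSYS_MEM_WRITED(esmp, offset, &dword);
 *	EFSYS_DMA_SYNC_FOR_DEVICE(esmp, offset, sizeof (dword));
 */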

/* BAR */

#define	SFXGE_LOCK_NAME_MAX	16

typedef struct efsys_bar_s {
	struct mtx		esb_lock;
	char			esb_lock_name[SFXGE_LOCK_NAME_MAX];
	bus_space_tag_t		esb_tag;
	bus_space_handle_t	esb_handle;
	int			esb_rid;
	struct resource		*esb_res;
} efsys_bar_t;

#define	SFXGE_BAR_LOCK_INIT(_esbp, _ifname)	\
	do {	\
		snprintf((_esbp)->esb_lock_name,	\
		    sizeof((_esbp)->esb_lock_name),	\
		    "%s:bar", (_ifname));	\
		mtx_init(&(_esbp)->esb_lock, (_esbp)->esb_lock_name,	\
		    NULL, MTX_DEF);	\
	_NOTE(CONSTANTCONDITION)	\
	} while (B_FALSE)
#define	SFXGE_BAR_LOCK_DESTROY(_esbp)	\
	mtx_destroy(&(_esbp)->esb_lock)
#define	SFXGE_BAR_LOCK(_esbp)	\
	mtx_lock(&(_esbp)->esb_lock)
#define	SFXGE_BAR_UNLOCK(_esbp)	\
	mtx_unlock(&(_esbp)->esb_lock)

#define	EFSYS_BAR_READD(_esbp, _offset, _edp, _lock)	\
	do {	\
		_NOTE(CONSTANTCONDITION)	\
		KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_dword_t)),	\
		    ("not power of 2 aligned"));	\
		\
		_NOTE(CONSTANTCONDITION)	\
		if (_lock)	\
			SFXGE_BAR_LOCK(_esbp);	\
		\
		(_edp)->ed_u32[0] = bus_space_read_stream_4(	\
		    (_esbp)->esb_tag, (_esbp)->esb_handle,	\
		    (_offset));	\
		\
		EFSYS_PROBE2(bar_readd, unsigned int, (_offset),	\
		    uint32_t, (_edp)->ed_u32[0]);	\
		\
		_NOTE(CONSTANTCONDITION)	\
		if (_lock)	\
			SFXGE_BAR_UNLOCK(_esbp);	\
	_NOTE(CONSTANTCONDITION)	\
	} while (B_FALSE)

#if defined(SFXGE_USE_BUS_SPACE_8)
#define	EFSYS_BAR_READQ(_esbp, _offset, _eqp)	\
	do {	\
		_NOTE(CONSTANTCONDITION)	\
		KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_qword_t)),	\
		    ("not power of 2 aligned"));	\
		\
		SFXGE_BAR_LOCK(_esbp);	\
		\
		(_eqp)->eq_u64[0] = bus_space_read_stream_8(	\
		    (_esbp)->esb_tag, (_esbp)->esb_handle,	\
		    (_offset));	\
		\
		EFSYS_PROBE3(bar_readq, unsigned int, (_offset),	\
		    uint32_t, (_eqp)->eq_u32[1],	\
		    uint32_t, (_eqp)->eq_u32[0]);	\
		\
		SFXGE_BAR_UNLOCK(_esbp);	\
	_NOTE(CONSTANTCONDITION)	\
	} while (B_FALSE)

#define	EFSYS_BAR_READO(_esbp, _offset, _eop, _lock)	\
	do {	\
		_NOTE(CONSTANTCONDITION)	\
		KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_oword_t)),	\
		    ("not power of 2 aligned"));	\
		\
		_NOTE(CONSTANTCONDITION)	\
		if (_lock)	\
			SFXGE_BAR_LOCK(_esbp);	\
		\
		(_eop)->eo_u64[0] = bus_space_read_stream_8(	\
		    (_esbp)->esb_tag, (_esbp)->esb_handle,	\
		    (_offset));	\
		(_eop)->eo_u64[1] = bus_space_read_stream_8(	\
		    (_esbp)->esb_tag, (_esbp)->esb_handle,	\
		    (_offset) + 8);	\
		\
		EFSYS_PROBE5(bar_reado, unsigned int, (_offset),	\
		    uint32_t, (_eop)->eo_u32[3],	\
		    uint32_t, (_eop)->eo_u32[2],	\
		    uint32_t, (_eop)->eo_u32[1],	\
		    uint32_t, (_eop)->eo_u32[0]);	\
		\
		_NOTE(CONSTANTCONDITION)	\
		if (_lock)	\
			SFXGE_BAR_UNLOCK(_esbp);	\
	_NOTE(CONSTANTCONDITION)	\
	} while (B_FALSE)

#else
#define	EFSYS_BAR_READQ(_esbp, _offset, _eqp)	\
	do {	\
		_NOTE(CONSTANTCONDITION)	\
		KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_qword_t)),	\
		    ("not power of 2 aligned"));	\
		\
		SFXGE_BAR_LOCK(_esbp);	\
		\
		(_eqp)->eq_u32[0] = bus_space_read_stream_4(	\
		    (_esbp)->esb_tag, (_esbp)->esb_handle,	\
		    (_offset));	\
		(_eqp)->eq_u32[1] = bus_space_read_stream_4(	\
		    (_esbp)->esb_tag, (_esbp)->esb_handle,	\
		    (_offset) + 4);	\
		\
		EFSYS_PROBE3(bar_readq, unsigned int, (_offset),	\
		    uint32_t, (_eqp)->eq_u32[1],	\
		    uint32_t, (_eqp)->eq_u32[0]);	\
		\
		SFXGE_BAR_UNLOCK(_esbp);	\
	_NOTE(CONSTANTCONDITION)	\
	} while (B_FALSE)

#define	EFSYS_BAR_READO(_esbp, _offset, _eop, _lock)	\
	do {	\
		_NOTE(CONSTANTCONDITION)	\
		KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_oword_t)),	\
		    ("not power of 2 aligned"));	\
		\
		_NOTE(CONSTANTCONDITION)	\
		if (_lock)	\
			SFXGE_BAR_LOCK(_esbp);	\
		\
		(_eop)->eo_u32[0] = bus_space_read_stream_4(	\
		    (_esbp)->esb_tag, (_esbp)->esb_handle,	\
		    (_offset));	\
		(_eop)->eo_u32[1] = bus_space_read_stream_4(	\
		    (_esbp)->esb_tag, (_esbp)->esb_handle,	\
		    (_offset) + 4);	\
		(_eop)->eo_u32[2] = bus_space_read_stream_4(	\
		    (_esbp)->esb_tag, (_esbp)->esb_handle,	\
		    (_offset) + 8);	\
		(_eop)->eo_u32[3] = bus_space_read_stream_4(	\
		    (_esbp)->esb_tag, (_esbp)->esb_handle,	\
		    (_offset) + 12);	\
		\
		EFSYS_PROBE5(bar_reado, unsigned int, (_offset),	\
		    uint32_t, (_eop)->eo_u32[3],	\
		    uint32_t, (_eop)->eo_u32[2],	\
		    uint32_t, (_eop)->eo_u32[1],	\
		    uint32_t, (_eop)->eo_u32[0]);	\
		\
		_NOTE(CONSTANTCONDITION)	\
		if (_lock)	\
			SFXGE_BAR_UNLOCK(_esbp);	\
	_NOTE(CONSTANTCONDITION)	\
	} while (B_FALSE)
#endif
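
/*
 * Usage sketch (editor's illustration): read a register oword from the BAR,
 * passing B_TRUE so the BAR lock is held across the component accesses and
 * they cannot interleave with other BAR users.  "esbp" and "reg_offset" are
 * hypothetical.
 *
 *	efx_oword_t oword;
 *
 *	EFSYS_BAR_READO(esbp, reg_offset, &oword, B_TRUE);
 */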

#define	EFSYS_BAR_WRITED(_esbp, _offset, _edp, _lock)	\
	do {	\
		_NOTE(CONSTANTCONDITION)	\
		KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_dword_t)),	\
		    ("not power of 2 aligned"));	\
		\
		_NOTE(CONSTANTCONDITION)	\
		if (_lock)	\
			SFXGE_BAR_LOCK(_esbp);	\
		\
		EFSYS_PROBE2(bar_writed, unsigned int, (_offset),	\
		    uint32_t, (_edp)->ed_u32[0]);	\
		\
		/*	\
		 * Make sure that previous writes to the dword have	\
		 * completed.  This should be cheaper than a barrier	\
		 * just after the write below.	\
		 */	\
		bus_space_barrier((_esbp)->esb_tag, (_esbp)->esb_handle,	\
		    (_offset), sizeof (efx_dword_t),	\
		    BUS_SPACE_BARRIER_WRITE);	\
		bus_space_write_stream_4((_esbp)->esb_tag,	\
		    (_esbp)->esb_handle,	\
		    (_offset), (_edp)->ed_u32[0]);	\
		\
		_NOTE(CONSTANTCONDITION)	\
		if (_lock)	\
			SFXGE_BAR_UNLOCK(_esbp);	\
	_NOTE(CONSTANTCONDITION)	\
	} while (B_FALSE)

#if defined(SFXGE_USE_BUS_SPACE_8)
#define	EFSYS_BAR_WRITEQ(_esbp, _offset, _eqp)	\
	do {	\
		_NOTE(CONSTANTCONDITION)	\
		KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_qword_t)),	\
		    ("not power of 2 aligned"));	\
		\
		SFXGE_BAR_LOCK(_esbp);	\
		\
		EFSYS_PROBE3(bar_writeq, unsigned int, (_offset),	\
		    uint32_t, (_eqp)->eq_u32[1],	\
		    uint32_t, (_eqp)->eq_u32[0]);	\
		\
		/*	\
		 * Make sure that previous writes to the qword have	\
		 * completed.  This should be cheaper than a barrier	\
		 * just after the write below.	\
		 */	\
		bus_space_barrier((_esbp)->esb_tag, (_esbp)->esb_handle,	\
		    (_offset), sizeof (efx_qword_t),	\
		    BUS_SPACE_BARRIER_WRITE);	\
		bus_space_write_stream_8((_esbp)->esb_tag,	\
		    (_esbp)->esb_handle,	\
		    (_offset), (_eqp)->eq_u64[0]);	\
		\
		SFXGE_BAR_UNLOCK(_esbp);	\
	_NOTE(CONSTANTCONDITION)	\
	} while (B_FALSE)
#else
#define	EFSYS_BAR_WRITEQ(_esbp, _offset, _eqp)	\
	do {	\
		_NOTE(CONSTANTCONDITION)	\
		KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_qword_t)),	\
		    ("not power of 2 aligned"));	\
		\
		SFXGE_BAR_LOCK(_esbp);	\
		\
		EFSYS_PROBE3(bar_writeq, unsigned int, (_offset),	\
		    uint32_t, (_eqp)->eq_u32[1],	\
		    uint32_t, (_eqp)->eq_u32[0]);	\
		\
		/*	\
		 * Make sure that previous writes to the qword have	\
		 * completed.  This should be cheaper than a barrier	\
		 * just after the last write below.	\
		 */	\
		bus_space_barrier((_esbp)->esb_tag, (_esbp)->esb_handle,	\
		    (_offset), sizeof (efx_qword_t),	\
		    BUS_SPACE_BARRIER_WRITE);	\
		bus_space_write_stream_4((_esbp)->esb_tag,	\
		    (_esbp)->esb_handle,	\
		    (_offset), (_eqp)->eq_u32[0]);	\
		/*	\
		 * The last dword must be written last, so barrier	\
		 * the entire qword to ensure that neither the	\
		 * preceding nor the following writes are reordered.	\
		 */	\
		bus_space_barrier((_esbp)->esb_tag, (_esbp)->esb_handle,	\
		    (_offset), sizeof (efx_qword_t),	\
		    BUS_SPACE_BARRIER_WRITE);	\
		bus_space_write_stream_4((_esbp)->esb_tag,	\
		    (_esbp)->esb_handle,	\
		    (_offset) + 4, (_eqp)->eq_u32[1]);	\
		\
		SFXGE_BAR_UNLOCK(_esbp);	\
	_NOTE(CONSTANTCONDITION)	\
	} while (B_FALSE)
#endif

/*
 * Guarantee 64-bit aligned 64-bit writes to the write-combined BAR mapping
 * (required by the PIO hardware).
 */
#define	EFSYS_BAR_WC_WRITEQ(_esbp, _offset, _eqp)	\
	do {	\
		_NOTE(CONSTANTCONDITION)	\
		KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_qword_t)),	\
		    ("not power of 2 aligned"));	\
		\
		(void) (_esbp);	\
		\
		/* FIXME: Perform a 64-bit write */	\
		KASSERT(0, ("not implemented"));	\
		\
	_NOTE(CONSTANTCONDITION)	\
	} while (B_FALSE)

#if defined(SFXGE_USE_BUS_SPACE_8)
#define	EFSYS_BAR_WRITEO(_esbp, _offset, _eop, _lock)	\
	do {	\
		_NOTE(CONSTANTCONDITION)	\
		KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_oword_t)),	\
		    ("not power of 2 aligned"));	\
		\
		_NOTE(CONSTANTCONDITION)	\
		if (_lock)	\
			SFXGE_BAR_LOCK(_esbp);	\
		\
		EFSYS_PROBE5(bar_writeo, unsigned int, (_offset),	\
		    uint32_t, (_eop)->eo_u32[3],	\
		    uint32_t, (_eop)->eo_u32[2],	\
		    uint32_t, (_eop)->eo_u32[1],	\
		    uint32_t, (_eop)->eo_u32[0]);	\
		\
		/*	\
		 * Make sure that previous writes to the oword have	\
		 * completed.  This should be cheaper than a barrier	\
		 * just after the last write below.	\
		 */	\
		bus_space_barrier((_esbp)->esb_tag, (_esbp)->esb_handle,	\
		    (_offset), sizeof (efx_oword_t),	\
		    BUS_SPACE_BARRIER_WRITE);	\
		bus_space_write_stream_8((_esbp)->esb_tag,	\
		    (_esbp)->esb_handle,	\
		    (_offset), (_eop)->eo_u64[0]);	\
		/*	\
		 * The last qword must be written last, so barrier	\
		 * the entire oword to ensure that neither the	\
		 * preceding nor the following writes are reordered.	\
		 */	\
		bus_space_barrier((_esbp)->esb_tag, (_esbp)->esb_handle,	\
		    (_offset), sizeof (efx_oword_t),	\
		    BUS_SPACE_BARRIER_WRITE);	\
		bus_space_write_stream_8((_esbp)->esb_tag,	\
		    (_esbp)->esb_handle,	\
		    (_offset) + 8, (_eop)->eo_u64[1]);	\
		\
		_NOTE(CONSTANTCONDITION)	\
		if (_lock)	\
			SFXGE_BAR_UNLOCK(_esbp);	\
	_NOTE(CONSTANTCONDITION)	\
	} while (B_FALSE)

#else
#define	EFSYS_BAR_WRITEO(_esbp, _offset, _eop, _lock)	\
	do {	\
		_NOTE(CONSTANTCONDITION)	\
		KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_oword_t)),	\
		    ("not power of 2 aligned"));	\
		\
		_NOTE(CONSTANTCONDITION)	\
		if (_lock)	\
			SFXGE_BAR_LOCK(_esbp);	\
		\
		EFSYS_PROBE5(bar_writeo, unsigned int, (_offset),	\
		    uint32_t, (_eop)->eo_u32[3],	\
		    uint32_t, (_eop)->eo_u32[2],	\
		    uint32_t, (_eop)->eo_u32[1],	\
		    uint32_t, (_eop)->eo_u32[0]);	\
		\
		/*	\
		 * Make sure that previous writes to the oword have	\
		 * completed.  This should be cheaper than a barrier	\
		 * just after the last write below.	\
		 */	\
		bus_space_barrier((_esbp)->esb_tag, (_esbp)->esb_handle,	\
		    (_offset), sizeof (efx_oword_t),	\
		    BUS_SPACE_BARRIER_WRITE);	\
		bus_space_write_stream_4((_esbp)->esb_tag,	\
		    (_esbp)->esb_handle,	\
		    (_offset), (_eop)->eo_u32[0]);	\
		bus_space_write_stream_4((_esbp)->esb_tag,	\
		    (_esbp)->esb_handle,	\
		    (_offset) + 4, (_eop)->eo_u32[1]);	\
		bus_space_write_stream_4((_esbp)->esb_tag,	\
		    (_esbp)->esb_handle,	\
		    (_offset) + 8, (_eop)->eo_u32[2]);	\
		/*	\
		 * The last dword must be written last, so barrier	\
		 * the entire oword to ensure that neither the	\
		 * preceding nor the following writes are reordered.	\
		 */	\
		bus_space_barrier((_esbp)->esb_tag, (_esbp)->esb_handle,	\
		    (_offset), sizeof (efx_oword_t),	\
		    BUS_SPACE_BARRIER_WRITE);	\
		bus_space_write_stream_4((_esbp)->esb_tag,	\
		    (_esbp)->esb_handle,	\
		    (_offset) + 12, (_eop)->eo_u32[3]);	\
		\
		_NOTE(CONSTANTCONDITION)	\
		if (_lock)	\
			SFXGE_BAR_UNLOCK(_esbp);	\
	_NOTE(CONSTANTCONDITION)	\
	} while (B_FALSE)
#endif

/* Use the standard octo-word write for doorbell writes */
#define	EFSYS_BAR_DOORBELL_WRITEO(_esbp, _offset, _eop)	\
	do {	\
		EFSYS_BAR_WRITEO((_esbp), (_offset), (_eop), B_FALSE);	\
	_NOTE(CONSTANTCONDITION)	\
	} while (B_FALSE)
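
/*
 * Usage sketch (editor's illustration): post a descriptor-queue doorbell.
 * EFSYS_BAR_DOORBELL_WRITEO passes B_FALSE, so the oword is written without
 * taking the BAR lock; serialising doorbell writes against other unlocked
 * accesses to the same register is left to the caller.  "esbp" and
 * "doorbell_offset" are hypothetical.
 *
 *	efx_oword_t oword;
 *
 *	EFSYS_BAR_DOORBELL_WRITEO(esbp, doorbell_offset, &oword);
 */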

/* SPIN */

#define	EFSYS_SPIN(_us)	\
	do {	\
		DELAY(_us);	\
	_NOTE(CONSTANTCONDITION)	\
	} while (B_FALSE)

#define	EFSYS_SLEEP	EFSYS_SPIN

/* BARRIERS */

#define	EFSYS_MEM_READ_BARRIER()	rmb()
#define	EFSYS_PIO_WRITE_BARRIER()

/* DMA SYNC */
#define	EFSYS_DMA_SYNC_FOR_KERNEL(_esmp, _offset, _size)	\
	do {	\
		bus_dmamap_sync((_esmp)->esm_tag,	\
		    (_esmp)->esm_map,	\
		    BUS_DMASYNC_POSTREAD);	\
	_NOTE(CONSTANTCONDITION)	\
	} while (B_FALSE)

#define	EFSYS_DMA_SYNC_FOR_DEVICE(_esmp, _offset, _size)	\
	do {	\
		bus_dmamap_sync((_esmp)->esm_tag,	\
		    (_esmp)->esm_map,	\
		    BUS_DMASYNC_PREWRITE);	\
	_NOTE(CONSTANTCONDITION)	\
	} while (B_FALSE)

/* TIMESTAMP */

typedef clock_t	efsys_timestamp_t;

#define	EFSYS_TIMESTAMP(_usp)	\
	do {	\
		clock_t now;	\
		\
		now = ticks;	\
		*(_usp) = now * hz / 1000000;	\
	_NOTE(CONSTANTCONDITION)	\
	} while (B_FALSE)

/* KMEM */

#define	EFSYS_KMEM_ALLOC(_esip, _size, _p)	\
	do {	\
		(_esip) = (_esip);	\
		/*	\
		 * The macro is used in non-sleepable contexts, for	\
		 * example, while holding a mutex.	\
		 */	\
		(_p) = malloc((_size), M_SFXGE, M_NOWAIT|M_ZERO);	\
	_NOTE(CONSTANTCONDITION)	\
	} while (B_FALSE)

#define	EFSYS_KMEM_FREE(_esip, _size, _p)	\
	do {	\
		(void) (_esip);	\
		(void) (_size);	\
		free((_p), M_SFXGE);	\
	_NOTE(CONSTANTCONDITION)	\
	} while (B_FALSE)
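
/*
 * Usage sketch (editor's illustration): the KMEM wrappers allocate with
 * M_NOWAIT, so the allocation can fail and the result must be checked.
 * "esip" and "struct foo" are hypothetical.
 *
 *	struct foo *descp;
 *
 *	EFSYS_KMEM_ALLOC(esip, sizeof (*descp), descp);
 *	if (descp == NULL)
 *		return (ENOMEM);
 *	...
 *	EFSYS_KMEM_FREE(esip, sizeof (*descp), descp);
 */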

/* LOCK */

typedef struct efsys_lock_s {
	struct mtx	lock;
	char		lock_name[SFXGE_LOCK_NAME_MAX];
} efsys_lock_t;

#define	SFXGE_EFSYS_LOCK_INIT(_eslp, _ifname, _label)	\
	do {	\
		efsys_lock_t *__eslp = (_eslp);	\
		\
		snprintf((__eslp)->lock_name,	\
		    sizeof((__eslp)->lock_name),	\
		    "%s:%s", (_ifname), (_label));	\
		mtx_init(&(__eslp)->lock, (__eslp)->lock_name,	\
		    NULL, MTX_DEF);	\
	} while (B_FALSE)
#define	SFXGE_EFSYS_LOCK_DESTROY(_eslp)	\
	mtx_destroy(&(_eslp)->lock)
#define	SFXGE_EFSYS_LOCK(_eslp)	\
	mtx_lock(&(_eslp)->lock)
#define	SFXGE_EFSYS_UNLOCK(_eslp)	\
	mtx_unlock(&(_eslp)->lock)
#define	SFXGE_EFSYS_LOCK_ASSERT_OWNED(_eslp)	\
	mtx_assert(&(_eslp)->lock, MA_OWNED)

typedef int efsys_lock_state_t;

#define	EFSYS_LOCK_MAGIC	0x000010c4

#define	EFSYS_LOCK(_lockp, _state)	\
	do {	\
		SFXGE_EFSYS_LOCK(_lockp);	\
		(_state) = EFSYS_LOCK_MAGIC;	\
	_NOTE(CONSTANTCONDITION)	\
	} while (B_FALSE)

#define	EFSYS_UNLOCK(_lockp, _state)	\
	do {	\
		if ((_state) != EFSYS_LOCK_MAGIC)	\
			KASSERT(B_FALSE, ("not locked"));	\
		SFXGE_EFSYS_UNLOCK(_lockp);	\
	_NOTE(CONSTANTCONDITION)	\
	} while (B_FALSE)
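
/*
 * Usage sketch (editor's illustration): the common code brackets critical
 * sections with EFSYS_LOCK/EFSYS_UNLOCK; the state word only records that
 * the lock was taken and is checked again on release.  "lockp" is
 * hypothetical.
 *
 *	efsys_lock_state_t state;
 *
 *	EFSYS_LOCK(lockp, state);
 *	(access state shared with the common code)
 *	EFSYS_UNLOCK(lockp, state);
 */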

/* STAT */

typedef uint64_t	efsys_stat_t;

#define	EFSYS_STAT_INCR(_knp, _delta)	\
	do {	\
		*(_knp) += (_delta);	\
	_NOTE(CONSTANTCONDITION)	\
	} while (B_FALSE)

#define	EFSYS_STAT_DECR(_knp, _delta)	\
	do {	\
		*(_knp) -= (_delta);	\
	_NOTE(CONSTANTCONDITION)	\
	} while (B_FALSE)

#define	EFSYS_STAT_SET(_knp, _val)	\
	do {	\
		*(_knp) = (_val);	\
	_NOTE(CONSTANTCONDITION)	\
	} while (B_FALSE)

#define	EFSYS_STAT_SET_QWORD(_knp, _valp)	\
	do {	\
		*(_knp) = le64toh((_valp)->eq_u64[0]);	\
	_NOTE(CONSTANTCONDITION)	\
	} while (B_FALSE)

#define	EFSYS_STAT_SET_DWORD(_knp, _valp)	\
	do {	\
		*(_knp) = le32toh((_valp)->ed_u32[0]);	\
	_NOTE(CONSTANTCONDITION)	\
	} while (B_FALSE)

#define	EFSYS_STAT_INCR_QWORD(_knp, _valp)	\
	do {	\
		*(_knp) += le64toh((_valp)->eq_u64[0]);	\
	_NOTE(CONSTANTCONDITION)	\
	} while (B_FALSE)

#define	EFSYS_STAT_SUBR_QWORD(_knp, _valp)	\
	do {	\
		*(_knp) -= le64toh((_valp)->eq_u64[0]);	\
	_NOTE(CONSTANTCONDITION)	\
	} while (B_FALSE)

/* ERR */

extern void sfxge_err(efsys_identifier_t *, unsigned int,
    uint32_t, uint32_t);

#if EFSYS_OPT_DECODE_INTR_FATAL
#define	EFSYS_ERR(_esip, _code, _dword0, _dword1)	\
	do {	\
		sfxge_err((_esip), (_code), (_dword0), (_dword1));	\
	_NOTE(CONSTANTCONDITION)	\
	} while (B_FALSE)
#endif

/* ASSERT */

#define	EFSYS_ASSERT(_exp) do {	\
	if (!(_exp))	\
		panic("%s", #_exp);	\
	} while (0)

#define	EFSYS_ASSERT3(_x, _op, _y, _t) do {	\
	const _t __x = (_t)(_x);	\
	const _t __y = (_t)(_y);	\
	if (!(__x _op __y))	\
		panic("assertion failed at %s:%u", __FILE__, __LINE__);	\
	} while (0)

#define	EFSYS_ASSERT3U(_x, _op, _y)	EFSYS_ASSERT3(_x, _op, _y, uint64_t)
#define	EFSYS_ASSERT3S(_x, _op, _y)	EFSYS_ASSERT3(_x, _op, _y, int64_t)
#define	EFSYS_ASSERT3P(_x, _op, _y)	EFSYS_ASSERT3(_x, _op, _y, uintptr_t)

/* ROTATE */

#define	EFSYS_HAS_ROTL_DWORD 0

#ifdef __cplusplus
}
#endif

#endif	/* _SYS_EFSYS_H */