/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2010-2016 Solarflare Communications Inc.
 * All rights reserved.
 *
 * This software was developed in part by Philip Paeps under contract for
 * Solarflare Communications, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * The views and conclusions contained in the software and documentation are
 * those of the authors and should not be interpreted as representing official
 * policies, either expressed or implied, of the FreeBSD Project.
 *
 * $FreeBSD$
 */

#ifndef	_SYS_EFSYS_H
#define	_SYS_EFSYS_H

#ifdef	__cplusplus
extern "C" {
#endif

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>
#include <sys/sdt.h>
#include <sys/systm.h>

#include <machine/bus.h>
#include <machine/endian.h>

#define	EFSYS_HAS_UINT64 1
#if defined(__x86_64__)
#define	EFSYS_USE_UINT64 1
#else
#define	EFSYS_USE_UINT64 0
#endif
#define	EFSYS_HAS_SSE2_M128 0
#if _BYTE_ORDER == _BIG_ENDIAN
#define	EFSYS_IS_BIG_ENDIAN 1
#define	EFSYS_IS_LITTLE_ENDIAN 0
#elif _BYTE_ORDER == _LITTLE_ENDIAN
#define	EFSYS_IS_BIG_ENDIAN 0
#define	EFSYS_IS_LITTLE_ENDIAN 1
#endif
#include "efx_types.h"

/* Common code requires this */
#if __FreeBSD_version < 800068
#define	memmove(d, s, l) bcopy(s, d, l)
#endif

#ifndef B_FALSE
#define	B_FALSE	FALSE
#endif
#ifndef B_TRUE
#define	B_TRUE	TRUE
#endif

#ifndef ISP2
#define	ISP2(x)	(((x) & ((x) - 1)) == 0)
#endif

#if defined(__x86_64__) && __FreeBSD_version >= 1000000

#define	SFXGE_USE_BUS_SPACE_8 1

#if !defined(bus_space_read_stream_8)

#define	bus_space_read_stream_8(t, h, o)	\
	bus_space_read_8((t), (h), (o))

#define	bus_space_write_stream_8(t, h, o, v)	\
	bus_space_write_8((t), (h), (o), (v))

#endif

#endif

#define	ENOTACTIVE	EINVAL

/* Memory type to use on FreeBSD */
MALLOC_DECLARE(M_SFXGE);

/* Machine dependent prefetch wrappers */
#if defined(__i386__) || defined(__amd64__)
static __inline void
prefetch_read_many(void *addr)
{

	__asm__(
	    "prefetcht0 (%0)"
	    :
	    : "r" (addr));
}

static __inline void
prefetch_read_once(void *addr)
{

	__asm__(
	    "prefetchnta (%0)"
	    :
	    : "r" (addr));
}
#elif defined(__sparc64__)
static __inline void
prefetch_read_many(void *addr)
{

	__asm__(
	    "prefetch [%0], 0"
	    :
	    : "r" (addr));
}

static __inline void
prefetch_read_once(void *addr)
{

	__asm__(
	    "prefetch [%0], 1"
	    :
	    : "r" (addr));
}
#else
static __inline void
prefetch_read_many(void *addr)
{

}

static __inline void
prefetch_read_once(void *addr)
{

}
#endif

#if defined(__i386__) || defined(__amd64__)
#include <vm/vm.h>
#include <vm/pmap.h>
#endif
static __inline void
sfxge_map_mbuf_fast(bus_dma_tag_t tag, bus_dmamap_t map,
    struct mbuf *m, bus_dma_segment_t *seg)
{
#if defined(__i386__) || defined(__amd64__)
	seg->ds_addr = pmap_kextract(mtod(m, vm_offset_t));
	seg->ds_len = m->m_len;
#else
	int nsegstmp;

	bus_dmamap_load_mbuf_sg(tag, map, m, seg, &nsegstmp, 0);
#endif
}
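
/*
 * The fast path above bypasses bus_dmamap_load_mbuf_sg() on i386/amd64 and
 * resolves the mbuf's physical address directly with pmap_kextract().  This
 * presumes a single contiguous mbuf in wired kernel memory and that the
 * device needs no bounce buffering or IOMMU translation.  Illustrative use
 * only (placeholder tag/map/mbuf names):
 *
 *	bus_dma_segment_t seg;
 *	sfxge_map_mbuf_fast(txq_tag, txq_map, m, &seg);
 *	// seg.ds_addr / seg.ds_len now describe the mbuf data for the device
 */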

/* Code inclusion options */

#define	EFSYS_OPT_NAMES 1

#define	EFSYS_OPT_SIENA 1
#define	EFSYS_OPT_HUNTINGTON 1
#define	EFSYS_OPT_MEDFORD 1
#define	EFSYS_OPT_MEDFORD2 1
#ifdef DEBUG
#define	EFSYS_OPT_CHECK_REG 1
#else
#define	EFSYS_OPT_CHECK_REG 0
#endif

#define	EFSYS_OPT_MCDI 1
#define	EFSYS_OPT_MCDI_LOGGING 0
#define	EFSYS_OPT_MCDI_PROXY_AUTH 0

#define	EFSYS_OPT_MAC_STATS 1

#define	EFSYS_OPT_LOOPBACK 0

#define	EFSYS_OPT_MON_MCDI 0
#define	EFSYS_OPT_MON_STATS 0

#define	EFSYS_OPT_PHY_STATS 1
#define	EFSYS_OPT_BIST 1
#define	EFSYS_OPT_PHY_LED_CONTROL 1
#define	EFSYS_OPT_PHY_FLAGS 0

#define	EFSYS_OPT_VPD 1
#define	EFSYS_OPT_NVRAM 1
#define	EFSYS_OPT_BOOTCFG 0
#define	EFSYS_OPT_IMAGE_LAYOUT 0

#define	EFSYS_OPT_DIAG 0
#define	EFSYS_OPT_RX_SCALE 1
#define	EFSYS_OPT_QSTATS 1
#define	EFSYS_OPT_FILTER 1
#define	EFSYS_OPT_RX_SCATTER 0

#define	EFSYS_OPT_EV_PREFETCH 0

#define	EFSYS_OPT_DECODE_INTR_FATAL 1

#define	EFSYS_OPT_LICENSING 0

#define	EFSYS_OPT_ALLOW_UNCONFIGURED_NIC 0

#define	EFSYS_OPT_RX_PACKED_STREAM 0

#define	EFSYS_OPT_RX_ES_SUPER_BUFFER 0

#define	EFSYS_OPT_TUNNEL 0

#define	EFSYS_OPT_FW_SUBVARIANT_AWARE 0

/* ID */

typedef struct __efsys_identifier_s	efsys_identifier_t;
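
/*
 * The EFSYS_PROBEn() wrappers below map common-code trace points onto the
 * kernel's DTRACE_PROBEn() SDT macros when those are available and compile
 * to nothing otherwise.  Where the kernel only provides probes with fewer
 * arguments, the wider wrappers fall back to the next narrower probe and the
 * trailing arguments are dropped.  Illustrative only (hypothetical probe
 * name and arguments), common code might emit something like
 *
 *	EFSYS_PROBE2(mcdi_timeout, unsigned int, cmd, uint32_t, timeout_us);
 *
 * which compiles away entirely if DTRACE_PROBE is not defined.
 */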

/* PROBE */

#ifndef DTRACE_PROBE

#define	EFSYS_PROBE(_name)

#define	EFSYS_PROBE1(_name, _type1, _arg1)

#define	EFSYS_PROBE2(_name, _type1, _arg1, _type2, _arg2)

#define	EFSYS_PROBE3(_name, _type1, _arg1, _type2, _arg2,	\
	    _type3, _arg3)

#define	EFSYS_PROBE4(_name, _type1, _arg1, _type2, _arg2,	\
	    _type3, _arg3, _type4, _arg4)

#define	EFSYS_PROBE5(_name, _type1, _arg1, _type2, _arg2,	\
	    _type3, _arg3, _type4, _arg4, _type5, _arg5)

#define	EFSYS_PROBE6(_name, _type1, _arg1, _type2, _arg2,	\
	    _type3, _arg3, _type4, _arg4, _type5, _arg5,	\
	    _type6, _arg6)

#define	EFSYS_PROBE7(_name, _type1, _arg1, _type2, _arg2,	\
	    _type3, _arg3, _type4, _arg4, _type5, _arg5,	\
	    _type6, _arg6, _type7, _arg7)

#else /* DTRACE_PROBE */

#define	EFSYS_PROBE(_name)	\
	DTRACE_PROBE(_name)

#define	EFSYS_PROBE1(_name, _type1, _arg1)	\
	DTRACE_PROBE1(_name, _type1, _arg1)

#define	EFSYS_PROBE2(_name, _type1, _arg1, _type2, _arg2)	\
	DTRACE_PROBE2(_name, _type1, _arg1, _type2, _arg2)

#define	EFSYS_PROBE3(_name, _type1, _arg1, _type2, _arg2,	\
	    _type3, _arg3)	\
	DTRACE_PROBE3(_name, _type1, _arg1, _type2, _arg2,	\
	    _type3, _arg3)

#define	EFSYS_PROBE4(_name, _type1, _arg1, _type2, _arg2,	\
	    _type3, _arg3, _type4, _arg4)	\
	DTRACE_PROBE4(_name, _type1, _arg1, _type2, _arg2,	\
	    _type3, _arg3, _type4, _arg4)

#ifdef DTRACE_PROBE5
#define	EFSYS_PROBE5(_name, _type1, _arg1, _type2, _arg2,	\
	    _type3, _arg3, _type4, _arg4, _type5, _arg5)	\
	DTRACE_PROBE5(_name, _type1, _arg1, _type2, _arg2,	\
	    _type3, _arg3, _type4, _arg4, _type5, _arg5)
#else
#define	EFSYS_PROBE5(_name, _type1, _arg1, _type2, _arg2,	\
	    _type3, _arg3, _type4, _arg4, _type5, _arg5)	\
	DTRACE_PROBE4(_name, _type1, _arg1, _type2, _arg2,	\
	    _type3, _arg3, _type4, _arg4)
#endif

#ifdef DTRACE_PROBE6
#define	EFSYS_PROBE6(_name, _type1, _arg1, _type2, _arg2,	\
	    _type3, _arg3, _type4, _arg4, _type5, _arg5,	\
	    _type6, _arg6)	\
	DTRACE_PROBE6(_name, _type1, _arg1, _type2, _arg2,	\
	    _type3, _arg3, _type4, _arg4, _type5, _arg5,	\
	    _type6, _arg6)
#else
#define	EFSYS_PROBE6(_name, _type1, _arg1, _type2, _arg2,	\
	    _type3, _arg3, _type4, _arg4, _type5, _arg5,	\
	    _type6, _arg6)	\
	EFSYS_PROBE5(_name, _type1, _arg1, _type2, _arg2,	\
	    _type3, _arg3, _type4, _arg4, _type5, _arg5)
#endif

#ifdef DTRACE_PROBE7
#define	EFSYS_PROBE7(_name, _type1, _arg1, _type2, _arg2,	\
	    _type3, _arg3, _type4, _arg4, _type5, _arg5,	\
	    _type6, _arg6, _type7, _arg7)	\
	DTRACE_PROBE7(_name, _type1, _arg1, _type2, _arg2,	\
	    _type3, _arg3, _type4, _arg4, _type5, _arg5,	\
	    _type6, _arg6, _type7, _arg7)
#else
#define	EFSYS_PROBE7(_name, _type1, _arg1, _type2, _arg2,	\
	    _type3, _arg3, _type4, _arg4, _type5, _arg5,	\
	    _type6, _arg6, _type7, _arg7)	\
	EFSYS_PROBE6(_name, _type1, _arg1, _type2, _arg2,	\
	    _type3, _arg3, _type4, _arg4, _type5, _arg5,	\
	    _type6, _arg6)
#endif

#endif /* DTRACE_PROBE */

/* DMA */

typedef uint64_t	efsys_dma_addr_t;

typedef struct efsys_mem_s {
	bus_dma_tag_t		esm_tag;
	bus_dmamap_t		esm_map;
	caddr_t			esm_base;
	efsys_dma_addr_t	esm_addr;
	size_t			esm_size;
} efsys_mem_t;

#define	EFSYS_MEM_SIZE(_esmp)	\
	((_esmp)->esm_size)

#define	EFSYS_MEM_ADDR(_esmp)	\
	((_esmp)->esm_addr)

#define	EFSYS_MEM_IS_NULL(_esmp)	\
	((_esmp)->esm_base == NULL)
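
/*
 * The EFSYS_MEM_{READ,WRITE}{D,Q,O} accessors below operate on DMA memory
 * described by an efsys_mem_t.  Offsets are byte offsets from esm_base and
 * must be naturally aligned to the access width, which the KASSERTs enforce
 * on DEBUG kernels.  Values are moved as raw 32/64-bit words in host byte
 * order; any field and endianness handling is left to the efx_dword_t /
 * efx_qword_t / efx_oword_t macros in efx_types.h.  A rough sketch of the
 * intended usage (placeholder esmp/index names, descriptor population is
 * only an illustration):
 *
 *	efx_qword_t qword;
 *	EFX_POPULATE_QWORD_1(qword, EFX_DWORD_0, 0xdeadbeef);
 *	EFSYS_MEM_WRITEQ(esmp, index * sizeof (efx_qword_t), &qword);
 */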

#define	EFSYS_MEM_ZERO(_esmp, _size)	\
	do {	\
		(void) memset((_esmp)->esm_base, 0, (_size));	\
	\
	_NOTE(CONSTANTCONDITION)	\
	} while (B_FALSE)

#define	EFSYS_MEM_READD(_esmp, _offset, _edp)	\
	do {	\
		uint32_t *addr;	\
	\
	_NOTE(CONSTANTCONDITION)	\
		KASSERT(EFX_IS_P2ALIGNED(size_t, _offset,	\
		    sizeof (efx_dword_t)),	\
		    ("not power of 2 aligned"));	\
	\
		addr = (void *)((_esmp)->esm_base + (_offset));	\
	\
		(_edp)->ed_u32[0] = *addr;	\
	\
		EFSYS_PROBE2(mem_readd, unsigned int, (_offset),	\
		    uint32_t, (_edp)->ed_u32[0]);	\
	\
	_NOTE(CONSTANTCONDITION)	\
	} while (B_FALSE)

#if defined(__x86_64__)
#define	EFSYS_MEM_READQ(_esmp, _offset, _eqp)	\
	do {	\
		uint64_t *addr;	\
	\
	_NOTE(CONSTANTCONDITION)	\
		KASSERT(EFX_IS_P2ALIGNED(size_t, _offset,	\
		    sizeof (efx_qword_t)),	\
		    ("not power of 2 aligned"));	\
	\
		addr = (void *)((_esmp)->esm_base + (_offset));	\
	\
		(_eqp)->eq_u64[0] = *addr;	\
	\
		EFSYS_PROBE3(mem_readq, unsigned int, (_offset),	\
		    uint32_t, (_eqp)->eq_u32[1],	\
		    uint32_t, (_eqp)->eq_u32[0]);	\
	\
	_NOTE(CONSTANTCONDITION)	\
	} while (B_FALSE)
#else
#define	EFSYS_MEM_READQ(_esmp, _offset, _eqp)	\
	do {	\
		uint32_t *addr;	\
	\
	_NOTE(CONSTANTCONDITION)	\
		KASSERT(EFX_IS_P2ALIGNED(size_t, _offset,	\
		    sizeof (efx_qword_t)),	\
		    ("not power of 2 aligned"));	\
	\
		addr = (void *)((_esmp)->esm_base + (_offset));	\
	\
		(_eqp)->eq_u32[0] = *addr++;	\
		(_eqp)->eq_u32[1] = *addr;	\
	\
		EFSYS_PROBE3(mem_readq, unsigned int, (_offset),	\
		    uint32_t, (_eqp)->eq_u32[1],	\
		    uint32_t, (_eqp)->eq_u32[0]);	\
	\
	_NOTE(CONSTANTCONDITION)	\
	} while (B_FALSE)
#endif

#if defined(__x86_64__)
#define	EFSYS_MEM_READO(_esmp, _offset, _eop)	\
	do {	\
		uint64_t *addr;	\
	\
	_NOTE(CONSTANTCONDITION)	\
		KASSERT(EFX_IS_P2ALIGNED(size_t, _offset,	\
		    sizeof (efx_oword_t)),	\
		    ("not power of 2 aligned"));	\
	\
		addr = (void *)((_esmp)->esm_base + (_offset));	\
	\
		(_eop)->eo_u64[0] = *addr++;	\
		(_eop)->eo_u64[1] = *addr;	\
	\
		EFSYS_PROBE5(mem_reado, unsigned int, (_offset),	\
		    uint32_t, (_eop)->eo_u32[3],	\
		    uint32_t, (_eop)->eo_u32[2],	\
		    uint32_t, (_eop)->eo_u32[1],	\
		    uint32_t, (_eop)->eo_u32[0]);	\
	\
	_NOTE(CONSTANTCONDITION)	\
	} while (B_FALSE)
#else
#define	EFSYS_MEM_READO(_esmp, _offset, _eop)	\
	do {	\
		uint32_t *addr;	\
	\
	_NOTE(CONSTANTCONDITION)	\
		KASSERT(EFX_IS_P2ALIGNED(size_t, _offset,	\
		    sizeof (efx_oword_t)),	\
		    ("not power of 2 aligned"));	\
	\
		addr = (void *)((_esmp)->esm_base + (_offset));	\
	\
		(_eop)->eo_u32[0] = *addr++;	\
		(_eop)->eo_u32[1] = *addr++;	\
		(_eop)->eo_u32[2] = *addr++;	\
		(_eop)->eo_u32[3] = *addr;	\
	\
		EFSYS_PROBE5(mem_reado, unsigned int, (_offset),	\
		    uint32_t, (_eop)->eo_u32[3],	\
		    uint32_t, (_eop)->eo_u32[2],	\
		    uint32_t, (_eop)->eo_u32[1],	\
		    uint32_t, (_eop)->eo_u32[0]);	\
	\
	_NOTE(CONSTANTCONDITION)	\
	} while (B_FALSE)
#endif

#define	EFSYS_MEM_WRITED(_esmp, _offset, _edp)	\
	do {	\
		uint32_t *addr;	\
	\
	_NOTE(CONSTANTCONDITION)	\
		KASSERT(EFX_IS_P2ALIGNED(size_t, _offset,	\
		    sizeof (efx_dword_t)),	\
		    ("not power of 2 aligned"));	\
	\
		EFSYS_PROBE2(mem_writed, unsigned int, (_offset),	\
		    uint32_t, (_edp)->ed_u32[0]);	\
	\
		addr = (void *)((_esmp)->esm_base + (_offset));	\
	\
		*addr = (_edp)->ed_u32[0];	\
	\
	_NOTE(CONSTANTCONDITION)	\
	} while (B_FALSE)

#if defined(__x86_64__)
#define	EFSYS_MEM_WRITEQ(_esmp, _offset, _eqp)	\
	do {	\
		uint64_t *addr;	\
	\
	_NOTE(CONSTANTCONDITION)	\
		KASSERT(EFX_IS_P2ALIGNED(size_t, _offset,	\
		    sizeof (efx_qword_t)),	\
		    ("not power of 2 aligned"));	\
	\
		EFSYS_PROBE3(mem_writeq, unsigned int, (_offset),	\
		    uint32_t, (_eqp)->eq_u32[1],	\
		    uint32_t, (_eqp)->eq_u32[0]);	\
	\
		addr = (void *)((_esmp)->esm_base + (_offset));	\
	\
		*addr = (_eqp)->eq_u64[0];	\
	\
	_NOTE(CONSTANTCONDITION)	\
	} while (B_FALSE)

#else
#define	EFSYS_MEM_WRITEQ(_esmp, _offset, _eqp)	\
	do {	\
		uint32_t *addr;	\
	\
	_NOTE(CONSTANTCONDITION)	\
		KASSERT(EFX_IS_P2ALIGNED(size_t, _offset,	\
		    sizeof (efx_qword_t)),	\
		    ("not power of 2 aligned"));	\
	\
		EFSYS_PROBE3(mem_writeq, unsigned int, (_offset),	\
		    uint32_t, (_eqp)->eq_u32[1],	\
		    uint32_t, (_eqp)->eq_u32[0]);	\
	\
		addr = (void *)((_esmp)->esm_base + (_offset));	\
	\
		*addr++ = (_eqp)->eq_u32[0];	\
		*addr = (_eqp)->eq_u32[1];	\
	\
	_NOTE(CONSTANTCONDITION)	\
	} while (B_FALSE)
#endif

#if defined(__x86_64__)
#define	EFSYS_MEM_WRITEO(_esmp, _offset, _eop)	\
	do {	\
		uint64_t *addr;	\
	\
	_NOTE(CONSTANTCONDITION)	\
		KASSERT(EFX_IS_P2ALIGNED(size_t, _offset,	\
		    sizeof (efx_oword_t)),	\
		    ("not power of 2 aligned"));	\
	\
		EFSYS_PROBE5(mem_writeo, unsigned int, (_offset),	\
		    uint32_t, (_eop)->eo_u32[3],	\
		    uint32_t, (_eop)->eo_u32[2],	\
		    uint32_t, (_eop)->eo_u32[1],	\
		    uint32_t, (_eop)->eo_u32[0]);	\
	\
		addr = (void *)((_esmp)->esm_base + (_offset));	\
	\
		*addr++ = (_eop)->eo_u64[0];	\
		*addr = (_eop)->eo_u64[1];	\
	\
	_NOTE(CONSTANTCONDITION)	\
	} while (B_FALSE)
#else
#define	EFSYS_MEM_WRITEO(_esmp, _offset, _eop)	\
	do {	\
		uint32_t *addr;	\
	\
	_NOTE(CONSTANTCONDITION)	\
		KASSERT(EFX_IS_P2ALIGNED(size_t, _offset,	\
		    sizeof (efx_oword_t)),	\
		    ("not power of 2 aligned"));	\
	\
		EFSYS_PROBE5(mem_writeo, unsigned int, (_offset),	\
		    uint32_t, (_eop)->eo_u32[3],	\
		    uint32_t, (_eop)->eo_u32[2],	\
		    uint32_t, (_eop)->eo_u32[1],	\
		    uint32_t, (_eop)->eo_u32[0]);	\
	\
		addr = (void *)((_esmp)->esm_base + (_offset));	\
	\
		*addr++ = (_eop)->eo_u32[0];	\
		*addr++ = (_eop)->eo_u32[1];	\
		*addr++ = (_eop)->eo_u32[2];	\
		*addr = (_eop)->eo_u32[3];	\
	\
	_NOTE(CONSTANTCONDITION)	\
	} while (B_FALSE)
#endif
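
/*
 * The EFSYS_BAR_* accessors below go through bus_space rather than a plain
 * pointer dereference, using the "stream" variants so that register images
 * are not byte-swapped on big-endian hosts.  The dword and oword accessors
 * take a _lock argument so callers that already hold the BAR lock can skip
 * it; the qword accessors always take the lock, which keeps the two 32-bit
 * halves of a 64-bit register access atomic when SFXGE_USE_BUS_SPACE_8 is
 * not available.
 */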

/* BAR */

#define	SFXGE_LOCK_NAME_MAX	16

typedef struct efsys_bar_s {
	struct mtx		esb_lock;
	char			esb_lock_name[SFXGE_LOCK_NAME_MAX];
	bus_space_tag_t		esb_tag;
	bus_space_handle_t	esb_handle;
	int			esb_rid;
	struct resource		*esb_res;
} efsys_bar_t;

#define	SFXGE_BAR_LOCK_INIT(_esbp, _ifname)	\
	do {	\
		snprintf((_esbp)->esb_lock_name,	\
		    sizeof((_esbp)->esb_lock_name),	\
		    "%s:bar", (_ifname));	\
		mtx_init(&(_esbp)->esb_lock, (_esbp)->esb_lock_name,	\
		    NULL, MTX_DEF);	\
	_NOTE(CONSTANTCONDITION)	\
	} while (B_FALSE)
#define	SFXGE_BAR_LOCK_DESTROY(_esbp)	\
	mtx_destroy(&(_esbp)->esb_lock)
#define	SFXGE_BAR_LOCK(_esbp)	\
	mtx_lock(&(_esbp)->esb_lock)
#define	SFXGE_BAR_UNLOCK(_esbp)	\
	mtx_unlock(&(_esbp)->esb_lock)

#define	EFSYS_BAR_READD(_esbp, _offset, _edp, _lock)	\
	do {	\
	_NOTE(CONSTANTCONDITION)	\
		KASSERT(EFX_IS_P2ALIGNED(size_t, _offset,	\
		    sizeof (efx_dword_t)),	\
		    ("not power of 2 aligned"));	\
	\
	_NOTE(CONSTANTCONDITION)	\
		if (_lock)	\
			SFXGE_BAR_LOCK(_esbp);	\
	\
		(_edp)->ed_u32[0] = bus_space_read_stream_4(	\
		    (_esbp)->esb_tag, (_esbp)->esb_handle,	\
		    (_offset));	\
	\
		EFSYS_PROBE2(bar_readd, unsigned int, (_offset),	\
		    uint32_t, (_edp)->ed_u32[0]);	\
	\
	_NOTE(CONSTANTCONDITION)	\
		if (_lock)	\
			SFXGE_BAR_UNLOCK(_esbp);	\
	_NOTE(CONSTANTCONDITION)	\
	} while (B_FALSE)

#if defined(SFXGE_USE_BUS_SPACE_8)
#define	EFSYS_BAR_READQ(_esbp, _offset, _eqp)	\
	do {	\
	_NOTE(CONSTANTCONDITION)	\
		KASSERT(EFX_IS_P2ALIGNED(size_t, _offset,	\
		    sizeof (efx_qword_t)),	\
		    ("not power of 2 aligned"));	\
	\
		SFXGE_BAR_LOCK(_esbp);	\
	\
		(_eqp)->eq_u64[0] = bus_space_read_stream_8(	\
		    (_esbp)->esb_tag, (_esbp)->esb_handle,	\
		    (_offset));	\
	\
		EFSYS_PROBE3(bar_readq, unsigned int, (_offset),	\
		    uint32_t, (_eqp)->eq_u32[1],	\
		    uint32_t, (_eqp)->eq_u32[0]);	\
	\
		SFXGE_BAR_UNLOCK(_esbp);	\
	_NOTE(CONSTANTCONDITION)	\
	} while (B_FALSE)

#define	EFSYS_BAR_READO(_esbp, _offset, _eop, _lock)	\
	do {	\
	_NOTE(CONSTANTCONDITION)	\
		KASSERT(EFX_IS_P2ALIGNED(size_t, _offset,	\
		    sizeof (efx_oword_t)),	\
		    ("not power of 2 aligned"));	\
	\
	_NOTE(CONSTANTCONDITION)	\
		if (_lock)	\
			SFXGE_BAR_LOCK(_esbp);	\
	\
		(_eop)->eo_u64[0] = bus_space_read_stream_8(	\
		    (_esbp)->esb_tag, (_esbp)->esb_handle,	\
		    (_offset));	\
		(_eop)->eo_u64[1] = bus_space_read_stream_8(	\
		    (_esbp)->esb_tag, (_esbp)->esb_handle,	\
		    (_offset) + 8);	\
	\
		EFSYS_PROBE5(bar_reado, unsigned int, (_offset),	\
		    uint32_t, (_eop)->eo_u32[3],	\
		    uint32_t, (_eop)->eo_u32[2],	\
		    uint32_t, (_eop)->eo_u32[1],	\
		    uint32_t, (_eop)->eo_u32[0]);	\
	\
	_NOTE(CONSTANTCONDITION)	\
		if (_lock)	\
			SFXGE_BAR_UNLOCK(_esbp);	\
	_NOTE(CONSTANTCONDITION)	\
	} while (B_FALSE)

#else
#define	EFSYS_BAR_READQ(_esbp, _offset, _eqp)	\
	do {	\
	_NOTE(CONSTANTCONDITION)	\
		KASSERT(EFX_IS_P2ALIGNED(size_t, _offset,	\
		    sizeof (efx_qword_t)),	\
		    ("not power of 2 aligned"));	\
	\
		SFXGE_BAR_LOCK(_esbp);	\
	\
		(_eqp)->eq_u32[0] = bus_space_read_stream_4(	\
		    (_esbp)->esb_tag, (_esbp)->esb_handle,	\
		    (_offset));	\
		(_eqp)->eq_u32[1] = bus_space_read_stream_4(	\
		    (_esbp)->esb_tag, (_esbp)->esb_handle,	\
		    (_offset) + 4);	\
	\
		EFSYS_PROBE3(bar_readq, unsigned int, (_offset),	\
		    uint32_t, (_eqp)->eq_u32[1],	\
		    uint32_t, (_eqp)->eq_u32[0]);	\
	\
		SFXGE_BAR_UNLOCK(_esbp);	\
	_NOTE(CONSTANTCONDITION)	\
	} while (B_FALSE)

#define	EFSYS_BAR_READO(_esbp, _offset, _eop, _lock)	\
	do {	\
	_NOTE(CONSTANTCONDITION)	\
		KASSERT(EFX_IS_P2ALIGNED(size_t, _offset,	\
		    sizeof (efx_oword_t)),	\
		    ("not power of 2 aligned"));	\
	\
	_NOTE(CONSTANTCONDITION)	\
		if (_lock)	\
			SFXGE_BAR_LOCK(_esbp);	\
	\
		(_eop)->eo_u32[0] = bus_space_read_stream_4(	\
		    (_esbp)->esb_tag, (_esbp)->esb_handle,	\
		    (_offset));	\
		(_eop)->eo_u32[1] = bus_space_read_stream_4(	\
		    (_esbp)->esb_tag, (_esbp)->esb_handle,	\
		    (_offset) + 4);	\
		(_eop)->eo_u32[2] = bus_space_read_stream_4(	\
		    (_esbp)->esb_tag, (_esbp)->esb_handle,	\
		    (_offset) + 8);	\
		(_eop)->eo_u32[3] = bus_space_read_stream_4(	\
		    (_esbp)->esb_tag, (_esbp)->esb_handle,	\
		    (_offset) + 12);	\
	\
		EFSYS_PROBE5(bar_reado, unsigned int, (_offset),	\
		    uint32_t, (_eop)->eo_u32[3],	\
		    uint32_t, (_eop)->eo_u32[2],	\
		    uint32_t, (_eop)->eo_u32[1],	\
		    uint32_t, (_eop)->eo_u32[0]);	\
	\
	_NOTE(CONSTANTCONDITION)	\
		if (_lock)	\
			SFXGE_BAR_UNLOCK(_esbp);	\
	_NOTE(CONSTANTCONDITION)	\
	} while (B_FALSE)
#endif

#define	EFSYS_BAR_WRITED(_esbp, _offset, _edp, _lock)	\
	do {	\
	_NOTE(CONSTANTCONDITION)	\
		KASSERT(EFX_IS_P2ALIGNED(size_t, _offset,	\
		    sizeof (efx_dword_t)),	\
		    ("not power of 2 aligned"));	\
	\
	_NOTE(CONSTANTCONDITION)	\
		if (_lock)	\
			SFXGE_BAR_LOCK(_esbp);	\
	\
		EFSYS_PROBE2(bar_writed, unsigned int, (_offset),	\
		    uint32_t, (_edp)->ed_u32[0]);	\
	\
		/*	\
		 * Make sure that previous writes to the dword have	\
		 * been done.  This should be cheaper than a barrier	\
		 * just after the write below.	\
		 */	\
		bus_space_barrier((_esbp)->esb_tag, (_esbp)->esb_handle,	\
		    (_offset), sizeof (efx_dword_t),	\
		    BUS_SPACE_BARRIER_WRITE);	\
		bus_space_write_stream_4((_esbp)->esb_tag,	\
		    (_esbp)->esb_handle,	\
		    (_offset), (_edp)->ed_u32[0]);	\
	\
	_NOTE(CONSTANTCONDITION)	\
		if (_lock)	\
			SFXGE_BAR_UNLOCK(_esbp);	\
	_NOTE(CONSTANTCONDITION)	\
	} while (B_FALSE)

#if defined(SFXGE_USE_BUS_SPACE_8)
#define	EFSYS_BAR_WRITEQ(_esbp, _offset, _eqp)	\
	do {	\
	_NOTE(CONSTANTCONDITION)	\
		KASSERT(EFX_IS_P2ALIGNED(size_t, _offset,	\
		    sizeof (efx_qword_t)),	\
		    ("not power of 2 aligned"));	\
	\
		SFXGE_BAR_LOCK(_esbp);	\
	\
		EFSYS_PROBE3(bar_writeq, unsigned int, (_offset),	\
		    uint32_t, (_eqp)->eq_u32[1],	\
		    uint32_t, (_eqp)->eq_u32[0]);	\
	\
		/*	\
		 * Make sure that previous writes to the qword have	\
		 * been done.  This should be cheaper than a barrier	\
		 * just after the write below.	\
		 */	\
		bus_space_barrier((_esbp)->esb_tag, (_esbp)->esb_handle,	\
		    (_offset), sizeof (efx_qword_t),	\
		    BUS_SPACE_BARRIER_WRITE);	\
		bus_space_write_stream_8((_esbp)->esb_tag,	\
		    (_esbp)->esb_handle,	\
		    (_offset), (_eqp)->eq_u64[0]);	\
	\
		SFXGE_BAR_UNLOCK(_esbp);	\
	_NOTE(CONSTANTCONDITION)	\
	} while (B_FALSE)
#else
#define	EFSYS_BAR_WRITEQ(_esbp, _offset, _eqp)	\
	do {	\
	_NOTE(CONSTANTCONDITION)	\
		KASSERT(EFX_IS_P2ALIGNED(size_t, _offset,	\
		    sizeof (efx_qword_t)),	\
		    ("not power of 2 aligned"));	\
	\
		SFXGE_BAR_LOCK(_esbp);	\
	\
		EFSYS_PROBE3(bar_writeq, unsigned int, (_offset),	\
		    uint32_t, (_eqp)->eq_u32[1],	\
		    uint32_t, (_eqp)->eq_u32[0]);	\
	\
		/*	\
		 * Make sure that previous writes to the qword have	\
		 * been done.  This should be cheaper than a barrier	\
		 * just after the last write below.	\
		 */	\
		bus_space_barrier((_esbp)->esb_tag, (_esbp)->esb_handle,	\
		    (_offset), sizeof (efx_qword_t),	\
		    BUS_SPACE_BARRIER_WRITE);	\
		bus_space_write_stream_4((_esbp)->esb_tag,	\
		    (_esbp)->esb_handle,	\
		    (_offset), (_eqp)->eq_u32[0]);	\
		/*	\
		 * The last dword must be written last, so barrier	\
		 * the entire qword to make sure that neither the	\
		 * write above nor the write below is reordered.	\
		 */	\
		bus_space_barrier((_esbp)->esb_tag, (_esbp)->esb_handle,	\
		    (_offset), sizeof (efx_qword_t),	\
		    BUS_SPACE_BARRIER_WRITE);	\
		bus_space_write_stream_4((_esbp)->esb_tag,	\
		    (_esbp)->esb_handle,	\
		    (_offset) + 4, (_eqp)->eq_u32[1]);	\
	\
		SFXGE_BAR_UNLOCK(_esbp);	\
	_NOTE(CONSTANTCONDITION)	\
	} while (B_FALSE)
#endif

/*
 * Guarantee 64-bit aligned 64-bit writes to a write-combined BAR mapping
 * (required by PIO hardware).
 */
#define	EFSYS_BAR_WC_WRITEQ(_esbp, _offset, _eqp)	\
	do {	\
	_NOTE(CONSTANTCONDITION)	\
		KASSERT(EFX_IS_P2ALIGNED(size_t, _offset,	\
		    sizeof (efx_qword_t)),	\
		    ("not power of 2 aligned"));	\
	\
		(void) (_esbp);	\
	\
		/* FIXME: Perform a 64-bit write */	\
		KASSERT(0, ("not implemented"));	\
	\
	_NOTE(CONSTANTCONDITION)	\
	} while (B_FALSE)
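
/*
 * EFSYS_BAR_WC_WRITEQ above is the hook for 64-bit writes through a
 * write-combined BAR mapping, which the PIO transmit path on EF10-family
 * adapters requires.  In this port it is a stub that only asserts, so it
 * must never be reached at run time; presumably the PIO fast path is simply
 * not enabled here.
 */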

#if defined(SFXGE_USE_BUS_SPACE_8)
#define	EFSYS_BAR_WRITEO(_esbp, _offset, _eop, _lock)	\
	do {	\
	_NOTE(CONSTANTCONDITION)	\
		KASSERT(EFX_IS_P2ALIGNED(size_t, _offset,	\
		    sizeof (efx_oword_t)),	\
		    ("not power of 2 aligned"));	\
	\
	_NOTE(CONSTANTCONDITION)	\
		if (_lock)	\
			SFXGE_BAR_LOCK(_esbp);	\
	\
		EFSYS_PROBE5(bar_writeo, unsigned int, (_offset),	\
		    uint32_t, (_eop)->eo_u32[3],	\
		    uint32_t, (_eop)->eo_u32[2],	\
		    uint32_t, (_eop)->eo_u32[1],	\
		    uint32_t, (_eop)->eo_u32[0]);	\
	\
		/*	\
		 * Make sure that previous writes to the oword have	\
		 * been done.  This should be cheaper than a barrier	\
		 * just after the last write below.	\
		 */	\
		bus_space_barrier((_esbp)->esb_tag, (_esbp)->esb_handle,	\
		    (_offset), sizeof (efx_oword_t),	\
		    BUS_SPACE_BARRIER_WRITE);	\
		bus_space_write_stream_8((_esbp)->esb_tag,	\
		    (_esbp)->esb_handle,	\
		    (_offset), (_eop)->eo_u64[0]);	\
		/*	\
		 * The last qword must be written last, so barrier	\
		 * the entire oword to make sure that neither the	\
		 * write above nor the write below is reordered.	\
		 */	\
		bus_space_barrier((_esbp)->esb_tag, (_esbp)->esb_handle,	\
		    (_offset), sizeof (efx_oword_t),	\
		    BUS_SPACE_BARRIER_WRITE);	\
		bus_space_write_stream_8((_esbp)->esb_tag,	\
		    (_esbp)->esb_handle,	\
		    (_offset) + 8, (_eop)->eo_u64[1]);	\
	\
	_NOTE(CONSTANTCONDITION)	\
		if (_lock)	\
			SFXGE_BAR_UNLOCK(_esbp);	\
	_NOTE(CONSTANTCONDITION)	\
	} while (B_FALSE)

#else
#define	EFSYS_BAR_WRITEO(_esbp, _offset, _eop, _lock)	\
	do {	\
	_NOTE(CONSTANTCONDITION)	\
		KASSERT(EFX_IS_P2ALIGNED(size_t, _offset,	\
		    sizeof (efx_oword_t)),	\
		    ("not power of 2 aligned"));	\
	\
	_NOTE(CONSTANTCONDITION)	\
		if (_lock)	\
			SFXGE_BAR_LOCK(_esbp);	\
	\
		EFSYS_PROBE5(bar_writeo, unsigned int, (_offset),	\
		    uint32_t, (_eop)->eo_u32[3],	\
		    uint32_t, (_eop)->eo_u32[2],	\
		    uint32_t, (_eop)->eo_u32[1],	\
		    uint32_t, (_eop)->eo_u32[0]);	\
	\
		/*	\
		 * Make sure that previous writes to the oword have	\
		 * been done.  This should be cheaper than a barrier	\
		 * just after the last write below.	\
		 */	\
		bus_space_barrier((_esbp)->esb_tag, (_esbp)->esb_handle,	\
		    (_offset), sizeof (efx_oword_t),	\
		    BUS_SPACE_BARRIER_WRITE);	\
		bus_space_write_stream_4((_esbp)->esb_tag,	\
		    (_esbp)->esb_handle,	\
		    (_offset), (_eop)->eo_u32[0]);	\
		bus_space_write_stream_4((_esbp)->esb_tag,	\
		    (_esbp)->esb_handle,	\
		    (_offset) + 4, (_eop)->eo_u32[1]);	\
		bus_space_write_stream_4((_esbp)->esb_tag,	\
		    (_esbp)->esb_handle,	\
		    (_offset) + 8, (_eop)->eo_u32[2]);	\
		/*	\
		 * The last dword must be written last, so barrier	\
		 * the entire oword to make sure that neither the	\
		 * writes above nor the write below are reordered.	\
		 */	\
		bus_space_barrier((_esbp)->esb_tag, (_esbp)->esb_handle,	\
		    (_offset), sizeof (efx_oword_t),	\
		    BUS_SPACE_BARRIER_WRITE);	\
		bus_space_write_stream_4((_esbp)->esb_tag,	\
		    (_esbp)->esb_handle,	\
		    (_offset) + 12, (_eop)->eo_u32[3]);	\
	\
	_NOTE(CONSTANTCONDITION)	\
		if (_lock)	\
			SFXGE_BAR_UNLOCK(_esbp);	\
	_NOTE(CONSTANTCONDITION)	\
	} while (B_FALSE)
#endif

/* Use the standard octo-word write for doorbell writes */
#define	EFSYS_BAR_DOORBELL_WRITEO(_esbp, _offset, _eop)	\
	do {	\
		EFSYS_BAR_WRITEO((_esbp), (_offset), (_eop), B_FALSE);	\
	_NOTE(CONSTANTCONDITION)	\
	} while (B_FALSE)

/* SPIN */

#define	EFSYS_SPIN(_us)	\
	do {	\
		DELAY(_us);	\
	_NOTE(CONSTANTCONDITION)	\
	} while (B_FALSE)

#define	EFSYS_SLEEP	EFSYS_SPIN

/* BARRIERS */

#define	EFSYS_MEM_READ_BARRIER()	rmb()
#define	EFSYS_PIO_WRITE_BARRIER()

/* DMA SYNC */
#define	EFSYS_DMA_SYNC_FOR_KERNEL(_esmp, _offset, _size)	\
	do {	\
		bus_dmamap_sync((_esmp)->esm_tag,	\
		    (_esmp)->esm_map,	\
		    BUS_DMASYNC_POSTREAD);	\
	_NOTE(CONSTANTCONDITION)	\
	} while (B_FALSE)

#define	EFSYS_DMA_SYNC_FOR_DEVICE(_esmp, _offset, _size)	\
	do {	\
		bus_dmamap_sync((_esmp)->esm_tag,	\
		    (_esmp)->esm_map,	\
		    BUS_DMASYNC_PREWRITE);	\
	_NOTE(CONSTANTCONDITION)	\
	} while (B_FALSE)
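
/*
 * Note that the DMA sync wrappers above ignore their _offset and _size
 * arguments and sync the whole map; FreeBSD's bus_dmamap_sync() has no
 * partial-sync interface, so this trades simplicity for some extra copying
 * when bounce buffers happen to be in use.
 */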

/* TIMESTAMP */

typedef clock_t	efsys_timestamp_t;

#define	EFSYS_TIMESTAMP(_usp)	\
	do {	\
		clock_t now;	\
	\
		now = ticks;	\
		*(_usp) = now * hz / 1000000;	\
	_NOTE(CONSTANTCONDITION)	\
	} while (B_FALSE)

/* KMEM */

#define	EFSYS_KMEM_ALLOC(_esip, _size, _p)	\
	do {	\
		(_esip) = (_esip);	\
		/*	\
		 * The macro is used in non-sleepable contexts, for	\
		 * example, holding a mutex.	\
		 */	\
		(_p) = malloc((_size), M_SFXGE, M_NOWAIT|M_ZERO);	\
	_NOTE(CONSTANTCONDITION)	\
	} while (B_FALSE)

#define	EFSYS_KMEM_FREE(_esip, _size, _p)	\
	do {	\
		(void) (_esip);	\
		(void) (_size);	\
		free((_p), M_SFXGE);	\
	_NOTE(CONSTANTCONDITION)	\
	} while (B_FALSE)

/* LOCK */

typedef struct efsys_lock_s {
	struct mtx	lock;
	char		lock_name[SFXGE_LOCK_NAME_MAX];
} efsys_lock_t;

#define	SFXGE_EFSYS_LOCK_INIT(_eslp, _ifname, _label)	\
	do {	\
		efsys_lock_t *__eslp = (_eslp);	\
	\
		snprintf((__eslp)->lock_name,	\
		    sizeof((__eslp)->lock_name),	\
		    "%s:%s", (_ifname), (_label));	\
		mtx_init(&(__eslp)->lock, (__eslp)->lock_name,	\
		    NULL, MTX_DEF);	\
	} while (B_FALSE)
#define	SFXGE_EFSYS_LOCK_DESTROY(_eslp)	\
	mtx_destroy(&(_eslp)->lock)
#define	SFXGE_EFSYS_LOCK(_eslp)	\
	mtx_lock(&(_eslp)->lock)
#define	SFXGE_EFSYS_UNLOCK(_eslp)	\
	mtx_unlock(&(_eslp)->lock)
#define	SFXGE_EFSYS_LOCK_ASSERT_OWNED(_eslp)	\
	mtx_assert(&(_eslp)->lock, MA_OWNED)

typedef int	efsys_lock_state_t;

#define	EFSYS_LOCK_MAGIC	0x000010c4

#define	EFSYS_LOCK(_lockp, _state)	\
	do {	\
		SFXGE_EFSYS_LOCK(_lockp);	\
		(_state) = EFSYS_LOCK_MAGIC;	\
	_NOTE(CONSTANTCONDITION)	\
	} while (B_FALSE)

#define	EFSYS_UNLOCK(_lockp, _state)	\
	do {	\
		if ((_state) != EFSYS_LOCK_MAGIC)	\
			KASSERT(B_FALSE, ("not locked"));	\
		SFXGE_EFSYS_UNLOCK(_lockp);	\
	_NOTE(CONSTANTCONDITION)	\
	} while (B_FALSE)
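
/*
 * The _state argument exists so that ports which need to save and restore
 * interrupt state across EFSYS_LOCK()/EFSYS_UNLOCK() have somewhere to keep
 * it.  Here a mutex is sufficient, so _state only carries a magic value
 * that EFSYS_UNLOCK() checks as a cheap "was the lock really taken?"
 * sanity assertion.
 */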

/* STAT */

typedef uint64_t	efsys_stat_t;

#define	EFSYS_STAT_INCR(_knp, _delta)	\
	do {	\
		*(_knp) += (_delta);	\
	_NOTE(CONSTANTCONDITION)	\
	} while (B_FALSE)

#define	EFSYS_STAT_DECR(_knp, _delta)	\
	do {	\
		*(_knp) -= (_delta);	\
	_NOTE(CONSTANTCONDITION)	\
	} while (B_FALSE)

#define	EFSYS_STAT_SET(_knp, _val)	\
	do {	\
		*(_knp) = (_val);	\
	_NOTE(CONSTANTCONDITION)	\
	} while (B_FALSE)

#define	EFSYS_STAT_SET_QWORD(_knp, _valp)	\
	do {	\
		*(_knp) = le64toh((_valp)->eq_u64[0]);	\
	_NOTE(CONSTANTCONDITION)	\
	} while (B_FALSE)

#define	EFSYS_STAT_SET_DWORD(_knp, _valp)	\
	do {	\
		*(_knp) = le32toh((_valp)->ed_u32[0]);	\
	_NOTE(CONSTANTCONDITION)	\
	} while (B_FALSE)

#define	EFSYS_STAT_INCR_QWORD(_knp, _valp)	\
	do {	\
		*(_knp) += le64toh((_valp)->eq_u64[0]);	\
	_NOTE(CONSTANTCONDITION)	\
	} while (B_FALSE)

#define	EFSYS_STAT_SUBR_QWORD(_knp, _valp)	\
	do {	\
		*(_knp) -= le64toh((_valp)->eq_u64[0]);	\
	_NOTE(CONSTANTCONDITION)	\
	} while (B_FALSE)

/* ERR */

extern void	sfxge_err(efsys_identifier_t *, unsigned int,
		    uint32_t, uint32_t);

#if EFSYS_OPT_DECODE_INTR_FATAL
#define	EFSYS_ERR(_esip, _code, _dword0, _dword1)	\
	do {	\
		sfxge_err((_esip), (_code), (_dword0), (_dword1));	\
	_NOTE(CONSTANTCONDITION)	\
	} while (B_FALSE)
#endif

/* ASSERT */

#define	EFSYS_ASSERT(_exp) do {	\
	if (!(_exp))	\
		panic("%s", #_exp);	\
	} while (0)

#define	EFSYS_ASSERT3(_x, _op, _y, _t) do {	\
	const _t __x = (_t)(_x);	\
	const _t __y = (_t)(_y);	\
	if (!(__x _op __y))	\
		panic("assertion failed at %s:%u", __FILE__, __LINE__);	\
	} while (0)

#define	EFSYS_ASSERT3U(_x, _op, _y)	EFSYS_ASSERT3(_x, _op, _y, uint64_t)
#define	EFSYS_ASSERT3S(_x, _op, _y)	EFSYS_ASSERT3(_x, _op, _y, int64_t)
#define	EFSYS_ASSERT3P(_x, _op, _y)	EFSYS_ASSERT3(_x, _op, _y, uintptr_t)

/* ROTATE */

#define	EFSYS_HAS_ROTL_DWORD 0

#ifdef	__cplusplus
}
#endif

#endif	/* _SYS_EFSYS_H */