/freebsd/contrib/arm-optimized-routines/string/aarch64/

strrchr.S
     86   addp  vhas_nul1.16b, vhas_nul1.16b, vhas_nul2.16b   // 256->128
     87   addp  vhas_chr1.16b, vhas_chr1.16b, vhas_chr2.16b   // 256->128
     88   addp  vend1.16b, vhas_nul1.16b, vhas_chr1.16b       // 128->64
    112   addp  vhas_chr1.16b, vhas_chr1.16b, vhas_chr2.16b   // 256->128
    113   addp  vend1.16b, vend1.16b, vhas_chr1.16b           // 128->64
    122   addp  vhas_nul1.16b, vhas_nul1.16b, vhas_nul2.16b
    123   addp  vhas_nul1.16b, vhas_nul1.16b, vhas_nul1.16b

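The "256->128" / "128->64" comments here (and in the cortex-strings copies further down) all refer to the same idiom: two 16-byte per-byte comparison masks are weighted with a repeating {0x01,0x04,0x10,0x40} pattern, then two pairwise byte adds compress all 32 flags into one 64-bit syndrome in which input byte n maps to bit 2n. A minimal C sketch with NEON intrinsics (illustrative names, not the FreeBSD code, which also interleaves a NUL mask and uses rbit+clz instead of a trailing-zero count):

    #include <arm_neon.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Return the index of the first byte equal to c in a 32-byte block,
     * or -1 if none, using the two-step addp narrowing shown above. */
    static int first_match32(const uint8_t *p, uint8_t c)
    {
        uint8x16_t d1 = vld1q_u8(p);
        uint8x16_t d2 = vld1q_u8(p + 16);
        /* 0x40100401 little-endian = bytes 01 04 10 40, repeated. */
        uint8x16_t rep = vreinterpretq_u8_u32(vdupq_n_u32(0x40100401));
        uint8x16_t m1 = vandq_u8(vceqq_u8(d1, vdupq_n_u8(c)), rep);
        uint8x16_t m2 = vandq_u8(vceqq_u8(d2, vdupq_n_u8(c)), rep);
        uint8x16_t v = vpaddq_u8(m1, m2);                 /* 256->128 */
        v = vpaddq_u8(v, v);                              /* 128->64  */
        uint64_t syndrome = vgetq_lane_u64(vreinterpretq_u64_u8(v), 0);
        if (syndrome == 0)
            return -1;                    /* no match in this block */
        return __builtin_ctzll(syndrome) / 2;   /* 2 bits per input byte */
    }

    int main(void)
    {
        uint8_t buf[32] = "the quick brown fox jumps over a";
        printf("%d\n", first_match32(buf, 'q'));   /* prints 4 */
        return 0;
    }

The payoff is that one fmov moves the 64-bit syndrome to a general register, where a single count-leading/trailing-zeros instruction locates the match.
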
memchr.S
     81   addp  vend.16b, vhas_chr1.16b, vhas_chr2.16b   /* 256->128 */
     82   addp  vend.16b, vend.16b, vend.16b             /* 128->64 */
    102   addp  vend.2d, vend.2d, vend.2d
    111   addp  vend.16b, vhas_chr1.16b, vhas_chr2.16b   /* 256->128 */
    112   addp  vend.16b, vend.16b, vend.16b             /* 128->64 */

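Lines 81/82 and 111/112 are the same two-step narrowing, while the lone addp vend.2d at line 102 serves a different purpose: summing the mask's two 64-bit lanes yields a scalar that is nonzero iff any byte matched, a cheap whole-block probe inside the main loop before the exact position is computed. A hedged equivalent in intrinsics:

    #include <arm_neon.h>
    #include <stdint.h>

    /* Nonzero iff any byte of the combined match mask is set; mirrors
     * "addp vend.2d, vend.2d, vend.2d" followed by a scalar test. */
    static int any_match(uint8x16_t mask)
    {
        uint64x2_t v = vreinterpretq_u64_u8(mask);
        return vgetq_lane_u64(vpaddq_u64(v, v), 0) != 0;
    }
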
strchrnul.S
     74   addp  vend1.16b, vhas_chr1.16b, vhas_chr2.16b   // 256->128
     76   addp  vend1.16b, vend1.16b, vend1.16b           // 128->64
     99   addp  vend1.16b, vhas_chr1.16b, vhas_chr2.16b   // 256->128
    100   addp  vend1.16b, vend1.16b, vend1.16b           // 128->64

strchr.S
     82   addp  vend1.16b, vend1.16b, vend2.16b   // 256->128
     84   addp  vend1.16b, vend1.16b, vend2.16b   // 128->64
    109   addp  vend1.16b, vend1.16b, vend2.16b   // 256->128
    110   addp  vend1.16b, vend1.16b, vend2.16b   // 128->64

strlen.S
    181   addp  maskv.16b, datav1.16b, datav2.16b
    182   addp  maskv.16b, maskv.16b, maskv.16b

strrchr-mte.S
    123   addp  vend.16b, vhas_nul.16b, vhas_nul.16b

/freebsd/contrib/cortex-strings/src/aarch64/

strchrnul.S
    103   addp  vend1.16b, vhas_chr1.16b, vhas_chr2.16b   // 256->128
    105   addp  vend1.16b, vend1.16b, vend1.16b           // 128->64
    122   addp  vend1.2d, vend1.2d, vend1.2d
    130   addp  vend1.16b, vhas_chr1.16b, vhas_chr2.16b   // 256->128
    131   addp  vend1.16b, vend1.16b, vend1.16b           // 128->64

memchr.S
    108   addp  vend.16b, vhas_chr1.16b, vhas_chr2.16b   /* 256->128 */
    109   addp  vend.16b, vend.16b, vend.16b             /* 128->64 */
    129   addp  vend.2d, vend.2d, vend.2d
    138   addp  vend.16b, vhas_chr1.16b, vhas_chr2.16b   /* 256->128 */
    139   addp  vend.16b, vend.16b, vend.16b             /* 128->64 */

strchr.S
    117   addp  vend1.16b, vend1.16b, vend2.16b   // 256->128
    119   addp  vend1.16b, vend1.16b, vend2.16b   // 128->64
    136   addp  vend1.2d, vend1.2d, vend1.2d
    148   addp  vend1.16b, vend1.16b, vend2.16b   // 256->128
    149   addp  vend1.16b, vend1.16b, vend2.16b   // 128->64

/freebsd/sys/amd64/sgx/

sgx.c
    773   struct sgx_enclave_add_page *addp)                          in sgx_ioctl_add_page() [argument]
    795   ret = sgx_enclave_find(sc, addp->addr, &enclave);           in sgx_ioctl_add_page()
    812   ret = copyin((void *)addp->secinfo, &secinfo,               in sgx_ioctl_add_page()
    820   ret = copyin((void *)addp->src, tmp_vaddr, PAGE_SIZE);      in sgx_ioctl_add_page()
    843   addr = (addp->addr - vmh->base);                            in sgx_ioctl_add_page()
    864   pginfo.linaddr = (uint64_t)addp->addr;                      in sgx_ioctl_add_page()
    879   ret = sgx_measure_page(sc, enclave->secs_epc_page, epc, addp->mrmask);  in sgx_ioctl_add_page()
    982   struct sgx_enclave_add_page *addp;                          in sgx_ioctl() [local]
   1005   addp = (struct sgx_enclave_add_page *)addr;                 in sgx_ioctl()
   1006   ret = sgx_ioctl_add_page(sc, addp);                         in sgx_ioctl()

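Here addp is not an instruction but a local name for an add-page ioctl request. The hits trace the whole path: sgx_ioctl() casts the ioctl buffer (line 1005) and dispatches to sgx_ioctl_add_page(), which looks up the enclave by address (795), copies the SECINFO attributes (812) and the page contents (820) in from userspace, translates the enclave address to an EPC offset (843, 864), and finally extends the measurement over the chunks selected by mrmask (879). A hypothetical reconstruction of the request layout, inferred only from the dereferences above (the authoritative definition lives in the driver's user header, not here):

    #include <stdint.h>

    /* Sketch, not a copy of the FreeBSD header. */
    struct sgx_enclave_add_page {
        uint64_t addr;      /* enclave linear address of the new page */
        uint64_t src;       /* user pointer to PAGE_SIZE bytes of content */
        uint64_t secinfo;   /* user pointer to the SECINFO record */
        uint16_t mrmask;    /* bitmask of 256-byte chunks to measure */
    };

A 16-bit mrmask fits the hardware model: a 4 KiB page holds sixteen 256-byte chunks, and each set bit asks EEXTEND to fold one chunk into the enclave's measurement.
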
/freebsd/sys/crypto/openssl/aarch64/

poly1305-armv8.S
    763   addp  v22.2d, v22.2d, v22.2d
    765   addp  v19.2d, v19.2d, v19.2d
    767   addp  v23.2d, v23.2d, v23.2d
    769   addp  v20.2d, v20.2d, v20.2d
    771   addp  v21.2d, v21.2d, v21.2d

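These five hits are the horizontal reduction at the end of the two-way vectorized Poly1305 loop: each of the five limb accumulators holds two partial sums, one per 64-bit lane, and addp .2d folds them together before the final carry propagation. A minimal intrinsics sketch of one such fold (illustrative, not the OpenSSL code):

    #include <arm_neon.h>
    #include <stdint.h>

    /* Sum the two 64-bit lanes of a limb accumulator, as
     * "addp v22.2d, v22.2d, v22.2d" does for one limb. */
    static uint64_t fold_lanes(uint64x2_t acc)
    {
        return vgetq_lane_u64(vpaddq_u64(acc, acc), 0);
    }
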
/freebsd/contrib/llvm-project/llvm/lib/Target/X86/

X86InstrAsmAlias.td
    571   // Handle "f{mulp,addp} $op, %st(0)" the same as "f{mulp,addp} $op", since they

/freebsd/contrib/llvm-project/llvm/lib/Target/AArch64/

AArch64InstrInfo.td
   5497   defm ADDP : SIMDThreeSameVector<0, 0b10111, "addp", AArch64addp>;
   6603   defm ADDP : SIMDPairwiseScalarD<0, 0b11011, "addp">;
   7145   // Patterns for addp(addlp(x))) ==> addlv
   7165   // Patterns for addp(addlp(x))) ==> addlv
   9557   // Prefer using the bottom lanes of addp Rn, Rn compared to
   9558   // addp extractlow(Rn), extracthigh(Rn)
   9576   // add(uzp1(X, Y), uzp2(X, Y)) -> addp(X, Y)

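The combine noted at line 9576 is worth spelling out: uzp1 gathers the even-indexed elements of a register pair and uzp2 the odd-indexed ones, so adding the two results element-wise is exactly the pairwise sum that a single ADDP computes, letting the backend fold three instructions into one. A small self-test of the equivalence in NEON intrinsics (my example, not LLVM code):

    #include <arm_neon.h>
    #include <stdint.h>
    #include <string.h>
    #include <assert.h>

    int main(void)
    {
        uint8_t xs[16], ys[16];
        for (int i = 0; i < 16; i++) {
            xs[i] = (uint8_t)(3 * i + 1);
            ys[i] = (uint8_t)(7 * i + 2);
        }
        uint8x16_t x = vld1q_u8(xs), y = vld1q_u8(ys);

        /* add(uzp1(X, Y), uzp2(X, Y)) ... */
        uint8x16_t via_uzp = vaddq_u8(vuzp1q_u8(x, y), vuzp2q_u8(x, y));
        /* ... equals addp(X, Y). */
        uint8x16_t via_addp = vpaddq_u8(x, y);

        assert(memcmp(&via_uzp, &via_addp, sizeof(via_uzp)) == 0);
        return 0;
    }
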
AArch64SVEInstrInfo.td
   3577   defm ADDP_ZPmZ : sve2_int_arith_pred<0b100011, "addp", int_aarch64_sve_addp>;

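ADDP_ZPmZ is the predicated SVE2 pairwise add, reached from C through the ACLE svaddp intrinsics that int_aarch64_sve_addp backs. A minimal sketch, assuming an SVE2-capable toolchain (e.g. -march=armv8-a+sve2):

    #include <arm_sve.h>

    /* Merging form: where pg is inactive, result lanes keep a's values. */
    svuint8_t pairwise_u8(svbool_t pg, svuint8_t a, svuint8_t b)
    {
        return svaddp_u8_m(pg, a, b);
    }
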