/illumos-gate/usr/src/cmd/rcm_daemon/common/
rcm_script.c  (all hits in script_init()):
    365  script_info_t *rsi;                                    (local)
    372  module->rsi = NULL;
    380  rsi = (script_info_t *)rcmscript_calloc(1, sizeof (script_info_t));
    381  rsi->script_full_name = (char *)rcmscript_calloc(1, len);
    383  rsi->module = module;
    384  rcm_init_queue(&rsi->drreq_q);
    386  (void) mutex_init(&rsi->channel_lock, USYNC_THREAD, NULL);
    388  (void) snprintf(rsi->script_full_name, len, "%s%s", script_path,
    390  rsi->script_name = strrchr(rsi->script_full_name, '/') + 1;
    392  (void) mutex_lock(&rsi->channel_lock);
    [all …]

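The script_init() hits show the idiom rcm_script.c uses to name a script: snprintf() joins the directory and the file name, then script_name is simply pointed at the byte after the last '/'. A minimal, stand-alone C sketch of that idiom; join_script_path and its parameters are illustrative, not the daemon's own helpers:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/*
 * Join a directory (assumed to end in '/') and a file name, and return
 * a pointer to the basename inside the joined string -- the same
 * snprintf()/strrchr() pattern as lines 388 and 390 above.
 */
static char *
join_script_path(const char *dir, const char *file, char **basename_out)
{
    size_t len = strlen(dir) + strlen(file) + 1;
    char *full = calloc(1, len);
    char *slash;

    if (full == NULL)
        return (NULL);
    (void) snprintf(full, len, "%s%s", dir, file);
    slash = strrchr(full, '/');
    *basename_out = (slash != NULL) ? slash + 1 : full;
    return (full);
}
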
/illumos-gate/usr/src/lib/libc/amd64/gen/
memcmp.S:
    72   movzbl (%rsi), %ecx
    79   lea 1 (%rsi), %rsi
    103  mov (%rsi), %rax
    110  lea 8 (%rsi), %rsi
    138  mov (%rsi), %rax
    139  mov 8 (%rsi), %r8
    140  mov 16 (%rsi), %r9
    141  mov 24 (%rsi), %r10
    155  lea 32 (%rsi), %rsi
    186  movzbl (%rsi), %ecx
    [all …]

strcpy.S:
    45   and $0xfffffffffffffff0, %rsi      /* force rsi 16 byte align */
    51   pcmpeqb (%rsi), %xmm0              /* check 16 bytes in src for null */
    73   pcmpeqb 16(%rsi), %xmm0            /* check next 16 bytes in src for a null */
    89   mov (%rsi, %r9), %rdx
    91   mov 8(%rsi, %r9), %rdx
    120  lea 16(%r9, %rsi), %rsi
    122  and $0xfffffffffffffff0, %rsi      /* force rsi 16 byte align */
    152  movdqa (%rsi), %xmm1               /* fetch 16 bytes from src string */
    154  add $16, %rsi
    156  pcmpeqb (%rsi), %xmm0              /* check 16 bytes in src for a null */
    [all …]

strlen.S:
    41   mov %rdi, %rsi                     /* keep original %rdi value */
    42   mov %rsi, %rcx
    53   and $0xfffffffffffffff0, %rsi
    55   pcmpeqb (%rsi), %xmm0
    56   lea 16(%rdi), %rsi
    62   sub %rcx, %rsi                     /* no null, adjust to next 16-byte boundary */
    67   pcmpeqb (%rsi), %xmm0              /* look for null bytes */
    70   add $16, %rsi                      /* prepare to search next 16 bytes */
    74   pcmpeqb (%rsi), %xmm0
    76   add $16, %rsi
    [all …]

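The hits in strlen.S (and the strcpy.S/strcmp.S entries nearby) show the same SSE2 pattern: mask %rsi down to a 16-byte boundary, compare a whole aligned block against a zero xmm register with pcmpeqb, and advance 16 bytes per iteration. A rough C-with-intrinsics sketch of that scan; sse2_strlen_sketch is an illustrative name, __builtin_ctz assumes GCC/Clang, and reading the aligned block before the start of the string is only tolerable because an aligned 16-byte load never crosses a page, which is what the assembly relies on:

#include <emmintrin.h>   /* SSE2: _mm_cmpeq_epi8, _mm_movemask_epi8 */
#include <stddef.h>
#include <stdint.h>

static size_t
sse2_strlen_sketch(const char *s)
{
    const __m128i zero = _mm_setzero_si128();
    /* align down, as "and $0xfffffffffffffff0, %rsi" does */
    const char *p = (const char *)((uintptr_t)s & ~(uintptr_t)15);
    unsigned mask = (unsigned)_mm_movemask_epi8(
        _mm_cmpeq_epi8(_mm_load_si128((const __m128i *)p), zero));

    /* ignore zero-byte matches that precede the real start of s */
    mask &= ~0u << (s - p);
    while (mask == 0) {
        p += 16;                           /* "add $16, %rsi" */
        mask = (unsigned)_mm_movemask_epi8(
            _mm_cmpeq_epi8(_mm_load_si128((const __m128i *)p), zero));
    }
    /* lowest set bit marks the first NUL within the 16-byte block */
    return ((size_t)((p + __builtin_ctz(mask)) - s));
}
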
proc64_support.S:
    72   # rdi = cpuid function, rsi = out_reg addr, rdx = cache index(fn 4)
    77   mov %eax,(%rsi)
    78   mov %ebx,0x4(%rsi)
    79   mov %ecx,0x8(%rsi)
    80   mov %edx,0xc(%rsi)
    100  # rdi = l1_cache_size, rsi = l2_cache_size, rdx = largest_level_cache
    106  mov %rsi,.amd64cache2(%rip)
    107  shr $1, %rsi
    108  mov %rsi,.amd64cache2half(%rip)

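Line 72 of proc64_support.S documents the helper's contract: %rdi carries the CPUID function, %rsi the address of a four-word output buffer, %rdx the cache index for leaf 4; lines 77-80 then store %eax..%edx at consecutive 4-byte offsets. A hedged C equivalent using GCC/Clang's <cpuid.h>; cpuid_to_buf is an illustrative name, not the libc entry point:

#include <cpuid.h>    /* __cpuid_count() macro, GCC/Clang */
#include <stdint.h>

static void
cpuid_to_buf(uint32_t func, uint32_t cache_index, uint32_t out[4])
{
    unsigned int a, b, c, d;

    __cpuid_count(func, cache_index, a, b, c, d);
    out[0] = a;    /* mov %eax,(%rsi)    */
    out[1] = b;    /* mov %ebx,0x4(%rsi) */
    out[2] = c;    /* mov %ecx,0x8(%rsi) */
    out[3] = d;    /* mov %edx,0xc(%rsi) */
}
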
wsncmp.S:
    41   cmpq %rdi,%rsi                     / same string?
    48   cmpl (%rsi),%eax
    56   cmpl 4(%rsi),%eax
    64   cmpl 8(%rsi),%eax
    72   cmpl 12(%rsi),%eax
    75   addq $16,%rsi
    85   addq $4,%rsi
    87   addq $4,%rsi
    89   addq $4,%rsi
    91   subl (%rsi),%eax                   / return value is (*s1 - *--s2)

strcmp.S:
    73   movlpd (%rsi), %xmm2
    75   movhpd 8(%rsi), %xmm2
    87   add $16, %rsi                      /* prepare to search next 16 bytes */
    97   and $0xfffffffffffffff0, %rsi      /* force %rsi to be 16 byte aligned */
    108  xchg %rsi, %rdi
    123  movdqa (%rsi), %xmm1
    148  movdqa (%rsi, %rcx), %xmm1
    163  movdqa (%rsi, %rcx), %xmm1
    187  movdqa (%rsi), %xmm1
    219  movdqa (%rsi, %rcx), %xmm1
    [all …]

strcat.S:
    91   testq $7, %rsi                     / if %rsi not quadword aligned
    95   movq (%rsi), %rdx                  / move 1 quadword from (%rsi) to %rdx
    98   addq $8, %rsi                      / next quadword
    108  subq $8, %rsi                      / post-incremented
    111  movb (%rsi), %dl                   / %dl = a byte in (%rsi)
    115  incq %rsi                          / next byte
    117  testq $7, %rsi                     / if %rsi not word aligned
    119  jmp .L5                            / goto .L5 (%rsi word aligned)

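strcat.S and strncat.S advance %rsi a byte at a time only until it is 8-byte aligned, then scan a quadword per iteration for the terminating NUL. The usual way to test a 64-bit word for a zero byte is the borrow/high-bit trick sketched below; this is the generic technique, and the exact constants and branch structure in the illumos sources may differ:

#include <stdbool.h>
#include <stdint.h>

/*
 * True if any byte of v is 0x00:
 *   (v - 0x01..01) borrows into the high bit of every byte that was zero,
 *   & ~v discards bytes that already had their high bit set,
 *   & 0x80..80 keeps only the per-byte "was zero" flags.
 */
static bool
quadword_has_nul(uint64_t v)
{
    return (((v - 0x0101010101010101ULL) & ~v &
        0x8080808080808080ULL) != 0);
}
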
strncat.S:
    96   testq $7, %rsi                     / if %rsi not quadword aligned
    102  movq (%rsi), %r11                  / move 1 quadword from (%rsi) to %r11
    105  addq $8, %rsi                      / next quadword
    118  subq $8, %rsi                      / post-incremented
    124  movb (%rsi), %r11b                 / %r11b = a byte in (%rsi)
    127  incq %rsi                          / next byte
    135  / %rsi not aligned
    138  movb (%rsi), %r11b                 / %r11b = a byte in (%rsi)
    142  incq %rsi                          / next byte

/illumos-gate/usr/src/uts/intel/ml/
copy.S:
    154  cmpq postbootkernelbase(%rip), %rsi    /* %rsi = to */
    209  cmpq postbootkernelbase(%rip), %rsi    /* %rsi = to */
    235  orq %rsi, %r10
    249  COPY_LOOP_INIT(%rdi, %rsi, %rdx)
    250  2: COPY_LOOP_BODY(%rdi, %rsi, %rdx)
    270  cmpq postbootkernelbase(%rip), %rsi    /* %rsi = to */
    294  addq %rdx, %rsi
    394  mov %rcx, -0x48(%rsi)
    397  mov %r10, -0x40(%rsi)
    400  mov %r8, -0x38(%rsi)
    [all …]

ddi_i86_asm.S:
    41   movq %rsi, %rdx
    48   movzbq (%rsi), %rax
    64   movq %rsi, %rdx
    71   movzwq (%rsi), %rax
    87   movq %rsi, %rdx
    93   movl (%rsi), %eax
    118  movq %rsi, %rdx
    124  movb %dl, (%rsi)
    141  movq %rsi, %rdx
    147  movw %dx, (%rsi)
    [all …]

i86_subr.S:
    74   movq %gs:CPU_THREAD, %rsi
    76   movq %rdi, T_ONFAULT(%rsi)         /* jumpbuf in t_onfault */
    77   movq %rdx, T_LOFAULT(%rsi)         /* catch_fault in t_lofault */
    82   movq %gs:CPU_THREAD, %rsi
    83   movq T_ONFAULT(%rsi), %rdi         /* address of save area */
    85   movq %rax, T_ONFAULT(%rsi)         /* turn off onfault */
    86   movq %rax, T_LOFAULT(%rsi)         /* turn off lofault */
    92   movq %gs:CPU_THREAD, %rsi
    94   movq %rax, T_ONFAULT(%rsi)         /* turn off onfault */
    95   movq %rax, T_LOFAULT(%rsi)         /* turn off lofault */
    [all …]

float.S:
    80   STTS(%rsi)                         /* trap on next fpu touch */
    91   movq FPU_CTX_FPU_REGS(%rdi), %rsi  /* fpu_regs.kfpu_u.kfpu_xs ptr */
    92   xsave (%rsi)
    93   STTS(%rsi)                         /* trap on next fpu touch */
    103  movq FPU_CTX_FPU_REGS(%rdi), %rsi  /* fpu_regs.kfpu_u.kfpu_xs ptr */
    104  xsaveopt (%rsi)
    105  STTS(%rsi)                         /* trap on next fpu touch */
    139  STTS(%rsi)                         /* trap on next fpu touch */
    150  movq FPU_CTX_FPU_REGS(%rdi), %rsi  /* fpu_regs.kfpu_u.kfpu_xs ptr */
    151  xsave (%rsi)
    [all …]

sseblk.S:
    97   cmpq $BLOCKSIZE, %rsi              /* size must be at least BLOCKSIZE */
    99   testq $BLOCKMASK, %rsi             /* .. and be a multiple of BLOCKSIZE */
    101  shrq $BLOCKSHIFT, %rsi
    111  9: ZERO_LOOP_BODY_XMM(%rdi, %rsi)
    225  4: COPY_LOOP_BODY_XMM(%rdi, %rsi, %ecx)
    227  COPY_LOOP_FINI_XMM(%rsi)
    241  addq %rsi, %rdi
    242  negq %rsi
    244  movnti %rax, (%rdi, %rsi)
    245  movnti %rax, 8(%rdi, %rsi)
    [all …]

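sseblk.S validates that the length in %rsi is a BLOCKSIZE multiple, then zeroes or copies with non-temporal movnti stores, using the add/neg trick (lines 241-242) so %rsi counts up toward zero as both index and loop test. A small hedged sketch of the non-temporal store part with the matching intrinsic; ntzero_sketch is illustrative and leaves out the kernel-only setup around the real routine:

#include <emmintrin.h>   /* _mm_stream_si64 (movnti), _mm_sfence */
#include <stddef.h>

/*
 * Zero len bytes at dst with non-temporal 8-byte stores.
 * Assumes dst is 8-byte aligned and len is a multiple of 16,
 * mirroring the BLOCKSIZE/BLOCKMASK checks at lines 97-99 above.
 */
static void
ntzero_sketch(void *dst, size_t len)
{
    long long *p = dst;
    size_t i;

    for (i = 0; i < len / 8; i += 2) {
        _mm_stream_si64(&p[i], 0);         /* movnti %rax, (%rdi, %rsi)  */
        _mm_stream_si64(&p[i + 1], 0);     /* movnti %rax, 8(%rdi, %rsi) */
    }
    _mm_sfence();    /* fence the weakly ordered stores before returning */
}
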
hypersubr.S:
    118  movq %rsi, %rdi                    /* arg 1 */
    126  movq %rsi, %rdi                    /* arg 1 */
    127  movq %rdx, %rsi                    /* arg 2 */
    135  movq %rsi, %rdi                    /* arg 1 */
    136  movq %rdx, %rsi                    /* arg 2 */
    145  movq %rsi, %rdi                    /* arg 1 */
    146  movq %rdx, %rsi                    /* arg 2 */
    156  movq %rsi, %rdi                    /* arg 1 */
    157  movq %rdx, %rsi                    /* arg 2 */

/illumos-gate/usr/src/uts/intel/io/vmm/amd/
svm_support.S:
    81   movq %rsi, SVMSTK_RSI(%rsp)
    88   movq SCTX_R8(%rsi), %r8
    89   movq SCTX_R9(%rsi), %r9
    90   movq SCTX_R10(%rsi), %r10
    91   movq SCTX_R11(%rsi), %r11
    92   movq SCTX_R12(%rsi), %r12
    93   movq SCTX_R13(%rsi), %r13
    94   movq SCTX_R14(%rsi), %r14
    95   movq SCTX_R15(%rsi), %r15
    96   movq SCTX_RBP(%rsi), %rbp
    [all …]

/illumos-gate/usr/src/test/os-tests/tests/saveargs/testmatch/
data.S:
    51   movq %rsi, -0x10(%rbp)
    62   movq %rsi,-0x10(%rbp)
    74   movq %rsi,-0x10(%rbp)
    85   movq %rsi,-0x8(%rbp)
    94   movq %rsi,-0x8(%rbp)
    108  movq %rsi,-0x8(%rbp)
    118  movq %rsi,-0x10(%rbp)
    131  movq %rsi,-0x10(%rbp)
    144  movq %rsi,-0x10(%rbp)
    153  pushq %rsi
    [all …]

/illumos-gate/usr/src/lib/libmvec/amd64/src/
__vsqrtf.S:
    39   / %rsi = x
    65   movups (%rsi),%xmm0
    66   addq %r9,%rsi
    79   movss (%rsi),%xmm0
    80   addq %rdx,%rsi
    93   movss (%rsi),%xmm0
    94   addq %rdx,%rsi
    95   movss (%rsi),%xmm1
    96   addq %rdx,%rsi
    97   movss (%rsi),%xmm2
    [all …]

/illumos-gate/usr/src/cmd/mdb/intel/amd64/kmdb/
kaif_invoke.S:
    74   cmpq $6, %rsi
    85   movq %rsi, %r12
    91   1: decq %rsi
    92   movq (%rdx, %rsi, 8), %r9
    93   movq %r9, (%rsp, %rsi, 8)
    94   cmpq $6, %rsi
    107  shlq $3, %rsi
    108  addq %rsi, %r9
    115  cp2arg: movq 0x08(%rdi), %rsi

kmdb_asmutil.S:
    43   movq %rsi, %rax
    75   movl %eax, (%rsi)
    76   movl %edx, 4(%rsi)
    91   movl (%rsi), %eax
    92   movl 4(%rsi), %edx
    130  cmpq $4, %rsi
    132  cmpq $2, %rsi
    149  cmpq $4, %rsi
    151  cmpq $2, %rsi

/illumos-gate/usr/src/cmd/sgs/rtld/amd64/
boot.S:
    106  movq %rsp,%rsi
    108  movq $EB_ARGV,0(%rsi)
    112  movq %rax,8(%rsi)
    114  movq $EB_ENVP,16(%rsi)
    122  movq %rdi,24(%rsi)
    129  movq $EB_AUXV,32(%rsi)
    131  movq %rdi,40(%rsi)
    133  movq $EB_NULL,48(%rsi)
    136  movq %rsi, %rdi
    144  movq (%rbx),%rsi

boot_elf.S:
    361  movq %rsi, ORIG_RSI(%rbp)
    394  leaq SPLAREGOFF(%rbp), %rsi        / %rsi = &La_amd64_regs
    396  movq %rdi, 0(%rsi)                 / la_rsp
    398  movq %rdi, 8(%rsi)                 / la_rbp
    400  movq %rdi, 16(%rsi)                / la_rdi
    402  movq %rdi, 24(%rsi)                / la_rsi
    404  movq %rdi, 32(%rsi)                / la_rdx
    406  movq %rdi, 40(%rsi)                / la_rcx
    408  movq %rdi, 48(%rsi)                / la_r8
    410  movq %rdi, 56(%rsi)                / la_r9
    [all …]

/illumos-gate/usr/src/common/crypto/md5/amd64/
md5_amd64.pl:
    50   $code .= " mov 0*4(%rsi), %r10d /* (NEXT STEP) X[0] */\n" if ($pos == -1);
    60   mov $k_next*4(%rsi),%r10d          /* (NEXT STEP) X[$k_next] */
    77   $code .= " mov 1*4(%rsi), %r10d /* (NEXT STEP) X[1] */\n" if ($pos == -1);
    88   mov $k_next*4(%rsi),%r10d          /* (NEXT STEP) X[$k_next] */
    106  $code .= " mov 5*4(%rsi), %r10d /* (NEXT STEP) X[5] */\n" if ($pos == -1);
    113  mov $k_next*4(%rsi),%r10d          /* (NEXT STEP) X[$k_next] */
    131  $code .= " mov 0*4(%rsi), %r10d /* (NEXT STEP) X[0] */\n" if ($pos == -1);
    143  mov $k_next*4(%rsi),%r10d          /* (NEXT STEP) X[$k_next] */
    185  lea (%rsi,%rdx), %rdi              / rdi = end
    198  cmp %rdi, %rsi                     / cmp end with ptr
    [all …]

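md5_amd64.pl is a code generator: each emitted step prefetches the next message word X[k] from the block at (%rsi) ("(NEXT STEP)") while the current step's arithmetic is still in flight. The arithmetic being unrolled is the standard MD5 step from RFC 1321; a C sketch of the round-1 form for reference (md5_ff and rotl32 are illustrative names, and the T[i] and shift constants come from the RFC, not from this script):

#include <stdint.h>

static inline uint32_t
rotl32(uint32_t v, unsigned s)
{
    return ((v << s) | (v >> (32 - s)));
}

/* One MD5 round-1 step: a = b + ((a + F(b,c,d) + X[k] + T[i]) <<< s) */
static inline uint32_t
md5_ff(uint32_t a, uint32_t b, uint32_t c, uint32_t d,
    uint32_t xk, uint32_t ti, unsigned s)
{
    uint32_t f = (b & c) | (~b & d);       /* F(b, c, d) */

    return (b + rotl32(a + f + xk + ti, s));
}
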
/illumos-gate/usr/src/common/crypto/aes/amd64/
aes_amd64.S:
    339  xor tab_0(%rsi), p1; \
    343  xor tab_2(%rsi), p3; \
    349  xor tab_0(%rsi), p2; \
    353  xor tab_2(%rsi), p4; \
    359  xor tab_0(%rsi), p3; \
    363  xor tab_2(%rsi), p1; \
    369  xor tab_0(%rsi), p4; \
    373  xor tab_2(%rsi), p2; \
    393  xor tab_0(%rsi), p1; \
    397  xor tab_2(%rsi), p3; \
    [all …]

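In aes_amd64.S, %rsi carries the table base used by the round macros: each output word of a round is assembled by XOR-ing one entry from each of the four 256-entry tables (tab_0..tab_3), selected by one byte of the state, plus the round-key word. A generic sketch of one such column of a table-driven AES round; aes_round_column is an illustrative name, the table contents are omitted, and the exact byte/rotation conventions depend on how the macros pack the state, so treat this as the indexing pattern only:

#include <stdint.h>

/* Four pre-rotated 256-entry lookup tables (contents not shown here). */
extern const uint32_t tab_0[256], tab_1[256], tab_2[256], tab_3[256];

/*
 * One output column of a table-driven AES round: four table lookups,
 * one per state byte, XOR-ed together with the round-key word rk.
 */
static uint32_t
aes_round_column(uint32_t s0, uint32_t s1, uint32_t s2, uint32_t s3,
    uint32_t rk)
{
    return (tab_0[s0 & 0xff] ^
        tab_1[(s1 >> 8) & 0xff] ^
        tab_2[(s2 >> 16) & 0xff] ^
        tab_3[(s3 >> 24) & 0xff] ^ rk);
}
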
/illumos-gate/usr/src/common/bignum/amd64/
bignum_amd64_asm.S:
    248  movq 0(%rsi), %rax                 / rax = a[0]
    250  movq 8(%rsi), %r11                 / prefetch a[1]
    261  movq 16(%rsi), %r11                / prefetch a[2]
    272  movq 24(%rsi), %r11                / prefetch a[3]
    283  movq 32(%rsi), %r11                / prefetch a[4]
    294  movq 40(%rsi), %r11                / prefetch a[5]
    305  movq 48(%rsi), %r11                / prefetch a[6]
    316  movq 56(%rsi), %r11                / prefetch a[7]
    335  addq $64, %rsi
    343  movq 0(%rsi), %rax
    [all …]

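The bignum routine walks a[] eight 64-bit digits per unrolled pass, loading the next digit into %r11 while the current 64x64->128 multiply retires, then bumps %rsi by 64 and repeats. The digit-level operation being unrolled is a multiply-accumulate with carry; a compact C sketch using GCC/Clang's unsigned __int128 (big_mul_add_sketch is an illustrative name; the assembly uses mulq/adcq and manual unrolling instead):

#include <stddef.h>
#include <stdint.h>

/*
 * r[0..len-1] += a[0..len-1] * digit; returns the carry out of the top.
 * One loop iteration is what each unrolled step above performs.
 */
static uint64_t
big_mul_add_sketch(uint64_t *r, const uint64_t *a, size_t len,
    uint64_t digit)
{
    uint64_t carry = 0;
    size_t i;

    for (i = 0; i < len; i++) {
        unsigned __int128 t =
            (unsigned __int128)a[i] * digit + r[i] + carry;
        r[i] = (uint64_t)t;
        carry = (uint64_t)(t >> 64);
    }
    return (carry);
}
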