
Searched refs:rsi (Results 1 – 25 of 87) sorted by relevance


/titanic_41/usr/src/cmd/rcm_daemon/common/
rcm_script.c
367 script_info_t *rsi; in script_init() local
374 module->rsi = NULL; in script_init()
382 rsi = (script_info_t *)rcmscript_calloc(1, sizeof (script_info_t)); in script_init()
383 rsi->script_full_name = (char *)rcmscript_calloc(1, len); in script_init()
385 rsi->module = module; in script_init()
386 rcm_init_queue(&rsi->drreq_q); in script_init()
388 (void) mutex_init(&rsi->channel_lock, USYNC_THREAD, NULL); in script_init()
390 (void) snprintf(rsi->script_full_name, len, "%s%s", script_path, in script_init()
392 rsi->script_name = strrchr(rsi->script_full_name, '/') + 1; in script_init()
394 (void) mutex_lock(&rsi->channel_lock); in script_init()
[all …]
/titanic_41/usr/src/lib/libc/amd64/gen/
memcmp.s
72 movzbl (%rsi), %ecx
79 lea 1 (%rsi), %rsi
103 mov (%rsi), %rax
110 lea 8 (%rsi), %rsi
138 mov (%rsi), %rax
139 mov 8 (%rsi), %r8
140 mov 16 (%rsi), %r9
141 mov 24 (%rsi), %r10
155 lea 32 (%rsi), %rsi
186 movzbl (%rsi), %ecx
[all …]
strcpy.s
45 and $0xfffffffffffffff0, %rsi /* force rsi 16 byte align */
51 pcmpeqb (%rsi), %xmm0 /* check 16 bytes in src for null */
73 pcmpeqb 16(%rsi), %xmm0 /* check next 16 bytes in src for a null */
89 mov (%rsi, %r9), %rdx
91 mov 8(%rsi, %r9), %rdx
120 lea 16(%r9, %rsi), %rsi
122 and $0xfffffffffffffff0, %rsi /* force rsi 16 byte align */
152 movdqa (%rsi), %xmm1 /* fetch 16 bytes from src string */
154 add $16, %rsi
156 pcmpeqb (%rsi), %xmm0 /* check 16 bytes in src for a null */
[all …]
strlen.s
41 mov %rdi, %rsi /* keep original %rdi value */
42 mov %rsi, %rcx
53 and $0xfffffffffffffff0, %rsi
55 pcmpeqb (%rsi), %xmm0
56 lea 16(%rdi), %rsi
62 sub %rcx, %rsi /* no null, adjust to next 16-byte boundary */
67 pcmpeqb (%rsi), %xmm0 /* look for null bytes */
70 add $16, %rsi /* prepare to search next 16 bytes */
74 pcmpeqb (%rsi), %xmm0
76 add $16, %rsi
[all …]
proc64_support.s
72 # rdi = cpuid function, rsi = out_reg addr, rdx = cache index(fn 4)
77 mov %eax,(%rsi)
78 mov %ebx,0x4(%rsi)
79 mov %ecx,0x8(%rsi)
80 mov %edx,0xc(%rsi)
100 # rdi = l1_cache_size, rsi = l2_cache_size, rdx = largest_level_cache
106 mov %rsi,.amd64cache2(%rip)
107 shr $1, %rsi
108 mov %rsi,.amd64cache2half(%rip)
wsncmp.s
41 cmpq %rdi,%rsi / same string?
48 cmpl (%rsi),%eax
56 cmpl 4(%rsi),%eax
64 cmpl 8(%rsi),%eax
72 cmpl 12(%rsi),%eax
75 addq $16,%rsi
85 addq $4,%rsi
87 addq $4,%rsi
89 addq $4,%rsi
91 subl (%rsi),%eax / return value is (*s1 - *--s2)
strcat.s
91 testq $7, %rsi / if %rsi not quadword aligned
95 movq (%rsi), %rdx / move 1 quadword from (%rsi) to %rdx
98 addq $8, %rsi / next quadword
108 subq $8, %rsi / post-incremented
111 movb (%rsi), %dl / %dl = a byte in (%rsi)
115 incq %rsi / next byte
117 testq $7, %rsi / if %rsi not word aligned
119 jmp .L5 / goto .L5 (%rsi word aligned)
strcmp.s
73 movlpd (%rsi), %xmm2
75 movhpd 8(%rsi), %xmm2
87 add $16, %rsi /* prepare to search next 16 bytes */
97 and $0xfffffffffffffff0, %rsi /* force %rsi to be 16 byte aligned */
108 xchg %rsi, %rdi
123 movdqa (%rsi), %xmm1
148 movdqa (%rsi, %rcx), %xmm1
163 movdqa (%rsi, %rcx), %xmm1
187 movdqa (%rsi), %xmm1
219 movdqa (%rsi, %rcx), %xmm1
[all …]
strncat.s
96 testq $7, %rsi / if %rsi not quadword aligned
102 movq (%rsi), %r11 / move 1 quadword from (%rsi) to %r11
105 addq $8, %rsi / next quadword
118 subq $8, %rsi / post-incremented
124 movb (%rsi), %r11b / %r11b = a byte in (%rsi)
127 incq %rsi / next byte
135 / %rsi not aligned
138 movb (%rsi), %r11b / %r11b = a byte in (%rsi)
142 incq %rsi / next byte
/titanic_41/usr/src/lib/libm/common/m9x/
__fenv_amd64.il
133 cmpeqss (%rsi),%xmm0
139 cmpltss (%rsi),%xmm0
145 cmpless (%rsi),%xmm0
151 cmpunordss (%rsi),%xmm0
157 minss (%rsi),%xmm0
163 maxss (%rsi),%xmm0
169 addss (%rsi),%xmm0
175 subss (%rsi),%xmm0
181 mulss (%rsi),%xmm0
187 divss (%rsi),%xmm0
[all …]
/titanic_41/usr/src/lib/libsaveargs/tests/testmatch/
data.s
51 movq %rsi, -0x10(%rbp)
62 movq %rsi,-0x10(%rbp)
74 movq %rsi,-0x10(%rbp)
85 movq %rsi,-0x8(%rbp)
94 movq %rsi,-0x8(%rbp)
108 movq %rsi,-0x8(%rbp)
118 movq %rsi,-0x10(%rbp)
131 movq %rsi,-0x10(%rbp)
144 movq %rsi,-0x10(%rbp)
153 pushq %rsi
[all …]
/titanic_41/usr/src/uts/intel/ia32/ml/
copy.s
99 cmpq postbootkernelbase(%rip), %rsi /* %rsi = to */
229 cmpq postbootkernelbase(%rip), %rsi /* %rsi = to */
255 orq %rsi, %r10
269 COPY_LOOP_INIT(%rdi, %rsi, %rdx)
270 2: COPY_LOOP_BODY(%rdi, %rsi, %rdx)
371 cmpq postbootkernelbase(%rip), %rsi /* %rsi = to */
395 addq %rdx, %rsi
495 mov %rcx, -0x48(%rsi)
498 mov %r10, -0x40(%rsi)
501 mov %r8, -0x38(%rsi)
[all …]
sseblk.s
132 cmpq $BLOCKSIZE, %rsi /* size must be at least BLOCKSIZE */
134 testq $BLOCKMASK, %rsi /* .. and be a multiple of BLOCKSIZE */
136 shrq $BLOCKSHIFT, %rsi
146 9: ZERO_LOOP_BODY_XMM(%rdi, %rsi)
321 4: COPY_LOOP_BODY_XMM(%rdi, %rsi, %ecx)
323 COPY_LOOP_FINI_XMM(%rsi)
401 addq %rsi, %rdi
402 negq %rsi
404 movnti %rax, (%rdi, %rsi)
405 movnti %rax, 8(%rdi, %rsi)
[all …]
ddi_i86_asm.s
282 movq %rsi, %rdx
289 movzbq (%rsi), %rax
344 movq %rsi, %rdx
351 movzwq (%rsi), %rax
406 movq %rsi, %rdx
412 movl (%rsi), %eax
494 movq %rsi, %rdx
500 movb %dl, (%rsi)
557 movq %rsi, %rdx
563 movw %dx, (%rsi)
[all …]
hypersubr.s
197 movq %rsi, %rdi /* arg 1 */
205 movq %rsi, %rdi /* arg 1 */
206 movq %rdx, %rsi /* arg 2 */
214 movq %rsi, %rdi /* arg 1 */
215 movq %rdx, %rsi /* arg 2 */
224 movq %rsi, %rdi /* arg 1 */
225 movq %rdx, %rsi /* arg 2 */
235 movq %rsi, %rdi /* arg 1 */
236 movq %rdx, %rsi /* arg 2 */
/titanic_41/usr/src/lib/libmvec/amd64/src/
__vsqrtf.S
39 / %rsi = x
65 movups (%rsi),%xmm0
66 addq %r9,%rsi
79 movss (%rsi),%xmm0
80 addq %rdx,%rsi
93 movss (%rsi),%xmm0
94 addq %rdx,%rsi
95 movss (%rsi),%xmm1
96 addq %rdx,%rsi
97 movss (%rsi),%xmm2
[all …]
/titanic_41/usr/src/cmd/mdb/intel/amd64/kmdb/
kaif_invoke.s
76 cmpq $6, %rsi
87 movq %rsi, %r12
93 1: decq %rsi
94 movq (%rdx, %rsi, 8), %r9
95 movq %r9, (%rsp, %rsi, 8)
96 cmpq $6, %rsi
109 shlq $3, %rsi
110 addq %rsi, %r9
117 cp2arg: movq 0x08(%rdi), %rsi
kmdb_asmutil.s
45 movq %rsi, %rax
77 movl %eax, (%rsi)
78 movl %edx, 4(%rsi)
93 movl (%rsi), %eax
94 movl 4(%rsi), %edx
132 cmpq $4, %rsi
134 cmpq $2, %rsi
151 cmpq $4, %rsi
153 cmpq $2, %rsi
/titanic_41/usr/src/cmd/sgs/rtld/amd64/
boot.s
107 movq %rsp,%rsi
109 movq $EB_ARGV,0(%rsi)
113 movq %rax,8(%rsi)
115 movq $EB_ENVP,16(%rsi)
123 movq %rdi,24(%rsi)
130 movq $EB_AUXV,32(%rsi)
132 movq %rdi,40(%rsi)
134 movq $EB_NULL,48(%rsi)
137 movq %rsi, %rdi
145 movq (%rbx),%rsi
boot_elf.s
189 movq %rsi, SPRSIOFF(%rsp)
238 leaq SPLAREGOFF(%rbp), %rsi / %rsi = &La_amd64_regs
240 movq %rdi, 0(%rsi) / la_rsp
242 movq %rdi, 8(%rsi) / la_rbp
244 movq %rdi, 16(%rsi) / la_rdi
246 movq %rdi, 24(%rsi) / la_rsi
248 movq %rdi, 32(%rsi) / la_rdx
250 movq %rdi, 40(%rsi) / la_rcx
252 movq %rdi, 48(%rsi) / la_r8
254 movq %rdi, 56(%rsi) / la_r9
[all …]
/titanic_41/usr/src/common/crypto/md5/amd64/
md5_amd64.pl
50 $code .= " mov 0*4(%rsi), %r10d /* (NEXT STEP) X[0] */\n" if ($pos == -1);
60 mov $k_next*4(%rsi),%r10d /* (NEXT STEP) X[$k_next] */
77 $code .= " mov 1*4(%rsi), %r10d /* (NEXT STEP) X[1] */\n" if ($pos == -1);
88 mov $k_next*4(%rsi),%r10d /* (NEXT STEP) X[$k_next] */
106 $code .= " mov 5*4(%rsi), %r10d /* (NEXT STEP) X[5] */\n" if ($pos == -1);
113 mov $k_next*4(%rsi),%r10d /* (NEXT STEP) X[$k_next] */
131 $code .= " mov 0*4(%rsi), %r10d /* (NEXT STEP) X[0] */\n" if ($pos == -1);
143 mov $k_next*4(%rsi),%r10d /* (NEXT STEP) X[$k_next] */
185 lea (%rsi,%rdx), %rdi / rdi = end
198 cmp %rdi, %rsi / cmp end with ptr
[all …]
/titanic_41/usr/src/common/crypto/aes/amd64/
aes_amd64.s
339 xor tab_0(%rsi), p1; \
343 xor tab_2(%rsi), p3; \
349 xor tab_0(%rsi), p2; \
353 xor tab_2(%rsi), p4; \
359 xor tab_0(%rsi), p3; \
363 xor tab_2(%rsi), p1; \
369 xor tab_0(%rsi), p4; \
373 xor tab_2(%rsi), p2; \
393 xor tab_0(%rsi), p1; \
397 xor tab_2(%rsi), p3; \
[all …]
/titanic_41/usr/src/common/bignum/amd64/
bignum_amd64_asm.s
248 movq 0(%rsi), %rax / rax = a[0]
250 movq 8(%rsi), %r11 / prefetch a[1]
261 movq 16(%rsi), %r11 / prefetch a[2]
272 movq 24(%rsi), %r11 / prefetch a[3]
283 movq 32(%rsi), %r11 / prefetch a[4]
294 movq 40(%rsi), %r11 / prefetch a[5]
305 movq 48(%rsi), %r11 / prefetch a[6]
316 movq 56(%rsi), %r11 / prefetch a[7]
335 addq $64, %rsi
343 movq 0(%rsi), %rax
[all …]
/titanic_41/usr/src/uts/i86xpv/ml/
amd64.il
38 movq %rsi, %rdi / arg 1
44 movq %rsi, %rdi / arg 1
45 movq %rdx, %rsi / arg 2
51 movq %rsi, %rdi / arg 1
52 movq %rdx, %rsi / arg 2
60 movq %rsi, %rdi / arg 1
61 movq %rdx, %rsi / arg 2
69 movq %rsi, %rdi / arg 1
70 movq %rdx, %rsi / arg 2
/titanic_41/usr/src/lib/libc/amd64/sys/
__clock_gettime_sys.s
45 pushq %rsi /* preserve timespec_t ptr */
47 popq %rsi
48 movq %rax, (%rsi)
49 movq %rdx, 8(%rsi)