/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
 */
#ifndef _INLINE_RELOC_H
#define _INLINE_RELOC_H

#include <sys/types.h>
#include <rtld.h>
#include <debug.h>

/*
 * Generic relative relocation function.
 */
inline static ulong_t
/* LINTED */
/* ARGSUSED4 */
_elf_reloc_relative(ulong_t rbgn, ulong_t base, Rt_map *lmp, APlist **textrel,
    int add)
{
	mmapobj_result_t *mpp;
	ulong_t roffset;

	roffset = ((M_RELOC *)rbgn)->r_offset;
	roffset += base;

	/*
	 * If this relocation is against an address that is not associated with
	 * a mapped segment, fall back to the generic relocation loop to
	 * collect the associated error.
	 */
	if ((mpp = find_segment((caddr_t)roffset, lmp)) == NULL)
		return (0);

	/*
	 * If this relocation is against a segment that does not provide write
	 * access, set the write permission for all non-writable mappings.
	 */
	if (((mpp->mr_prot & PROT_WRITE) == 0) && textrel &&
	    ((set_prot(lmp, mpp, 1) == 0) ||
	    (aplist_append(textrel, mpp, AL_CNT_TEXTREL) == NULL)))
		return (0);

	/*
	 * Perform a base address update. This simple operation is required
	 * for updating .plt relocations in preparation for lazy binding.
	 */
#if defined(__x86)
	if (add) {
		*((ulong_t *)roffset) += base;
		return (1);
	}
#endif
	/*
	 * Perform the actual relocation. Note, for backward compatibility,
	 * SPARC relocations are added to the offset contents (there was a time
	 * when the offset was used to contain the addend, rather than using
	 * the addend itself).
	 */
#if defined(__sparc)
	*((ulong_t *)roffset) += base + ((M_RELOC *)rbgn)->r_addend;
#elif defined(__amd64)
	*((ulong_t *)roffset) = base + ((M_RELOC *)rbgn)->r_addend;
#else
	*((ulong_t *)roffset) += base;
#endif
	return (1);
}
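
/*
 * Illustrative sketch (not part of the original source): for an amd64
 * RELATIVE Rela entry the update above amounts to
 *
 *	*(ulong_t *)(base + rel->r_offset) = base + rel->r_addend;
 *
 * so, assuming an object loaded at base 0x7f0000000000 with a relocation
 * record carrying r_offset 0x201018 and r_addend 0x1040 (example values
 * only), the word at 0x7f0000201018 would be set to 0x7f0000001040.
 * i386 uses Rel records with the addend stored implicitly at the offset,
 * which is why the default case preserves the existing contents with "+=".
 */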

/*
 * When a generic relocation loop realizes that it's dealing with relative
 * relocations, but no DT_RELCOUNT .dynamic tag is present, this tighter loop
 * is entered as an optimization.
 */
inline static ulong_t
/* LINTED */
elf_reloc_relative(ulong_t rbgn, ulong_t rend, ulong_t rsize, ulong_t base,
    Rt_map *lmp, APlist **textrel, int add)
{
	uchar_t rtype;

	do {
		if (_elf_reloc_relative(rbgn, base, lmp, textrel, add) == 0)
			break;

		rbgn += rsize;
		if (rbgn >= rend)
			break;

		/*
		 * Make sure the next type is a relative relocation.
		 */
		rtype = ELF_R_TYPE(((M_RELOC *)rbgn)->r_info, M_MACH);

	} while (rtype == M_R_RELATIVE);

	return (rbgn);
}
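
/*
 * Usage sketch (an assumption about the caller, not lifted from elf_reloc()
 * itself): a generic relocation loop that notices a run of M_R_RELATIVE
 * entries could hand the run to the helper above and resume where it stops:
 *
 *	if (ELF_R_TYPE(((M_RELOC *)rbgn)->r_info, M_MACH) == M_R_RELATIVE)
 *		rbgn = elf_reloc_relative(rbgn, rend, rsize, base, lmp,
 *		    textrel, add);
 *
 * The return value is the address of the first record that is beyond rend,
 * is not M_R_RELATIVE, or failed in _elf_reloc_relative(), so the caller's
 * own loop can pick up from that record and collect any error.
 */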

/*
 * This is the tightest loop for RELATIVE relocations for those objects built
 * with the DT_RELACOUNT .dynamic entry.
 */
inline static ulong_t
/* LINTED */
elf_reloc_relative_count(ulong_t rbgn, ulong_t rcount, ulong_t rsize,
    ulong_t base, Rt_map *lmp, APlist **textrel, int add)
{
	for (; rcount; rcount--) {
		if (_elf_reloc_relative(rbgn, base, lmp, textrel, add) == 0)
			break;

		rbgn += rsize;
	}
	return (rbgn);
}
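
/*
 * Usage sketch (illustrative only): when the link-editor has recorded the
 * number of leading RELATIVE relocations in a DT_RELACOUNT (or DT_RELCOUNT)
 * .dynamic entry, a caller can dispense with the per-entry type check:
 *
 *	rbgn = elf_reloc_relative_count(rbgn, relacount, rsize, base, lmp,
 *	    textrel, add);
 *
 * where relacount is a hypothetical variable holding the DT_RELACOUNT value.
 * As above, the returned address is where a generic relocation loop should
 * continue with the remaining, non-RELATIVE entries.
 */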

/*
 * Determine, from a symbol's Syminfo information, whether a symbol reference
 * is deferred. This routine is called from elf_reloc() as part of processing
 * an object's relocations.
 */
inline static int
/* LINTED */
is_sym_deferred(ulong_t rbgn, ulong_t base, Rt_map *lmp, APlist **textrel,
    Syminfo *sip, ulong_t sndx)
{
	Syminfo *sipe;

	/*
	 * ldd(1), by default, sets LD_DEFERRED to force deferred dependency
	 * processing. ldd -D disables LD_DEFERRED, which allows ld.so.1's
	 * default action of skipping deferred dependencies.
	 */
	if (rtld_flags & RT_FL_DEFERRED)
		return (0);

	/* LINTED */
	sipe = (Syminfo *)((char *)sip + (sndx * SYMINENT(lmp)));
	if (sipe->si_flags & SYMINFO_FLG_DEFERRED) {
		/*
		 * This .plt relocation should be skipped at this time, as
		 * deferred references are only processed when the associated
		 * function is explicitly called.
		 *
		 * On i386 and amd64 platforms the relocation offset needs
		 * adjusting to add this object's base address. If the object
		 * has already been relocated without RTLD_NOW, then this
		 * update will have already been carried out. However, if this
		 * is an initial RTLD_NOW relocation pass, this relocation
		 * offset needs updating now.
		 */
#if defined(__x86)
		if ((FLAGS(lmp) & FLG_RT_RELOCED) == 0)
			(void) _elf_reloc_relative(rbgn, base, lmp, textrel, 1);
#endif
		return (1);
	}
	return (0);
}
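
/*
 * Call-site sketch (an assumption about the shape of the caller, not a copy
 * of elf_reloc()): while walking an object's relocation records, a caller
 * that has located the object's Syminfo table could test each .plt entry and
 * skip those that are deferred:
 *
 *	if (sip && is_sym_deferred(rbgn, base, lmp, &textrel, sip, sndx))
 *		continue;
 *
 * where sip is the object's Syminfo table and sndx is the symbol index taken
 * from the relocation's r_info field.
 */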

#endif /* _INLINE_RELOC_H */