xref: /freebsd/contrib/llvm-project/compiler-rt/lib/BlocksRuntime/runtime.c (revision 0b57cec536236d46e3dba9bd041533462f33dbb7)
1*0b57cec5SDimitry Andric /*
2*0b57cec5SDimitry Andric  * runtime.c
3*0b57cec5SDimitry Andric  *
4*0b57cec5SDimitry Andric  * Copyright 2008-2010 Apple, Inc. Permission is hereby granted, free of charge,
5*0b57cec5SDimitry Andric  * to any person obtaining a copy of this software and associated documentation
6*0b57cec5SDimitry Andric  * files (the "Software"), to deal in the Software without restriction,
7*0b57cec5SDimitry Andric  * including without limitation the rights to use, copy, modify, merge, publish,
8*0b57cec5SDimitry Andric  * distribute, sublicense, and/or sell copies of the Software, and to permit
9*0b57cec5SDimitry Andric  * persons to whom the Software is furnished to do so, subject to the following
10*0b57cec5SDimitry Andric  * conditions:
11*0b57cec5SDimitry Andric  *
12*0b57cec5SDimitry Andric  * The above copyright notice and this permission notice shall be included in
13*0b57cec5SDimitry Andric  * all copies or substantial portions of the Software.
14*0b57cec5SDimitry Andric  *
15*0b57cec5SDimitry Andric  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16*0b57cec5SDimitry Andric  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17*0b57cec5SDimitry Andric  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18*0b57cec5SDimitry Andric  * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19*0b57cec5SDimitry Andric  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20*0b57cec5SDimitry Andric  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21*0b57cec5SDimitry Andric  * SOFTWARE.
22*0b57cec5SDimitry Andric  *
23*0b57cec5SDimitry Andric  */
24*0b57cec5SDimitry Andric 
25*0b57cec5SDimitry Andric #include "Block_private.h"
26*0b57cec5SDimitry Andric #include <stdio.h>
27*0b57cec5SDimitry Andric #include <stdlib.h>
28*0b57cec5SDimitry Andric #include <string.h>
29*0b57cec5SDimitry Andric #include <stdint.h>
30*0b57cec5SDimitry Andric 
31*0b57cec5SDimitry Andric #include "config.h"
32*0b57cec5SDimitry Andric 
33*0b57cec5SDimitry Andric #ifdef HAVE_AVAILABILITY_MACROS_H
34*0b57cec5SDimitry Andric #include <AvailabilityMacros.h>
35*0b57cec5SDimitry Andric #endif /* HAVE_AVAILABILITY_MACROS_H */
36*0b57cec5SDimitry Andric 
37*0b57cec5SDimitry Andric #ifdef HAVE_TARGET_CONDITIONALS_H
38*0b57cec5SDimitry Andric #include <TargetConditionals.h>
39*0b57cec5SDimitry Andric #endif /* HAVE_TARGET_CONDITIONALS_H */
40*0b57cec5SDimitry Andric 
41*0b57cec5SDimitry Andric #if defined(HAVE_OSATOMIC_COMPARE_AND_SWAP_INT) && defined(HAVE_OSATOMIC_COMPARE_AND_SWAP_LONG)
42*0b57cec5SDimitry Andric 
43*0b57cec5SDimitry Andric #ifdef HAVE_LIBKERN_OSATOMIC_H
44*0b57cec5SDimitry Andric #include <libkern/OSAtomic.h>
45*0b57cec5SDimitry Andric #endif /* HAVE_LIBKERN_OSATOMIC_H */
46*0b57cec5SDimitry Andric 
47*0b57cec5SDimitry Andric #elif defined(__WIN32__) || defined(_WIN32)
48*0b57cec5SDimitry Andric #define _CRT_SECURE_NO_WARNINGS 1
49*0b57cec5SDimitry Andric #include <windows.h>
50*0b57cec5SDimitry Andric 
/* Emulate Darwin's OSAtomicCompareAndSwapLong on Windows using the
 * Win32 Interlocked API.  Returns true if *dst was oldl and was swapped
 * to newl, false otherwise. */
static __inline bool OSAtomicCompareAndSwapLong(long oldl, long newl, long volatile *dst) {
    /* fixme barrier is overkill -- see objc-os.h */
    long original = InterlockedCompareExchange(dst, newl, oldl);
    return (original == oldl);
}
56*0b57cec5SDimitry Andric 
OSAtomicCompareAndSwapInt(int oldi,int newi,int volatile * dst)57*0b57cec5SDimitry Andric static __inline bool OSAtomicCompareAndSwapInt(int oldi, int newi, int volatile *dst) {
58*0b57cec5SDimitry Andric     /* fixme barrier is overkill -- see objc-os.h */
59*0b57cec5SDimitry Andric     int original = InterlockedCompareExchange(dst, newi, oldi);
60*0b57cec5SDimitry Andric     return (original == oldi);
61*0b57cec5SDimitry Andric }
62*0b57cec5SDimitry Andric 
63*0b57cec5SDimitry Andric /*
64*0b57cec5SDimitry Andric  * Check to see if the GCC atomic built-ins are available.  If we're on
65*0b57cec5SDimitry Andric  * a 64-bit system, make sure we have an 8-byte atomic function
66*0b57cec5SDimitry Andric  * available.
67*0b57cec5SDimitry Andric  *
68*0b57cec5SDimitry Andric  */
69*0b57cec5SDimitry Andric 
70*0b57cec5SDimitry Andric #elif defined(HAVE_SYNC_BOOL_COMPARE_AND_SWAP_INT) && defined(HAVE_SYNC_BOOL_COMPARE_AND_SWAP_LONG)
71*0b57cec5SDimitry Andric 
/* Compare-and-swap on a long, built on the GCC/Clang __sync builtin:
 * swaps *dst from oldl to newl atomically and reports whether it did. */
static __inline bool OSAtomicCompareAndSwapLong(long oldl, long newl, long volatile *dst) {
    bool did_swap = __sync_bool_compare_and_swap(dst, oldl, newl);
    return did_swap;
}
75*0b57cec5SDimitry Andric 
OSAtomicCompareAndSwapInt(int oldi,int newi,int volatile * dst)76*0b57cec5SDimitry Andric static __inline bool OSAtomicCompareAndSwapInt(int oldi, int newi, int volatile *dst) {
77*0b57cec5SDimitry Andric   return __sync_bool_compare_and_swap(dst, oldi, newi);
78*0b57cec5SDimitry Andric }
79*0b57cec5SDimitry Andric 
80*0b57cec5SDimitry Andric #else
81*0b57cec5SDimitry Andric #error unknown atomic compare-and-swap primitive
82*0b57cec5SDimitry Andric #endif /* HAVE_OSATOMIC_COMPARE_AND_SWAP_INT && HAVE_OSATOMIC_COMPARE_AND_SWAP_LONG */
83*0b57cec5SDimitry Andric 
84*0b57cec5SDimitry Andric 
85*0b57cec5SDimitry Andric /*
86*0b57cec5SDimitry Andric  * Globals:
87*0b57cec5SDimitry Andric  */
88*0b57cec5SDimitry Andric 
/* isa installed in ordinary heap copies of blocks (swapped under GC). */
static void *_Block_copy_class = _NSConcreteMallocBlock;
/* isa for heap copies that need a finalizer (blocks with C++ dtors under GC). */
static void *_Block_copy_finalizing_class = _NSConcreteMallocBlock;
/* Flag bits OR-ed into a fresh heap block copy (becomes BLOCK_IS_GC under GC). */
static int _Block_copy_flag = BLOCK_NEEDS_FREE;
/* Initial flags for a heap byref: NEEDS_FREE plus refcount of 2 --
 * one reference for the caller, one for the (forwarded) stack copy. */
static int _Byref_flag_initial_value = BLOCK_NEEDS_FREE | 2;

/* Flag bit passed to _Block_copy_internal requesting an initial refcount of one. */
static const int WANTS_ONE = (1 << 16);

/* True once _Block_use_GC has switched the runtime into GC mode. */
static bool isGC = false;
97*0b57cec5SDimitry Andric 
98*0b57cec5SDimitry Andric /*
99*0b57cec5SDimitry Andric  * Internal Utilities:
100*0b57cec5SDimitry Andric  */
101*0b57cec5SDimitry Andric 
102*0b57cec5SDimitry Andric #if 0
/* Compiled out (see surrounding #if 0): latching increment for a
 * long-sized refcount word.  Once the refcount bits reach
 * BLOCK_REFCOUNT_MASK the count "latches" there and is never changed
 * again; otherwise returns the incremented value. */
static unsigned long int latching_incr_long(unsigned long int *where) {
    while (1) {
        unsigned long int old_value = *(volatile unsigned long int *)where;
        if ((old_value & BLOCK_REFCOUNT_MASK) == BLOCK_REFCOUNT_MASK) {
            return BLOCK_REFCOUNT_MASK;
        }
        if (OSAtomicCompareAndSwapLong(old_value, old_value+1, (volatile long int *)where)) {
            return old_value+1;
        }
    }
}
114*0b57cec5SDimitry Andric #endif /* if 0 */
115*0b57cec5SDimitry Andric 
latching_incr_int(int * where)116*0b57cec5SDimitry Andric static int latching_incr_int(int *where) {
117*0b57cec5SDimitry Andric     while (1) {
118*0b57cec5SDimitry Andric         int old_value = *(volatile int *)where;
119*0b57cec5SDimitry Andric         if ((old_value & BLOCK_REFCOUNT_MASK) == BLOCK_REFCOUNT_MASK) {
120*0b57cec5SDimitry Andric             return BLOCK_REFCOUNT_MASK;
121*0b57cec5SDimitry Andric         }
122*0b57cec5SDimitry Andric         if (OSAtomicCompareAndSwapInt(old_value, old_value+1, (volatile int *)where)) {
123*0b57cec5SDimitry Andric             return old_value+1;
124*0b57cec5SDimitry Andric         }
125*0b57cec5SDimitry Andric     }
126*0b57cec5SDimitry Andric }
127*0b57cec5SDimitry Andric 
128*0b57cec5SDimitry Andric #if 0
129*0b57cec5SDimitry Andric static int latching_decr_long(unsigned long int *where) {
130*0b57cec5SDimitry Andric     while (1) {
131*0b57cec5SDimitry Andric         unsigned long int old_value = *(volatile int *)where;
132*0b57cec5SDimitry Andric         if ((old_value & BLOCK_REFCOUNT_MASK) == BLOCK_REFCOUNT_MASK) {
133*0b57cec5SDimitry Andric             return BLOCK_REFCOUNT_MASK;
134*0b57cec5SDimitry Andric         }
135*0b57cec5SDimitry Andric         if ((old_value & BLOCK_REFCOUNT_MASK) == 0) {
136*0b57cec5SDimitry Andric             return 0;
137*0b57cec5SDimitry Andric         }
138*0b57cec5SDimitry Andric         if (OSAtomicCompareAndSwapLong(old_value, old_value-1, (volatile long int *)where)) {
139*0b57cec5SDimitry Andric             return old_value-1;
140*0b57cec5SDimitry Andric         }
141*0b57cec5SDimitry Andric     }
142*0b57cec5SDimitry Andric }
143*0b57cec5SDimitry Andric #endif /* if 0 */
144*0b57cec5SDimitry Andric 
latching_decr_int(int * where)145*0b57cec5SDimitry Andric static int latching_decr_int(int *where) {
146*0b57cec5SDimitry Andric     while (1) {
147*0b57cec5SDimitry Andric         int old_value = *(volatile int *)where;
148*0b57cec5SDimitry Andric         if ((old_value & BLOCK_REFCOUNT_MASK) == BLOCK_REFCOUNT_MASK) {
149*0b57cec5SDimitry Andric             return BLOCK_REFCOUNT_MASK;
150*0b57cec5SDimitry Andric         }
151*0b57cec5SDimitry Andric         if ((old_value & BLOCK_REFCOUNT_MASK) == 0) {
152*0b57cec5SDimitry Andric             return 0;
153*0b57cec5SDimitry Andric         }
154*0b57cec5SDimitry Andric         if (OSAtomicCompareAndSwapInt(old_value, old_value-1, (volatile int *)where)) {
155*0b57cec5SDimitry Andric             return old_value-1;
156*0b57cec5SDimitry Andric         }
157*0b57cec5SDimitry Andric     }
158*0b57cec5SDimitry Andric }
159*0b57cec5SDimitry Andric 
160*0b57cec5SDimitry Andric 
161*0b57cec5SDimitry Andric /*
162*0b57cec5SDimitry Andric  * GC support stub routines:
163*0b57cec5SDimitry Andric  */
164*0b57cec5SDimitry Andric #if 0
165*0b57cec5SDimitry Andric #pragma mark GC Support Routines
166*0b57cec5SDimitry Andric #endif /* if 0 */
167*0b57cec5SDimitry Andric 
168*0b57cec5SDimitry Andric 
/* Default (non-GC) block allocator: a plain malloc.  The initial-count
 * and is-object hints only matter to a collecting allocator and are
 * ignored here.  Callers must check for a NULL result. */
static void *_Block_alloc_default(const unsigned long size, const bool initialCountIsOne, const bool isObject) {
    void *mem = malloc(size);
    return mem;
}
172*0b57cec5SDimitry Andric 
/* Default assignment callout: a plain pointer store, no write barrier. */
static void _Block_assign_default(void *value, void **destptr) {
    destptr[0] = value;
}
176*0b57cec5SDimitry Andric 
/* Default "has refcount" notification: only a collector cares, so in
 * non-GC mode this is deliberately a no-op. */
static void _Block_setHasRefcount_default(const void *ptr, const bool hasRefcount) {
}
179*0b57cec5SDimitry Andric 
/* No-op stand-in installed wherever a callback must exist but must not act. */
static void _Block_do_nothing(const void *aBlock) {
    (void)aBlock;
}
181*0b57cec5SDimitry Andric 
/* Default object-retain hook: without GC/RR support installed there is
 * nothing to retain, so this does nothing for any argument. */
static void _Block_retain_object_default(const void *ptr) {
    if (ptr == NULL)
        return;
}
185*0b57cec5SDimitry Andric 
/* Default object-release hook: without GC/RR support installed there is
 * nothing to release, so this does nothing for any argument. */
static void _Block_release_object_default(const void *ptr) {
    if (ptr == NULL)
        return;
}
189*0b57cec5SDimitry Andric 
/* Default weak-assignment callout: just a raw pointer store; real weak
 * semantics only exist once a collector installs its own version. */
static void _Block_assign_weak_default(const void *ptr, void *dest) {
    void **slot = (void **)dest;
    *slot = (void *)ptr;
}
193*0b57cec5SDimitry Andric 
/* Default memmove callout: defer to the C library (handles overlap). */
static void _Block_memmove_default(void *dst, void *src, unsigned long size) {
    size_t nbytes = (size_t)size;
    memmove(dst, src, nbytes);
}
197*0b57cec5SDimitry Andric 
/* Word-by-word "memmove" used as a stand-in between _Block_use_GC5 and
 * _Block_use_GC: each pointer-sized word is funneled through
 * _Block_assign_default.  Deliberately labelled broken: it copies
 * forward only, and because size is unsigned, a size that is not a
 * multiple of sizeof(void *) wraps past zero instead of terminating. */
static void _Block_memmove_gc_broken(void *dest, void *src, unsigned long size) {
    void **destp = (void **)dest;
    void **srcp = (void **)src;
    while (size) {
        _Block_assign_default(*srcp, destp);
        destp++;
        srcp++;
        size -= sizeof(void *);
    }
}
208*0b57cec5SDimitry Andric 
209*0b57cec5SDimitry Andric /*
210*0b57cec5SDimitry Andric  * GC support callout functions - initially set to stub routines:
211*0b57cec5SDimitry Andric  */
212*0b57cec5SDimitry Andric 
/* GC / retain-release callouts.  These start as the non-GC defaults
 * defined above; _Block_use_GC and _Block_use_RR overwrite them at
 * runtime when the ObjC runtime enables collection or retain/release. */
static void *(*_Block_allocator)(const unsigned long, const bool isOne, const bool isObject) = _Block_alloc_default;
static void (*_Block_deallocator)(const void *) = (void (*)(const void *))free;
static void (*_Block_assign)(void *value, void **destptr) = _Block_assign_default;
static void (*_Block_setHasRefcount)(const void *ptr, const bool hasRefcount) = _Block_setHasRefcount_default;
static void (*_Block_retain_object)(const void *ptr) = _Block_retain_object_default;
static void (*_Block_release_object)(const void *ptr) = _Block_release_object_default;
/* NOTE(review): parameter names here read (dest, ptr) while the default
 * implementation takes (ptr, dest); the types match, names only -- but
 * worth confirming call sites pass (value, destination). */
static void (*_Block_assign_weak)(const void *dest, void *ptr) = _Block_assign_weak_default;
static void (*_Block_memmove)(void *dest, void *src, unsigned long size) = _Block_memmove_default;
221*0b57cec5SDimitry Andric 
222*0b57cec5SDimitry Andric 
223*0b57cec5SDimitry Andric /*
224*0b57cec5SDimitry Andric  * GC support SPI functions - called from ObjC runtime and CoreFoundation:
225*0b57cec5SDimitry Andric  */
226*0b57cec5SDimitry Andric 
227*0b57cec5SDimitry Andric /* Public SPI
228*0b57cec5SDimitry Andric  * Called from objc-auto to turn on GC.
229*0b57cec5SDimitry Andric  * version 3, 4 arg, but changed 1st arg
230*0b57cec5SDimitry Andric  */
_Block_use_GC(void * (* alloc)(const unsigned long,const bool isOne,const bool isObject),void (* setHasRefcount)(const void *,const bool),void (* gc_assign)(void *,void **),void (* gc_assign_weak)(const void *,void *),void (* gc_memmove)(void *,void *,unsigned long))231*0b57cec5SDimitry Andric void _Block_use_GC( void *(*alloc)(const unsigned long, const bool isOne, const bool isObject),
232*0b57cec5SDimitry Andric                     void (*setHasRefcount)(const void *, const bool),
233*0b57cec5SDimitry Andric                     void (*gc_assign)(void *, void **),
234*0b57cec5SDimitry Andric                     void (*gc_assign_weak)(const void *, void *),
235*0b57cec5SDimitry Andric                     void (*gc_memmove)(void *, void *, unsigned long)) {
236*0b57cec5SDimitry Andric 
237*0b57cec5SDimitry Andric     isGC = true;
238*0b57cec5SDimitry Andric     _Block_allocator = alloc;
239*0b57cec5SDimitry Andric     _Block_deallocator = _Block_do_nothing;
240*0b57cec5SDimitry Andric     _Block_assign = gc_assign;
241*0b57cec5SDimitry Andric     _Block_copy_flag = BLOCK_IS_GC;
242*0b57cec5SDimitry Andric     _Block_copy_class = _NSConcreteAutoBlock;
243*0b57cec5SDimitry Andric     /* blocks with ctors & dtors need to have the dtor run from a class with a finalizer */
244*0b57cec5SDimitry Andric     _Block_copy_finalizing_class = _NSConcreteFinalizingBlock;
245*0b57cec5SDimitry Andric     _Block_setHasRefcount = setHasRefcount;
246*0b57cec5SDimitry Andric     _Byref_flag_initial_value = BLOCK_IS_GC;   // no refcount
247*0b57cec5SDimitry Andric     _Block_retain_object = _Block_do_nothing;
248*0b57cec5SDimitry Andric     _Block_release_object = _Block_do_nothing;
249*0b57cec5SDimitry Andric     _Block_assign_weak = gc_assign_weak;
250*0b57cec5SDimitry Andric     _Block_memmove = gc_memmove;
251*0b57cec5SDimitry Andric }
252*0b57cec5SDimitry Andric 
253*0b57cec5SDimitry Andric /* transitional */
_Block_use_GC5(void * (* alloc)(const unsigned long,const bool isOne,const bool isObject),void (* setHasRefcount)(const void *,const bool),void (* gc_assign)(void *,void **),void (* gc_assign_weak)(const void *,void *))254*0b57cec5SDimitry Andric void _Block_use_GC5( void *(*alloc)(const unsigned long, const bool isOne, const bool isObject),
255*0b57cec5SDimitry Andric                     void (*setHasRefcount)(const void *, const bool),
256*0b57cec5SDimitry Andric                     void (*gc_assign)(void *, void **),
257*0b57cec5SDimitry Andric                     void (*gc_assign_weak)(const void *, void *)) {
258*0b57cec5SDimitry Andric     /* until objc calls _Block_use_GC it will call us; supply a broken internal memmove implementation until then */
259*0b57cec5SDimitry Andric     _Block_use_GC(alloc, setHasRefcount, gc_assign, gc_assign_weak, _Block_memmove_gc_broken);
260*0b57cec5SDimitry Andric }
261*0b57cec5SDimitry Andric 
262*0b57cec5SDimitry Andric 
263*0b57cec5SDimitry Andric /*
264*0b57cec5SDimitry Andric  * Called from objc-auto to alternatively turn on retain/release.
265*0b57cec5SDimitry Andric  * Prior to this the only "object" support we can provide is for those
266*0b57cec5SDimitry Andric  * super special objects that live in libSystem, namely dispatch queues.
267*0b57cec5SDimitry Andric  * Blocks and Block_byrefs have their own special entry points.
268*0b57cec5SDimitry Andric  *
269*0b57cec5SDimitry Andric  */
_Block_use_RR(void (* retain)(const void *),void (* release)(const void *))270*0b57cec5SDimitry Andric void _Block_use_RR( void (*retain)(const void *),
271*0b57cec5SDimitry Andric                     void (*release)(const void *)) {
272*0b57cec5SDimitry Andric     _Block_retain_object = retain;
273*0b57cec5SDimitry Andric     _Block_release_object = release;
274*0b57cec5SDimitry Andric }
275*0b57cec5SDimitry Andric 
276*0b57cec5SDimitry Andric /*
277*0b57cec5SDimitry Andric  * Internal Support routines for copying:
278*0b57cec5SDimitry Andric  */
279*0b57cec5SDimitry Andric 
280*0b57cec5SDimitry Andric #if 0
281*0b57cec5SDimitry Andric #pragma mark Copy/Release support
282*0b57cec5SDimitry Andric #endif /* if 0 */
283*0b57cec5SDimitry Andric 
/* Copy, or bump refcount, of a block.  If really copying, call the copy helper if present. */
/*
 * Core implementation behind _Block_copy.
 *   arg   - the block to copy; NULL yields NULL.
 *   flags - WANTS_ONE requests an initial refcount of one (only affects
 *           the GC paths).
 * Returns the same pointer for already-heap/GC/global blocks, or a
 * freshly allocated heap copy for stack blocks (NULL on allocation
 * failure).
 */
static void *_Block_copy_internal(const void *arg, const int flags) {
    struct Block_layout *aBlock;
    const bool wantsOne = (WANTS_ONE & flags) == WANTS_ONE;

    //printf("_Block_copy_internal(%p, %x)\n", arg, flags);
    if (!arg) return NULL;


    // The following would be better done as a switch statement
    aBlock = (struct Block_layout *)arg;
    if (aBlock->flags & BLOCK_NEEDS_FREE) {
        // Already a refcounted heap copy: just bump the count.
        // latches on high
        latching_incr_int(&aBlock->flags);
        return aBlock;
    }
    else if (aBlock->flags & BLOCK_IS_GC) {
        // GC refcounting is expensive so do most refcounting here.
        if (wantsOne && ((latching_incr_int(&aBlock->flags) & BLOCK_REFCOUNT_MASK) == 1)) {
            // Tell collector to hang on this - it will bump the GC refcount version
            _Block_setHasRefcount(aBlock, true);
        }
        return aBlock;
    }
    else if (aBlock->flags & BLOCK_IS_GLOBAL) {
        // Global blocks are immortal; nothing to copy or count.
        return aBlock;
    }

    // Its a stack block.  Make a copy.
    if (!isGC) {
        struct Block_layout *result = malloc(aBlock->descriptor->size);
        if (!result) return (void *)0;
        memmove(result, aBlock, aBlock->descriptor->size); // bitcopy first
        // reset refcount
        result->flags &= ~(BLOCK_REFCOUNT_MASK);    // XXX not needed
        result->flags |= BLOCK_NEEDS_FREE | 1;      // heap copy, refcount 1
        result->isa = _NSConcreteMallocBlock;
        if (result->flags & BLOCK_HAS_COPY_DISPOSE) {
            //printf("calling block copy helper %p(%p, %p)...\n", aBlock->descriptor->copy, result, aBlock);
            (*aBlock->descriptor->copy)(result, aBlock); // do fixup
        }
        return result;
    }
    else {
        // Under GC want allocation with refcount 1 so we ask for "true" if wantsOne
        // This allows the copy helper routines to make non-refcounted block copies under GC
        unsigned long int flags = aBlock->flags;
        bool hasCTOR = (flags & BLOCK_HAS_CTOR) != 0;
        struct Block_layout *result = _Block_allocator(aBlock->descriptor->size, wantsOne, hasCTOR);
        if (!result) return (void *)0;
        memmove(result, aBlock, aBlock->descriptor->size); // bitcopy first
        // reset refcount
        // if we copy a malloc block to a GC block then we need to clear NEEDS_FREE.
        flags &= ~(BLOCK_NEEDS_FREE|BLOCK_REFCOUNT_MASK);   // XXX not needed
        if (wantsOne)
            flags |= BLOCK_IS_GC | 1;
        else
            flags |= BLOCK_IS_GC;
        result->flags = flags;
        if (flags & BLOCK_HAS_COPY_DISPOSE) {
            //printf("calling block copy helper...\n");
            (*aBlock->descriptor->copy)(result, aBlock); // do fixup
        }
        // Blocks with C++ ctors need a finalizer to run the dtor.
        if (hasCTOR) {
            result->isa = _NSConcreteFinalizingBlock;
        }
        else {
            result->isa = _NSConcreteAutoBlock;
        }
        return result;
    }
}
356*0b57cec5SDimitry Andric 
357*0b57cec5SDimitry Andric 
358*0b57cec5SDimitry Andric /*
359*0b57cec5SDimitry Andric  * Runtime entry points for maintaining the sharing knowledge of byref data blocks.
360*0b57cec5SDimitry Andric  *
361*0b57cec5SDimitry Andric  * A closure has been copied and its fixup routine is asking us to fix up the reference to the shared byref data
362*0b57cec5SDimitry Andric  * Closures that aren't copied must still work, so everyone always accesses variables after dereferencing the forwarding ptr.
363*0b57cec5SDimitry Andric  * We ask if the byref pointer that we know about has already been copied to the heap, and if so, increment it.
364*0b57cec5SDimitry Andric  * Otherwise we need to copy it and update the stack forwarding pointer
365*0b57cec5SDimitry Andric  * XXX We need to account for weak/nonretained read-write barriers.
366*0b57cec5SDimitry Andric  */
367*0b57cec5SDimitry Andric 
/*
 * Fix up a copied closure's reference to shared __block (byref) data.
 *   dest  - slot in the new Block that will hold the byref pointer.
 *   arg   - the byref structure the closure currently references.
 *   flags - BLOCK_FIELD_* bits describing the field being copied.
 * If the byref still lives on the stack it is moved to the heap and the
 * stack copy's forwarding pointer is patched to the heap copy; if it is
 * already on the heap its refcount is bumped.  Either way the (possibly
 * forwarded) pointer is stored into *dest via the _Block_assign callout.
 */
static void _Block_byref_assign_copy(void *dest, const void *arg, const int flags) {
    struct Block_byref **destp = (struct Block_byref **)dest;
    struct Block_byref *src = (struct Block_byref *)arg;

    //printf("_Block_byref_assign_copy called, byref destp %p, src %p, flags %x\n", destp, src, flags);
    //printf("src dump: %s\n", _Block_byref_dump(src));
    if (src->forwarding->flags & BLOCK_IS_GC) {
        ;   // don't need to do any more work
    }
    else if ((src->forwarding->flags & BLOCK_REFCOUNT_MASK) == 0) {
        //printf("making copy\n");
        // src points to stack
        bool isWeak = ((flags & (BLOCK_FIELD_IS_BYREF|BLOCK_FIELD_IS_WEAK)) == (BLOCK_FIELD_IS_BYREF|BLOCK_FIELD_IS_WEAK));
        // if its weak ask for an object (only matters under GC)
        // NOTE(review): the allocator result is not checked for NULL; a
        // failed allocation would be dereferenced below -- confirm the
        // intended policy (abort vs. propagate) before changing.
        struct Block_byref *copy = (struct Block_byref *)_Block_allocator(src->size, false, isWeak);
        copy->flags = src->flags | _Byref_flag_initial_value; // non-GC one for caller, one for stack
        copy->forwarding = copy; // patch heap copy to point to itself (skip write-barrier)
        src->forwarding = copy;  // patch stack to point to heap copy
        copy->size = src->size;
        if (isWeak) {
            copy->isa = &_NSConcreteWeakBlockVariable;  // mark isa field so it gets weak scanning
        }
        if (src->flags & BLOCK_HAS_COPY_DISPOSE) {
            // Trust copy helper to copy everything of interest
            // If more than one field shows up in a byref block this is wrong XXX
            copy->byref_keep = src->byref_keep;
            copy->byref_destroy = src->byref_destroy;
            (*src->byref_keep)(copy, src);
        }
        else {
            // just bits.  Blast 'em using _Block_memmove in case they're __strong
            _Block_memmove(
                (void *)&copy->byref_keep,
                (void *)&src->byref_keep,
                src->size - sizeof(struct Block_byref_header));
        }
    }
    // already copied to heap
    else if ((src->forwarding->flags & BLOCK_NEEDS_FREE) == BLOCK_NEEDS_FREE) {
        latching_incr_int(&src->forwarding->flags);
    }
    // assign byref data block pointer into new Block
    _Block_assign(src->forwarding, (void **)destp);
}
412*0b57cec5SDimitry Andric 
// Old compiler SPI
/*
 * Drop one reference on a shared byref structure (always through the
 * forwarding pointer).  Stack, GC and global byrefs are left alone;
 * when a heap byref's refcount reaches zero its destroy helper (if any)
 * runs and the storage is returned via the _Block_deallocator callout.
 */
static void _Block_byref_release(const void *arg) {
    struct Block_byref *shared_struct = (struct Block_byref *)arg;
    int refcount;

    // dereference the forwarding pointer since the compiler isn't doing this anymore (ever?)
    shared_struct = shared_struct->forwarding;

    //printf("_Block_byref_release %p called, flags are %x\n", shared_struct, shared_struct->flags);
    // To support C++ destructors under GC we arrange for there to be a finalizer for this
    // by using an isa that directs the code to a finalizer that calls the byref_destroy method.
    if ((shared_struct->flags & BLOCK_NEEDS_FREE) == 0) {
        return; // stack or GC or global
    }
    refcount = shared_struct->flags & BLOCK_REFCOUNT_MASK;
    if (refcount <= 0) {
        // over-release: report rather than corrupt memory
        printf("_Block_byref_release: Block byref data structure at %p underflowed\n", arg);
    }
    else if ((latching_decr_int(&shared_struct->flags) & BLOCK_REFCOUNT_MASK) == 0) {
        //printf("disposing of heap based byref block\n");
        if (shared_struct->flags & BLOCK_HAS_COPY_DISPOSE) {
            //printf("calling out to helper\n");
            (*shared_struct->byref_destroy)(shared_struct);
        }
        _Block_deallocator((struct Block_layout *)shared_struct);
    }
}
440*0b57cec5SDimitry Andric 
441*0b57cec5SDimitry Andric 
442*0b57cec5SDimitry Andric /*
443*0b57cec5SDimitry Andric  *
444*0b57cec5SDimitry Andric  * API supporting SPI
445*0b57cec5SDimitry Andric  * _Block_copy, _Block_release, and (old) _Block_destroy
446*0b57cec5SDimitry Andric  *
447*0b57cec5SDimitry Andric  */
448*0b57cec5SDimitry Andric 
449*0b57cec5SDimitry Andric #if 0
450*0b57cec5SDimitry Andric #pragma mark SPI/API
451*0b57cec5SDimitry Andric #endif /* if 0 */
452*0b57cec5SDimitry Andric 
_Block_copy(const void * arg)453*0b57cec5SDimitry Andric void *_Block_copy(const void *arg) {
454*0b57cec5SDimitry Andric     return _Block_copy_internal(arg, WANTS_ONE);
455*0b57cec5SDimitry Andric }
456*0b57cec5SDimitry Andric 
457*0b57cec5SDimitry Andric 
// API entry point to release a copied Block.
// Decrements the refcount; on reaching zero a heap copy runs its dispose
// helper (if any) and is freed via the _Block_deallocator callout, a GC
// copy tells the collector it no longer holds its own refcount, and
// global blocks are ignored.  NULL is a no-op.
void _Block_release(void *arg) {
    struct Block_layout *aBlock = (struct Block_layout *)arg;
    int32_t newCount;
    if (!aBlock) return;
    newCount = latching_decr_int(&aBlock->flags) & BLOCK_REFCOUNT_MASK;
    if (newCount > 0) return;
    // Hit zero
    if (aBlock->flags & BLOCK_IS_GC) {
        // Tell GC we no longer have our own refcounts.  GC will decr its refcount
        // and unless someone has done a CFRetain or marked it uncollectable it will
        // now be subject to GC reclamation.
        _Block_setHasRefcount(aBlock, false);
    }
    else if (aBlock->flags & BLOCK_NEEDS_FREE) {
        // Heap copy: run the dispose helper (if present), then free.
        if (aBlock->flags & BLOCK_HAS_COPY_DISPOSE)(*aBlock->descriptor->dispose)(aBlock);
        _Block_deallocator(aBlock);
    }
    else if (aBlock->flags & BLOCK_IS_GLOBAL) {
        ; // global blocks are never freed
    }
    else {
        // releasing a stack block is a caller bug; report and ignore
        printf("Block_release called upon a stack Block: %p, ignored\n", (void *)aBlock);
    }
}
483*0b57cec5SDimitry Andric 
484*0b57cec5SDimitry Andric 
485*0b57cec5SDimitry Andric 
486*0b57cec5SDimitry Andric // Old Compiler SPI point to release a copied Block used by the compiler in dispose helpers
_Block_destroy(const void * arg)487*0b57cec5SDimitry Andric static void _Block_destroy(const void *arg) {
488*0b57cec5SDimitry Andric     struct Block_layout *aBlock;
489*0b57cec5SDimitry Andric     if (!arg) return;
490*0b57cec5SDimitry Andric     aBlock = (struct Block_layout *)arg;
491*0b57cec5SDimitry Andric     if (aBlock->flags & BLOCK_IS_GC) {
492*0b57cec5SDimitry Andric         // assert(aBlock->Block_flags & BLOCK_HAS_CTOR);
493*0b57cec5SDimitry Andric         return; // ignore, we are being called because of a DTOR
494*0b57cec5SDimitry Andric     }
495*0b57cec5SDimitry Andric     _Block_release(aBlock);
496*0b57cec5SDimitry Andric }
497*0b57cec5SDimitry Andric 
498*0b57cec5SDimitry Andric 
499*0b57cec5SDimitry Andric 
500*0b57cec5SDimitry Andric /*
501*0b57cec5SDimitry Andric  *
502*0b57cec5SDimitry Andric  * SPI used by other layers
503*0b57cec5SDimitry Andric  *
504*0b57cec5SDimitry Andric  */
505*0b57cec5SDimitry Andric 
// SPI, also internal.  Called from NSAutoBlock only under GC.
// Passes flags of 0 (no WANTS_ONE), so no manual refcount is taken.
void *_Block_copy_collectable(const void *aBlock) {
    const int collectableFlags = 0;
    return _Block_copy_internal(aBlock, collectableFlags);
}
510*0b57cec5SDimitry Andric 
511*0b57cec5SDimitry Andric 
512*0b57cec5SDimitry Andric // SPI
Block_size(void * arg)513*0b57cec5SDimitry Andric unsigned long int Block_size(void *arg) {
514*0b57cec5SDimitry Andric     return ((struct Block_layout *)arg)->descriptor->size;
515*0b57cec5SDimitry Andric }
516*0b57cec5SDimitry Andric 
517*0b57cec5SDimitry Andric 
518*0b57cec5SDimitry Andric #if 0
519*0b57cec5SDimitry Andric #pragma mark Compiler SPI entry points
520*0b57cec5SDimitry Andric #endif /* if 0 */
521*0b57cec5SDimitry Andric 
522*0b57cec5SDimitry Andric 
523*0b57cec5SDimitry Andric /*******************************************************
524*0b57cec5SDimitry Andric 
525*0b57cec5SDimitry Andric Entry points used by the compiler - the real API!
526*0b57cec5SDimitry Andric 
527*0b57cec5SDimitry Andric 
528*0b57cec5SDimitry Andric A Block can reference four different kinds of things that require help when the Block is copied to the heap.
529*0b57cec5SDimitry Andric 1) C++ stack based objects
530*0b57cec5SDimitry Andric 2) References to Objective-C objects
531*0b57cec5SDimitry Andric 3) Other Blocks
532*0b57cec5SDimitry Andric 4) __block variables
533*0b57cec5SDimitry Andric 
534*0b57cec5SDimitry Andric In these cases helper functions are synthesized by the compiler for use in Block_copy and Block_release, called the copy and dispose helpers.  The copy helper emits a call to the C++ const copy constructor for C++ stack based objects and for the rest calls into the runtime support function _Block_object_assign.  The dispose helper has a call to the C++ destructor for case 1 and a call into _Block_object_dispose for the rest.
535*0b57cec5SDimitry Andric 
536*0b57cec5SDimitry Andric The flags parameter of _Block_object_assign and _Block_object_dispose is set to
537*0b57cec5SDimitry Andric 	* BLOCK_FIELD_IS_OBJECT (3), for the case of an Objective-C Object,
538*0b57cec5SDimitry Andric 	* BLOCK_FIELD_IS_BLOCK (7), for the case of another Block, and
539*0b57cec5SDimitry Andric 	* BLOCK_FIELD_IS_BYREF (8), for the case of a __block variable.
540*0b57cec5SDimitry Andric If the __block variable is marked weak the compiler also or's in BLOCK_FIELD_IS_WEAK (16).
541*0b57cec5SDimitry Andric 
542*0b57cec5SDimitry Andric So the Block copy/dispose helpers should only ever generate the four flag values of 3, 7, 8, and 24.
543*0b57cec5SDimitry Andric 
544*0b57cec5SDimitry Andric When  a __block variable is either a C++ object, an Objective-C object, or another Block then the compiler also generates copy/dispose helper functions.  Similarly to the Block copy helper, the "__block" copy helper (formerly and still a.k.a. "byref" copy helper) will do a C++ copy constructor (not a const one though!) and the dispose helper will do the destructor.  And similarly the helpers will call into the same two support functions with the same values for objects and Blocks with the additional BLOCK_BYREF_CALLER (128) bit of information supplied.
545*0b57cec5SDimitry Andric 
546*0b57cec5SDimitry Andric So the __block copy/dispose helpers will generate flag values of 3 or 7 for objects and Blocks respectively, with BLOCK_FIELD_IS_WEAK (16) or'ed as appropriate and always 128 or'd in, for the following set of possibilities:
547*0b57cec5SDimitry Andric 	__block id                   128+3
        __weak __block id            128+3+16
549*0b57cec5SDimitry Andric 	__block (^Block)             128+7
550*0b57cec5SDimitry Andric 	__weak __block (^Block)      128+7+16
551*0b57cec5SDimitry Andric 
552*0b57cec5SDimitry Andric The implementation of the two routines would be improved by switch statements enumerating the eight cases.
553*0b57cec5SDimitry Andric 
554*0b57cec5SDimitry Andric ********************************************************/
555*0b57cec5SDimitry Andric 
556*0b57cec5SDimitry Andric /*
557*0b57cec5SDimitry Andric  * When Blocks or Block_byrefs hold objects then their copy routine helpers use this entry point
558*0b57cec5SDimitry Andric  * to do the assignment.
559*0b57cec5SDimitry Andric  */
_Block_object_assign(void * destAddr,const void * object,const int flags)560*0b57cec5SDimitry Andric void _Block_object_assign(void *destAddr, const void *object, const int flags) {
561*0b57cec5SDimitry Andric     //printf("_Block_object_assign(*%p, %p, %x)\n", destAddr, object, flags);
562*0b57cec5SDimitry Andric     if ((flags & BLOCK_BYREF_CALLER) == BLOCK_BYREF_CALLER) {
563*0b57cec5SDimitry Andric         if ((flags & BLOCK_FIELD_IS_WEAK) == BLOCK_FIELD_IS_WEAK) {
564*0b57cec5SDimitry Andric             _Block_assign_weak(object, destAddr);
565*0b57cec5SDimitry Andric         }
566*0b57cec5SDimitry Andric         else {
567*0b57cec5SDimitry Andric             // do *not* retain or *copy* __block variables whatever they are
568*0b57cec5SDimitry Andric             _Block_assign((void *)object, destAddr);
569*0b57cec5SDimitry Andric         }
570*0b57cec5SDimitry Andric     }
571*0b57cec5SDimitry Andric     else if ((flags & BLOCK_FIELD_IS_BYREF) == BLOCK_FIELD_IS_BYREF)  {
572*0b57cec5SDimitry Andric         // copying a __block reference from the stack Block to the heap
573*0b57cec5SDimitry Andric         // flags will indicate if it holds a __weak reference and needs a special isa
574*0b57cec5SDimitry Andric         _Block_byref_assign_copy(destAddr, object, flags);
575*0b57cec5SDimitry Andric     }
576*0b57cec5SDimitry Andric     // (this test must be before next one)
577*0b57cec5SDimitry Andric     else if ((flags & BLOCK_FIELD_IS_BLOCK) == BLOCK_FIELD_IS_BLOCK) {
578*0b57cec5SDimitry Andric         // copying a Block declared variable from the stack Block to the heap
579*0b57cec5SDimitry Andric         _Block_assign(_Block_copy_internal(object, flags), destAddr);
580*0b57cec5SDimitry Andric     }
581*0b57cec5SDimitry Andric     // (this test must be after previous one)
582*0b57cec5SDimitry Andric     else if ((flags & BLOCK_FIELD_IS_OBJECT) == BLOCK_FIELD_IS_OBJECT) {
583*0b57cec5SDimitry Andric         //printf("retaining object at %p\n", object);
584*0b57cec5SDimitry Andric         _Block_retain_object(object);
585*0b57cec5SDimitry Andric         //printf("done retaining object at %p\n", object);
586*0b57cec5SDimitry Andric         _Block_assign((void *)object, destAddr);
587*0b57cec5SDimitry Andric     }
588*0b57cec5SDimitry Andric }
589*0b57cec5SDimitry Andric 
590*0b57cec5SDimitry Andric // When Blocks or Block_byrefs hold objects their destroy helper routines call this entry point
591*0b57cec5SDimitry Andric // to help dispose of the contents
592*0b57cec5SDimitry Andric // Used initially only for __attribute__((NSObject)) marked pointers.
_Block_object_dispose(const void * object,const int flags)593*0b57cec5SDimitry Andric void _Block_object_dispose(const void *object, const int flags) {
594*0b57cec5SDimitry Andric     //printf("_Block_object_dispose(%p, %x)\n", object, flags);
595*0b57cec5SDimitry Andric     if (flags & BLOCK_FIELD_IS_BYREF)  {
596*0b57cec5SDimitry Andric         // get rid of the __block data structure held in a Block
597*0b57cec5SDimitry Andric         _Block_byref_release(object);
598*0b57cec5SDimitry Andric     }
599*0b57cec5SDimitry Andric     else if ((flags & (BLOCK_FIELD_IS_BLOCK|BLOCK_BYREF_CALLER)) == BLOCK_FIELD_IS_BLOCK) {
600*0b57cec5SDimitry Andric         // get rid of a referenced Block held by this Block
601*0b57cec5SDimitry Andric         // (ignore __block Block variables, compiler doesn't need to call us)
602*0b57cec5SDimitry Andric         _Block_destroy(object);
603*0b57cec5SDimitry Andric     }
604*0b57cec5SDimitry Andric     else if ((flags & (BLOCK_FIELD_IS_WEAK|BLOCK_FIELD_IS_BLOCK|BLOCK_BYREF_CALLER)) == BLOCK_FIELD_IS_OBJECT) {
605*0b57cec5SDimitry Andric         // get rid of a referenced object held by this Block
606*0b57cec5SDimitry Andric         // (ignore __block object variables, compiler doesn't need to call us)
607*0b57cec5SDimitry Andric         _Block_release_object(object);
608*0b57cec5SDimitry Andric     }
609*0b57cec5SDimitry Andric }
610*0b57cec5SDimitry Andric 
611*0b57cec5SDimitry Andric 
612*0b57cec5SDimitry Andric /*
613*0b57cec5SDimitry Andric  * Debugging support:
614*0b57cec5SDimitry Andric  */
615*0b57cec5SDimitry Andric #if 0
616*0b57cec5SDimitry Andric #pragma mark Debugging
617*0b57cec5SDimitry Andric #endif /* if 0 */
618*0b57cec5SDimitry Andric 
619*0b57cec5SDimitry Andric 
_Block_dump(const void * block)620*0b57cec5SDimitry Andric const char *_Block_dump(const void *block) {
621*0b57cec5SDimitry Andric     struct Block_layout *closure = (struct Block_layout *)block;
622*0b57cec5SDimitry Andric     static char buffer[512];
623*0b57cec5SDimitry Andric     char *cp = buffer;
624*0b57cec5SDimitry Andric     if (closure == NULL) {
625*0b57cec5SDimitry Andric         sprintf(cp, "NULL passed to _Block_dump\n");
626*0b57cec5SDimitry Andric         return buffer;
627*0b57cec5SDimitry Andric     }
628*0b57cec5SDimitry Andric     if (! (closure->flags & BLOCK_HAS_DESCRIPTOR)) {
629*0b57cec5SDimitry Andric         printf("Block compiled by obsolete compiler, please recompile source for this Block\n");
630*0b57cec5SDimitry Andric         exit(1);
631*0b57cec5SDimitry Andric     }
632*0b57cec5SDimitry Andric     cp += sprintf(cp, "^%p (new layout) =\n", (void *)closure);
633*0b57cec5SDimitry Andric     if (closure->isa == NULL) {
634*0b57cec5SDimitry Andric         cp += sprintf(cp, "isa: NULL\n");
635*0b57cec5SDimitry Andric     }
636*0b57cec5SDimitry Andric     else if (closure->isa == _NSConcreteStackBlock) {
637*0b57cec5SDimitry Andric         cp += sprintf(cp, "isa: stack Block\n");
638*0b57cec5SDimitry Andric     }
639*0b57cec5SDimitry Andric     else if (closure->isa == _NSConcreteMallocBlock) {
640*0b57cec5SDimitry Andric         cp += sprintf(cp, "isa: malloc heap Block\n");
641*0b57cec5SDimitry Andric     }
642*0b57cec5SDimitry Andric     else if (closure->isa == _NSConcreteAutoBlock) {
643*0b57cec5SDimitry Andric         cp += sprintf(cp, "isa: GC heap Block\n");
644*0b57cec5SDimitry Andric     }
645*0b57cec5SDimitry Andric     else if (closure->isa == _NSConcreteGlobalBlock) {
646*0b57cec5SDimitry Andric         cp += sprintf(cp, "isa: global Block\n");
647*0b57cec5SDimitry Andric     }
648*0b57cec5SDimitry Andric     else if (closure->isa == _NSConcreteFinalizingBlock) {
649*0b57cec5SDimitry Andric         cp += sprintf(cp, "isa: finalizing Block\n");
650*0b57cec5SDimitry Andric     }
651*0b57cec5SDimitry Andric     else {
652*0b57cec5SDimitry Andric         cp += sprintf(cp, "isa?: %p\n", (void *)closure->isa);
653*0b57cec5SDimitry Andric     }
654*0b57cec5SDimitry Andric     cp += sprintf(cp, "flags:");
655*0b57cec5SDimitry Andric     if (closure->flags & BLOCK_HAS_DESCRIPTOR) {
656*0b57cec5SDimitry Andric         cp += sprintf(cp, " HASDESCRIPTOR");
657*0b57cec5SDimitry Andric     }
658*0b57cec5SDimitry Andric     if (closure->flags & BLOCK_NEEDS_FREE) {
659*0b57cec5SDimitry Andric         cp += sprintf(cp, " FREEME");
660*0b57cec5SDimitry Andric     }
661*0b57cec5SDimitry Andric     if (closure->flags & BLOCK_IS_GC) {
662*0b57cec5SDimitry Andric         cp += sprintf(cp, " ISGC");
663*0b57cec5SDimitry Andric     }
664*0b57cec5SDimitry Andric     if (closure->flags & BLOCK_HAS_COPY_DISPOSE) {
665*0b57cec5SDimitry Andric         cp += sprintf(cp, " HASHELP");
666*0b57cec5SDimitry Andric     }
667*0b57cec5SDimitry Andric     if (closure->flags & BLOCK_HAS_CTOR) {
668*0b57cec5SDimitry Andric         cp += sprintf(cp, " HASCTOR");
669*0b57cec5SDimitry Andric     }
670*0b57cec5SDimitry Andric     cp += sprintf(cp, "\nrefcount: %u\n", closure->flags & BLOCK_REFCOUNT_MASK);
671*0b57cec5SDimitry Andric     cp += sprintf(cp, "invoke: %p\n", (void *)(uintptr_t)closure->invoke);
672*0b57cec5SDimitry Andric     {
673*0b57cec5SDimitry Andric         struct Block_descriptor *dp = closure->descriptor;
674*0b57cec5SDimitry Andric         cp += sprintf(cp, "descriptor: %p\n", (void *)dp);
675*0b57cec5SDimitry Andric         cp += sprintf(cp, "descriptor->reserved: %lu\n", dp->reserved);
676*0b57cec5SDimitry Andric         cp += sprintf(cp, "descriptor->size: %lu\n", dp->size);
677*0b57cec5SDimitry Andric 
678*0b57cec5SDimitry Andric         if (closure->flags & BLOCK_HAS_COPY_DISPOSE) {
679*0b57cec5SDimitry Andric             cp += sprintf(cp, "descriptor->copy helper: %p\n", (void *)(uintptr_t)dp->copy);
680*0b57cec5SDimitry Andric             cp += sprintf(cp, "descriptor->dispose helper: %p\n", (void *)(uintptr_t)dp->dispose);
681*0b57cec5SDimitry Andric         }
682*0b57cec5SDimitry Andric     }
683*0b57cec5SDimitry Andric     return buffer;
684*0b57cec5SDimitry Andric }
685*0b57cec5SDimitry Andric 
686*0b57cec5SDimitry Andric 
_Block_byref_dump(struct Block_byref * src)687*0b57cec5SDimitry Andric const char *_Block_byref_dump(struct Block_byref *src) {
688*0b57cec5SDimitry Andric     static char buffer[256];
689*0b57cec5SDimitry Andric     char *cp = buffer;
690*0b57cec5SDimitry Andric     cp += sprintf(cp, "byref data block %p contents:\n", (void *)src);
691*0b57cec5SDimitry Andric     cp += sprintf(cp, "  forwarding: %p\n", (void *)src->forwarding);
692*0b57cec5SDimitry Andric     cp += sprintf(cp, "  flags: 0x%x\n", src->flags);
693*0b57cec5SDimitry Andric     cp += sprintf(cp, "  size: %d\n", src->size);
694*0b57cec5SDimitry Andric     if (src->flags & BLOCK_HAS_COPY_DISPOSE) {
695*0b57cec5SDimitry Andric         cp += sprintf(cp, "  copy helper: %p\n", (void *)(uintptr_t)src->byref_keep);
696*0b57cec5SDimitry Andric         cp += sprintf(cp, "  dispose helper: %p\n", (void *)(uintptr_t)src->byref_destroy);
697*0b57cec5SDimitry Andric     }
698*0b57cec5SDimitry Andric     return buffer;
699*0b57cec5SDimitry Andric }
700*0b57cec5SDimitry Andric 
701