xref: /freebsd/sys/contrib/ncsw/etc/ncsw_mem.c (revision 43a5ec4eb41567cc92586503212743d89686d78f)
/******************************************************************************

 © 1995-2003, 2004, 2005-2011 Freescale Semiconductor, Inc.
 All rights reserved.

 This is proprietary source code of Freescale Semiconductor Inc.,
 and its use is subject to the NetComm Device Drivers EULA.
 The copyright notice above does not evidence any actual or intended
 publication of such source code.

 ALTERNATIVELY, redistribution and use in source and binary forms, with
 or without modification, are permitted provided that the following
 conditions are met:
     * Redistributions of source code must retain the above copyright
       notice, this list of conditions and the following disclaimer.
     * Redistributions in binary form must reproduce the above copyright
       notice, this list of conditions and the following disclaimer in the
       documentation and/or other materials provided with the distribution.
     * Neither the name of Freescale Semiconductor nor the
       names of its contributors may be used to endorse or promote products
       derived from this software without specific prior written permission.

 THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
 EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
 DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *

 **************************************************************************/
#include "error_ext.h"
#include "part_ext.h"
#include "std_ext.h"
#include "string_ext.h"
#include "mem_ext.h"
#include "mem.h"
#include "xx_ext.h"


#if 0
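/*
 * PAD_ALIGNMENT(align, x) yields the number of bytes needed to round x up
 * to the next multiple of 'align' (0 if x is already aligned); for example,
 * PAD_ALIGNMENT(16, 20) == 12.  ALIGN_BLOCK() advances p_Block past the
 * prefix area and then past that padding, so that the block's data starts
 * on an 'alignment' boundary.
 */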
#define PAD_ALIGNMENT(align, x) (((x)%(align)) ? ((align)-((x)%(align))) : 0)

#define ALIGN_BLOCK(p_Block, prefixSize, alignment)                 \
    do {                                                            \
        p_Block += (prefixSize);                                    \
        p_Block += PAD_ALIGNMENT((alignment), (uintptr_t)(p_Block)); \
    } while (0)

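/*
 * GET_CALLER_ADDR captures the caller's return address into the local
 * 'callerAddr' variable (via the PowerPC link register in the GCC variant);
 * the DEBUG_MEM_LEAKS code below records it as the owner of each allocated
 * block.
 */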
#if defined(__GNUC__)
#define GET_CALLER_ADDR \
    __asm__ ("mflr  %0" : "=r" (callerAddr))
#elif defined(__MWERKS__)
/* NOTE: This implementation is only valid for CodeWarrior for PowerPC */
#define GET_CALLER_ADDR \
    __asm__("add  %0, 0, %0" : : "r" (callerAddr))
#endif /* defined(__GNUC__) */


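/*
 * Free-list bookkeeping: p_BlocksStack[] holds one pointer per block and
 * 'current' counts the blocks handed out so far, so entries [current, num)
 * are the blocks still available.  MemGet() pops the entry at 'current' and
 * MemPut() pushes a block back just below it; the callers serialize both
 * operations with the segment's spinlock.
 */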
/*****************************************************************************/
static __inline__ void * MemGet(t_MemorySegment *p_Mem)
{
    uint8_t *p_Block;

    /* check if there is an available block */
    if (p_Mem->current == p_Mem->num)
    {
        p_Mem->getFailures++;
        return NULL;
    }

    /* get the block */
    p_Block = p_Mem->p_BlocksStack[p_Mem->current];
#ifdef DEBUG
    p_Mem->p_BlocksStack[p_Mem->current] = NULL;
#endif /* DEBUG */
    /* advance current index */
    p_Mem->current++;

    return (void *)p_Block;
}

/*****************************************************************************/
static __inline__ t_Error MemPut(t_MemorySegment *p_Mem, void *p_Block)
{
    /* make sure the blocks stack is not already full */
    if (p_Mem->current > 0)
    {
        /* decrease current index */
        p_Mem->current--;
        /* put the block */
        p_Mem->p_BlocksStack[p_Mem->current] = (uint8_t *)p_Block;
        return E_OK;
    }

    RETURN_ERROR(MAJOR, E_FULL, NO_MSG);
}


#ifdef DEBUG_MEM_LEAKS

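/*
 * Leak tracking: one t_MemDbg entry per block records the caller address of
 * the block's current owner, or ILLEGAL_BASE while the block is free.  For
 * consecutive pools the block index is recovered arithmetically from the
 * block's address; for non-consecutive pools it is read from the 4 bytes
 * stored just before the block's data (written by MEM_InitSmart()).
 */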
/*****************************************************************************/
static t_Error InitMemDebugDatabase(t_MemorySegment *p_Mem)
{
    p_Mem->p_MemDbg = (void *)XX_Malloc(sizeof(t_MemDbg) * p_Mem->num);
    if (!p_Mem->p_MemDbg)
    {
        RETURN_ERROR(MAJOR, E_NO_MEMORY, ("Memory debug object"));
    }

    memset(p_Mem->p_MemDbg, ILLEGAL_BASE, sizeof(t_MemDbg) * p_Mem->num);

    return E_OK;
}


/*****************************************************************************/
static t_Error DebugMemGet(t_Handle h_Mem, void *p_Block, uintptr_t ownerAddress)
{
    t_MemorySegment *p_Mem = (t_MemorySegment *)h_Mem;
    t_MemDbg        *p_MemDbg = (t_MemDbg *)p_Mem->p_MemDbg;
    uint32_t        blockIndex;

    ASSERT_COND(ownerAddress != ILLEGAL_BASE);

    /* Find block num */
    if (p_Mem->consecutiveMem)
    {
        blockIndex =
            (((uint8_t *)p_Block - (p_Mem->p_Bases[0] + p_Mem->blockOffset)) / p_Mem->blockSize);
    }
    else
    {
        blockIndex = *(uint32_t *)((uint8_t *)p_Block - 4);
    }

    ASSERT_COND(blockIndex < p_Mem->num);
    ASSERT_COND(p_MemDbg[blockIndex].ownerAddress == ILLEGAL_BASE);

    p_MemDbg[blockIndex].ownerAddress = ownerAddress;

    return E_OK;
}

/*****************************************************************************/
static t_Error DebugMemPut(t_Handle h_Mem, void *p_Block)
{
    t_MemorySegment *p_Mem = (t_MemorySegment *)h_Mem;
    t_MemDbg        *p_MemDbg = (t_MemDbg *)p_Mem->p_MemDbg;
    uint32_t        blockIndex;
    uint8_t         *p_Temp;

    /* Find block num */
    if (p_Mem->consecutiveMem)
    {
        blockIndex =
            (((uint8_t *)p_Block - (p_Mem->p_Bases[0] + p_Mem->blockOffset)) / p_Mem->blockSize);

        if (blockIndex >= p_Mem->num)
        {
            RETURN_ERROR(MAJOR, E_INVALID_ADDRESS,
                         ("Freed address (0x%08x) does not belong to this pool", p_Block));
        }
    }
    else
    {
        blockIndex = *(uint32_t *)((uint8_t *)p_Block - 4);

        if (blockIndex >= p_Mem->num)
        {
            RETURN_ERROR(MAJOR, E_INVALID_ADDRESS,
                         ("Freed address (0x%08x) does not belong to this pool", p_Block));
        }

        /* Verify that the block matches the corresponding base */
        p_Temp = p_Mem->p_Bases[blockIndex];

        ALIGN_BLOCK(p_Temp, p_Mem->prefixSize, p_Mem->alignment);

        if (p_Temp == p_Mem->p_Bases[blockIndex])
            p_Temp += p_Mem->alignment;

        if (p_Temp != p_Block)
        {
            RETURN_ERROR(MAJOR, E_INVALID_ADDRESS,
                         ("Freed address (0x%08x) does not belong to this pool", p_Block));
        }
    }

    if (p_MemDbg[blockIndex].ownerAddress == ILLEGAL_BASE)
    {
        RETURN_ERROR(MAJOR, E_ALREADY_FREE,
                     ("Attempt to free unallocated address (0x%08x)", p_Block));
    }

    p_MemDbg[blockIndex].ownerAddress = (uintptr_t)ILLEGAL_BASE;

    return E_OK;
}

#endif /* DEBUG_MEM_LEAKS */


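/*
 * Per-block size = prefixSize rounded up to a multiple of 4, plus dataSize
 * and postfixSize, with the sum rounded up to 'alignment'; the partition
 * additionally reserves 'alignment' bytes of slack so the first block's data
 * can be placed on an aligned address.  For example (illustrative numbers
 * only): prefixSize=6, dataSize=100, postfixSize=2, alignment=16 gives a
 * 112-byte block, so num=10 blocks need 10*112 + 16 = 1136 bytes.
 */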
/*****************************************************************************/
uint32_t MEM_ComputePartitionSize(uint32_t num,
                                  uint16_t dataSize,
                                  uint16_t prefixSize,
                                  uint16_t postfixSize,
                                  uint16_t alignment)
{
    uint32_t  blockSize = 0, pad1 = 0, pad2 = 0;

    /* Make sure that the alignment is at least 4 */
    if (alignment < 4)
    {
        alignment = 4;
    }

    pad1 = (uint32_t)PAD_ALIGNMENT(4, prefixSize);
    /* Block size not including 2nd padding */
    blockSize = pad1 + prefixSize + dataSize + postfixSize;
    pad2 = PAD_ALIGNMENT(alignment, blockSize);
    /* Block size including 2nd padding */
    blockSize += pad2;

    return ((num * blockSize) + alignment);
}

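/*
 * Illustrative usage sketch (parameter values are arbitrary): create a pool
 * of 64 blocks of 256 data bytes, aligned to 64 bytes, with no prefix or
 * postfix, then get and put one block and release the pool.
 *
 *     t_Handle h_Pool;
 *
 *     if (MEM_Init("rx-bufs", &h_Pool, 64, 256, 0, 0, 64) == E_OK)
 *     {
 *         void *p_Buf = MEM_Get(h_Pool);
 *         if (p_Buf)
 *             MEM_Put(h_Pool, p_Buf);
 *         MEM_Free(h_Pool);
 *     }
 */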
/*****************************************************************************/
t_Error MEM_Init(char       name[],
                 t_Handle   *p_Handle,
                 uint32_t   num,
                 uint16_t   dataSize,
                 uint16_t   prefixSize,
                 uint16_t   postfixSize,
                 uint16_t   alignment)
{
    uint8_t     *p_Memory;
    uint32_t    allocSize;
    t_Error     errCode;

    allocSize = MEM_ComputePartitionSize(num,
                                         dataSize,
                                         prefixSize,
                                         postfixSize,
                                         alignment);

    p_Memory = (uint8_t *)XX_Malloc(allocSize);
    if (!p_Memory)
    {
        RETURN_ERROR(MAJOR, E_NO_MEMORY, ("Memory segment"));
    }

    errCode = MEM_InitByAddress(name,
                                p_Handle,
                                num,
                                dataSize,
                                prefixSize,
                                postfixSize,
                                alignment,
                                p_Memory);
    if (errCode != E_OK)
    {
        /* MEM_InitByAddress() does not take ownership of p_Memory on failure */
        XX_Free(p_Memory);
        RETURN_ERROR(MAJOR, errCode, NO_MSG);
    }

    ((t_MemorySegment *)(*p_Handle))->allocOwner = e_MEM_ALLOC_OWNER_LOCAL;

    return E_OK;
}


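/*
 * MEM_InitByAddress() builds a pool on top of caller-supplied memory
 * (MEM_Init() sizes that memory with MEM_ComputePartitionSize()) and marks
 * the owner as e_MEM_ALLOC_OWNER_EXTERNAL, so MEM_Free() releases only the
 * bookkeeping structures, not the block memory itself.
 */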
/*****************************************************************************/
t_Error MEM_InitByAddress(char      name[],
                          t_Handle  *p_Handle,
                          uint32_t  num,
                          uint16_t  dataSize,
                          uint16_t  prefixSize,
                          uint16_t  postfixSize,
                          uint16_t  alignment,
                          uint8_t   *p_Memory)
{
    t_MemorySegment *p_Mem;
    uint32_t        i, blockSize;
    uint16_t        alignPad, endPad;
    uint8_t         *p_Blocks;

    /* prepare in case of error */
    *p_Handle = NULL;

    if (!p_Memory)
    {
        RETURN_ERROR(MAJOR, E_NULL_POINTER, ("Memory blocks"));
    }

    p_Blocks = p_Memory;

    /* make sure that the alignment is at least 4 and power of 2 */
    if (alignment < 4)
    {
        alignment = 4;
    }
    else if (!POWER_OF_2(alignment))
    {
        RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Alignment (should be power of 2)"));
    }

    /* first allocate the segment descriptor */
    p_Mem = (t_MemorySegment *)XX_Malloc(sizeof(t_MemorySegment));
    if (!p_Mem)
    {
        RETURN_ERROR(MAJOR, E_NO_MEMORY, ("Memory segment structure"));
    }
    /* zero the descriptor so the error paths below can safely call MEM_Free() */
    memset(p_Mem, 0, sizeof(t_MemorySegment));

    /* allocate the blocks stack */
    p_Mem->p_BlocksStack = (uint8_t **)XX_Malloc(num * sizeof(uint8_t*));
    if (!p_Mem->p_BlocksStack)
    {
        XX_Free(p_Mem);
        RETURN_ERROR(MAJOR, E_NO_MEMORY, ("Memory segment block pointers stack"));
    }

    /* allocate the blocks bases array */
    p_Mem->p_Bases = (uint8_t **)XX_Malloc(sizeof(uint8_t*));
    if (!p_Mem->p_Bases)
    {
        MEM_Free(p_Mem);
        RETURN_ERROR(MAJOR, E_NO_MEMORY, ("Memory segment base pointers array"));
    }
    memset(p_Mem->p_Bases, 0, sizeof(uint8_t*));

    /* store info about this segment */
    p_Mem->num = num;
    p_Mem->current = 0;
    p_Mem->dataSize = dataSize;
    p_Mem->p_Bases[0] = p_Blocks;
    p_Mem->getFailures = 0;
    p_Mem->allocOwner = e_MEM_ALLOC_OWNER_EXTERNAL;
    p_Mem->consecutiveMem = TRUE;
    p_Mem->prefixSize = prefixSize;
    p_Mem->postfixSize = postfixSize;
    p_Mem->alignment = alignment;
    /* store name */
    strncpy(p_Mem->name, name, MEM_MAX_NAME_LENGTH-1);

    p_Mem->h_Spinlock = XX_InitSpinlock();
    if (!p_Mem->h_Spinlock)
    {
        MEM_Free(p_Mem);
        RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Can't create spinlock!"));
    }

    alignPad = (uint16_t)PAD_ALIGNMENT(4, prefixSize);
    /* Make sure the entire size is a multiple of alignment */
    endPad = (uint16_t)PAD_ALIGNMENT(alignment, (alignPad + prefixSize + dataSize + postfixSize));

    /* The following manipulation places the data of block[0] at an aligned address;
       since the block size is itself aligned, the data of every subsequent block
       will be aligned as well. */
    ALIGN_BLOCK(p_Blocks, prefixSize, alignment);

    blockSize = (uint32_t)(alignPad + prefixSize + dataSize + postfixSize + endPad);

    /* initialize the blocks */
    for (i=0; i < num; i++)
    {
        p_Mem->p_BlocksStack[i] = p_Blocks;
        p_Blocks += blockSize;
    }

    /* return handle to caller */
    *p_Handle = (t_Handle)p_Mem;

#ifdef DEBUG_MEM_LEAKS
    {
        t_Error errCode = InitMemDebugDatabase(p_Mem);

        if (errCode != E_OK)
            RETURN_ERROR(MAJOR, errCode, NO_MSG);

        p_Mem->blockOffset = (uint32_t)(p_Mem->p_BlocksStack[0] - p_Mem->p_Bases[0]);
        p_Mem->blockSize = blockSize;
    }
#endif /* DEBUG_MEM_LEAKS */

    return E_OK;
}


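/*
 * MEM_InitSmart() allocates the block memory itself from the given memory
 * partition: with consecutiveMem TRUE it makes a single XX_MallocSmart()
 * call for all blocks (one base pointer); otherwise it makes one
 * XX_MallocSmart() call per block (num base pointers).  The owner is marked
 * e_MEM_ALLOC_OWNER_LOCAL_SMART, so MEM_Free() returns the memory with
 * XX_FreeSmart().
 */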
/*****************************************************************************/
t_Error MEM_InitSmart(char      name[],
                      t_Handle  *p_Handle,
                      uint32_t  num,
                      uint16_t  dataSize,
                      uint16_t  prefixSize,
                      uint16_t  postfixSize,
                      uint16_t  alignment,
                      uint8_t   memPartitionId,
                      bool      consecutiveMem)
{
    t_MemorySegment *p_Mem;
    uint32_t        i, blockSize;
    uint16_t        alignPad, endPad;

    /* prepare in case of error */
    *p_Handle = NULL;

    /* make sure that size is always a multiple of 4 */
    if (dataSize & 3)
    {
        dataSize &= ~3;
        dataSize += 4;
    }

    /* make sure that the alignment is at least 4 and power of 2 */
    if (alignment < 4)
    {
        alignment = 4;
    }
    else if (!POWER_OF_2(alignment))
    {
        RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Alignment (should be power of 2)"));
    }

    /* first allocate the segment descriptor */
    p_Mem = (t_MemorySegment *)XX_Malloc(sizeof(t_MemorySegment));
    if (!p_Mem)
    {
        RETURN_ERROR(MAJOR, E_NO_MEMORY, ("Memory segment structure"));
    }
    /* zero the descriptor so the error paths below can safely call MEM_Free() */
    memset(p_Mem, 0, sizeof(t_MemorySegment));

    /* allocate the blocks stack */
    p_Mem->p_BlocksStack = (uint8_t **)XX_Malloc(num * sizeof(uint8_t*));
    if (!p_Mem->p_BlocksStack)
    {
        MEM_Free(p_Mem);
        RETURN_ERROR(MAJOR, E_NO_MEMORY, ("Memory segment block pointers stack"));
    }

    /* allocate the blocks bases array */
    p_Mem->p_Bases = (uint8_t **)XX_Malloc((consecutiveMem ? 1 : num) * sizeof(uint8_t*));
    if (!p_Mem->p_Bases)
    {
        MEM_Free(p_Mem);
        RETURN_ERROR(MAJOR, E_NO_MEMORY, ("Memory segment base pointers array"));
    }
    memset(p_Mem->p_Bases, 0, (consecutiveMem ? 1 : num) * sizeof(uint8_t*));

    /* store info about this segment */
    p_Mem->num = num;
    p_Mem->current = 0;
    p_Mem->dataSize = dataSize;
    p_Mem->getFailures = 0;
    p_Mem->allocOwner = e_MEM_ALLOC_OWNER_LOCAL_SMART;
    p_Mem->consecutiveMem = consecutiveMem;
    p_Mem->prefixSize = prefixSize;
    p_Mem->postfixSize = postfixSize;
    p_Mem->alignment = alignment;

    p_Mem->h_Spinlock = XX_InitSpinlock();
    if (!p_Mem->h_Spinlock)
    {
        MEM_Free(p_Mem);
        RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Can't create spinlock!"));
    }

    alignPad = (uint16_t)PAD_ALIGNMENT(4, prefixSize);
    /* Make sure the entire size is a multiple of alignment */
    endPad = (uint16_t)PAD_ALIGNMENT(alignment, alignPad + prefixSize + dataSize + postfixSize);

    /* Calculate blockSize */
    blockSize = (uint32_t)(alignPad + prefixSize + dataSize + postfixSize + endPad);

    /* Now allocate the blocks */
    if (p_Mem->consecutiveMem)
    {
        /* At most (alignment - 1) bytes at the beginning of the allocated
           segment may be discarded for alignment, so the allocation size is
           (alignment + (num * blockSize)). */
        uint8_t *p_Blocks = (uint8_t *)
            XX_MallocSmart((uint32_t)((num * blockSize) + alignment), memPartitionId, 1);
        if (!p_Blocks)
        {
            MEM_Free(p_Mem);
            RETURN_ERROR(MAJOR, E_NO_MEMORY, ("Memory segment blocks"));
        }

        /* Store the memory segment address */
        p_Mem->p_Bases[0] = p_Blocks;

        /* The following manipulation places the data of block[0] at an aligned address;
           since the block size is itself aligned, the data of every subsequent block
           will be aligned as well. */
        ALIGN_BLOCK(p_Blocks, prefixSize, alignment);

        /* initialize the blocks */
        for (i = 0; i < num; i++)
        {
            p_Mem->p_BlocksStack[i] = p_Blocks;
            p_Blocks += blockSize;
        }

#ifdef DEBUG_MEM_LEAKS
        p_Mem->blockOffset = (uint32_t)(p_Mem->p_BlocksStack[0] - p_Mem->p_Bases[0]);
        p_Mem->blockSize = blockSize;
#endif /* DEBUG_MEM_LEAKS */
    }
    else
    {
        /* At most (alignment - 1) bytes at the beginning of each allocated
           block may be discarded for alignment, so each allocation is of
           (alignment + blockSize) bytes. */
        for (i = 0; i < num; i++)
        {
            uint8_t *p_Block = (uint8_t *)
                XX_MallocSmart((uint32_t)(blockSize + alignment), memPartitionId, 1);
            if (!p_Block)
            {
                MEM_Free(p_Mem);
                RETURN_ERROR(MAJOR, E_NO_MEMORY, ("Memory segment blocks"));
            }

            /* Store the memory segment address */
            p_Mem->p_Bases[i] = p_Block;

            /* The following places the data of each block in an aligned address */
            ALIGN_BLOCK(p_Block, prefixSize, alignment);

#ifdef DEBUG_MEM_LEAKS
            /* Need 4 bytes before the meaningful bytes to store the block index.
               We know we have them because alignment is at least 4 bytes. */
            if (p_Block == p_Mem->p_Bases[i])
                p_Block += alignment;

            *(uint32_t *)(p_Block - 4) = i;
#endif /* DEBUG_MEM_LEAKS */

            p_Mem->p_BlocksStack[i] = p_Block;
        }
    }

    /* store name */
    strncpy(p_Mem->name, name, MEM_MAX_NAME_LENGTH-1);

    /* return handle to caller */
    *p_Handle = (t_Handle)p_Mem;

#ifdef DEBUG_MEM_LEAKS
    {
        t_Error errCode = InitMemDebugDatabase(p_Mem);

        if (errCode != E_OK)
            RETURN_ERROR(MAJOR, errCode, NO_MSG);
    }
#endif /* DEBUG_MEM_LEAKS */

    return E_OK;
}


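/*
 * MEM_Free() runs the leak check, returns the block memory if this module
 * allocated it (LOCAL owners via XX_Free(), LOCAL_SMART owners via
 * XX_FreeSmart()), and then releases the spinlock, the bookkeeping arrays,
 * and the descriptor itself.
 */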
/*****************************************************************************/
void MEM_Free(t_Handle h_Mem)
{
    t_MemorySegment *p_Mem = (t_MemorySegment*)h_Mem;
    uint32_t        num, i;

    /* Check MEM leaks */
    MEM_CheckLeaks(h_Mem);

    if (p_Mem)
    {
        num = p_Mem->consecutiveMem ? 1 : p_Mem->num;

        if (p_Mem->allocOwner == e_MEM_ALLOC_OWNER_LOCAL_SMART)
        {
            for (i=0; i < num; i++)
            {
                if (p_Mem->p_Bases[i])
                {
                    XX_FreeSmart(p_Mem->p_Bases[i]);
                }
            }
        }
        else if (p_Mem->allocOwner == e_MEM_ALLOC_OWNER_LOCAL)
        {
            for (i=0; i < num; i++)
            {
                if (p_Mem->p_Bases[i])
                {
                    XX_Free(p_Mem->p_Bases[i]);
                }
            }
        }

        if (p_Mem->h_Spinlock)
            XX_FreeSpinlock(p_Mem->h_Spinlock);

        if (p_Mem->p_Bases)
            XX_Free(p_Mem->p_Bases);

        if (p_Mem->p_BlocksStack)
            XX_Free(p_Mem->p_BlocksStack);

#ifdef DEBUG_MEM_LEAKS
        if (p_Mem->p_MemDbg)
            XX_Free(p_Mem->p_MemDbg);
#endif /* DEBUG_MEM_LEAKS */

        XX_Free(p_Mem);
    }
}


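/*
 * MEM_Get()/MEM_GetN()/MEM_Put() are the runtime allocation entry points:
 * each takes the segment spinlock via XX_LockIntrSpinlock(), performs the
 * MemGet()/MemPut() stack operation (plus the DEBUG_MEM_LEAKS bookkeeping),
 * and releases the lock.  MEM_Get() returns NULL when the pool is exhausted;
 * MEM_GetN() returns the number of blocks actually obtained.
 */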
/*****************************************************************************/
void * MEM_Get(t_Handle h_Mem)
{
    t_MemorySegment *p_Mem = (t_MemorySegment *)h_Mem;
    uint8_t         *p_Block;
    uint32_t        intFlags;
#ifdef DEBUG_MEM_LEAKS
    uintptr_t       callerAddr = 0;

    GET_CALLER_ADDR;
#endif /* DEBUG_MEM_LEAKS */

    ASSERT_COND(h_Mem);

    intFlags = XX_LockIntrSpinlock(p_Mem->h_Spinlock);
    /* check if there is an available block */
    if ((p_Block = (uint8_t *)MemGet(p_Mem)) == NULL)
    {
        XX_UnlockIntrSpinlock(p_Mem->h_Spinlock, intFlags);
        return NULL;
    }

#ifdef DEBUG_MEM_LEAKS
    DebugMemGet(p_Mem, p_Block, callerAddr);
#endif /* DEBUG_MEM_LEAKS */
    XX_UnlockIntrSpinlock(p_Mem->h_Spinlock, intFlags);

    return (void *)p_Block;
}


/*****************************************************************************/
uint16_t MEM_GetN(t_Handle h_Mem, uint32_t num, void *array[])
{
    t_MemorySegment     *p_Mem = (t_MemorySegment *)h_Mem;
    uint32_t            availableBlocks;
    register uint32_t   i;
    uint32_t            intFlags;
#ifdef DEBUG_MEM_LEAKS
    uintptr_t           callerAddr = 0;

    GET_CALLER_ADDR;
#endif /* DEBUG_MEM_LEAKS */

    ASSERT_COND(h_Mem);

    intFlags = XX_LockIntrSpinlock(p_Mem->h_Spinlock);
    /* check how many blocks are available */
    availableBlocks = (uint32_t)(p_Mem->num - p_Mem->current);
    if (num > availableBlocks)
    {
        num = availableBlocks;
    }

    for (i=0; i < num; i++)
    {
        /* get pointer to block */
        if ((array[i] = MemGet(p_Mem)) == NULL)
        {
            break;
        }

#ifdef DEBUG_MEM_LEAKS
        DebugMemGet(p_Mem, array[i], callerAddr);
#endif /* DEBUG_MEM_LEAKS */
    }
    XX_UnlockIntrSpinlock(p_Mem->h_Spinlock, intFlags);

    return (uint16_t)i;
}


/*****************************************************************************/
t_Error MEM_Put(t_Handle h_Mem, void *p_Block)
{
    t_MemorySegment *p_Mem = (t_MemorySegment *)h_Mem;
    t_Error         rc;
    uint32_t        intFlags;

    ASSERT_COND(h_Mem);

    intFlags = XX_LockIntrSpinlock(p_Mem->h_Spinlock);
    /* check if blocks stack is full */
    if ((rc = MemPut(p_Mem, p_Block)) != E_OK)
    {
        XX_UnlockIntrSpinlock(p_Mem->h_Spinlock, intFlags);
        RETURN_ERROR(MAJOR, rc, NO_MSG);
    }

#ifdef DEBUG_MEM_LEAKS
    DebugMemPut(p_Mem, p_Block);
#endif /* DEBUG_MEM_LEAKS */
    XX_UnlockIntrSpinlock(p_Mem->h_Spinlock, intFlags);

    return E_OK;
}


#ifdef DEBUG_MEM_LEAKS

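/*
 * MEM_CheckLeaks() walks the debug database and prints every block whose
 * owner entry is still set (i.e. a block that was taken with MEM_Get() but
 * never returned with MEM_Put()), together with the caller address recorded
 * at allocation time.  It is invoked from MEM_Free().
 */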
/*****************************************************************************/
void MEM_CheckLeaks(t_Handle h_Mem)
{
    t_MemorySegment *p_Mem = (t_MemorySegment *)h_Mem;
    t_MemDbg        *p_MemDbg = (t_MemDbg *)p_Mem->p_MemDbg;
    uint8_t         *p_Block;
    int             i;

    ASSERT_COND(h_Mem);

    if (p_Mem->consecutiveMem)
    {
        for (i=0; i < p_Mem->num; i++)
        {
            if (p_MemDbg[i].ownerAddress != ILLEGAL_BASE)
            {
                /* Find the block address */
                p_Block = ((p_Mem->p_Bases[0] + p_Mem->blockOffset) +
                           (i * p_Mem->blockSize));

                XX_Print("MEM leak: 0x%08x, Caller address: 0x%08x\n",
                         p_Block, p_MemDbg[i].ownerAddress);
            }
        }
    }
    else
    {
        for (i=0; i < p_Mem->num; i++)
        {
            if (p_MemDbg[i].ownerAddress != ILLEGAL_BASE)
            {
                /* Find the block address */
                p_Block = p_Mem->p_Bases[i];

                ALIGN_BLOCK(p_Block, p_Mem->prefixSize, p_Mem->alignment);

                if (p_Block == p_Mem->p_Bases[i])
                    p_Block += p_Mem->alignment;

                XX_Print("MEM leak: 0x%08x, Caller address: 0x%08x\n",
                         p_Block, p_MemDbg[i].ownerAddress);
            }
        }
    }
}

#endif /* DEBUG_MEM_LEAKS */


#endif