/******************************************************************************

 © 1995-2003, 2004, 2005-2011 Freescale Semiconductor, Inc.
 All rights reserved.

 This is proprietary source code of Freescale Semiconductor Inc.,
 and its use is subject to the NetComm Device Drivers EULA.
 The copyright notice above does not evidence any actual or intended
 publication of such source code.

 ALTERNATIVELY, redistribution and use in source and binary forms, with
 or without modification, are permitted provided that the following
 conditions are met:
     * Redistributions of source code must retain the above copyright
       notice, this list of conditions and the following disclaimer.
     * Redistributions in binary form must reproduce the above copyright
       notice, this list of conditions and the following disclaimer in the
       documentation and/or other materials provided with the distribution.
     * Neither the name of Freescale Semiconductor nor the
       names of its contributors may be used to endorse or promote products
       derived from this software without specific prior written permission.

 THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
 EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
 DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *

 **************************************************************************/
#include "error_ext.h"
#include "part_ext.h"
#include "std_ext.h"
#include "string_ext.h"
#include "mem_ext.h"
#include "mem.h"
#include "xx_ext.h"


#define PAD_ALIGNMENT(align, x) (((x)%(align)) ? ((align)-((x)%(align))) : 0)

#define ALIGN_BLOCK(p_Block, prefixSize, alignment)                 \
    do {                                                            \
        p_Block += (prefixSize);                                    \
        p_Block += PAD_ALIGNMENT((alignment), (uintptr_t)(p_Block)); \
    } while (0)
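
/*
 * Worked example of the alignment arithmetic above (values are illustrative
 * only): with alignment = 16 and prefixSize = 6, a block starting at address
 * 0x1000 is first advanced past the prefix to 0x1006;
 * PAD_ALIGNMENT(16, 0x1006) = 16 - (0x1006 % 16) = 10, so ALIGN_BLOCK adds
 * 10 more bytes and the data ends up at 0x1010, a 16-byte boundary.
 * If the address is already aligned, PAD_ALIGNMENT yields 0.
 */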

#if defined(__GNUC__)
/* With GCC on PowerPC, the caller's return address is read from the link register */
#define GET_CALLER_ADDR \
    __asm__ ("mflr  %0" : "=r" (callerAddr))
#elif defined(__MWERKS__)
/* NOTE: This implementation is only valid for CodeWarrior for PowerPC */
#define GET_CALLER_ADDR \
    __asm__("add  %0, 0, %0" : : "r" (callerAddr))
#endif /* defined(__GNUC__) */


/*****************************************************************************/
static __inline__ void * MemGet(t_MemorySegment *p_Mem)
{
    uint8_t *p_Block;

    /* check if there is an available block */
    if (p_Mem->current == p_Mem->num)
    {
        p_Mem->getFailures++;
        return NULL;
    }

    /* get the block */
    p_Block = p_Mem->p_BlocksStack[p_Mem->current];
#ifdef DEBUG
    p_Mem->p_BlocksStack[p_Mem->current] = NULL;
#endif /* DEBUG */
    /* advance current index */
    p_Mem->current++;

    return (void *)p_Block;
}

/*****************************************************************************/
static __inline__ t_Error MemPut(t_MemorySegment *p_Mem, void *p_Block)
{
    /* make sure the blocks stack is not already full */
    if (p_Mem->current > 0)
    {
        /* decrease current index */
        p_Mem->current--;
        /* put the block */
        p_Mem->p_BlocksStack[p_Mem->current] = (uint8_t *)p_Block;
        return E_OK;
    }

    RETURN_ERROR(MAJOR, E_FULL, NO_MSG);
}
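
/*
 * Note (illustrative): p_BlocksStack together with 'current' forms a simple
 * LIFO free list.  With num = 4, two MemGet() calls hand out stack[0] and
 * stack[1] and leave current == 2; a following MemPut() stores the returned
 * block back at index 1, and the next MemGet() hands that same block out
 * again.  MemPut() can only fail with E_FULL if more blocks are returned
 * than were ever taken from this segment.
 */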


#ifdef DEBUG_MEM_LEAKS

/*****************************************************************************/
static t_Error InitMemDebugDatabase(t_MemorySegment *p_Mem)
{
    p_Mem->p_MemDbg = (void *)XX_Malloc(sizeof(t_MemDbg) * p_Mem->num);
    if (!p_Mem->p_MemDbg)
    {
        RETURN_ERROR(MAJOR, E_NO_MEMORY, ("Memory debug object"));
    }

    memset(p_Mem->p_MemDbg, ILLEGAL_BASE, sizeof(t_MemDbg) * p_Mem->num);

    return E_OK;
}


/*****************************************************************************/
static t_Error DebugMemGet(t_Handle h_Mem, void *p_Block, uintptr_t ownerAddress)
{
    t_MemorySegment *p_Mem = (t_MemorySegment *)h_Mem;
    t_MemDbg        *p_MemDbg = (t_MemDbg *)p_Mem->p_MemDbg;
    uint32_t        blockIndex;

    ASSERT_COND(ownerAddress != ILLEGAL_BASE);

    /* Find block num */
    if (p_Mem->consecutiveMem)
    {
        blockIndex =
            (((uint8_t *)p_Block - (p_Mem->p_Bases[0] + p_Mem->blockOffset)) / p_Mem->blockSize);
    }
    else
    {
        blockIndex = *(uint32_t *)((uint8_t *)p_Block - 4);
    }

    ASSERT_COND(blockIndex < p_Mem->num);
    ASSERT_COND(p_MemDbg[blockIndex].ownerAddress == ILLEGAL_BASE);

    p_MemDbg[blockIndex].ownerAddress = ownerAddress;

    return E_OK;
}

/*****************************************************************************/
static t_Error DebugMemPut(t_Handle h_Mem, void *p_Block)
{
    t_MemorySegment *p_Mem = (t_MemorySegment *)h_Mem;
    t_MemDbg        *p_MemDbg = (t_MemDbg *)p_Mem->p_MemDbg;
    uint32_t        blockIndex;
    uint8_t         *p_Temp;

    /* Find block num */
    if (p_Mem->consecutiveMem)
    {
        blockIndex =
            (((uint8_t *)p_Block - (p_Mem->p_Bases[0] + p_Mem->blockOffset)) / p_Mem->blockSize);

        if (blockIndex >= p_Mem->num)
        {
            RETURN_ERROR(MAJOR, E_INVALID_ADDRESS,
                         ("Freed address (0x%08x) does not belong to this pool", p_Block));
        }
    }
    else
    {
        blockIndex = *(uint32_t *)((uint8_t *)p_Block - 4);

        if (blockIndex >= p_Mem->num)
        {
            RETURN_ERROR(MAJOR, E_INVALID_ADDRESS,
                         ("Freed address (0x%08x) does not belong to this pool", p_Block));
        }

        /* Verify that the block matches the corresponding base */
        p_Temp = p_Mem->p_Bases[blockIndex];

        ALIGN_BLOCK(p_Temp, p_Mem->prefixSize, p_Mem->alignment);

        if (p_Temp == p_Mem->p_Bases[blockIndex])
            p_Temp += p_Mem->alignment;

        if (p_Temp != p_Block)
        {
            RETURN_ERROR(MAJOR, E_INVALID_ADDRESS,
                         ("Freed address (0x%08x) does not belong to this pool", p_Block));
        }
    }

    if (p_MemDbg[blockIndex].ownerAddress == ILLEGAL_BASE)
    {
        RETURN_ERROR(MAJOR, E_ALREADY_FREE,
                     ("Attempt to free unallocated address (0x%08x)", p_Block));
    }

    p_MemDbg[blockIndex].ownerAddress = (uintptr_t)ILLEGAL_BASE;

    return E_OK;
}

#endif /* DEBUG_MEM_LEAKS */


/*****************************************************************************/
uint32_t MEM_ComputePartitionSize(uint32_t num,
                                  uint16_t dataSize,
                                  uint16_t prefixSize,
                                  uint16_t postfixSize,
                                  uint16_t alignment)
{
    uint32_t  blockSize = 0, pad1 = 0, pad2 = 0;

    /* Make sure that the alignment is at least 4 */
    if (alignment < 4)
    {
        alignment = 4;
    }

    pad1 = (uint32_t)PAD_ALIGNMENT(4, prefixSize);
    /* Block size not including 2nd padding */
    blockSize = pad1 + prefixSize + dataSize + postfixSize;
    pad2 = PAD_ALIGNMENT(alignment, blockSize);
    /* Block size including 2nd padding */
    blockSize += pad2;

    return ((num * blockSize) + alignment);
}
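
/*
 * Worked example (illustrative numbers only): for num = 10, dataSize = 6,
 * prefixSize = 3, postfixSize = 1 and alignment = 16:
 *   pad1      = PAD_ALIGNMENT(4, 3)   = 1
 *   blockSize = 1 + 3 + 6 + 1         = 11
 *   pad2      = PAD_ALIGNMENT(16, 11) = 5   ->  blockSize = 16
 *   total     = 10 * 16 + 16          = 176 bytes
 * The extra 'alignment' bytes cover the worst-case offset that is discarded
 * at the start of the segment to place the first block's data on an aligned
 * address.
 */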

/*****************************************************************************/
t_Error MEM_Init(char       name[],
                 t_Handle   *p_Handle,
                 uint32_t   num,
                 uint16_t   dataSize,
                 uint16_t   prefixSize,
                 uint16_t   postfixSize,
                 uint16_t   alignment)
{
    uint8_t     *p_Memory;
    uint32_t    allocSize;
    t_Error     errCode;

    allocSize = MEM_ComputePartitionSize(num,
                                         dataSize,
                                         prefixSize,
                                         postfixSize,
                                         alignment);

    p_Memory = (uint8_t *)XX_Malloc(allocSize);
    if (!p_Memory)
    {
        RETURN_ERROR(MAJOR, E_NO_MEMORY, ("Memory segment"));
    }

    errCode = MEM_InitByAddress(name,
                                p_Handle,
                                num,
                                dataSize,
                                prefixSize,
                                postfixSize,
                                alignment,
                                p_Memory);
    if (errCode != E_OK)
    {
        /* avoid leaking the blocks buffer if initialization failed */
        XX_Free(p_Memory);
        RETURN_ERROR(MAJOR, errCode, NO_MSG);
    }

    ((t_MemorySegment *)(*p_Handle))->allocOwner = e_MEM_ALLOC_OWNER_LOCAL;

    return E_OK;
}
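
/*
 * Minimal usage sketch (hypothetical values and names): create a pool of
 * 64 blocks of 128 data bytes, 8-byte aligned, take a block, return it,
 * and destroy the pool.
 *
 *     t_Handle h_Pool;
 *     void     *p_Buf;
 *
 *     if (MEM_Init("example", &h_Pool, 64, 128, 0, 0, 8) != E_OK)
 *         return E_NO_MEMORY;
 *
 *     p_Buf = MEM_Get(h_Pool);
 *     if (p_Buf)
 *         MEM_Put(h_Pool, p_Buf);
 *
 *     MEM_Free(h_Pool);
 */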


/*****************************************************************************/
t_Error MEM_InitByAddress(char      name[],
                          t_Handle  *p_Handle,
                          uint32_t  num,
                          uint16_t  dataSize,
                          uint16_t  prefixSize,
                          uint16_t  postfixSize,
                          uint16_t  alignment,
                          uint8_t   *p_Memory)
{
    t_MemorySegment *p_Mem;
    uint32_t        i, blockSize;
    uint16_t        alignPad, endPad;
    uint8_t         *p_Blocks;

    /* prepare in case of error */
    *p_Handle = NULL;

    if (!p_Memory)
    {
        RETURN_ERROR(MAJOR, E_NULL_POINTER, ("Memory blocks"));
    }

    p_Blocks = p_Memory;

    /* make sure that the alignment is at least 4 and power of 2 */
    if (alignment < 4)
    {
        alignment = 4;
    }
    else if (!POWER_OF_2(alignment))
    {
        RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Alignment (should be power of 2)"));
    }

    /* first allocate the segment descriptor */
    p_Mem = (t_MemorySegment *)XX_Malloc(sizeof(t_MemorySegment));
    if (!p_Mem)
    {
        RETURN_ERROR(MAJOR, E_NO_MEMORY, ("Memory segment structure"));
    }

    /* allocate the blocks stack */
    p_Mem->p_BlocksStack = (uint8_t **)XX_Malloc(num * sizeof(uint8_t*));
    if (!p_Mem->p_BlocksStack)
    {
        XX_Free(p_Mem);
        RETURN_ERROR(MAJOR, E_NO_MEMORY, ("Memory segment block pointers stack"));
    }

    /* allocate the blocks bases array */
    p_Mem->p_Bases = (uint8_t **)XX_Malloc(sizeof(uint8_t*));
    if (!p_Mem->p_Bases)
    {
        MEM_Free(p_Mem);
        RETURN_ERROR(MAJOR, E_NO_MEMORY, ("Memory segment base pointers array"));
    }
    memset(p_Mem->p_Bases, 0, sizeof(uint8_t*));

    /* store info about this segment */
    p_Mem->num = num;
    p_Mem->current = 0;
    p_Mem->dataSize = dataSize;
    p_Mem->p_Bases[0] = p_Blocks;
    p_Mem->getFailures = 0;
    p_Mem->allocOwner = e_MEM_ALLOC_OWNER_EXTERNAL;
    p_Mem->consecutiveMem = TRUE;
    p_Mem->prefixSize = prefixSize;
    p_Mem->postfixSize = postfixSize;
    p_Mem->alignment = alignment;
    /* store name (always NULL-terminated) */
    strncpy(p_Mem->name, name, MEM_MAX_NAME_LENGTH-1);
    p_Mem->name[MEM_MAX_NAME_LENGTH-1] = '\0';

    p_Mem->h_Spinlock = XX_InitSpinlock();
    if (!p_Mem->h_Spinlock)
    {
        MEM_Free(p_Mem);
        RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Can't create spinlock!"));
    }

    alignPad = (uint16_t)PAD_ALIGNMENT(4, prefixSize);
    /* Make sure the entire size is a multiple of alignment */
    endPad = (uint16_t)PAD_ALIGNMENT(alignment, (alignPad + prefixSize + dataSize + postfixSize));

    /* The following manipulation places the data of block[0] at an aligned address;
       since the block size is aligned, the data of all following blocks is aligned as well */
    ALIGN_BLOCK(p_Blocks, prefixSize, alignment);

    blockSize = (uint32_t)(alignPad + prefixSize + dataSize + postfixSize + endPad);

    /* initialize the blocks */
    for (i=0; i < num; i++)
    {
        p_Mem->p_BlocksStack[i] = p_Blocks;
        p_Blocks += blockSize;
    }

    /* return handle to caller */
    *p_Handle = (t_Handle)p_Mem;

#ifdef DEBUG_MEM_LEAKS
    {
        t_Error errCode = InitMemDebugDatabase(p_Mem);

        if (errCode != E_OK)
            RETURN_ERROR(MAJOR, errCode, NO_MSG);

        p_Mem->blockOffset = (uint32_t)(p_Mem->p_BlocksStack[0] - p_Mem->p_Bases[0]);
        p_Mem->blockSize = blockSize;
    }
#endif /* DEBUG_MEM_LEAKS */

    return E_OK;
}
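
/*
 * Usage sketch (hypothetical names; a caller-supplied buffer is assumed):
 * MEM_ComputePartitionSize() tells the caller how large a buffer
 * MEM_InitByAddress() needs for the same parameters.
 *
 *     uint32_t size   = MEM_ComputePartitionSize(32, 256, 0, 0, 64);
 *     uint8_t  *p_Buf = <caller-provided memory of 'size' bytes>;
 *     t_Handle h_Pool;
 *
 *     if (MEM_InitByAddress("rxbufs", &h_Pool, 32, 256, 0, 0, 64, p_Buf) != E_OK)
 *         <handle the error>;
 */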


/*****************************************************************************/
t_Error MEM_InitSmart(char      name[],
                      t_Handle  *p_Handle,
                      uint32_t  num,
                      uint16_t  dataSize,
                      uint16_t  prefixSize,
                      uint16_t  postfixSize,
                      uint16_t  alignment,
                      uint8_t   memPartitionId,
                      bool      consecutiveMem)
{
    t_MemorySegment *p_Mem;
    uint32_t        i, blockSize;
    uint16_t        alignPad, endPad;

    /* prepare in case of error */
    *p_Handle = NULL;

    /* make sure that size is always a multiple of 4 */
    if (dataSize & 3)
    {
        dataSize &= ~3;
        dataSize += 4;
    }

    /* make sure that the alignment is at least 4 and power of 2 */
    if (alignment < 4)
    {
        alignment = 4;
    }
    else if (!POWER_OF_2(alignment))
    {
        RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Alignment (should be power of 2)"));
    }

    /* first allocate the segment descriptor */
    p_Mem = (t_MemorySegment *)XX_Malloc(sizeof(t_MemorySegment));
    if (!p_Mem)
    {
        RETURN_ERROR(MAJOR, E_NO_MEMORY, ("Memory segment structure"));
    }

    /* allocate the blocks stack */
    p_Mem->p_BlocksStack = (uint8_t **)XX_Malloc(num * sizeof(uint8_t*));
    if (!p_Mem->p_BlocksStack)
    {
        MEM_Free(p_Mem);
        RETURN_ERROR(MAJOR, E_NO_MEMORY, ("Memory segment block pointers stack"));
    }

    /* allocate the blocks bases array */
    p_Mem->p_Bases = (uint8_t **)XX_Malloc((consecutiveMem ? 1 : num) * sizeof(uint8_t*));
    if (!p_Mem->p_Bases)
    {
        MEM_Free(p_Mem);
        RETURN_ERROR(MAJOR, E_NO_MEMORY, ("Memory segment base pointers array"));
    }
    memset(p_Mem->p_Bases, 0, (consecutiveMem ? 1 : num) * sizeof(uint8_t*));

    /* store info about this segment */
    p_Mem->num = num;
    p_Mem->current = 0;
    p_Mem->dataSize = dataSize;
    p_Mem->getFailures = 0;
    p_Mem->allocOwner = e_MEM_ALLOC_OWNER_LOCAL_SMART;
    p_Mem->consecutiveMem = consecutiveMem;
    p_Mem->prefixSize = prefixSize;
    p_Mem->postfixSize = postfixSize;
    p_Mem->alignment = alignment;

    p_Mem->h_Spinlock = XX_InitSpinlock();
    if (!p_Mem->h_Spinlock)
    {
        MEM_Free(p_Mem);
        RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Can't create spinlock!"));
    }

    alignPad = (uint16_t)PAD_ALIGNMENT(4, prefixSize);
    /* Make sure the entire size is a multiple of alignment */
    endPad = (uint16_t)PAD_ALIGNMENT(alignment, alignPad + prefixSize + dataSize + postfixSize);

    /* Calculate blockSize */
    blockSize = (uint32_t)(alignPad + prefixSize + dataSize + postfixSize + endPad);

    /* Now allocate the blocks */
    if (p_Mem->consecutiveMem)
    {
        /* At most |alignment - 1| bytes are discarded at the beginning of the
           received segment for alignment reasons, therefore the allocation is of:
           (alignment + (num * block size)). */
        uint8_t *p_Blocks = (uint8_t *)
            XX_MallocSmart((uint32_t)((num * blockSize) + alignment), memPartitionId, 1);
        if (!p_Blocks)
        {
            MEM_Free(p_Mem);
            RETURN_ERROR(MAJOR, E_NO_MEMORY, ("Memory segment blocks"));
        }

        /* Store the memory segment address */
        p_Mem->p_Bases[0] = p_Blocks;

        /* The following manipulation places the data of block[0] at an aligned address;
           since the block size is aligned, the data of all following blocks is aligned as well. */
        ALIGN_BLOCK(p_Blocks, prefixSize, alignment);

        /* initialize the blocks */
        for (i = 0; i < num; i++)
        {
            p_Mem->p_BlocksStack[i] = p_Blocks;
            p_Blocks += blockSize;
        }

#ifdef DEBUG_MEM_LEAKS
        p_Mem->blockOffset = (uint32_t)(p_Mem->p_BlocksStack[0] - p_Mem->p_Bases[0]);
        p_Mem->blockSize = blockSize;
#endif /* DEBUG_MEM_LEAKS */
    }
    else
    {
        /* At most |alignment - 1| bytes are discarded at the beginning of each
           received segment for alignment reasons, therefore each allocation is of:
           (alignment + block size). */
        for (i = 0; i < num; i++)
        {
            uint8_t *p_Block = (uint8_t *)
                XX_MallocSmart((uint32_t)(blockSize + alignment), memPartitionId, 1);
            if (!p_Block)
            {
                MEM_Free(p_Mem);
                RETURN_ERROR(MAJOR, E_NO_MEMORY, ("Memory segment blocks"));
            }

            /* Store the memory segment address */
            p_Mem->p_Bases[i] = p_Block;

            /* The following places the data of each block at an aligned address */
            ALIGN_BLOCK(p_Block, prefixSize, alignment);

#ifdef DEBUG_MEM_LEAKS
            /* Need 4 bytes before the meaningful bytes to store the block index.
               We know we have them because alignment is at least 4 bytes. */
            if (p_Block == p_Mem->p_Bases[i])
                p_Block += alignment;

            *(uint32_t *)(p_Block - 4) = i;
#endif /* DEBUG_MEM_LEAKS */

            p_Mem->p_BlocksStack[i] = p_Block;
        }
    }

    /* store name (always NULL-terminated) */
    strncpy(p_Mem->name, name, MEM_MAX_NAME_LENGTH-1);
    p_Mem->name[MEM_MAX_NAME_LENGTH-1] = '\0';

    /* return handle to caller */
    *p_Handle = (t_Handle)p_Mem;

#ifdef DEBUG_MEM_LEAKS
    {
        t_Error errCode = InitMemDebugDatabase(p_Mem);

        if (errCode != E_OK)
            RETURN_ERROR(MAJOR, errCode, NO_MSG);
    }
#endif /* DEBUG_MEM_LEAKS */

    return E_OK;
}
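
/*
 * Usage sketch (illustrative; the partition id is platform-specific and the
 * value below is only a placeholder): allocate 256 blocks of 1 KB, 64-byte
 * aligned, from a driver-selected memory partition as one consecutive area.
 *
 *     t_Handle h_Pool;
 *
 *     if (MEM_InitSmart("qbufs", &h_Pool, 256, 1024, 0, 0, 64,
 *                       myPartitionId, TRUE) != E_OK)
 *         <handle the error>;
 *
 * With consecutiveMem == FALSE each block is allocated separately, which
 * tolerates a fragmented partition at the cost of one allocation per block.
 */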


/*****************************************************************************/
void MEM_Free(t_Handle h_Mem)
{
    t_MemorySegment *p_Mem = (t_MemorySegment*)h_Mem;
    uint32_t        num, i;

    /* Check MEM leaks */
    MEM_CheckLeaks(h_Mem);

    if (p_Mem)
    {
        num = p_Mem->consecutiveMem ? 1 : p_Mem->num;

        if (p_Mem->allocOwner == e_MEM_ALLOC_OWNER_LOCAL_SMART)
        {
            for (i=0; i < num; i++)
            {
                if (p_Mem->p_Bases[i])
                {
                    XX_FreeSmart(p_Mem->p_Bases[i]);
                }
            }
        }
        else if (p_Mem->allocOwner == e_MEM_ALLOC_OWNER_LOCAL)
        {
            for (i=0; i < num; i++)
            {
                if (p_Mem->p_Bases[i])
                {
                    XX_Free(p_Mem->p_Bases[i]);
                }
            }
        }

        if (p_Mem->h_Spinlock)
            XX_FreeSpinlock(p_Mem->h_Spinlock);

        if (p_Mem->p_Bases)
            XX_Free(p_Mem->p_Bases);

        if (p_Mem->p_BlocksStack)
            XX_Free(p_Mem->p_BlocksStack);

#ifdef DEBUG_MEM_LEAKS
        if (p_Mem->p_MemDbg)
            XX_Free(p_Mem->p_MemDbg);
#endif /* DEBUG_MEM_LEAKS */

        XX_Free(p_Mem);
    }
}


/*****************************************************************************/
void * MEM_Get(t_Handle h_Mem)
{
    t_MemorySegment *p_Mem = (t_MemorySegment *)h_Mem;
    uint8_t         *p_Block;
    uint32_t        intFlags;
#ifdef DEBUG_MEM_LEAKS
    uintptr_t       callerAddr = 0;

    GET_CALLER_ADDR;
#endif /* DEBUG_MEM_LEAKS */

    ASSERT_COND(h_Mem);

    intFlags = XX_LockIntrSpinlock(p_Mem->h_Spinlock);
    /* check if there is an available block */
    if ((p_Block = (uint8_t *)MemGet(p_Mem)) == NULL)
    {
        XX_UnlockIntrSpinlock(p_Mem->h_Spinlock, intFlags);
        return NULL;
    }

#ifdef DEBUG_MEM_LEAKS
    DebugMemGet(p_Mem, p_Block, callerAddr);
#endif /* DEBUG_MEM_LEAKS */
    XX_UnlockIntrSpinlock(p_Mem->h_Spinlock, intFlags);

    return (void *)p_Block;
}


/*****************************************************************************/
uint16_t MEM_GetN(t_Handle h_Mem, uint32_t num, void *array[])
{
    t_MemorySegment     *p_Mem = (t_MemorySegment *)h_Mem;
    uint32_t            availableBlocks;
    register uint32_t   i;
    uint32_t            intFlags;
#ifdef DEBUG_MEM_LEAKS
    uintptr_t           callerAddr = 0;

    GET_CALLER_ADDR;
#endif /* DEBUG_MEM_LEAKS */

    ASSERT_COND(h_Mem);

    intFlags = XX_LockIntrSpinlock(p_Mem->h_Spinlock);
    /* check how many blocks are available */
    availableBlocks = (uint32_t)(p_Mem->num - p_Mem->current);
    if (num > availableBlocks)
    {
        num = availableBlocks;
    }

    for (i=0; i < num; i++)
    {
        /* get pointer to block */
        if ((array[i] = MemGet(p_Mem)) == NULL)
        {
            break;
        }

#ifdef DEBUG_MEM_LEAKS
        DebugMemGet(p_Mem, array[i], callerAddr);
#endif /* DEBUG_MEM_LEAKS */
    }
    XX_UnlockIntrSpinlock(p_Mem->h_Spinlock, intFlags);

    return (uint16_t)i;
}
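
/*
 * Usage sketch (hypothetical buffer count): MEM_GetN() returns the number of
 * blocks actually obtained, which may be less than requested, so the caller
 * should always check the return value.
 *
 *     void     *blocks[16];
 *     uint16_t got = MEM_GetN(h_Pool, 16, blocks);
 *     ... use blocks[0..got-1], later returning each one with MEM_Put() ...
 */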


/*****************************************************************************/
t_Error MEM_Put(t_Handle h_Mem, void *p_Block)
{
    t_MemorySegment *p_Mem = (t_MemorySegment *)h_Mem;
    t_Error         rc;
    uint32_t        intFlags;

    ASSERT_COND(h_Mem);

    intFlags = XX_LockIntrSpinlock(p_Mem->h_Spinlock);
    /* return the block; fails if the blocks stack is already full */
    if ((rc = MemPut(p_Mem, p_Block)) != E_OK)
    {
        XX_UnlockIntrSpinlock(p_Mem->h_Spinlock, intFlags);
        RETURN_ERROR(MAJOR, rc, NO_MSG);
    }

#ifdef DEBUG_MEM_LEAKS
    DebugMemPut(p_Mem, p_Block);
#endif /* DEBUG_MEM_LEAKS */
    XX_UnlockIntrSpinlock(p_Mem->h_Spinlock, intFlags);

    return E_OK;
}


#ifdef DEBUG_MEM_LEAKS

/*****************************************************************************/
void MEM_CheckLeaks(t_Handle h_Mem)
{
    t_MemorySegment *p_Mem = (t_MemorySegment *)h_Mem;
    t_MemDbg        *p_MemDbg = (t_MemDbg *)p_Mem->p_MemDbg;
    uint8_t         *p_Block;
    int             i;

    ASSERT_COND(h_Mem);

    if (p_Mem->consecutiveMem)
    {
        for (i=0; i < p_Mem->num; i++)
        {
            if (p_MemDbg[i].ownerAddress != ILLEGAL_BASE)
            {
                /* Find the block address */
                p_Block = ((p_Mem->p_Bases[0] + p_Mem->blockOffset) +
                           (i * p_Mem->blockSize));

                XX_Print("MEM leak: 0x%08x, Caller address: 0x%08x\n",
                         p_Block, p_MemDbg[i].ownerAddress);
            }
        }
    }
    else
    {
        for (i=0; i < p_Mem->num; i++)
        {
            if (p_MemDbg[i].ownerAddress != ILLEGAL_BASE)
            {
                /* Find the block address */
                p_Block = p_Mem->p_Bases[i];

                ALIGN_BLOCK(p_Block, p_Mem->prefixSize, p_Mem->alignment);

                if (p_Block == p_Mem->p_Bases[i])
                    p_Block += p_Mem->alignment;

                XX_Print("MEM leak: 0x%08x, Caller address: 0x%08x\n",
                         p_Block, p_MemDbg[i].ownerAddress);
            }
        }
    }
}

#endif /* DEBUG_MEM_LEAKS */