source: trunk/libs/newlib/src/newlib/libc/stdlib/nano-mallocr.c @ 471

Last change on this file since 471 was 444, checked in by satin@…, 6 years ago

add newlib,libalmos-mkh, restructure shared_syscalls.h and mini-libc

File size: 19.1 KB
/*
 * Copyright (c) 2012, 2013 ARM Ltd
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the company may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY ARM LTD ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL ARM LTD BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/* Implementation of <<malloc>>, <<free>>, <<calloc>> and <<realloc>>,
 * optionally reentrant.
 *
 * For interface documentation refer to malloc.c.
 */

#include <stdio.h>
#include <string.h>
#include <errno.h>

#if DEBUG
#include <assert.h>
#else
#define assert(x) ((void)0)
#endif

#ifndef MAX
#define MAX(a,b) ((a) >= (b) ? (a) : (b))
#endif

#define _SBRK_R(X) _sbrk_r(X)

#ifdef INTERNAL_NEWLIB

#include <sys/config.h>
#include <reent.h>

#define RARG struct _reent *reent_ptr,
#define RONEARG struct _reent *reent_ptr
#define RCALL reent_ptr,
#define RONECALL reent_ptr

#define MALLOC_LOCK __malloc_lock(reent_ptr)
#define MALLOC_UNLOCK __malloc_unlock(reent_ptr)

#define RERRNO reent_ptr->_errno

#define nano_malloc             _malloc_r
#define nano_free               _free_r
#define nano_realloc            _realloc_r
#define nano_memalign           _memalign_r
#define nano_valloc             _valloc_r
#define nano_pvalloc            _pvalloc_r
#define nano_calloc             _calloc_r
#define nano_cfree              _cfree_r
#define nano_malloc_usable_size _malloc_usable_size_r
#define nano_malloc_stats       _malloc_stats_r
#define nano_mallinfo           _mallinfo_r
#define nano_mallopt            _mallopt_r

#else /* ! INTERNAL_NEWLIB */

#define RARG
#define RONEARG
#define RCALL
#define RONECALL
#define MALLOC_LOCK
#define MALLOC_UNLOCK
#define RERRNO errno

#define nano_malloc             malloc
#define nano_free               free
#define nano_realloc            realloc
#define nano_memalign           memalign
#define nano_valloc             valloc
#define nano_pvalloc            pvalloc
#define nano_calloc             calloc
#define nano_cfree              cfree
#define nano_malloc_usable_size malloc_usable_size
#define nano_malloc_stats       malloc_stats
#define nano_mallinfo           mallinfo
#define nano_mallopt            mallopt
#endif /* ! INTERNAL_NEWLIB */

/* Redefine names to avoid conflict with user names */
#define free_list __malloc_free_list
#define sbrk_start __malloc_sbrk_start
#define current_mallinfo __malloc_current_mallinfo

#define ALIGN_TO(size, align) \
    (((size) + (align) -1L) & ~((align) -1L))
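/* For illustration: ALIGN_TO(13, 8) rounds 13 up to the next multiple
 * of 8, i.e. (13 + 7) & ~7 == 16; already-aligned values are unchanged. */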

/* Alignment of allocated block */
#define MALLOC_ALIGN (8U)
#define CHUNK_ALIGN (sizeof(void*))
#define MALLOC_PADDING ((MAX(MALLOC_ALIGN, CHUNK_ALIGN)) - CHUNK_ALIGN)

/* Minimum payload size of a chunk: it must be able
 * to hold a free-list pointer */
#define MALLOC_MINSIZE (sizeof(void *))
#define MALLOC_PAGE_ALIGN (0x1000)
#define MAX_ALLOC_SIZE (0x80000000U)

typedef size_t malloc_size_t;

typedef struct malloc_chunk
{
    /*          --------------------------------------
     *   chunk->| size                               |
     *          --------------------------------------
     *          | Padding for alignment              |
     *          | This includes padding inserted by  |
     *          | the compiler (to align fields) and |
     *          | explicit padding inserted by this  |
     *          | implementation. If any explicit    |
     *          | padding is being used then the     |
     *          | sizeof (size) bytes at             |
     *          | mem_ptr - CHUNK_OFFSET must be     |
     *          | initialized with the negative      |
     *          | offset to size.                    |
     *          --------------------------------------
     * mem_ptr->| When allocated: data               |
     *          | When freed: pointer to next free   |
     *          | chunk                              |
     *          --------------------------------------
     */
    /* Size of the whole chunk: the allocated payload area plus the
       CHUNK_OFFSET bytes of header before it */
    long size;

    /* From here on, the memory holds either the pointer to the next
       free chunk or the data payload */
    struct malloc_chunk * next;
} chunk;

/* Copied from malloc.h */
struct mallinfo
{
  size_t arena;    /* total space allocated from system */
  size_t ordblks;  /* number of non-inuse chunks */
  size_t smblks;   /* unused -- always zero */
  size_t hblks;    /* number of mmapped regions */
  size_t hblkhd;   /* total space in mmapped regions */
  size_t usmblks;  /* unused -- always zero */
  size_t fsmblks;  /* unused -- always zero */
  size_t uordblks; /* total allocated space */
  size_t fordblks; /* total non-inuse space */
  size_t keepcost; /* top-most, releasable (via malloc_trim) space */
};

#define CHUNK_OFFSET ((malloc_size_t)(&(((struct malloc_chunk *)0)->next)))

/* Size of the smallest possible chunk. A memory region smaller than
 * this cannot form a chunk */
#define MALLOC_MINCHUNK (CHUNK_OFFSET + MALLOC_PADDING + MALLOC_MINSIZE)
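/* For illustration (assuming a typical 32-bit target where long and
 * void * are 4 bytes): CHUNK_OFFSET = 4, MALLOC_PADDING = 8 - 4 = 4 and
 * MALLOC_MINSIZE = 4, so MALLOC_MINCHUNK = 4 + 4 + 4 = 12 bytes. */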

/* Forward data declarations */
extern chunk * free_list;
extern char * sbrk_start;
extern struct mallinfo current_mallinfo;

/* Forward function declarations */
extern void * nano_malloc(RARG malloc_size_t);
extern void nano_free (RARG void * free_p);
extern void nano_cfree(RARG void * ptr);
extern void * nano_calloc(RARG malloc_size_t n, malloc_size_t elem);
extern struct mallinfo nano_mallinfo(RONEARG);
extern void nano_malloc_stats(RONEARG);
extern malloc_size_t nano_malloc_usable_size(RARG void * ptr);
extern void * nano_realloc(RARG void * ptr, malloc_size_t size);
extern void * nano_memalign(RARG size_t align, size_t s);
extern int nano_mallopt(RARG int parameter_number, int parameter_value);
extern void * nano_valloc(RARG size_t s);
extern void * nano_pvalloc(RARG size_t s);

static inline chunk * get_chunk_from_ptr(void * ptr)
{
    /* Assume that there is no explicit padding in the
       chunk, and that the chunk starts at ptr - CHUNK_OFFSET.  */
    chunk * c = (chunk *)((char *)ptr - CHUNK_OFFSET);

    /* c->size being negative indicates that there is explicit padding in
       the chunk. In that case, c->size is currently the negative offset to
       the true size.  */
    if (c->size < 0) c = (chunk *)((char *)c + c->size);
    return c;
}
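
/* Worked example of the padding trick (hypothetical addresses, 32-bit
 * layout assumed): if a chunk header sits at 0x2000, its natural payload
 * starts at 0x2004.  With MALLOC_ALIGN == 8 the returned pointer is
 * 0x2008, so malloc stores -4 in the long at 0x2008 - CHUNK_OFFSET =
 * 0x2004.  get_chunk_from_ptr(0x2008) first lands on 0x2004, sees the
 * negative size and steps back by 4 bytes to the real header at 0x2000. */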

#ifdef DEFINE_MALLOC
/* List head of the free blocks */
chunk * free_list = NULL;

/* Starting point of memory allocated from system */
char * sbrk_start = NULL;

/** Function sbrk_aligned
  * Algorithm:
  *   Use sbrk() to obtain more memory and ensure it is CHUNK_ALIGN aligned.
  *   Optimise for the case that it is already aligned - only ask for extra
  *   padding after we know we need it.
  */
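/* For illustration (hypothetical values, CHUNK_ALIGN == 4): if the current
 * break is at 0x10002, sbrk_aligned(20) first gets p = 0x10002 from sbrk,
 * computes align_p = 0x10004, and then asks sbrk for 2 more bytes so that
 * the 20 bytes starting at align_p are all inside the heap. */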
static void* sbrk_aligned(RARG malloc_size_t s)
{
    char *p, *align_p;

    if (sbrk_start == NULL) sbrk_start = _SBRK_R(RCALL 0);

    p = _SBRK_R(RCALL s);

    /* sbrk returns -1 if it fails to allocate */
    if (p == (void *)-1)
        return p;

    align_p = (char*)ALIGN_TO((unsigned long)p, CHUNK_ALIGN);
    if (align_p != p)
    {
        /* p is not aligned, ask for a few more bytes so that we have s
         * bytes reserved from align_p. */
        p = _SBRK_R(RCALL align_p - p);
        if (p == (void *)-1)
            return p;
    }
    return align_p;
}

/** Function nano_malloc
  * Algorithm:
  *   Walk through the free list to find the first match. If it fails to
  *   find one, call sbrk to allocate a new chunk.
  */
void * nano_malloc(RARG malloc_size_t s)
{
    chunk *p, *r;
    char * ptr, * align_ptr;
    int offset;

    malloc_size_t alloc_size;

    alloc_size = ALIGN_TO(s, CHUNK_ALIGN); /* size of aligned data load */
    alloc_size += MALLOC_PADDING; /* padding */
    alloc_size += CHUNK_OFFSET; /* size of chunk head */
    alloc_size = MAX(alloc_size, MALLOC_MINCHUNK);
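
    /* For illustration (assuming a typical 32-bit target): a request of
     * s = 10 gives ALIGN_TO(10, 4) = 12, plus MALLOC_PADDING (4) and
     * CHUNK_OFFSET (4) for an alloc_size of 20 bytes, which already
     * exceeds MALLOC_MINCHUNK (12). */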

    if (alloc_size >= MAX_ALLOC_SIZE || alloc_size < s)
    {
        RERRNO = ENOMEM;
        return NULL;
    }

    MALLOC_LOCK;

    p = free_list;
    r = p;

    while (r)
    {
        int rem = r->size - alloc_size;
        if (rem >= 0)
        {
            if (rem >= MALLOC_MINCHUNK)
            {
                /* Found a chunk that is much larger than the required size;
                 * break it into two chunks and return the second one */
                r->size = rem;
                r = (chunk *)((char *)r + rem);
                r->size = alloc_size;
            }
            /* Found a chunk that is exactly the right size or only slightly
             * bigger than the requested size; just return this chunk */
            else if (p == r)
            {
                /* This implies p == r == free_list. Move the free_list
                 * to the next chunk */
                free_list = r->next;
            }
            else
            {
                /* Normal case. Remove it from free_list */
                p->next = r->next;
            }
            break;
        }
        p = r;
        r = r->next;
    }

    /* Failed to find an appropriate chunk. Ask for more memory */
    if (r == NULL)
    {
        r = sbrk_aligned(RCALL alloc_size);

        /* sbrk returns -1 if it fails to allocate */
        if (r == (void *)-1)
        {
            RERRNO = ENOMEM;
            MALLOC_UNLOCK;
            return NULL;
        }
        r->size = alloc_size;
    }
    MALLOC_UNLOCK;

    ptr = (char *)r + CHUNK_OFFSET;

    align_ptr = (char *)ALIGN_TO((unsigned long)ptr, MALLOC_ALIGN);
    offset = align_ptr - ptr;

    if (offset)
    {
        /* Initialize sizeof (malloc_chunk.size) bytes at
           align_ptr - CHUNK_OFFSET with negative offset to the
           size field (at the start of the chunk).

           The negative offset to size from align_ptr - CHUNK_OFFSET is
           the size of any remaining padding minus CHUNK_OFFSET.  This is
           equivalent to the total size of the padding, because the size of
           any remaining padding is the total size of the padding minus
           CHUNK_OFFSET.

           Note that the size of the padding must be at least CHUNK_OFFSET.

           The rest of the padding is not initialized.  */
        *(long *)((char *)r + offset) = -offset;
    }

    assert(align_ptr + s <= (char *)r + alloc_size);
    return align_ptr;
}
#endif /* DEFINE_MALLOC */

#ifdef DEFINE_FREE
#define MALLOC_CHECK_DOUBLE_FREE

/** Function nano_free
  * Implementation of libc free.
  * Algorithm:
  *  Maintain a global singly linked list of free chunks, headed by the
  *  global variable free_list.
  *  On free, insert the to-be-freed chunk into the free list at a position
  *  that keeps all chunks sorted by address from low to high, then merge
  *  it with neighbouring chunks if they are adjacent.
  */
void nano_free (RARG void * free_p)
{
    chunk * p_to_free;
    chunk * p, * q;

    if (free_p == NULL) return;

    p_to_free = get_chunk_from_ptr(free_p);

    MALLOC_LOCK;
    if (free_list == NULL)
    {
        /* Set first free list element */
        p_to_free->next = free_list;
        free_list = p_to_free;
        MALLOC_UNLOCK;
        return;
    }

    if (p_to_free < free_list)
    {
        if ((char *)p_to_free + p_to_free->size == (char *)free_list)
        {
            /* Chunk to free is just before the first element of
             * the free list */
            p_to_free->size += free_list->size;
            p_to_free->next = free_list->next;
        }
        else
        {
            /* Insert before current free_list */
            p_to_free->next = free_list;
        }
        free_list = p_to_free;
        MALLOC_UNLOCK;
        return;
    }

    q = free_list;
    /* Walk through the free list to find the place to insert. */
    do
    {
        p = q;
        q = q->next;
    } while (q && q <= p_to_free);

    /* Now p <= p_to_free and either q == NULL or q > p_to_free.
     * Try to merge with the chunks immediately before/after it. */

    if ((char *)p + p->size == (char *)p_to_free)
    {
        /* Chunk to be freed is adjacent
         * to a free chunk before it */
        p->size += p_to_free->size;
        /* If the merged chunk is also adjacent
         * to the chunk after it, merge again */
        if ((char *)p + p->size == (char *) q)
        {
            p->size += q->size;
            p->next = q->next;
        }
    }
#ifdef MALLOC_CHECK_DOUBLE_FREE
    else if ((char *)p + p->size > (char *)p_to_free)
    {
        /* Report double free fault */
        RERRNO = ENOMEM;
        MALLOC_UNLOCK;
        return;
    }
#endif
    else if ((char *)p_to_free + p_to_free->size == (char *) q)
    {
        /* Chunk to be freed is adjacent
         * to a free chunk after it */
        p_to_free->size += q->size;
        p_to_free->next = q->next;
        p->next = p_to_free;
    }
    else
    {
        /* Not adjacent to any chunk. Just insert it.
         * This results in a fragment. */
        p_to_free->next = q;
        p->next = p_to_free;
    }
    MALLOC_UNLOCK;
}
#endif /* DEFINE_FREE */

#ifdef DEFINE_CFREE
void nano_cfree(RARG void * ptr)
{
    nano_free(RCALL ptr);
}
#endif /* DEFINE_CFREE */

#ifdef DEFINE_CALLOC
/* Function nano_calloc
 * Implement calloc simply by calling malloc and zeroing the result */
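/* Note that the element count and element size are multiplied without an
 * overflow check; a product that wraps around malloc_size_t would lead to
 * an undersized allocation. */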
void * nano_calloc(RARG malloc_size_t n, malloc_size_t elem)
{
    void * mem = nano_malloc(RCALL n * elem);
    if (mem != NULL) memset(mem, 0, n * elem);
    return mem;
}
#endif /* DEFINE_CALLOC */

#ifdef DEFINE_REALLOC
/* Function nano_realloc
 * Implement realloc by malloc + memcpy */
void * nano_realloc(RARG void * ptr, malloc_size_t size)
{
    void * mem;
    malloc_size_t old_size;

    if (ptr == NULL) return nano_malloc(RCALL size);

    if (size == 0)
    {
        nano_free(RCALL ptr);
        return NULL;
    }

    /* TODO: there is a chance to shrink the chunk if the newly requested
     * size is much smaller */
    old_size = nano_malloc_usable_size(RCALL ptr);
    if (old_size >= size)
      return ptr;

    mem = nano_malloc(RCALL size);
    if (mem != NULL)
    {
        /* The old chunk is smaller than size at this point, so copy only
         * its payload to avoid reading past the old allocation */
        memcpy(mem, ptr, old_size);
        nano_free(RCALL ptr);
    }
    return mem;
}
#endif /* DEFINE_REALLOC */

#ifdef DEFINE_MALLINFO
struct mallinfo current_mallinfo={0,0,0,0,0,0,0,0,0,0};

struct mallinfo nano_mallinfo(RONEARG)
{
    char * sbrk_now;
    chunk * pf;
    size_t free_size = 0;
    size_t total_size;

    MALLOC_LOCK;

    if (sbrk_start == NULL) total_size = 0;
    else {
        sbrk_now = _SBRK_R(RCALL 0);

        if (sbrk_now == (void *)-1)
            total_size = (size_t)-1;
        else
            total_size = (size_t) (sbrk_now - sbrk_start);
    }

    for (pf = free_list; pf; pf = pf->next)
        free_size += pf->size;

    current_mallinfo.arena = total_size;
    current_mallinfo.fordblks = free_size;
    current_mallinfo.uordblks = total_size - free_size;

    MALLOC_UNLOCK;
    return current_mallinfo;
}
#endif /* DEFINE_MALLINFO */

#ifdef DEFINE_MALLOC_STATS
void nano_malloc_stats(RONEARG)
{
    nano_mallinfo(RONECALL);
    fiprintf(stderr, "max system bytes = %10u\n",
             current_mallinfo.arena);
    fiprintf(stderr, "system bytes     = %10u\n",
             current_mallinfo.arena);
    fiprintf(stderr, "in use bytes     = %10u\n",
             current_mallinfo.uordblks);
}
#endif /* DEFINE_MALLOC_STATS */

#ifdef DEFINE_MALLOC_USABLE_SIZE
malloc_size_t nano_malloc_usable_size(RARG void * ptr)
{
    chunk * c = (chunk *)((char *)ptr - CHUNK_OFFSET);
    int size_or_offset = c->size;

    if (size_or_offset < 0)
    {
        /* Padding is in use; exclude the padding from the usable size */
        c = (chunk *)((char *)c + c->size);
        return c->size - CHUNK_OFFSET + size_or_offset;
    }
    return c->size - CHUNK_OFFSET;
}
#endif /* DEFINE_MALLOC_USABLE_SIZE */

#ifdef DEFINE_MEMALIGN
/* Function nano_memalign
 * Allocate a memory block aligned at a specific boundary.
 *   align: required alignment. Must be a power of 2; NULL is returned
 *          if it is not. Behavior is undefined if align is larger than
 *          the range of a pointer.
 *   s: required size.
 * Return: allocated memory pointer aligned to align
 * Algorithm: malloc a big enough block, advance the pointer to the
 *            aligned address, then truncate and free the tail if it is
 *            too big. The offset between the aligned pointer and the
 *            original chunk head is recorded in the padding area.
 */
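/* For illustration (assuming a typical 32-bit target, so MALLOC_ALIGN = 8,
 * CHUNK_OFFSET = 4 and MALLOC_MINCHUNK = 12): nano_memalign(32, 50) first
 * computes ma_size = ALIGN_TO(50, 4) = 52 and asks malloc for
 * 52 + 32 - 8 = 76 bytes. Depending on where that chunk lands, the bytes
 * in front of the aligned pointer are either split off and freed (offset
 * >= 12) or recorded as a negative offset (smaller, non-zero offset), and
 * a tail larger than MALLOC_MINCHUNK behind aligned_p + 52 is split off
 * and freed as well. */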
void * nano_memalign(RARG size_t align, size_t s)
{
    chunk * chunk_p;
    malloc_size_t size_allocated, offset, ma_size, size_with_padding;
    char * allocated, * aligned_p;

    /* Return NULL if align isn't power of 2 */
    if ((align & (align-1)) != 0) return NULL;

    align = MAX(align, MALLOC_ALIGN);
    ma_size = ALIGN_TO(MAX(s, MALLOC_MINSIZE), CHUNK_ALIGN);
    size_with_padding = ma_size + align - MALLOC_ALIGN;

    allocated = nano_malloc(RCALL size_with_padding);
    if (allocated == NULL) return NULL;

    chunk_p = get_chunk_from_ptr(allocated);
    aligned_p = (char *)ALIGN_TO(
                  (unsigned long)((char *)chunk_p + CHUNK_OFFSET),
                  (unsigned long)align);
    offset = aligned_p - ((char *)chunk_p + CHUNK_OFFSET);

    if (offset)
    {
        if (offset >= MALLOC_MINCHUNK)
        {
            /* Padding is too large, free it */
            chunk * front_chunk = chunk_p;
            chunk_p = (chunk *)((char *)chunk_p + offset);
            chunk_p->size = front_chunk->size - offset;
            front_chunk->size = offset;
            nano_free(RCALL (char *)front_chunk + CHUNK_OFFSET);
        }
        else
        {
            /* Padding is used. Need to set a jump offset for aligned pointer
             * to get back to chunk head */
            assert(offset >= sizeof(int));
            *(long *)((char *)chunk_p + offset) = -offset;
        }
    }

    size_allocated = chunk_p->size;
    if ((char *)chunk_p + size_allocated >
         (aligned_p + ma_size + MALLOC_MINCHUNK))
    {
        /* allocated much more than what's required for padding, free
         * tail part */
        chunk * tail_chunk = (chunk *)(aligned_p + ma_size);
        chunk_p->size = aligned_p + ma_size - (char *)chunk_p;
        tail_chunk->size = size_allocated - chunk_p->size;
        nano_free(RCALL (char *)tail_chunk + CHUNK_OFFSET);
    }
    return aligned_p;
}
#endif /* DEFINE_MEMALIGN */

#ifdef DEFINE_MALLOPT
int nano_mallopt(RARG int parameter_number, int parameter_value)
{
    return 0;
}
#endif /* DEFINE_MALLOPT */

#ifdef DEFINE_VALLOC
void * nano_valloc(RARG size_t s)
{
    return nano_memalign(RCALL MALLOC_PAGE_ALIGN, s);
}
#endif /* DEFINE_VALLOC */

#ifdef DEFINE_PVALLOC
void * nano_pvalloc(RARG size_t s)
{
    return nano_valloc(RCALL ALIGN_TO(s, MALLOC_PAGE_ALIGN));
}
#endif /* DEFINE_PVALLOC */