author    Peter Korsgaard <jacmet@sunsite.dk>  2010-02-22 22:05:41 +0100
committer Peter Korsgaard <jacmet@sunsite.dk>  2010-02-22 22:05:41 +0100
commit    43ffd946ad569f3a1d0662de221cd4cb65bdc8e6 (patch)
tree      51a058ab53af3fd5c3d6cbe57c1f5f3cbd9b3015 /toolchain/uClibc/uClibc-0.9.30.2-malloc-fix-race-condition-and-other-bugs-in-the-no-m.patch
parent    c8ff60c8283b0ad116fcf43f485f199c29432c9d (diff)
toolchain/uClibc: additional 0.9.30.2 patches
Everything on the 0_9_30 branch since the release (0.9.30.3 to be).

Signed-off-by: Peter Korsgaard <jacmet@sunsite.dk>
Diffstat (limited to 'toolchain/uClibc/uClibc-0.9.30.2-malloc-fix-race-condition-and-other-bugs-in-the-no-m.patch')
-rw-r--r--  toolchain/uClibc/uClibc-0.9.30.2-malloc-fix-race-condition-and-other-bugs-in-the-no-m.patch | 89
1 file changed, 89 insertions(+), 0 deletions(-)
diff --git a/toolchain/uClibc/uClibc-0.9.30.2-malloc-fix-race-condition-and-other-bugs-in-the-no-m.patch b/toolchain/uClibc/uClibc-0.9.30.2-malloc-fix-race-condition-and-other-bugs-in-the-no-m.patch
new file mode 100644
index 000000000..aa9c2f42f
--- /dev/null
+++ b/toolchain/uClibc/uClibc-0.9.30.2-malloc-fix-race-condition-and-other-bugs-in-the-no-m.patch
@@ -0,0 +1,89 @@
+From fa476d01f1c1990a92ee49d1f1c557b83805d0e9 Mon Sep 17 00:00:00 2001
+From: Freeman Wang <xwang@ubicom.com>
+Date: Sat, 19 Dec 2009 13:43:00 -0800
+Subject: [PATCH 09/15] malloc: fix race condition and other bugs in the no-mmu malloc
+
+Fixes multiple race conditions on the mmb list. This was done by
+making the mmb_heap_lock a recursive lock and extending the regular
+heap_lock to cover the mmb heap handling.
+
+Also move the new_mmb allocation up to before the mmb list is
+iterated through to find the insertion point. Otherwise, when the
+mmb heap also runs out and has to be extended just as the regular
+heap has been extended, the mmb list could become corrupted.
+
+Signed-off-by: Freeman Wang <xwang@ubicom.com>
+Signed-off-by: Austin Foxley <austinf@cetoncorp.com>
+---
+ libc/stdlib/malloc/free.c | 6 +++---
+ libc/stdlib/malloc/malloc.c | 7 ++++---
+ 2 files changed, 7 insertions(+), 6 deletions(-)
+
+diff --git a/libc/stdlib/malloc/free.c b/libc/stdlib/malloc/free.c
+index 90e18f4..741248a 100644
+--- a/libc/stdlib/malloc/free.c
++++ b/libc/stdlib/malloc/free.c
+@@ -179,14 +179,14 @@ __free_to_heap (void *mem, struct heap_free_area **heap
+ /* Start searching again from the end of this block. */
+ start = mmb_end;
+
++ /* Release the descriptor block we used. */
++ free_to_heap (mmb, &__malloc_mmb_heap, &__malloc_mmb_heap_lock);
++
+ /* We have to unlock the heap before we recurse to free the mmb
+ descriptor, because we might be unmapping from the mmb
+ heap. */
+ __heap_unlock (heap_lock);
+
+- /* Release the descriptor block we used. */
+- free_to_heap (mmb, &__malloc_mmb_heap, &__malloc_mmb_heap_lock);
+-
+ /* Do the actual munmap. */
+ munmap ((void *)mmb_start, mmb_end - mmb_start);
+
+diff --git a/libc/stdlib/malloc/malloc.c b/libc/stdlib/malloc/malloc.c
+index 71f9e58..84a6acd 100644
+--- a/libc/stdlib/malloc/malloc.c
++++ b/libc/stdlib/malloc/malloc.c
+@@ -48,7 +48,7 @@ struct malloc_mmb *__malloc_mmapped_blocks = 0;
+ HEAP_DECLARE_STATIC_FREE_AREA (initial_mmb_fa, 48); /* enough for 3 mmbs */
+ struct heap_free_area *__malloc_mmb_heap = HEAP_INIT_WITH_FA (initial_mmb_fa);
+ #ifdef HEAP_USE_LOCKING
+-pthread_mutex_t __malloc_mmb_heap_lock = PTHREAD_MUTEX_INITIALIZER;
++pthread_mutex_t __malloc_mmb_heap_lock = PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP;
+ #endif
+ #endif /* __UCLIBC_UCLINUX_BROKEN_MUNMAP__ */
+
+@@ -151,19 +151,19 @@ __malloc_from_heap (size_t size, struct heap_free_area **heap
+ /* Try again to allocate. */
+ mem = __heap_alloc (heap, &size);
+
+- __heap_unlock (heap_lock);
+
+ #if !defined(MALLOC_USE_SBRK) && defined(__UCLIBC_UCLINUX_BROKEN_MUNMAP__)
+ /* Insert a record of BLOCK in sorted order into the
+ __malloc_mmapped_blocks list. */
+
++ new_mmb = malloc_from_heap (sizeof *new_mmb, &__malloc_mmb_heap, &__malloc_mmb_heap_lock);
++
+ for (prev_mmb = 0, mmb = __malloc_mmapped_blocks;
+ mmb;
+ prev_mmb = mmb, mmb = mmb->next)
+ if (block < mmb->mem)
+ break;
+
+- new_mmb = malloc_from_heap (sizeof *new_mmb, &__malloc_mmb_heap, &__malloc_mmb_heap_lock);
+ new_mmb->next = mmb;
+ new_mmb->mem = block;
+ new_mmb->size = block_size;
+@@ -177,6 +177,7 @@ __malloc_from_heap (size_t size, struct heap_free_area **heap
+ (unsigned)new_mmb,
+ (unsigned)new_mmb->mem, block_size);
+ #endif /* !MALLOC_USE_SBRK && __UCLIBC_UCLINUX_BROKEN_MUNMAP__ */
++ __heap_unlock (heap_lock);
+ }
+ }
+
+--
+1.6.6.1
+
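The locking half of the fix hinges on PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP:
free_to_heap() can be re-entered for the mmb heap while its lock is already
held, which would deadlock a plain mutex. Below is a minimal standalone
sketch of that pattern, not uClibc source; the names mmb_lock, free_block
and release_descriptor are hypothetical.

    #define _GNU_SOURCE            /* for the _NP recursive initializer */
    #include <pthread.h>
    #include <stdio.h>

    /* Stand-in for __malloc_mmb_heap_lock after the patch. */
    static pthread_mutex_t mmb_lock = PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP;

    static void release_descriptor(void)
    {
        pthread_mutex_lock(&mmb_lock);   /* second acquisition: fine, the
                                            lock is recursive */
        puts("descriptor released");
        pthread_mutex_unlock(&mmb_lock);
    }

    static void free_block(void)
    {
        pthread_mutex_lock(&mmb_lock);   /* first acquisition */
        release_descriptor();            /* nested path re-takes the lock */
        pthread_mutex_unlock(&mmb_lock);
    }

    int main(void)
    {
        /* With plain PTHREAD_MUTEX_INITIALIZER the nested lock above
           would deadlock; the recursive initializer makes it legal. */
        free_block();
        return 0;
    }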
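The ordering half of the fix, allocating new_mmb before walking the list,
matters because the allocation itself can grow the mmb heap and mutate the
list, invalidating a previously computed insertion point. Here is a minimal
sketch of the corrected ordering, with hypothetical names (record_block,
alloc_descriptor, blocks), assuming a singly linked list sorted by address:

    #include <stdlib.h>

    struct mmb { struct mmb *next; void *mem; size_t size; };

    static struct mmb *blocks;   /* list kept sorted by ->mem */

    /* In the real code this may extend the mmb heap and thereby
       touch the list; here plain malloc stands in for it. */
    static struct mmb *alloc_descriptor(void)
    {
        return malloc(sizeof(struct mmb));
    }

    static void record_block(void *mem, size_t size)
    {
        /* Allocate first, so nothing can reshuffle the list between
           finding the insertion point and linking the node in. */
        struct mmb *new_mmb = alloc_descriptor();
        struct mmb *prev = NULL, *cur;

        for (cur = blocks; cur; prev = cur, cur = cur->next)
            if (mem < cur->mem)
                break;

        new_mmb->next = cur;
        new_mmb->mem  = mem;
        new_mmb->size = size;
        if (prev)
            prev->next = new_mmb;
        else
            blocks = new_mmb;
    }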