author    Peter Korsgaard <jacmet@sunsite.dk>  2010-03-12 23:14:59 +0100
committer Peter Korsgaard <jacmet@sunsite.dk>  2010-03-12 23:14:59 +0100
commit    de859f6b2ac8660113e15aff6bc394a441dda60e (patch)
tree      2087a5bb8b6dd6e9964f676261efd20a63b593ae /toolchain/uClibc/uClibc-0.9.30.3-xtensa-013-atomic.h.patch
parent    12ebf5714c58cecdb19dbd9266ace5e9380562b0 (diff)
toolchain: Add uClibc 0.9.30.3, remove older 0.9.30.x
As the 0.9.30.x stable releases only contain bugfixes, there isn't much sense in
using the older 0.9.30.x releases instead of .3, so use a single 0.9.30.x config,
similar to how we do it for the kernel headers.

Signed-off-by: Peter Korsgaard <jacmet@sunsite.dk>
Diffstat (limited to 'toolchain/uClibc/uClibc-0.9.30.3-xtensa-013-atomic.h.patch')
-rw-r--r--  toolchain/uClibc/uClibc-0.9.30.3-xtensa-013-atomic.h.patch  | 165
1 file changed, 165 insertions(+), 0 deletions(-)
diff --git a/toolchain/uClibc/uClibc-0.9.30.3-xtensa-013-atomic.h.patch b/toolchain/uClibc/uClibc-0.9.30.3-xtensa-013-atomic.h.patch
new file mode 100644
index 000000000..fd2037111
--- /dev/null
+++ b/toolchain/uClibc/uClibc-0.9.30.3-xtensa-013-atomic.h.patch
@@ -0,0 +1,165 @@
+--- /dev/null 2008-09-18 06:50:54.356228028 -0700
++++ uClibc-0.9.29/libc/sysdeps/linux/xtensa/bits/atomic.h 2008-10-04 11:40:21.000000000 -0700
+@@ -0,0 +1,162 @@
++/*
++ * Copyright (C) 2008 Tensilica, Inc.
++ * Contributed by Joe Taylor <joe@tensilica.com> 2008
++ *
++ * This file is subject to the terms and conditions of the GNU Lesser General
++ * Public License. See the file "COPYING.LIB" in the main directory of this
++ * archive for more details.
++ */
++
++#ifndef _XTENSA_BITS_ATOMIC_H
++#define _XTENSA_BITS_ATOMIC_H 1
++
++/* Xtensa has only a 32-bit form of a store-conditional instruction,
++ so just stub out the rest. */
++
++/* Atomically store NEWVAL in *MEM if *MEM is equal to OLDVAL.
++ Return the old *MEM value. */
++
++#define __arch_compare_and_exchange_val_32_acq(mem, newval, oldval) \
++ ({__typeof__(*(mem)) __tmp, __value; \
++ __asm__ __volatile__( \
++ "1: l32i %1, %2, 0 \n" \
++ " bne %1, %4, 2f \n" \
++ " wsr %1, SCOMPARE1 \n" \
++ " mov %0, %1 \n" \
++ " mov %1, %3 \n" \
++ " s32c1i %1, %2, 0 \n" \
++ " bne %0, %1, 1b \n" \
++ "2: \n" \
++ : "=&a" (__value), "=&a" (__tmp) \
++ : "a" (mem), "a" (newval), "a" (oldval) \
++ : "memory" ); \
++ __tmp; \
++ })
++
++/* Atomically store NEWVAL in *MEM if *MEM is equal to OLDVAL.
++ Return zero if *MEM was changed or non-zero if no exchange happened. */
++
++#define __arch_compare_and_exchange_bool_32_acq(mem, newval, oldval) \
++ ({__typeof__(*(mem)) __tmp, __value; \
++ __asm__ __volatile__( \
++ "1: l32i %0, %2, 0 \n" \
++ " sub %1, %4, %0 \n" \
++ " bnez %1, 2f \n" \
++ " wsr %0, SCOMPARE1 \n" \
++ " mov %1, %3 \n" \
++ " s32c1i %1, %2, 0 \n" \
++ " bne %0, %1, 1b \n" \
++ " movi %1, 0 \n" \
++ "2: \n" \
++ : "=&a" (__value), "=&a" (__tmp) \
++ : "a" (mem), "a" (newval), "a" (oldval) \
++ : "memory" ); \
++ __tmp != 0; \
++ })
++
++/* Store NEWVALUE in *MEM and return the old value. */
++
++#define __arch_exchange_32_acq(mem, newval) \
++ ({__typeof__(*(mem)) __tmp, __value; \
++ __asm__ __volatile__( \
++ "1: l32i %0, %2, 0 \n" \
++ " wsr %0, SCOMPARE1 \n" \
++ " mov %1, %3 \n" \
++ " s32c1i %1, %2, 0 \n" \
++ " bne %0, %1, 1b \n" \
++ : "=&a" (__value), "=&a" (__tmp) \
++ : "a" (mem), "a" (newval) \
++ : "memory" ); \
++ __tmp; \
++ })
++
++/* Add VALUE to *MEM and return the old value of *MEM. */
++
++#define __arch_atomic_exchange_and_add_32(mem, value) \
++ ({__typeof__(*(mem)) __tmp, __value; \
++ __asm__ __volatile__( \
++ "1: l32i %0, %2, 0 \n" \
++ " wsr %0, SCOMPARE1 \n" \
++ " add %1, %0, %3 \n" \
++ " s32c1i %1, %2, 0 \n" \
++ " bne %0, %1, 1b \n" \
++ : "=&a" (__value), "=&a" (__tmp) \
++ : "a" (mem), "a" (value) \
++ : "memory" ); \
++ __tmp; \
++ })
++
++/* Subtract VALUE from *MEM and return the old value of *MEM. */
++
++#define __arch_atomic_exchange_and_sub_32(mem, value) \
++ ({__typeof__(*(mem)) __tmp, __value; \
++ __asm__ __volatile__( \
++ "1: l32i %0, %2, 0 \n" \
++ " wsr %0, SCOMPARE1 \n" \
++ " sub %1, %0, %3 \n" \
++ " s32c1i %1, %2, 0 \n" \
++ " bne %0, %1, 1b \n" \
++ : "=&a" (__value), "=&a" (__tmp) \
++ : "a" (mem), "a" (value) \
++ : "memory" ); \
++ __tmp; \
++ })
++
++/* Decrement *MEM if it is > 0, and return the old value. */
++
++#define __arch_atomic_decrement_if_positive_32(mem) \
++ ({__typeof__(*(mem)) __tmp, __value; \
++ __asm__ __volatile__( \
++ "1: l32i %0, %2, 0 \n" \
++ " blti %0, 1, 2f \n" \
++ " wsr %0, SCOMPARE1 \n" \
++ " addi %1, %0, -1 \n" \
++ " s32c1i %1, %2, 0 \n" \
++ " bne %0, %1, 1b \n" \
++ "2: \n" \
++ : "=&a" (__value), "=&a" (__tmp) \
++ : "a" (mem) \
++ : "memory" ); \
++ __tmp; \
++ })
++
++
++/* These are the preferred public interfaces: */
++
++#define atomic_compare_and_exchange_val_acq(mem, newval, oldval) \
++ ({ \
++ if (sizeof (*mem) != 4) \
++ abort(); \
++ __arch_compare_and_exchange_val_32_acq(mem, newval, oldval); \
++ })
++
++#define atomic_exchange_acq(mem, newval) \
++ ({ \
++ if (sizeof(*(mem)) != 4) \
++ abort(); \
++ __arch_exchange_32_acq(mem, newval); \
++ })
++
++#define atomic_exchange_and_add(mem, newval) \
++ ({ \
++ if (sizeof(*(mem)) != 4) \
++ abort(); \
++ __arch_atomic_exchange_and_add_32(mem, newval); \
++ })
++
++#define atomic_exchange_and_sub(mem, newval) \
++ ({ \
++ if (sizeof(*(mem)) != 4) \
++ abort(); \
++ __arch_atomic_exchange_and_sub_32(mem, newval); \
++ })
++
++#define atomic_decrement_if_positive(mem) \
++ ({ \
++ if (sizeof(*(mem)) != 4) \
++ abort(); \
++ __arch_atomic_decrement_if_positive_32(mem); \
++ })
++
++#endif /* _XTENSA_BITS_ATOMIC_H */
++
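For context, the macros this header supplies are uClibc-internal: they are reached through the library's own <atomic.h> wrapper rather than a public installed header. The sketch below is purely illustrative and is not part of the patch; it assumes the internal <atomic.h> is on the include path, and the function names (my_spin_lock, my_ref_put, etc.) are invented for the example. It shows how the public-interface macros defined at the end of the new file might be used on int-sized (4-byte) objects, the only width the Xtensa s32c1i instruction supports.

/* Hypothetical usage sketch -- not part of the patch.  Inside uClibc the
   macros below come from the internal <atomic.h>, which pulls in the
   per-architecture bits/atomic.h added by this file.  */
#include <atomic.h>

static int lock_word;          /* 0 = unlocked, 1 = locked */
static int ref_count = 1;      /* int is 4 bytes, so the size check passes */

/* Spin until the lock word is observed as 0 and atomically set it to 1.
   The CAS macro returns the old value, so 0 means we took the lock.  */
static void
my_spin_lock (void)
{
  while (atomic_compare_and_exchange_val_acq (&lock_word, 1, 0) != 0)
    continue;                  /* another thread holds the lock; retry */
}

static void
my_spin_unlock (void)
{
  atomic_exchange_acq (&lock_word, 0);   /* store 0, old value discarded */
}

/* Take a reference; returns the count as it was before the increment.  */
static int
my_ref_get (void)
{
  return atomic_exchange_and_add (&ref_count, 1);
}

/* Drop a reference; returns nonzero when this call released the last one.  */
static int
my_ref_put (void)
{
  return atomic_exchange_and_sub (&ref_count, 1) == 1;
}

Each public macro aborts at runtime if the operand is not 4 bytes wide, mirroring the "32-bit only" limitation noted in the header's opening comment.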