author    Qicheng Christopher Li <chrisl@vmware.com>  2010-05-24 13:44:13 +0100
committer José Fonseca <jfonseca@vmware.com>          2010-05-24 13:45:26 +0100
commit    80ee3a440cd3c0403004cf35e0638fc52088b9ff (patch)
tree      34d5a11f2b6a89bdf13398de1fbcbcde53311c91 /src/gallium/drivers
parent    3c929e55449410f97c7d9213d09aa88ef02c888c (diff)
llvmpipe: Unit test for sin/cos that compares against reference implementation.
Signed-off-by: José Fonseca <jfonseca@vmware.com>
Diffstat (limited to 'src/gallium/drivers')
-rw-r--r--  src/gallium/drivers/llvmpipe/Makefile          |   6
-rw-r--r--  src/gallium/drivers/llvmpipe/SConscript        |   2
-rw-r--r--  src/gallium/drivers/llvmpipe/lp_test_sincos.c  | 204
-rw-r--r--  src/gallium/drivers/llvmpipe/sse_mathfun.h     | 773
4 files changed, 984 insertions(+), 1 deletion(-)
diff --git a/src/gallium/drivers/llvmpipe/Makefile b/src/gallium/drivers/llvmpipe/Makefile
index 526e85c82e..213e4c4a99 100644
--- a/src/gallium/drivers/llvmpipe/Makefile
+++ b/src/gallium/drivers/llvmpipe/Makefile
@@ -50,8 +50,12 @@ CPP_SOURCES = \
PROGS := lp_test_format \
lp_test_blend \
lp_test_conv \
- lp_test_printf
+ lp_test_printf \
+ lp_test_sincos
+lp_test_sincos.o : sse_mathfun.h
+
+PROGS_DEPS := ../../auxiliary/libgallium.a
include ../../Makefile.template
diff --git a/src/gallium/drivers/llvmpipe/SConscript b/src/gallium/drivers/llvmpipe/SConscript
index c155558aa5..2c38dc42b0 100644
--- a/src/gallium/drivers/llvmpipe/SConscript
+++ b/src/gallium/drivers/llvmpipe/SConscript
@@ -76,6 +76,8 @@ if env['platform'] != 'embedded':
'format',
'blend',
'conv',
+ 'printf',
+ 'sincos',
]
for test in tests:
diff --git a/src/gallium/drivers/llvmpipe/lp_test_sincos.c b/src/gallium/drivers/llvmpipe/lp_test_sincos.c
new file mode 100644
index 0000000000..883e15fbb6
--- /dev/null
+++ b/src/gallium/drivers/llvmpipe/lp_test_sincos.c
@@ -0,0 +1,204 @@
+/**************************************************************************
+ *
+ * Copyright 2010 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+#include <stdlib.h>
+#include <stdio.h>
+
+#include "gallivm/lp_bld.h"
+#include "gallivm/lp_bld_printf.h"
+#include "gallivm/lp_bld_arit.h"
+
+#include <llvm-c/Analysis.h>
+#include <llvm-c/ExecutionEngine.h>
+#include <llvm-c/Target.h>
+#include <llvm-c/Transforms/Scalar.h>
+
+#include "lp_test.h"
+
+
+struct sincos_test_case {
+};
+
+
+void
+write_tsv_header(FILE *fp)
+{
+ fprintf(fp,
+ "result\t"
+ "format\n");
+
+ fflush(fp);
+}
+
+
+#ifdef PIPE_ARCH_SSE
+
+#define USE_SSE2
+#include "sse_mathfun.h"
+
+typedef __m128 (*test_sincos_t)(__m128);
+
+static LLVMValueRef
+add_sincos_test(LLVMModuleRef module, bool sin)
+{
+ LLVMTypeRef v4sf = LLVMVectorType(LLVMFloatType(), 4);
+ LLVMTypeRef args[1] = { v4sf };
+ LLVMValueRef func = LLVMAddFunction(module, "sincos", LLVMFunctionType(v4sf, args, 1, 0));
+ LLVMValueRef arg1 = LLVMGetParam(func, 0);
+ LLVMBuilderRef builder = LLVMCreateBuilder();
+ LLVMBasicBlockRef block = LLVMAppendBasicBlock(func, "entry");
+ LLVMValueRef ret;
+ struct lp_build_context bld;
+
+ bld.builder = builder;
+ bld.type.floating = 1;
+ bld.type.width = 32;
+ bld.type.length = 4;
+
+ LLVMSetFunctionCallConv(func, LLVMCCallConv);
+
+ LLVMPositionBuilderAtEnd(builder, block);
+ ret = sin ? lp_build_sin(&bld, arg1) : lp_build_cos(&bld, arg1);
+ LLVMBuildRet(builder, ret);
+ LLVMDisposeBuilder(builder);
+ return func;
+}
+
+static void
+printv(char* string, v4sf value)
+{
+ v4sf v = value;
+ uint32_t *p = (uint32_t *) &v;
+ float *f = (float *)&v;
+ printf("%s: %f(%x) %f(%x) %f(%x) %f(%x)\n", string,
+ f[0], p[0], f[1], p[1], f[2], p[2], f[3], p[3]);
+}
+
+PIPE_ALIGN_STACK
+static boolean
+test_sincos(unsigned verbose, FILE *fp, const struct sincos_test_case *testcase)
+{
+ LLVMModuleRef module = NULL;
+ LLVMValueRef test_sin = NULL, test_cos = NULL;
+ LLVMExecutionEngineRef engine = NULL;
+ LLVMModuleProviderRef provider = NULL;
+ LLVMPassManagerRef pass = NULL;
+ char *error = NULL;
+ test_sincos_t sin_func;
+ test_sincos_t cos_func;
+ float unpacked[4];
+ unsigned packed;
+ boolean success = TRUE;
+
+ module = LLVMModuleCreateWithName("test");
+
+ test_sin = add_sincos_test(module, TRUE);
+ test_cos = add_sincos_test(module, FALSE);
+
+ if(LLVMVerifyModule(module, LLVMPrintMessageAction, &error)) {
+ printf("LLVMVerifyModule: %s\n", error);
+ LLVMDumpModule(module);
+ abort();
+ }
+ LLVMDisposeMessage(error);
+
+ provider = LLVMCreateModuleProviderForExistingModule(module);
+ if (LLVMCreateJITCompiler(&engine, provider, 1, &error)) {
+ fprintf(stderr, "%s\n", error);
+ LLVMDisposeMessage(error);
+ abort();
+ }
+
+#if 0
+ pass = LLVMCreatePassManager();
+ LLVMAddTargetData(LLVMGetExecutionEngineTargetData(engine), pass);
+ /* These are the passes currently listed in llvm-c/Transforms/Scalar.h,
+ * but there are more on SVN. */
+ LLVMAddConstantPropagationPass(pass);
+ LLVMAddInstructionCombiningPass(pass);
+ LLVMAddPromoteMemoryToRegisterPass(pass);
+ LLVMAddGVNPass(pass);
+ LLVMAddCFGSimplificationPass(pass);
+ LLVMRunPassManager(pass, module);
+#else
+ (void)pass;
+#endif
+
+ sin_func = (test_sincos_t)LLVMGetPointerToGlobal(engine, test_sin);
+ cos_func = (test_sincos_t)LLVMGetPointerToGlobal(engine, test_cos);
+
+ memset(unpacked, 0, sizeof unpacked);
+ packed = 0;
+
+
+ // LLVMDumpModule(module);
+ {
+ v4sf src = {3.14159/4.0, -3.14159/4.0, 1.0, -1.0};
+ printv("ref ",sin_ps(src));
+ printv("llvm", sin_func(src));
+ printv("ref ",cos_ps(src));
+ printv("llvm",cos_func(src));
+ }
+
+ LLVMFreeMachineCodeForFunction(engine, test_sin);
+ LLVMFreeMachineCodeForFunction(engine, test_cos);
+
+ LLVMDisposeExecutionEngine(engine);
+ if(pass)
+ LLVMDisposePassManager(pass);
+
+ return success;
+}
+
+#else /* !PIPE_ARCH_SSE */
+
+static boolean
+test_sincos(unsigned verbose, FILE *fp, const struct sincos_test_case *testcase)
+{
+ return TRUE;
+}
+
+#endif /* !PIPE_ARCH_SSE */
+
+
+boolean
+test_all(unsigned verbose, FILE *fp)
+{
+ bool success = TRUE;
+
+ test_sincos(verbose, fp, NULL);
+
+ return success;
+}
+
+
+boolean
+test_some(unsigned verbose, FILE *fp, unsigned long n)
+{
+ return test_all(verbose, fp);
+}
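
As committed, test_sincos() only prints the sse_mathfun.h reference values next to the output of the JIT-compiled lp_build_sin()/lp_build_cos() code and always returns TRUE, so the comparison is done by eye. Below is a minimal sketch of how the printed values could be turned into a pass/fail check. It is standalone C; the compare_v4() helper, the 1e-5 tolerance and the use of libm sinf() as a stand-in for both sides are editorial assumptions, not part of the commit.

#include <math.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical helper: compare two 4-wide float results element by element
 * against an absolute tolerance and report any mismatch. In the real test,
 * `ref` would come from sin_ps()/cos_ps() and `test` from the JIT-compiled
 * function returned by LLVMGetPointerToGlobal(). */
static bool
compare_v4(const char *name, const float ref[4], const float test[4], float tol)
{
   bool ok = true;
   for (int i = 0; i < 4; i++) {
      float err = fabsf(test[i] - ref[i]);
      if (err > tol) {
         printf("%s[%d]: got %f, expected %f (error %g > %g)\n",
                name, i, test[i], ref[i], err, tol);
         ok = false;
      }
   }
   return ok;
}

int
main(void)
{
   /* same sample inputs as the committed test */
   const float src[4] = { 3.14159f / 4.0f, -3.14159f / 4.0f, 1.0f, -1.0f };
   float ref[4], test[4];

   for (int i = 0; i < 4; i++) {
      ref[i] = sinf(src[i]);   /* stand-in for the sse_mathfun.h reference */
      test[i] = sinf(src[i]);  /* stand-in for the JIT-compiled result */
   }

   bool success = compare_v4("sin", ref, test, 1e-5f);
   printf("%s\n", success ? "PASS" : "FAIL");
   return success ? 0 : 1;
}
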
diff --git a/src/gallium/drivers/llvmpipe/sse_mathfun.h b/src/gallium/drivers/llvmpipe/sse_mathfun.h
new file mode 100644
index 0000000000..8ac2064b7b
--- /dev/null
+++ b/src/gallium/drivers/llvmpipe/sse_mathfun.h
@@ -0,0 +1,773 @@
+/* SIMD (SSE1+MMX or SSE2) implementation of sin, cos, exp and log
+
+ Inspired by Intel Approximate Math library, and based on the
+ corresponding algorithms of the cephes math library
+
+ The default is to use the SSE1 version. If you define USE_SSE2 then
+ the SSE2 intrinsics will be used in place of the MMX intrinsics. Do
+ not expect any significant performance improvement with SSE2.
+*/
+
+/* Copyright (C) 2007 Julien Pommier
+
+ This software is provided 'as-is', without any express or implied
+ warranty. In no event will the authors be held liable for any damages
+ arising from the use of this software.
+
+ Permission is granted to anyone to use this software for any purpose,
+ including commercial applications, and to alter it and redistribute it
+ freely, subject to the following restrictions:
+
+ 1. The origin of this software must not be misrepresented; you must not
+ claim that you wrote the original software. If you use this software
+ in a product, an acknowledgment in the product documentation would be
+ appreciated but is not required.
+ 2. Altered source versions must be plainly marked as such, and must not be
+ misrepresented as being the original software.
+ 3. This notice may not be removed or altered from any source distribution.
+
+ (this is the zlib license)
+*/
+
+#include <xmmintrin.h>
+
+/* yes I know, the top of this file is quite ugly */
+
+#ifdef _MSC_VER /* visual c++ */
+# define ALIGN16_BEG __declspec(align(16))
+# define ALIGN16_END
+#else /* gcc or icc */
+# define ALIGN16_BEG
+# define ALIGN16_END __attribute__((aligned(16)))
+#endif
+
+/* __m128 is ugly to write */
+typedef __m128 v4sf; // vector of 4 float (sse1)
+
+#ifdef USE_SSE2
+# include <emmintrin.h>
+typedef __m128i v4si; // vector of 4 int (sse2)
+#else
+typedef __m64 v2si; // vector of 2 int (mmx)
+#endif
+
+/* declare some SSE constants -- why can't I figure a better way to do that? */
+#define _PS_CONST(Name, Val) \
+ static const ALIGN16_BEG float _ps_##Name[4] ALIGN16_END = { Val, Val, Val, Val }
+#define _PI32_CONST(Name, Val) \
+ static const ALIGN16_BEG int _pi32_##Name[4] ALIGN16_END = { Val, Val, Val, Val }
+#define _PS_CONST_TYPE(Name, Type, Val) \
+ static const ALIGN16_BEG Type _ps_##Name[4] ALIGN16_END = { Val, Val, Val, Val }
+
+_PS_CONST(1 , 1.0f);
+_PS_CONST(0p5, 0.5f);
+/* the smallest non-denormalized float number */
+_PS_CONST_TYPE(min_norm_pos, int, 0x00800000);
+_PS_CONST_TYPE(mant_mask, int, 0x7f800000);
+_PS_CONST_TYPE(inv_mant_mask, int, ~0x7f800000);
+
+_PS_CONST_TYPE(sign_mask, int, 0x80000000);
+_PS_CONST_TYPE(inv_sign_mask, int, ~0x80000000);
+
+_PI32_CONST(1, 1);
+_PI32_CONST(inv1, ~1);
+_PI32_CONST(2, 2);
+_PI32_CONST(4, 4);
+_PI32_CONST(0x7f, 0x7f);
+
+_PS_CONST(cephes_SQRTHF, 0.707106781186547524);
+_PS_CONST(cephes_log_p0, 7.0376836292E-2);
+_PS_CONST(cephes_log_p1, - 1.1514610310E-1);
+_PS_CONST(cephes_log_p2, 1.1676998740E-1);
+_PS_CONST(cephes_log_p3, - 1.2420140846E-1);
+_PS_CONST(cephes_log_p4, + 1.4249322787E-1);
+_PS_CONST(cephes_log_p5, - 1.6668057665E-1);
+_PS_CONST(cephes_log_p6, + 2.0000714765E-1);
+_PS_CONST(cephes_log_p7, - 2.4999993993E-1);
+_PS_CONST(cephes_log_p8, + 3.3333331174E-1);
+_PS_CONST(cephes_log_q1, -2.12194440e-4);
+_PS_CONST(cephes_log_q2, 0.693359375);
+
+v4sf log_ps(v4sf x);
+v4sf exp_ps(v4sf x);
+v4sf sin_ps(v4sf x);
+v4sf cos_ps(v4sf x);
+void sincos_ps(v4sf x, v4sf *s, v4sf *c);
+
+#if defined (__MINGW32__)
+
+/* The ugly part below: many versions of gcc used to be completely buggy with respect to some intrinsics.
+ The movehl_ps bug was fixed in mingw 3.4.5, but I found out that all the _mm_cmp* intrinsics were completely
+ broken on my mingw gcc 3.4.5 ...
+
+ Note that the bug in _mm_cmp* only occurs at the -O0 optimization level.
+*/
+
+inline __m128 my_movehl_ps(__m128 a, const __m128 b) {
+ asm (
+ "movhlps %2,%0\n\t"
+ : "=x" (a)
+ : "0" (a), "x"(b)
+ );
+ return a; }
+#warning "redefined _mm_movehl_ps (see gcc bug 21179)"
+#define _mm_movehl_ps my_movehl_ps
+
+inline __m128 my_cmplt_ps(__m128 a, const __m128 b) {
+ asm (
+ "cmpltps %2,%0\n\t"
+ : "=x" (a)
+ : "0" (a), "x"(b)
+ );
+ return a;
+ }
+inline __m128 my_cmpgt_ps(__m128 a, const __m128 b) {
+ asm (
+ "cmpnleps %2,%0\n\t"
+ : "=x" (a)
+ : "0" (a), "x"(b)
+ );
+ return a;
+}
+inline __m128 my_cmpeq_ps(__m128 a, const __m128 b) {
+ asm (
+ "cmpeqps %2,%0\n\t"
+ : "=x" (a)
+ : "0" (a), "x"(b)
+ );
+ return a;
+}
+#warning "redefined _mm_cmpxx_ps functions..."
+#define _mm_cmplt_ps my_cmplt_ps
+#define _mm_cmpgt_ps my_cmpgt_ps
+#define _mm_cmpeq_ps my_cmpeq_ps
+#endif
+
+#ifndef USE_SSE2
+typedef union xmm_mm_union {
+ __m128 xmm;
+ __m64 mm[2];
+} xmm_mm_union;
+
+#define COPY_XMM_TO_MM(xmm_, mm0_, mm1_) { \
+ xmm_mm_union u; u.xmm = xmm_; \
+ mm0_ = u.mm[0]; \
+ mm1_ = u.mm[1]; \
+}
+
+#define COPY_MM_TO_XMM(mm0_, mm1_, xmm_) { \
+ xmm_mm_union u; u.mm[0]=mm0_; u.mm[1]=mm1_; xmm_ = u.xmm; \
+ }
+
+#endif // USE_SSE2
+
+/* natural logarithm computed for 4 simultaneous floats;
+ returns NaN for x <= 0
+*/
+v4sf log_ps(v4sf x) {
+#ifdef USE_SSE2
+ v4si emm0;
+#else
+ v2si mm0, mm1;
+#endif
+ v4sf one = *(v4sf*)_ps_1;
+
+ v4sf invalid_mask = _mm_cmple_ps(x, _mm_setzero_ps());
+ v4sf e, mask, tmp, z, y;
+
+ x = _mm_max_ps(x, *(v4sf*)_ps_min_norm_pos); /* cut off denormalized stuff */
+
+#ifndef USE_SSE2
+ /* part 1: x = frexpf(x, &e); */
+ COPY_XMM_TO_MM(x, mm0, mm1);
+ mm0 = _mm_srli_pi32(mm0, 23);
+ mm1 = _mm_srli_pi32(mm1, 23);
+#else
+ emm0 = _mm_srli_epi32(_mm_castps_si128(x), 23);
+#endif
+ /* keep only the fractional part */
+ x = _mm_and_ps(x, *(v4sf*)_ps_inv_mant_mask);
+ x = _mm_or_ps(x, *(v4sf*)_ps_0p5);
+
+#ifndef USE_SSE2
+ /* now mm0:mm1 contain the real base-2 exponent */
+ mm0 = _mm_sub_pi32(mm0, *(v2si*)_pi32_0x7f);
+ mm1 = _mm_sub_pi32(mm1, *(v2si*)_pi32_0x7f);
+ e = _mm_cvtpi32x2_ps(mm0, mm1);
+ _mm_empty(); /* bye bye mmx */
+#else
+ emm0 = _mm_sub_epi32(emm0, *(v4si*)_pi32_0x7f);
+ e = _mm_cvtepi32_ps(emm0);
+#endif
+
+ e = _mm_add_ps(e, one);
+
+ /* part2:
+ if( x < SQRTHF ) {
+ e -= 1;
+ x = x + x - 1.0;
+ } else { x = x - 1.0; }
+ */
+
+ mask = _mm_cmplt_ps(x, *(v4sf*)_ps_cephes_SQRTHF);
+ tmp = _mm_and_ps(x, mask);
+ x = _mm_sub_ps(x, one);
+ e = _mm_sub_ps(e, _mm_and_ps(one, mask));
+ x = _mm_add_ps(x, tmp);
+
+
+ z = _mm_mul_ps(x,x);
+
+ y = *(v4sf*)_ps_cephes_log_p0;
+ y = _mm_mul_ps(y, x);
+ y = _mm_add_ps(y, *(v4sf*)_ps_cephes_log_p1);
+ y = _mm_mul_ps(y, x);
+ y = _mm_add_ps(y, *(v4sf*)_ps_cephes_log_p2);
+ y = _mm_mul_ps(y, x);
+ y = _mm_add_ps(y, *(v4sf*)_ps_cephes_log_p3);
+ y = _mm_mul_ps(y, x);
+ y = _mm_add_ps(y, *(v4sf*)_ps_cephes_log_p4);
+ y = _mm_mul_ps(y, x);
+ y = _mm_add_ps(y, *(v4sf*)_ps_cephes_log_p5);
+ y = _mm_mul_ps(y, x);
+ y = _mm_add_ps(y, *(v4sf*)_ps_cephes_log_p6);
+ y = _mm_mul_ps(y, x);
+ y = _mm_add_ps(y, *(v4sf*)_ps_cephes_log_p7);
+ y = _mm_mul_ps(y, x);
+ y = _mm_add_ps(y, *(v4sf*)_ps_cephes_log_p8);
+ y = _mm_mul_ps(y, x);
+
+ y = _mm_mul_ps(y, z);
+
+
+ tmp = _mm_mul_ps(e, *(v4sf*)_ps_cephes_log_q1);
+ y = _mm_add_ps(y, tmp);
+
+
+ tmp = _mm_mul_ps(z, *(v4sf*)_ps_0p5);
+ y = _mm_sub_ps(y, tmp);
+
+ tmp = _mm_mul_ps(e, *(v4sf*)_ps_cephes_log_q2);
+ x = _mm_add_ps(x, y);
+ x = _mm_add_ps(x, tmp);
+ x = _mm_or_ps(x, invalid_mask); // negative arg will be NAN
+ return x;
+}
+
+_PS_CONST(exp_hi, 88.3762626647949f);
+_PS_CONST(exp_lo, -88.3762626647949f);
+
+_PS_CONST(cephes_LOG2EF, 1.44269504088896341);
+_PS_CONST(cephes_exp_C1, 0.693359375);
+_PS_CONST(cephes_exp_C2, -2.12194440e-4);
+
+_PS_CONST(cephes_exp_p0, 1.9875691500E-4);
+_PS_CONST(cephes_exp_p1, 1.3981999507E-3);
+_PS_CONST(cephes_exp_p2, 8.3334519073E-3);
+_PS_CONST(cephes_exp_p3, 4.1665795894E-2);
+_PS_CONST(cephes_exp_p4, 1.6666665459E-1);
+_PS_CONST(cephes_exp_p5, 5.0000001201E-1);
+
+v4sf exp_ps(v4sf x) {
+ v4sf tmp = _mm_setzero_ps(), fx;
+#ifdef USE_SSE2
+ v4si emm0;
+#else
+ v2si mm0, mm1;
+#endif
+ v4sf one = *(v4sf*)_ps_1;
+ v4sf mask, z, y, pow2n;
+
+ x = _mm_min_ps(x, *(v4sf*)_ps_exp_hi);
+ x = _mm_max_ps(x, *(v4sf*)_ps_exp_lo);
+
+ /* express exp(x) as exp(g + n*log(2)) */
+ fx = _mm_mul_ps(x, *(v4sf*)_ps_cephes_LOG2EF);
+ fx = _mm_add_ps(fx, *(v4sf*)_ps_0p5);
+
+ /* how to perform a floorf with SSE: just below */
+#ifndef USE_SSE2
+ /* step 1 : cast to int */
+ tmp = _mm_movehl_ps(tmp, fx);
+ mm0 = _mm_cvttps_pi32(fx);
+ mm1 = _mm_cvttps_pi32(tmp);
+ /* step 2 : cast back to float */
+ tmp = _mm_cvtpi32x2_ps(mm0, mm1);
+#else
+ emm0 = _mm_cvttps_epi32(fx);
+ tmp = _mm_cvtepi32_ps(emm0);
+#endif
+ /* if greater, subtract 1 */
+ mask = _mm_cmpgt_ps(tmp, fx);
+ mask = _mm_and_ps(mask, one);
+ fx = _mm_sub_ps(tmp, mask);
+
+ tmp = _mm_mul_ps(fx, *(v4sf*)_ps_cephes_exp_C1);
+ z = _mm_mul_ps(fx, *(v4sf*)_ps_cephes_exp_C2);
+ x = _mm_sub_ps(x, tmp);
+ x = _mm_sub_ps(x, z);
+
+ z = _mm_mul_ps(x,x);
+
+ y = *(v4sf*)_ps_cephes_exp_p0;
+ y = _mm_mul_ps(y, x);
+ y = _mm_add_ps(y, *(v4sf*)_ps_cephes_exp_p1);
+ y = _mm_mul_ps(y, x);
+ y = _mm_add_ps(y, *(v4sf*)_ps_cephes_exp_p2);
+ y = _mm_mul_ps(y, x);
+ y = _mm_add_ps(y, *(v4sf*)_ps_cephes_exp_p3);
+ y = _mm_mul_ps(y, x);
+ y = _mm_add_ps(y, *(v4sf*)_ps_cephes_exp_p4);
+ y = _mm_mul_ps(y, x);
+ y = _mm_add_ps(y, *(v4sf*)_ps_cephes_exp_p5);
+ y = _mm_mul_ps(y, z);
+ y = _mm_add_ps(y, x);
+ y = _mm_add_ps(y, one);
+
+ /* build 2^n */
+#ifndef USE_SSE2
+ z = _mm_movehl_ps(z, fx);
+ mm0 = _mm_cvttps_pi32(fx);
+ mm1 = _mm_cvttps_pi32(z);
+ mm0 = _mm_add_pi32(mm0, *(v2si*)_pi32_0x7f);
+ mm1 = _mm_add_pi32(mm1, *(v2si*)_pi32_0x7f);
+ mm0 = _mm_slli_pi32(mm0, 23);
+ mm1 = _mm_slli_pi32(mm1, 23);
+
+ COPY_MM_TO_XMM(mm0, mm1, pow2n);
+ _mm_empty();
+#else
+ emm0 = _mm_cvttps_epi32(fx);
+ emm0 = _mm_add_epi32(emm0, *(v4si*)_pi32_0x7f);
+ emm0 = _mm_slli_epi32(emm0, 23);
+ pow2n = _mm_castsi128_ps(emm0);
+#endif
+ y = _mm_mul_ps(y, pow2n);
+ return y;
+}
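
exp_ps() above uses the standard cephes decomposition exp(x) = 2^n * exp(g), with n = floor(x*log2(e) + 0.5) and g = x - n*ln(2), where ln(2) is applied as the two constants cephes_exp_C1 + cephes_exp_C2 for extra precision. A tiny standalone illustration of that decomposition (editorial sketch, not part of the header):

#include <math.h>
#include <stdio.h>

int
main(void)
{
   float x = 10.0f;
   /* n = floor(x * log2(e) + 0.5), as computed into fx above */
   float n = floorf(x * 1.44269504088896341f + 0.5f);
   /* g = x - n*C1 - n*C2, i.e. x minus n*ln(2) applied in two steps */
   float g = x - n * 0.693359375f - n * (-2.12194440e-4f);

   printf("exp(%g) ~= 2^%d * exp(%g) = %g   (libm expf: %g)\n",
          x, (int)n, g, ldexpf(expf(g), (int)n), expf(x));
   return 0;
}
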
+
+_PS_CONST(minus_cephes_DP1, -0.78515625);
+_PS_CONST(minus_cephes_DP2, -2.4187564849853515625e-4);
+_PS_CONST(minus_cephes_DP3, -3.77489497744594108e-8);
+_PS_CONST(sincof_p0, -1.9515295891E-4);
+_PS_CONST(sincof_p1, 8.3321608736E-3);
+_PS_CONST(sincof_p2, -1.6666654611E-1);
+_PS_CONST(coscof_p0, 2.443315711809948E-005);
+_PS_CONST(coscof_p1, -1.388731625493765E-003);
+_PS_CONST(coscof_p2, 4.166664568298827E-002);
+_PS_CONST(cephes_FOPI, 1.27323954473516); // 4 / M_PI
+
+
+/* evaluation of 4 sines at once, using only SSE1+MMX intrinsics so
+ it also runs on old Athlon XPs and your grandmother's Pentium III.
+
+ The code is the exact rewriting of the cephes sinf function.
+ Precision is excellent as long as x < 8192 (I did not bother to
+ take into account the special handling they have for greater values
+ -- it does not return garbage for arguments over 8192, though, but
+ the extra precision is missing).
+
+ Note that it is such that sinf((float)M_PI) = 8.74e-8, which is the
+ surprising but correct result.
+
+ Performance is also surprisingly good: 1.33 times faster than the
+ macOS vsinf SSE2 function, and 1.5 times faster than the
+ __vrs4_sinf of AMD's ACML (which is only available in 64-bit). Not
+ too bad for an SSE1 function with no special tuning!
+ However, the latter libraries probably have much better handling of NaN,
+ Inf, denormalized and other special arguments.
+
+ On my core 1 duo, the execution of this function takes approximately 95 cycles.
+
+ From what I have observed on the experiments with Intel AMath lib, switching to an
+ SSE2 version would improve the perf by only 10%.
+
+ Since it is based on SSE intrinsics, it has to be compiled at -O2 to
+ deliver full speed.
+*/
+v4sf sin_ps(v4sf x) { // any x
+ v4sf xmm1, xmm2 = _mm_setzero_ps(), xmm3, sign_bit, y;
+
+#ifdef USE_SSE2
+ v4si emm0, emm2;
+#else
+ v2si mm0, mm1, mm2, mm3;
+#endif
+ v4sf swap_sign_bit, poly_mask, z, tmp, y2;
+
+ sign_bit = x;
+ /* take the absolute value */
+ x = _mm_and_ps(x, *(v4sf*)_ps_inv_sign_mask);
+ /* extract the sign bit (upper one) */
+ sign_bit = _mm_and_ps(sign_bit, *(v4sf*)_ps_sign_mask);
+
+ /* scale by 4/Pi */
+ y = _mm_mul_ps(x, *(v4sf*)_ps_cephes_FOPI);
+
+ //printf("plop:"); print4(y);
+#ifdef USE_SSE2
+ /* store the integer part of y in mm0 */
+ emm2 = _mm_cvttps_epi32(y);
+ /* j=(j+1) & (~1) (see the cephes sources) */
+ emm2 = _mm_add_epi32(emm2, *(v4si*)_pi32_1);
+ emm2 = _mm_and_si128(emm2, *(v4si*)_pi32_inv1);
+ y = _mm_cvtepi32_ps(emm2);
+ /* get the swap sign flag */
+ emm0 = _mm_and_si128(emm2, *(v4si*)_pi32_4);
+ emm0 = _mm_slli_epi32(emm0, 29);
+ /* get the polynomial selection mask:
+ there is one polynomial for 0 <= x <= Pi/4
+ and another one for Pi/4 < x <= Pi/2.
+
+ Both branches will be computed.
+ */
+ emm2 = _mm_and_si128(emm2, *(v4si*)_pi32_2);
+ emm2 = _mm_cmpeq_epi32(emm2, _mm_setzero_si128());
+
+ swap_sign_bit = _mm_castsi128_ps(emm0);
+ poly_mask = _mm_castsi128_ps(emm2);
+ sign_bit = _mm_xor_ps(sign_bit, swap_sign_bit);
+#else
+ /* store the integer part of y in mm0:mm1 */
+ xmm2 = _mm_movehl_ps(xmm2, y);
+ mm2 = _mm_cvttps_pi32(y);
+ mm3 = _mm_cvttps_pi32(xmm2);
+ /* j=(j+1) & (~1) (see the cephes sources) */
+ mm2 = _mm_add_pi32(mm2, *(v2si*)_pi32_1);
+ mm3 = _mm_add_pi32(mm3, *(v2si*)_pi32_1);
+ mm2 = _mm_and_si64(mm2, *(v2si*)_pi32_inv1);
+ mm3 = _mm_and_si64(mm3, *(v2si*)_pi32_inv1);
+ y = _mm_cvtpi32x2_ps(mm2, mm3);
+ /* get the swap sign flag */
+ mm0 = _mm_and_si64(mm2, *(v2si*)_pi32_4);
+ mm1 = _mm_and_si64(mm3, *(v2si*)_pi32_4);
+ mm0 = _mm_slli_pi32(mm0, 29);
+ mm1 = _mm_slli_pi32(mm1, 29);
+ /* get the polynomial selection mask */
+ mm2 = _mm_and_si64(mm2, *(v2si*)_pi32_2);
+ mm3 = _mm_and_si64(mm3, *(v2si*)_pi32_2);
+ mm2 = _mm_cmpeq_pi32(mm2, _mm_setzero_si64());
+ mm3 = _mm_cmpeq_pi32(mm3, _mm_setzero_si64());
+
+ COPY_MM_TO_XMM(mm0, mm1, swap_sign_bit);
+ COPY_MM_TO_XMM(mm2, mm3, poly_mask);
+ sign_bit = _mm_xor_ps(sign_bit, swap_sign_bit);
+ _mm_empty(); /* good-bye mmx */
+#endif
+
+ /* The magic pass: "Extended precision modular arithmetic"
+ x = ((x - y * DP1) - y * DP2) - y * DP3; */
+ xmm1 = *(v4sf*)_ps_minus_cephes_DP1;
+ xmm2 = *(v4sf*)_ps_minus_cephes_DP2;
+ xmm3 = *(v4sf*)_ps_minus_cephes_DP3;
+ xmm1 = _mm_mul_ps(y, xmm1);
+ xmm2 = _mm_mul_ps(y, xmm2);
+ xmm3 = _mm_mul_ps(y, xmm3);
+ x = _mm_add_ps(x, xmm1);
+ x = _mm_add_ps(x, xmm2);
+ x = _mm_add_ps(x, xmm3);
+
+ /* Evaluate the first polynomial (0 <= x <= Pi/4) */
+ y = *(v4sf*)_ps_coscof_p0;
+ z = _mm_mul_ps(x,x);
+
+ y = _mm_mul_ps(y, z);
+ y = _mm_add_ps(y, *(v4sf*)_ps_coscof_p1);
+ y = _mm_mul_ps(y, z);
+ y = _mm_add_ps(y, *(v4sf*)_ps_coscof_p2);
+ y = _mm_mul_ps(y, z);
+ y = _mm_mul_ps(y, z);
+ tmp = _mm_mul_ps(z, *(v4sf*)_ps_0p5);
+ y = _mm_sub_ps(y, tmp);
+ y = _mm_add_ps(y, *(v4sf*)_ps_1);
+
+ /* Evaluate the second polynomial (Pi/4 <= x <= Pi/2) */
+
+ y2 = *(v4sf*)_ps_sincof_p0;
+ y2 = _mm_mul_ps(y2, z);
+ y2 = _mm_add_ps(y2, *(v4sf*)_ps_sincof_p1);
+ y2 = _mm_mul_ps(y2, z);
+ y2 = _mm_add_ps(y2, *(v4sf*)_ps_sincof_p2);
+ y2 = _mm_mul_ps(y2, z);
+ y2 = _mm_mul_ps(y2, x);
+ y2 = _mm_add_ps(y2, x);
+
+ /* select the correct result from the two polynomials */
+ xmm3 = poly_mask;
+ y2 = _mm_and_ps(xmm3, y2); //, xmm3);
+ y = _mm_andnot_ps(xmm3, y);
+ y = _mm_add_ps(y,y2);
+ /* update the sign */
+ y = _mm_xor_ps(y, sign_bit);
+
+ return y;
+}
+
+/* almost the same as sin_ps */
+v4sf cos_ps(v4sf x) { // any x
+ v4sf xmm1, xmm2 = _mm_setzero_ps(), xmm3, y;
+#ifdef USE_SSE2
+ v4si emm0, emm2;
+#else
+ v2si mm0, mm1, mm2, mm3;
+#endif
+ v4sf sign_bit, poly_mask, z, tmp, y2;
+
+ /* take the absolute value */
+ x = _mm_and_ps(x, *(v4sf*)_ps_inv_sign_mask);
+
+ /* scale by 4/Pi */
+ y = _mm_mul_ps(x, *(v4sf*)_ps_cephes_FOPI);
+
+#ifdef USE_SSE2
+ /* store the integer part of y in mm0 */
+ emm2 = _mm_cvttps_epi32(y);
+ /* j=(j+1) & (~1) (see the cephes sources) */
+ emm2 = _mm_add_epi32(emm2, *(v4si*)_pi32_1);
+ emm2 = _mm_and_si128(emm2, *(v4si*)_pi32_inv1);
+ y = _mm_cvtepi32_ps(emm2);
+
+ emm2 = _mm_sub_epi32(emm2, *(v4si*)_pi32_2);
+
+ /* get the swap sign flag */
+ emm0 = _mm_andnot_si128(emm2, *(v4si*)_pi32_4);
+ emm0 = _mm_slli_epi32(emm0, 29);
+ /* get the polynomial selection mask */
+ emm2 = _mm_and_si128(emm2, *(v4si*)_pi32_2);
+ emm2 = _mm_cmpeq_epi32(emm2, _mm_setzero_si128());
+
+ sign_bit = _mm_castsi128_ps(emm0);
+ poly_mask = _mm_castsi128_ps(emm2);
+#else
+ /* store the integer part of y in mm0:mm1 */
+ xmm2 = _mm_movehl_ps(xmm2, y);
+ mm2 = _mm_cvttps_pi32(y);
+ mm3 = _mm_cvttps_pi32(xmm2);
+
+ /* j=(j+1) & (~1) (see the cephes sources) */
+ mm2 = _mm_add_pi32(mm2, *(v2si*)_pi32_1);
+ mm3 = _mm_add_pi32(mm3, *(v2si*)_pi32_1);
+ mm2 = _mm_and_si64(mm2, *(v2si*)_pi32_inv1);
+ mm3 = _mm_and_si64(mm3, *(v2si*)_pi32_inv1);
+
+ y = _mm_cvtpi32x2_ps(mm2, mm3);
+
+
+ mm2 = _mm_sub_pi32(mm2, *(v2si*)_pi32_2);
+ mm3 = _mm_sub_pi32(mm3, *(v2si*)_pi32_2);
+
+ /* get the swap sign flag in mm0:mm1 and the
+ polynomial selection mask in mm2:mm3 */
+
+ mm0 = _mm_andnot_si64(mm2, *(v2si*)_pi32_4);
+ mm1 = _mm_andnot_si64(mm3, *(v2si*)_pi32_4);
+ mm0 = _mm_slli_pi32(mm0, 29);
+ mm1 = _mm_slli_pi32(mm1, 29);
+
+ mm2 = _mm_and_si64(mm2, *(v2si*)_pi32_2);
+ mm3 = _mm_and_si64(mm3, *(v2si*)_pi32_2);
+
+ mm2 = _mm_cmpeq_pi32(mm2, _mm_setzero_si64());
+ mm3 = _mm_cmpeq_pi32(mm3, _mm_setzero_si64());
+
+ COPY_MM_TO_XMM(mm0, mm1, sign_bit);
+ COPY_MM_TO_XMM(mm2, mm3, poly_mask);
+ _mm_empty(); /* good-bye mmx */
+#endif
+ /* The magic pass: "Extended precision modular arithmetic"
+ x = ((x - y * DP1) - y * DP2) - y * DP3; */
+ xmm1 = *(v4sf*)_ps_minus_cephes_DP1;
+ xmm2 = *(v4sf*)_ps_minus_cephes_DP2;
+ xmm3 = *(v4sf*)_ps_minus_cephes_DP3;
+ xmm1 = _mm_mul_ps(y, xmm1);
+ xmm2 = _mm_mul_ps(y, xmm2);
+ xmm3 = _mm_mul_ps(y, xmm3);
+ x = _mm_add_ps(x, xmm1);
+ x = _mm_add_ps(x, xmm2);
+ x = _mm_add_ps(x, xmm3);
+
+ /* Evaluate the first polynomial (0 <= x <= Pi/4) */
+ y = *(v4sf*)_ps_coscof_p0;
+ z = _mm_mul_ps(x,x);
+
+ y = _mm_mul_ps(y, z);
+ y = _mm_add_ps(y, *(v4sf*)_ps_coscof_p1);
+ y = _mm_mul_ps(y, z);
+ y = _mm_add_ps(y, *(v4sf*)_ps_coscof_p2);
+ y = _mm_mul_ps(y, z);
+ y = _mm_mul_ps(y, z);
+ tmp = _mm_mul_ps(z, *(v4sf*)_ps_0p5);
+ y = _mm_sub_ps(y, tmp);
+ y = _mm_add_ps(y, *(v4sf*)_ps_1);
+
+ /* Evaluate the second polynomial (Pi/4 <= x <= Pi/2) */
+
+ y2 = *(v4sf*)_ps_sincof_p0;
+ y2 = _mm_mul_ps(y2, z);
+ y2 = _mm_add_ps(y2, *(v4sf*)_ps_sincof_p1);
+ y2 = _mm_mul_ps(y2, z);
+ y2 = _mm_add_ps(y2, *(v4sf*)_ps_sincof_p2);
+ y2 = _mm_mul_ps(y2, z);
+ y2 = _mm_mul_ps(y2, x);
+ y2 = _mm_add_ps(y2, x);
+
+ /* select the correct result from the two polynomials */
+ xmm3 = poly_mask;
+ y2 = _mm_and_ps(xmm3, y2); //, xmm3);
+ y = _mm_andnot_ps(xmm3, y);
+ y = _mm_add_ps(y,y2);
+ /* update the sign */
+ y = _mm_xor_ps(y, sign_bit);
+
+ return y;
+}
+
+/* since sin_ps and cos_ps are almost identical, sincos_ps could replace both of them:
+ it is almost as fast, and gives you a free cosine with your sine */
+void sincos_ps(v4sf x, v4sf *s, v4sf *c) {
+ v4sf xmm1, xmm2, xmm3 = _mm_setzero_ps(), sign_bit_sin, y;
+#ifdef USE_SSE2
+ v4si emm0, emm2, emm4;
+#else
+ v2si mm0, mm1, mm2, mm3, mm4, mm5;
+#endif
+ v4sf swap_sign_bit_sin, poly_mask, z, tmp, y2, ysin1, ysin2;
+ v4sf sign_bit_cos;
+
+ sign_bit_sin = x;
+ /* take the absolute value */
+ x = _mm_and_ps(x, *(v4sf*)_ps_inv_sign_mask);
+ /* extract the sign bit (upper one) */
+ sign_bit_sin = _mm_and_ps(sign_bit_sin, *(v4sf*)_ps_sign_mask);
+
+ /* scale by 4/Pi */
+ y = _mm_mul_ps(x, *(v4sf*)_ps_cephes_FOPI);
+
+#ifdef USE_SSE2
+ /* store the integer part of y in emm2 */
+ emm2 = _mm_cvttps_epi32(y);
+
+ /* j=(j+1) & (~1) (see the cephes sources) */
+ emm2 = _mm_add_epi32(emm2, *(v4si*)_pi32_1);
+ emm2 = _mm_and_si128(emm2, *(v4si*)_pi32_inv1);
+ y = _mm_cvtepi32_ps(emm2);
+
+ emm4 = emm2;
+
+ /* get the swap sign flag for the sine */
+ emm0 = _mm_and_si128(emm2, *(v4si*)_pi32_4);
+ emm0 = _mm_slli_epi32(emm0, 29);
+ swap_sign_bit_sin = _mm_castsi128_ps(emm0);
+
+ /* get the polynomial selection mask for the sine */
+ emm2 = _mm_and_si128(emm2, *(v4si*)_pi32_2);
+ emm2 = _mm_cmpeq_epi32(emm2, _mm_setzero_si128());
+ poly_mask = _mm_castsi128_ps(emm2);
+#else
+ /* store the integer part of y in mm2:mm3 */
+ xmm3 = _mm_movehl_ps(xmm3, y);
+ mm2 = _mm_cvttps_pi32(y);
+ mm3 = _mm_cvttps_pi32(xmm3);
+
+ /* j=(j+1) & (~1) (see the cephes sources) */
+ mm2 = _mm_add_pi32(mm2, *(v2si*)_pi32_1);
+ mm3 = _mm_add_pi32(mm3, *(v2si*)_pi32_1);
+ mm2 = _mm_and_si64(mm2, *(v2si*)_pi32_inv1);
+ mm3 = _mm_and_si64(mm3, *(v2si*)_pi32_inv1);
+
+ y = _mm_cvtpi32x2_ps(mm2, mm3);
+
+ mm4 = mm2;
+ mm5 = mm3;
+
+ /* get the swap sign flag for the sine */
+ mm0 = _mm_and_si64(mm2, *(v2si*)_pi32_4);
+ mm1 = _mm_and_si64(mm3, *(v2si*)_pi32_4);
+ mm0 = _mm_slli_pi32(mm0, 29);
+ mm1 = _mm_slli_pi32(mm1, 29);
+
+ COPY_MM_TO_XMM(mm0, mm1, swap_sign_bit_sin);
+
+ /* get the polynomial selection mask for the sine */
+
+ mm2 = _mm_and_si64(mm2, *(v2si*)_pi32_2);
+ mm3 = _mm_and_si64(mm3, *(v2si*)_pi32_2);
+ mm2 = _mm_cmpeq_pi32(mm2, _mm_setzero_si64());
+ mm3 = _mm_cmpeq_pi32(mm3, _mm_setzero_si64());
+
+ COPY_MM_TO_XMM(mm2, mm3, poly_mask);
+#endif
+
+ /* The magic pass: "Extended precision modular arithmetic"
+ x = ((x - y * DP1) - y * DP2) - y * DP3; */
+ xmm1 = *(v4sf*)_ps_minus_cephes_DP1;
+ xmm2 = *(v4sf*)_ps_minus_cephes_DP2;
+ xmm3 = *(v4sf*)_ps_minus_cephes_DP3;
+ xmm1 = _mm_mul_ps(y, xmm1);
+ xmm2 = _mm_mul_ps(y, xmm2);
+ xmm3 = _mm_mul_ps(y, xmm3);
+ x = _mm_add_ps(x, xmm1);
+ x = _mm_add_ps(x, xmm2);
+ x = _mm_add_ps(x, xmm3);
+
+#ifdef USE_SSE2
+ emm4 = _mm_sub_epi32(emm4, *(v4si*)_pi32_2);
+ emm4 = _mm_andnot_si128(emm4, *(v4si*)_pi32_4);
+ emm4 = _mm_slli_epi32(emm4, 29);
+ sign_bit_cos = _mm_castsi128_ps(emm4);
+#else
+ /* get the sign flag for the cosine */
+ mm4 = _mm_sub_pi32(mm4, *(v2si*)_pi32_2);
+ mm5 = _mm_sub_pi32(mm5, *(v2si*)_pi32_2);
+ mm4 = _mm_andnot_si64(mm4, *(v2si*)_pi32_4);
+ mm5 = _mm_andnot_si64(mm5, *(v2si*)_pi32_4);
+ mm4 = _mm_slli_pi32(mm4, 29);
+ mm5 = _mm_slli_pi32(mm5, 29);
+ COPY_MM_TO_XMM(mm4, mm5, sign_bit_cos);
+ _mm_empty(); /* good-bye mmx */
+#endif
+
+ sign_bit_sin = _mm_xor_ps(sign_bit_sin, swap_sign_bit_sin);
+
+
+ /* Evaluate the first polynomial (0 <= x <= Pi/4) */
+ z = _mm_mul_ps(x,x);
+ y = *(v4sf*)_ps_coscof_p0;
+
+ y = _mm_mul_ps(y, z);
+ y = _mm_add_ps(y, *(v4sf*)_ps_coscof_p1);
+ y = _mm_mul_ps(y, z);
+ y = _mm_add_ps(y, *(v4sf*)_ps_coscof_p2);
+ y = _mm_mul_ps(y, z);
+ y = _mm_mul_ps(y, z);
+ tmp = _mm_mul_ps(z, *(v4sf*)_ps_0p5);
+ y = _mm_sub_ps(y, tmp);
+ y = _mm_add_ps(y, *(v4sf*)_ps_1);
+
+ /* Evaluate the second polynomial (Pi/4 <= x <= Pi/2) */
+
+ y2 = *(v4sf*)_ps_sincof_p0;
+ y2 = _mm_mul_ps(y2, z);
+ y2 = _mm_add_ps(y2, *(v4sf*)_ps_sincof_p1);
+ y2 = _mm_mul_ps(y2, z);
+ y2 = _mm_add_ps(y2, *(v4sf*)_ps_sincof_p2);
+ y2 = _mm_mul_ps(y2, z);
+ y2 = _mm_mul_ps(y2, x);
+ y2 = _mm_add_ps(y2, x);
+
+ /* select the correct result from the two polynomials */
+ xmm3 = poly_mask;
+ ysin2 = _mm_and_ps(xmm3, y2);
+ ysin1 = _mm_andnot_ps(xmm3, y);
+ y2 = _mm_sub_ps(y2,ysin2);
+ y = _mm_sub_ps(y, ysin1);
+
+ xmm1 = _mm_add_ps(ysin1,ysin2);
+ xmm2 = _mm_add_ps(y,y2);
+
+ /* update the sign */
+ *s = _mm_xor_ps(xmm1, sign_bit_sin);
+ *c = _mm_xor_ps(xmm2, sign_bit_cos);
+}
+
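
Because the functions above are defined in the header rather than merely declared, sse_mathfun.h can also be exercised outside of llvmpipe with a small standalone program. The sketch below is a hedged usage example: the file name, build line and sample inputs are illustrative assumptions, not part of the commit. It also shows the point of sincos_ps(): one call yields both the sine and the cosine of all four lanes.

/* use_sse_mathfun.c -- hypothetical standalone check of sse_mathfun.h.
 * Build with something like:  gcc -O2 -msse2 use_sse_mathfun.c -lm */
#include <math.h>
#include <stdio.h>

#define USE_SSE2              /* take the SSE2 code paths instead of SSE1+MMX */
#include "sse_mathfun.h"

int
main(void)
{
   v4sf x = _mm_setr_ps(0.1f, 0.785398f, 1.0f, 3.0f);
   v4sf s, c;
   float xs[4], ss[4], cs[4];

   sincos_ps(x, &s, &c);      /* sine and cosine of all four lanes at once */

   _mm_storeu_ps(xs, x);
   _mm_storeu_ps(ss, s);
   _mm_storeu_ps(cs, c);

   for (int i = 0; i < 4; i++) {
      printf("x=%f  sin=%f (libm %f)  cos=%f (libm %f)\n",
             xs[i], ss[i], sinf(xs[i]), cs[i], cosf(xs[i]));
   }
   return 0;
}
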