# HG changeset patch # User Peter Meerwald # Date 1316170388 -7200 # Node ID 723f588b82ac7d9f244b16fa39a6cd44dfcc4478 import diff -r 000000000000 -r 723f588b82ac .hgignore --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/.hgignore Fri Sep 16 12:53:08 2011 +0200 @@ -0,0 +1,4 @@ +syntax: glob +*.o +peck_test_x86 +peck_test_arm diff -r 000000000000 -r 723f588b82ac _peck_fft_guts.h --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/_peck_fft_guts.h Fri Sep 16 12:53:08 2011 +0200 @@ -0,0 +1,164 @@ +/* +Copyright (c) 2003-2010, Mark Borgerding + +All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. + * Neither the author nor the names of any contributors may be used to endorse or promote products derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +/* peck_fft.h + defines peck_fft_scalar as either short or a float type + and defines + typedef struct { peck_fft_scalar r; peck_fft_scalar i; }peck_fft_cpx; */ +#include "peck_fft.h" +#include + +#define MAXFACTORS 32 +/* e.g. an fft of length 128 has 4 factors + as far as kissfft is concerned + 4*4*4*2 + */ + +struct peck_fft_state{ + int nfft; + int inverse; + int factors[2*MAXFACTORS]; + peck_fft_cpx twiddles[1]; +}; + +/* + Explanation of macros dealing with complex math: + + C_MUL(m,a,b) : m = a*b + C_FIXDIV( c , div ) : if a fixed point impl., c /= div. 
noop otherwise + C_SUB( res, a,b) : res = a - b + C_SUBFROM( res , a) : res -= a + C_ADDTO( res , a) : res += a + * */ +#ifdef FIXED_POINT +#if (FIXED_POINT==32) +# define FRACBITS 31 +# define SAMPPROD int64_t +#define SAMP_MAX 2147483647 +#else +# define FRACBITS 15 +# define SAMPPROD int32_t +#define SAMP_MAX 32767 +#endif + +#define SAMP_MIN -SAMP_MAX + +#if defined(CHECK_OVERFLOW) +# define CHECK_OVERFLOW_OP(a,op,b) \ + if ( (SAMPPROD)(a) op (SAMPPROD)(b) > SAMP_MAX || (SAMPPROD)(a) op (SAMPPROD)(b) < SAMP_MIN ) { \ + fprintf(stderr,"WARNING:overflow @ " __FILE__ "(%d): (%d " #op" %d) = %ld\n",__LINE__,(a),(b),(SAMPPROD)(a) op (SAMPPROD)(b) ); } +#endif + + +# define smul(a,b) ( (SAMPPROD)(a)*(b) ) +# define sround( x ) (peck_fft_scalar)( ( (x) + (1<<(FRACBITS-1)) ) >> FRACBITS ) + +# define S_MUL(a,b) sround( smul(a,b) ) + +# define C_MUL(m,a,b) \ + do{ (m).r = sround( smul((a).r,(b).r) - smul((a).i,(b).i) ); \ + (m).i = sround( smul((a).r,(b).i) + smul((a).i,(b).r) ); }while(0) + +# define DIVSCALAR(x,k) \ + (x) = sround( smul( x, SAMP_MAX/k ) ) + +# define C_FIXDIV(c,div) \ + do { DIVSCALAR( (c).r , div); \ + DIVSCALAR( (c).i , div); }while (0) + +# define C_MULBYSCALAR( c, s ) \ + do{ (c).r = sround( smul( (c).r , s ) ) ;\ + (c).i = sround( smul( (c).i , s ) ) ; }while(0) + +#else /* not FIXED_POINT*/ + +# define S_MUL(a,b) ( (a)*(b) ) +#define C_MUL(m,a,b) \ + do{ (m).r = (a).r*(b).r - (a).i*(b).i;\ + (m).i = (a).r*(b).i + (a).i*(b).r; }while(0) +# define C_FIXDIV(c,div) /* NOOP */ +# define C_MULBYSCALAR( c, s ) \ + do{ (c).r *= (s);\ + (c).i *= (s); }while(0) +#endif + +#ifndef CHECK_OVERFLOW_OP +# define CHECK_OVERFLOW_OP(a,op,b) /* noop */ +#endif + +#define C_ADD( res, a,b)\ + do { \ + CHECK_OVERFLOW_OP((a).r,+,(b).r)\ + CHECK_OVERFLOW_OP((a).i,+,(b).i)\ + (res).r=(a).r+(b).r; (res).i=(a).i+(b).i; \ + }while(0) +#define C_SUB( res, a,b)\ + do { \ + CHECK_OVERFLOW_OP((a).r,-,(b).r)\ + CHECK_OVERFLOW_OP((a).i,-,(b).i)\ + (res).r=(a).r-(b).r; (res).i=(a).i-(b).i; \ + }while(0) +#define C_ADDTO( res , a)\ + do { \ + CHECK_OVERFLOW_OP((res).r,+,(a).r)\ + CHECK_OVERFLOW_OP((res).i,+,(a).i)\ + (res).r += (a).r; (res).i += (a).i;\ + }while(0) + +#define C_SUBFROM( res , a)\ + do {\ + CHECK_OVERFLOW_OP((res).r,-,(a).r)\ + CHECK_OVERFLOW_OP((res).i,-,(a).i)\ + (res).r -= (a).r; (res).i -= (a).i; \ + }while(0) + + +#ifdef FIXED_POINT +# define PECK_FFT_COS(phase) floor(.5+SAMP_MAX * cos (phase)) +# define PECK_FFT_SIN(phase) floor(.5+SAMP_MAX * sin (phase)) +# define HALF_OF(x) ((x)>>1) +#elif defined(USE_SIMD) +# define PECK_FFT_COS(phase) _mm_set1_ps( cos(phase) ) +# define PECK_FFT_SIN(phase) _mm_set1_ps( sin(phase) ) +# define HALF_OF(x) ((x)*_mm_set1_ps(.5)) +#else +# define PECK_FFT_COS(phase) (peck_fft_scalar) cos(phase) +# define PECK_FFT_SIN(phase) (peck_fft_scalar) sin(phase) +# define HALF_OF(x) ((x)*.5) +#endif + +#define kf_cexp(x,phase) \ + do{ \ + (x)->r = PECK_FFT_COS(phase);\ + (x)->i = PECK_FFT_SIN(phase);\ + }while(0) + + +/* a debugging function */ +#define pcpx(c)\ + fprintf(stderr,"%g + %gi\n",(double)((c)->r),(double)((c)->i) ) + + +#ifdef PECK_FFT_USE_ALLOCA +// define this to allow use of alloca instead of malloc for temporary buffers +// Temporary buffers are used in two case: +// 1. FFT sizes that have "bad" factors. i.e. not 2,3 and 5 +// 2. "in-place" FFTs. Notice the quotes, since kissfft does not really do an in-place transform. 
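The macros above are the whole complex-arithmetic layer shared by the fixed-point and float builds. A minimal sketch of how they compose, assuming the default float build (FIXED_POINT not defined); the file name check_macros.c and the sample values are made up:

/* check_macros.c - standalone exercise of the complex-math macros */
#include <stdio.h>
#include "_peck_fft_guts.h"

int main(void) {
    peck_fft_cpx a = { 1.0f, 2.0f }, b = { 3.0f, -1.0f }, m;
    C_MUL(m, a, b);    /* m = a*b = (1+2i)(3-i) = 5 + 5i */
    C_ADDTO(m, a);     /* m += a, giving 6 + 7i */
    C_FIXDIV(m, 2);    /* no-op here; scales by 1/2 in the fixed-point build */
    pcpx(&m);          /* debug print: "6 + 7i" */
    return 0;
}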
+#include +#define PECK_FFT_TMP_ALLOC(nbytes) alloca(nbytes) +#define PECK_FFT_TMP_FREE(ptr) +#else +#define PECK_FFT_TMP_ALLOC(nbytes) PECK_FFT_MALLOC(nbytes) +#define PECK_FFT_TMP_FREE(ptr) PECK_FFT_FREE(ptr) +#endif diff -r 000000000000 -r 723f588b82ac compile.sh --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/compile.sh Fri Sep 16 12:53:08 2011 +0200 @@ -0,0 +1,21 @@ + +gcc \ + -I . \ + -o peck_test_x86 \ + peck_fftr.c peck_fft.c \ + peck_test.c \ + -lm + +exit + +/opt/arm-2011.03/bin/arm-none-linux-gnueabi-gcc \ + -I . \ + -O3 -mcpu=cortex-a8 -mfpu=neon -mfloat-abi=softfp -ffast-math -fomit-frame-pointer \ + -o peck_test_arm \ + peck_fftr.c peck_fft.c \ + peck_test.c \ + -lm + +time ./peck_test_x86 + +scp peck_test_arm root@192.168.233.114:. diff -r 000000000000 -r 723f588b82ac peck_fft.c --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/peck_fft.c Fri Sep 16 12:53:08 2011 +0200 @@ -0,0 +1,375 @@ +/* +Copyright (c) 2003-2010, Mark Borgerding + +All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. + * Neither the author nor the names of any contributors may be used to endorse or promote products derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + + +#include "_peck_fft_guts.h" +/* The guts header contains all the multiplication and addition macros that are defined for + fixed or floating point complex numbers. It also delares the kf_ internal functions. 
+ */ + +static void kf_bfly2( + peck_fft_cpx * Fout, + const size_t fstride, + const peck_fft_cfg st, + int m + ) +{ + +//printf("kf_bfly2\n"); + + peck_fft_cpx * Fout2; + peck_fft_cpx * tw1 = st->twiddles; + peck_fft_cpx t; + Fout2 = Fout + m; + do{ + C_FIXDIV(*Fout,2); C_FIXDIV(*Fout2,2); + + C_MUL (t, *Fout2 , *tw1); + tw1 += fstride; + C_SUB( *Fout2 , *Fout , t ); + C_ADDTO( *Fout , t ); + ++Fout2; + ++Fout; + }while (--m); +} + +static void kf_bfly4( + peck_fft_cpx * Fout, + const size_t fstride, + const peck_fft_cfg st, + const size_t m + ) +{ + peck_fft_cpx *tw1,*tw2,*tw3; + peck_fft_cpx scratch[6]; + size_t k=m; + const size_t m2=2*m; + const size_t m3=3*m; + +//printf("kf_bfly4\n"); + + + tw3 = tw2 = tw1 = st->twiddles; + + do { + C_FIXDIV(*Fout,4); C_FIXDIV(Fout[m],4); C_FIXDIV(Fout[m2],4); C_FIXDIV(Fout[m3],4); + + C_MUL(scratch[0],Fout[m] , *tw1 ); + C_MUL(scratch[1],Fout[m2] , *tw2 ); + C_MUL(scratch[2],Fout[m3] , *tw3 ); + + C_SUB( scratch[5] , *Fout, scratch[1] ); + C_ADDTO(*Fout, scratch[1]); + C_ADD( scratch[3] , scratch[0] , scratch[2] ); + C_SUB( scratch[4] , scratch[0] , scratch[2] ); + C_SUB( Fout[m2], *Fout, scratch[3] ); + tw1 += fstride; + tw2 += fstride*2; + tw3 += fstride*3; + C_ADDTO( *Fout , scratch[3] ); + + if(st->inverse) { + Fout[m].r = scratch[5].r - scratch[4].i; + Fout[m].i = scratch[5].i + scratch[4].r; + Fout[m3].r = scratch[5].r + scratch[4].i; + Fout[m3].i = scratch[5].i - scratch[4].r; + }else{ + Fout[m].r = scratch[5].r + scratch[4].i; + Fout[m].i = scratch[5].i - scratch[4].r; + Fout[m3].r = scratch[5].r - scratch[4].i; + Fout[m3].i = scratch[5].i + scratch[4].r; + } + ++Fout; + }while(--k); +} + +static void kf_bfly3( + peck_fft_cpx * Fout, + const size_t fstride, + const peck_fft_cfg st, + size_t m + ) +{ + size_t k=m; + const size_t m2 = 2*m; + peck_fft_cpx *tw1,*tw2; + peck_fft_cpx scratch[5]; + peck_fft_cpx epi3; + epi3 = st->twiddles[fstride*m]; + +printf("kf_bfly3\n"); + + + tw1=tw2=st->twiddles; + + do{ + C_FIXDIV(*Fout,3); C_FIXDIV(Fout[m],3); C_FIXDIV(Fout[m2],3); + + C_MUL(scratch[1],Fout[m] , *tw1); + C_MUL(scratch[2],Fout[m2] , *tw2); + + C_ADD(scratch[3],scratch[1],scratch[2]); + C_SUB(scratch[0],scratch[1],scratch[2]); + tw1 += fstride; + tw2 += fstride*2; + + Fout[m].r = Fout->r - HALF_OF(scratch[3].r); + Fout[m].i = Fout->i - HALF_OF(scratch[3].i); + + C_MULBYSCALAR( scratch[0] , epi3.i ); + + C_ADDTO(*Fout,scratch[3]); + + Fout[m2].r = Fout[m].r + scratch[0].i; + Fout[m2].i = Fout[m].i - scratch[0].r; + + Fout[m].r -= scratch[0].i; + Fout[m].i += scratch[0].r; + + ++Fout; + }while(--k); +} + +static void kf_bfly5( + peck_fft_cpx * Fout, + const size_t fstride, + const peck_fft_cfg st, + int m + ) +{ + peck_fft_cpx *Fout0,*Fout1,*Fout2,*Fout3,*Fout4; + int u; + peck_fft_cpx scratch[13]; + peck_fft_cpx * twiddles = st->twiddles; + peck_fft_cpx *tw; + peck_fft_cpx ya,yb; + ya = twiddles[fstride*m]; + yb = twiddles[fstride*2*m]; + +printf("kf_bfly5\n"); + + + Fout0=Fout; + Fout1=Fout0+m; + Fout2=Fout0+2*m; + Fout3=Fout0+3*m; + Fout4=Fout0+4*m; + + tw=st->twiddles; + for ( u=0; ur += scratch[7].r + scratch[8].r; + Fout0->i += scratch[7].i + scratch[8].i; + + scratch[5].r = scratch[0].r + S_MUL(scratch[7].r,ya.r) + S_MUL(scratch[8].r,yb.r); + scratch[5].i = scratch[0].i + S_MUL(scratch[7].i,ya.r) + S_MUL(scratch[8].i,yb.r); + + scratch[6].r = S_MUL(scratch[10].i,ya.i) + S_MUL(scratch[9].i,yb.i); + scratch[6].i = -S_MUL(scratch[10].r,ya.i) - S_MUL(scratch[9].r,yb.i); + + C_SUB(*Fout1,scratch[5],scratch[6]); + 
C_ADD(*Fout4,scratch[5],scratch[6]); + + scratch[11].r = scratch[0].r + S_MUL(scratch[7].r,yb.r) + S_MUL(scratch[8].r,ya.r); + scratch[11].i = scratch[0].i + S_MUL(scratch[7].i,yb.r) + S_MUL(scratch[8].i,ya.r); + scratch[12].r = - S_MUL(scratch[10].i,yb.i) + S_MUL(scratch[9].i,ya.i); + scratch[12].i = S_MUL(scratch[10].r,yb.i) - S_MUL(scratch[9].r,ya.i); + + C_ADD(*Fout2,scratch[11],scratch[12]); + C_SUB(*Fout3,scratch[11],scratch[12]); + + ++Fout0;++Fout1;++Fout2;++Fout3;++Fout4; + } +} + +/* perform the butterfly for one stage of a mixed radix FFT */ +static void kf_bfly_generic( + peck_fft_cpx * Fout, + const size_t fstride, + const peck_fft_cfg st, + int m, + int p + ) +{ + int u,k,q1,q; + peck_fft_cpx * twiddles = st->twiddles; + peck_fft_cpx t; + int Norig = st->nfft; + +printf("kf_bfly_generic\n"); + + + peck_fft_cpx * scratch = (peck_fft_cpx*)PECK_FFT_TMP_ALLOC(sizeof(peck_fft_cpx)*p); + + for ( u=0; u=Norig) twidx-=Norig; + C_MUL(t,scratch[q] , twiddles[twidx] ); + C_ADDTO( Fout[ k ] ,t); + } + k += m; + } + } + PECK_FFT_TMP_FREE(scratch); +} + +static void kf_work( + peck_fft_cpx * Fout, + const peck_fft_cpx * f, + const size_t fstride, + int *factors, + const peck_fft_cfg st) { + peck_fft_cpx *Fout_beg = Fout; + const int p = *factors++; /* the radix */ + const int m = *factors++; /* stage's FFT length / p */ + const peck_fft_cpx *Fout_end = Fout + p*m; + +// printf("kf_work\n"); + + if (m == 1) { + do { + *Fout = *f; + f += fstride; + } while (++Fout != Fout_end); + } else { + do { + // recursive call: + // DFT of size m*p performed by doing + // p instances of smaller DFTs of size m, + // each one takes a decimated version of the input + kf_work(Fout, f, fstride*p, factors, st); + f += fstride; + } while ((Fout += m) != Fout_end); + } + + Fout=Fout_beg; + + // recombine the p smaller DFTs + switch (p) { + case 2: kf_bfly2(Fout, fstride, st, m); break; + case 3: kf_bfly3(Fout, fstride, st, m); break; + case 4: kf_bfly4(Fout, fstride, st, m); break; + case 5: kf_bfly5(Fout, fstride, st, m); break; + default: kf_bfly_generic(Fout, fstride, st, m, p); break; + } +} + +/* + * facbuf is populated by p1, m1, p2, m2, ... + * where + * p[i] * m[i] = m[i-1] + * m0 = n + */ +static void kf_factor(int n, int * facbuf) { + int p = 4; + float floor_sqrt; + floor_sqrt = floorf(sqrtf(n)); + + /* factor out powers of 4, powers of 2, then any remaining primes */ + do { + while (n % p) { + switch (p) { + case 4: p = 2; break; + case 2: p = 3; break; + default: p += 2; break; + } + if (p > floor_sqrt) + p = n; /* no more factors, skip to end */ + } + n /= p; + *facbuf++ = p; + *facbuf++ = n; + } while (n > 1); +} + +/* + * User-callable function to allocate all necessary storage space for the fft. + * The return value is a contiguous block of memory, allocated with malloc. As such, + * it can be freed with free(), rather than a peck_fft-specific function. 
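As a concrete illustration of the factorisation that kf_work consumes (easy to trace by hand): kf_factor writes (radix, remaining length) pairs, pulling out factors of 4 first, then 2, 3 and any larger primes. For nfft = 128 it produces facbuf = {4,32, 4,8, 4,2, 2,1}, i.e. the 4*4*4*2 decomposition mentioned in _peck_fft_guts.h; for nfft = 120 it produces {4,30, 2,15, 3,5, 5,1}. kf_work then peels off one (p, m) pair per recursion level, computes p interleaved sub-FFTs of length m, and recombines them with the matching kf_bfly* routine.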
+ */ +peck_fft_cfg peck_fft_alloc(int nfft, int inverse_fft, void * mem, size_t * lenmem) { + peck_fft_cfg st = NULL; + size_t memneeded = sizeof(struct peck_fft_state) + + sizeof(peck_fft_cpx)*(nfft-1); /* twiddle factors */ + + if (lenmem == NULL) { + st = ( peck_fft_cfg)PECK_FFT_MALLOC(memneeded); + } else { + if (mem != NULL && *lenmem >= memneeded) + st = (peck_fft_cfg)mem; + *lenmem = memneeded; + } + + if (st) { + int i; + st->nfft=nfft; + st->inverse = inverse_fft; + + for (i = 0; i < nfft; ++i) { + const float pi = 3.14159265359f; + float phase = -2*pi*i / nfft; + if (st->inverse) + phase *= -1; + kf_cexp(st->twiddles+i, phase); + } + + kf_factor(nfft, st->factors); + } + return st; +} + +void peck_fft(peck_fft_cfg cfg, const peck_fft_cpx *fin, peck_fft_cpx *fout) { + kf_work(fout, fin, 1, cfg->factors, cfg); +} + +void peck_fft_cleanup(void) { + /* nothing needed any more */ +} + +int peck_fft_next_fast_size(int n) { + while (1) { + int m = n; + while ((m % 2) == 0) m /= 2; + while ((m % 3) == 0) m /= 3; + while ((m % 5) == 0) m /= 5; + if (m <= 1) + break; /* n is completely factorable by twos, threes, and fives */ + n++; + } + return n; +} diff -r 000000000000 -r 723f588b82ac peck_fft.h --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/peck_fft.h Fri Sep 16 12:53:08 2011 +0200 @@ -0,0 +1,111 @@ +#ifndef PECK_FFT_H +#define PECK_FFT_H + +#include +#include +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +#ifdef USE_SIMD +# include +# define peck_fft_scalar __m128 +#define PECK_FFT_MALLOC(nbytes) _mm_malloc(nbytes,16) +#define PECK_FFT_FREE _mm_free +#else +#define PECK_FFT_MALLOC malloc +#define PECK_FFT_FREE free +#endif + + +#ifdef FIXED_POINT +#include +# if (FIXED_POINT == 32) +# define peck_fft_scalar int32_t +# else +# define peck_fft_scalar int16_t +# endif +#else +# ifndef peck_fft_scalar +/* default is float */ +# define peck_fft_scalar float +# endif +#endif + +typedef struct { + peck_fft_scalar r; + peck_fft_scalar i; +}peck_fft_cpx; + +typedef struct peck_fft_state* peck_fft_cfg; + +/* + * peck_fft_alloc + * + * Initialize a FFT (or IFFT) algorithm's cfg/state buffer. + * + * typical usage: peck_fft_cfg mycfg=peck_fft_alloc(1024,0,NULL,NULL); + * + * The return value from fft_alloc is a cfg buffer used internally + * by the fft routine or NULL. + * + * If lenmem is NULL, then peck_fft_alloc will allocate a cfg buffer using malloc. + * The returned value should be free()d when done to avoid memory leaks. + * + * The state can be placed in a user supplied buffer 'mem': + * If lenmem is not NULL and mem is not NULL and *lenmem is large enough, + * then the function places the cfg in mem and the size used in *lenmem + * and returns mem. + * + * If lenmem is not NULL and ( mem is NULL or *lenmem is not large enough), + * then the function returns NULL and places the minimum cfg + * buffer size in *lenmem. + * */ + +peck_fft_cfg peck_fft_alloc(int nfft,int inverse_fft,void * mem,size_t * lenmem); + +/* + * peck_fft(cfg,in_out_buf) + * + * Perform an FFT on a complex input buffer. + * for a forward FFT, + * fin should be f[0] , f[1] , ... ,f[nfft-1] + * fout will be F[0] , F[1] , ... ,F[nfft-1] + * Note that each element is complex and can be accessed like + f[k].r and f[k].i + * */ +void peck_fft(peck_fft_cfg cfg,const peck_fft_cpx *fin,peck_fft_cpx *fout); + +/* + A more generic version of the above function. It reads its input from every Nth sample. 
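A minimal forward-transform sketch against the API documented above, assuming the default float build; the wrapper name forward_512 is made up:

#include <stdlib.h>
#include "peck_fft.h"

void forward_512(const peck_fft_cpx in[512], peck_fft_cpx out[512]) {
    /* 0 = forward transform; NULL, NULL = let peck_fft_alloc call malloc */
    peck_fft_cfg cfg = peck_fft_alloc(512, 0, NULL, NULL);
    if (cfg == NULL)
        return;
    peck_fft(cfg, in, out);
    peck_fft_free(cfg);    /* the cfg is one contiguous malloc'd block */
}

To place the state in caller-owned memory instead, call peck_fft_alloc(nfft, 0, NULL, &len) once to learn the required size, then call it again with the buffer and &len.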
+ * */ +void peck_fft_stride(peck_fft_cfg cfg,const peck_fft_cpx *fin,peck_fft_cpx *fout,int fin_stride); + +/* If peck_fft_alloc allocated a buffer, it is one contiguous + buffer and can be simply free()d when no longer needed*/ +#define peck_fft_free free + +/* + Cleans up some memory that gets managed internally. Not necessary to call, but it might clean up + your compiler output to call this before you exit. +*/ +void peck_fft_cleanup(void); + + +/* + * Returns the smallest integer k, such that k>=n and k has only "fast" factors (2,3,5) + */ +int peck_fft_next_fast_size(int n); + +/* for real ffts, we need an even size */ +#define peck_fftr_next_fast_size_real(n) \ + (peck_fft_next_fast_size( ((n)+1)>>1)<<1) + +#ifdef __cplusplus +} +#endif + +#endif diff -r 000000000000 -r 723f588b82ac peck_fftr.c --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/peck_fftr.c Fri Sep 16 12:53:08 2011 +0200 @@ -0,0 +1,156 @@ +/* +Copyright (c) 2003-2004, Mark Borgerding + +All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. + * Neither the author nor the names of any contributors may be used to endorse or promote products derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
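peck_fft_next_fast_size rounds a length up to the next value whose only prime factors are 2, 3 and 5, so every stage can use one of the fast butterflies; peck_fftr_next_fast_size_real applies the same rule to the half-length complex FFT behind the real transform and keeps the result even. A small sketch, with values that can be checked by hand:

#include <stdio.h>
#include "peck_fft.h"

int main(void) {
    printf("%d\n", peck_fft_next_fast_size(1000));       /* 1000 = 2^3 * 5^3, already fast */
    printf("%d\n", peck_fft_next_fast_size(1001));       /* 1024, since 1001 = 7*11*13 */
    printf("%d\n", peck_fftr_next_fast_size_real(1001)); /* 1024 = 2 * next_fast_size(501) */
    return 0;
}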
+*/ + +#include "peck_fftr.h" +#include "_peck_fft_guts.h" + +struct peck_fftr_state{ + peck_fft_cfg substate; + peck_fft_cpx *tmpbuf; + peck_fft_cpx *super_twiddles; +#ifdef USE_SIMD + void * pad; +#endif +}; + +peck_fftr_cfg peck_fftr_alloc(int nfft, int inverse_fft, void *mem, size_t *lenmem) { + int i; + peck_fftr_cfg st = NULL; + size_t subsize, memneeded; + + if (nfft & 1) { + fprintf(stderr, "Real FFT must be even.\n"); + return NULL; + } + nfft >>= 1; + + peck_fft_alloc(nfft, inverse_fft, NULL, &subsize); + memneeded = sizeof(struct peck_fftr_state) + subsize + sizeof(peck_fft_cpx) * (nfft * 3 / 2); + + if (lenmem == NULL) { + st = (peck_fftr_cfg) PECK_FFT_MALLOC(memneeded); + } else { + if (*lenmem >= memneeded) + st = (peck_fftr_cfg) mem; + *lenmem = memneeded; + } + if (!st) + return NULL; + + st->substate = (peck_fft_cfg) (st + 1); /* just beyond peck_fftr_state struct */ + st->tmpbuf = (peck_fft_cpx *) (((char *) st->substate) + subsize); + st->super_twiddles = st->tmpbuf + nfft; + peck_fft_alloc(nfft, inverse_fft, st->substate, &subsize); + + for (i = 0; i < nfft/2; ++i) { + float phase = + -3.14159265359f * ((float) (i+1) / nfft + 0.5f); + if (inverse_fft) + phase *= -1; + kf_cexp(st->super_twiddles+i,phase); + } + return st; +} + +void peck_fftr(peck_fftr_cfg st, const peck_fft_scalar *timedata, peck_fft_cpx *freqdata) { + /* Input buffer timedata is stored row-wise */ + int k, ncfft; + peck_fft_cpx fpnk, fpk, f1k, f2k, tw, tdc; + + if (st->substate->inverse) { + fprintf(stderr, "peck_fft usage error: improper alloc\n"); + exit(EXIT_FAILURE); + } + + ncfft = st->substate->nfft; + + /* Perform the parallel FFT of two real signals packed in real,imag */ + peck_fft(st->substate, (const peck_fft_cpx*)timedata, st->tmpbuf); + /* The real part of the DC element of the frequency spectrum in st->tmpbuf + * contains the sum of the even-numbered elements of the input time sequence. + * The imag part is the sum of the odd-numbered elements. + * + * The sum of tdc.r and tdc.i is the sum of the input time sequence, + * yielding DC of the input time sequence. + * The difference of tdc.r - tdc.i is the sum of the input (dot product) [1,-1,1,-1,... + * yielding the Nyquist bin of input time sequence. 
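 * A concrete check of these identities: with timedata = {1, 2, 3, 4}
 * (nfft = 4, ncfft = 2), the packed length-2 FFT gives
 * tdc = (1+3) + (2+4)i = 4 + 6i, so freqdata[0].r = 4 + 6 = 10
 * (the DC bin, the sum of all samples) and freqdata[2].r = 4 - 6 = -2
 * (the Nyquist bin, 1 - 2 + 3 - 4).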
+ */ + + tdc.r = st->tmpbuf[0].r; + tdc.i = st->tmpbuf[0].i; + C_FIXDIV(tdc,2); + CHECK_OVERFLOW_OP(tdc.r ,+, tdc.i); + CHECK_OVERFLOW_OP(tdc.r ,-, tdc.i); + freqdata[0].r = tdc.r + tdc.i; + freqdata[ncfft].r = tdc.r - tdc.i; +#ifdef USE_SIMD + freqdata[ncfft].i = freqdata[0].i = _mm_set1_ps(0); +#else + freqdata[ncfft].i = freqdata[0].i = 0; +#endif + + for (k = 1; k <= ncfft/2; ++k) { + fpk = st->tmpbuf[k]; + fpnk.r = st->tmpbuf[ncfft-k].r; + fpnk.i = - st->tmpbuf[ncfft-k].i; + C_FIXDIV(fpk, 2); + C_FIXDIV(fpnk, 2); + + C_ADD(f1k, fpk, fpnk); + C_SUB(f2k, fpk, fpnk); + C_MUL(tw, f2k, st->super_twiddles[k-1]); + + freqdata[k].r = HALF_OF(f1k.r + tw.r); + freqdata[k].i = HALF_OF(f1k.i + tw.i); + freqdata[ncfft-k].r = HALF_OF(f1k.r - tw.r); + freqdata[ncfft-k].i = HALF_OF(tw.i - f1k.i); + } +} + +void peck_fftri(peck_fftr_cfg st,const peck_fft_cpx *freqdata,peck_fft_scalar *timedata) { + /* input buffer timedata is stored row-wise */ + int k, ncfft; + + if (st->substate->inverse == 0) { + fprintf (stderr, "peck_fft usage error: improper alloc\n"); + exit(EXIT_FAILURE); + } + + ncfft = st->substate->nfft; + + st->tmpbuf[0].r = freqdata[0].r + freqdata[ncfft].r; + st->tmpbuf[0].i = freqdata[0].r - freqdata[ncfft].r; + C_FIXDIV(st->tmpbuf[0], 2); + + for (k = 1; k <= ncfft / 2; ++k) { + peck_fft_cpx fk, fnkc, fek, fok, tmp; + fk = freqdata[k]; + fnkc.r = freqdata[ncfft - k].r; + fnkc.i = -freqdata[ncfft - k].i; + C_FIXDIV(fk , 2); + C_FIXDIV(fnkc , 2); + + C_ADD(fek, fk, fnkc); + C_SUB(tmp, fk, fnkc); + C_MUL(fok, tmp, st->super_twiddles[k-1]); + C_ADD(st->tmpbuf[k], fek, fok); + C_SUB(st->tmpbuf[ncfft - k], fek, fok); +#ifdef USE_SIMD + st->tmpbuf[ncfft - k].i *= _mm_set1_ps(-1.0); +#else + st->tmpbuf[ncfft - k].i *= -1; +#endif + } + peck_fft(st->substate, st->tmpbuf, (peck_fft_cpx *) timedata); +} diff -r 000000000000 -r 723f588b82ac peck_fftr.h --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/peck_fftr.h Fri Sep 16 12:53:08 2011 +0200 @@ -0,0 +1,43 @@ +#ifndef PECK_FFTR_H +#define PECK_FFTR_H + +#include "peck_fft.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * Real optimized version can save about 45% cpu time vs. complex fft of a real seq. + */ +typedef struct peck_fftr_state *peck_fftr_cfg; + +/* + * peck_fftr_alloc() + * nfft must be even + * + * If you don't care to allocate space, use mem = lenmem = NULL. 
+ */ +peck_fftr_cfg peck_fftr_alloc(int nfft, int inverse_fft, void * mem, size_t * lenmem); + +/* + * peck_fftr() + * input timedata has nfft scalar points + * output freqdata has nfft/2+1 complex points + */ +void peck_fftr(peck_fftr_cfg cfg,const peck_fft_scalar *timedata, peck_fft_cpx *freqdata); + +/* + * peck_fftri() + * input freqdata has nfft/2+1 complex points + * output timedata has nfft scalar points + */ +void peck_fftri(peck_fftr_cfg cfg,const peck_fft_cpx *freqdata, peck_fft_scalar *timedata); + +#define peck_fftr_free free + +#ifdef __cplusplus +} +#endif + +#endif /* PECK_FFTR_H */ diff -r 000000000000 -r 723f588b82ac peck_test.c --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/peck_test.c Fri Sep 16 12:53:08 2011 +0200 @@ -0,0 +1,77 @@ +#include +#include +#include + +#include +#include + +void enable_runfast() { +#ifdef __arm__ + static const unsigned int x = 0x04086060; + static const unsigned int y = 0x03000000; + int r; + asm volatile ( + "fmrx %0, fpscr \n\t" //r0 = FPSCR + "and %0, %0, %1 \n\t" //r0 = r0 & 0x04086060 + "orr %0, %0, %2 \n\t" //r0 = r0 | 0x03000000 + "fmxr fpscr, %0 \n\t" //FPSCR = r0 + : "=r"(r) + : "r"(x), "r"(y) + ); +#endif +} + +int main(int argc, char *argv[]) { + unsigned int i, j; + peck_fftr_cfg p, pi; + + enable_runfast(); + + const unsigned int N = 256; + + peck_fft_scalar in[N]; + peck_fft_cpx out[N/2 + 1]; + peck_fft_scalar res[N]; + + for (i = 0; i < N; i++) { + in[i] = (i % 13) / 3; + } + + p = peck_fftr_alloc(N, 0, NULL, NULL); + pi = peck_fftr_alloc(N, 1, NULL, NULL); + + for (j = 0; j < 10000; j++) { + if (j == 0) { + for (i = 0; i < 8; i++) + printf("%d: %f\n", i, in[i]); + printf("----\n"); + } + + peck_fftr(p, in, out); + + if (j == 0) { + for (i = 0; i < 8; i++) + printf("%d: %f %f\n", i, out[i].r, out[i].i); + printf("----\n"); + } + + peck_fftri(pi, out, res); + + if (j == 0) { + for (i = 0; i < 8; i++) + printf("%d: %f\n", i, res[i] / N); + } + } + peck_fftr_free(p); + peck_fftr_free(pi); + peck_fft_cleanup(); + + for (i = 0; i < N; i++) { + if (fabs(in[i] - res[i]/N) > 0.00001) { + fprintf(stderr, "!!!! ERROR !!!! at %d\n", i); + exit(EXIT_FAILURE); + } + } + + return EXIT_SUCCESS; +}
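To complement peck_test.c, a short usage sketch of the real-FFT pair, assuming the default float build (no FIXED_POINT, no USE_SIMD); the helper name magnitude_spectrum and its error convention are made up:

#include <math.h>
#include <stdlib.h>
#include "peck_fftr.h"

/* Magnitude spectrum of one block of nfft real samples (nfft must be even);
   mag must have room for nfft/2 + 1 values. Returns 0 on success. */
int magnitude_spectrum(const peck_fft_scalar *samples, int nfft, float *mag) {
    peck_fftr_cfg cfg = peck_fftr_alloc(nfft, 0, NULL, NULL);
    peck_fft_cpx *freq;
    int k;

    if (cfg == NULL)
        return -1;
    freq = malloc(sizeof(peck_fft_cpx) * (nfft/2 + 1));
    if (freq == NULL) {
        peck_fftr_free(cfg);
        return -1;
    }

    peck_fftr(cfg, samples, freq);           /* nfft reals -> nfft/2 + 1 complex bins */
    for (k = 0; k <= nfft/2; k++)
        mag[k] = sqrtf(freq[k].r * freq[k].r + freq[k].i * freq[k].i);

    free(freq);
    peck_fftr_free(cfg);
    return 0;
}

As peck_test.c shows, a peck_fftr/peck_fftri round trip scales the data by nfft, so divide by nfft after the inverse transform to recover the original samples.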