29 #include "../FFTLIB_fft1d_i32fc_c32fc_o32fc.h"
32 #define LAST_LOOP_UNROLL 0
35 #include "../../../common/printv.h"
// Layout of the scratch/parameter block (pBlock) handed from _init to _kernel:
// pre-computed Streaming Engine (SE) and Streaming Address generator (SA)
// templates are stored back-to-back at these byte offsets.
// NOTE(review): SE_PARAM_SIZE / SA_PARAM_SIZE are defined elsewhere (likely in
// the common FFTLIB headers) — confirm they match sizeof(__SE_TEMPLATE_v1) /
// sizeof(__SA_TEMPLATE_v1).
43 #define SE_PARAM_BASE (0x0000)
// One SE template per radix-4 loop stage (loop1..loop5), then one for the
// twiddle-factor stream.
44 #define SE_LOOP1_PARAM_OFFSET (SE_PARAM_BASE)
45 #define SE_LOOP2_PARAM_OFFSET (SE_LOOP1_PARAM_OFFSET + SE_PARAM_SIZE)
46 #define SE_LOOP3_PARAM_OFFSET (SE_LOOP2_PARAM_OFFSET + SE_PARAM_SIZE)
47 #define SE_LOOP4_PARAM_OFFSET (SE_LOOP3_PARAM_OFFSET + SE_PARAM_SIZE)
48 #define SE_LOOP5_PARAM_OFFSET (SE_LOOP4_PARAM_OFFSET + SE_PARAM_SIZE)
49 #define SE_TWID_PARAM_OFFSET (SE_LOOP5_PARAM_OFFSET + SE_PARAM_SIZE)
// SA (store-side) templates follow the SE templates.
50 #define SA_LOOP1_PARAM_OFFSET (SE_TWID_PARAM_OFFSET + SE_PARAM_SIZE)
51 #define SA_LOOP2_PARAM_OFFSET (SA_LOOP1_PARAM_OFFSET + SA_PARAM_SIZE)
52 #define SA_LOOP3_PARAM_OFFSET (SA_LOOP2_PARAM_OFFSET + SA_PARAM_SIZE)
65 #if defined(FFTLIB_CHECK_PARAMS) || \
66 defined(FFTLIB_FFT1D_I32FC_C32FC_O32FC_CHECK_PARAMS)
68 pX, bufParamsX, pW, bufParamsW, pY, bufParamsY, pBlock);
73 uint32_t numPointsPerDft;
74 uint32_t seCnt1, seCnt2, seCnt3, seCnt4;
75 __SE_TEMPLATE_v1 se0_param = __gen_SE_TEMPLATE_v1 ();
76 __SE_TEMPLATE_v1 se1_param = __gen_SE_TEMPLATE_v1 ();
77 __SA_TEMPLATE_v1 sa0_param = __gen_SA_TEMPLATE_v1 ();
79 numPoints = bufParamsX->
dim_x >> 1;
80 numPointsPerDft = numPoints;
81 seCnt1 = numPoints >> 2;
82 seCnt2 = numPoints >> 4;
84 seCnt4 = numPoints >> 2;
86 uint32_t elementSize = c7x::element_count_of<c7x::cfloat_vec>::value;
89 se0_param.ICNT0 = elementSize;
91 se0_param.DIM1 = seCnt1;
92 se0_param.ICNT2 = seCnt2;
93 se0_param.DIM2 = elementSize;
94 se0_param.ICNT3 = seCnt3;
95 se0_param.DIM3 = numPointsPerDft;
97 se0_param.ELETYPE = __SE_ELETYPE_32BIT_CMPLX_SWAP;
98 se0_param.VECLEN = c7x::se_veclen<c7x::cfloat_vec>::value;
99 se0_param.DIMFMT = __SE_DIMFMT_4D;
104 se1_param.ICNT0 = elementSize;
106 se1_param.DIM1 = seCnt1;
107 se1_param.ICNT2 = seCnt2;
108 se1_param.DIM2 = elementSize;
109 se1_param.ICNT3 = seCnt3;
112 se1_param.ELETYPE = __SE_ELETYPE_32BIT_CMPLX_SWAP;
113 se1_param.VECLEN = c7x::se_veclen<c7x::cfloat_vec>::value;
114 se1_param.DIMFMT = __SE_DIMFMT_4D;
119 sa0_param.ICNT0 = elementSize;
121 sa0_param.DIM1 = seCnt1;
122 sa0_param.ICNT2 = seCnt2;
123 sa0_param.DIM2 = elementSize;
124 sa0_param.ICNT3 = seCnt3;
125 sa0_param.DIM3 = numPointsPerDft;
127 sa0_param.VECLEN = c7x::sa_veclen<c7x::cfloat_vec>::value;
128 sa0_param.DIMFMT = __SA_DIMFMT_4D;
133 se0_param.ICNT0 = elementSize;
136 se0_param.ICNT2 = seCnt2;
139 se0_param.ELETYPE = __SE_ELETYPE_32BIT_CMPLX_SWAP;
143 se0_param.VECLEN = c7x::se_veclen<c7x::cfloat_vec>::value;
144 se0_param.DIMFMT = __SE_DIMFMT_3D;
149 sa0_param.ICNT0 = numPoints;
151 sa0_param.VECLEN = c7x::sa_veclen<c7x::cfloat_vec>::value;
152 sa0_param.DIMFMT = __SA_DIMFMT_1D;
157 se0_param = __gen_SE_TEMPLATE_v1 ();
158 se0_param.ICNT0 = numPoints;
160 se0_param.ELETYPE = __SE_ELETYPE_32BIT_CMPLX_SWAP;
161 se0_param.VECLEN = c7x::se_veclen<c7x::cfloat_vec>::value;
162 se0_param.DIMFMT = __SE_DIMFMT_1D;
167 sa0_param.ICNT0 = numPoints;
169 sa0_param.VECLEN = c7x::sa_veclen<c7x::cfloat_vec>::value;
170 sa0_param.DIMFMT = __SA_DIMFMT_1D;
175 se0_param.ICNT0 = seCnt4;
176 se0_param.ICNT1 = elementSize;
177 se0_param.DIM1 = seCnt4;
183 se0_param.ELETYPE = __SE_ELETYPE_32BIT_CMPLX_SWAP;
184 se0_param.TRANSPOSE = __SE_TRANSPOSE_64BIT;
185 se0_param.VECLEN = c7x::se_veclen<c7x::cfloat_vec>::value;
186 se0_param.DIMFMT = __SE_DIMFMT_2D;
191 se0_param.ICNT0 = seCnt4;
192 se0_param.ICNT1 = elementSize;
193 se0_param.DIM1 = seCnt4;
199 se0_param.ELETYPE = __SE_ELETYPE_32BIT_CMPLX_SWAP;
200 se0_param.TRANSPOSE = __SE_TRANSPOSE_64BIT;
201 se0_param.VECLEN = c7x::se_veclen<c7x::cfloat_vec>::value;
202 se0_param.DIMFMT = __SE_DIMFMT_2D;
// --- kernel locals -------------------------------------------------------
// numPointsPerDft halves by 4 each radix-4 stage (see the >>= 2 further down);
// seCnt1..3 are the per-stage SE/SA loop counts derived from it.
221 uint32_t numPointsPerDft;
// Shift used to map __bit_reverse() results into [0, numPoints) for the
// bit-reversed output stores of the final stage.
222 uint32_t numLeadingZeros;
223 uint32_t offsetBitReverse;
224 uint32_t seCnt1, seCnt2, seCnt3;
// SE0 streams input data, SE1 streams twiddle factors, SA0 generates the
// (predicated) store addresses for the in-place stage results.
225 __SE_TEMPLATE_v1 se0_param = __gen_SE_TEMPLATE_v1 ();
226 __SE_TEMPLATE_v1 se1_param = __gen_SE_TEMPLATE_v1 ();
227 __SA_TEMPLATE_v1 sa0_param = __gen_SA_TEMPLATE_v1 ();
// Working complex-float views of the raw float I/O buffers.
229 cfloat* restrict pXLocal;
230 cfloat* restrict pYLocal;
231 cfloat* restrict pWLocal;
// Eight output base pointers, one per output octant; the final stage scatters
// results through these with bit-reversed offsets (see pY0..pY7 setup below).
232 cfloat* restrict pY0;
233 cfloat* restrict pY1;
234 cfloat* restrict pY2;
235 cfloat* restrict pY3;
236 cfloat* restrict pY4;
237 cfloat* restrict pY5;
238 cfloat* restrict pY6;
239 cfloat* restrict pY7;
// CV: full vector of complex floats; V: full vector of floats.
241 typedef typename c7x::cfloat_vec
CV;
244 typedef typename c7x::float_vec
V;
// Radix-4 butterfly working registers: the four stride-N/4 inputs, their
// sums/differences, twiddles, and the four outputs (plus _1 copies for the
// unrolled second iteration and _2PtDft_* for the combined radix-2 stage).
247 CV vX_0, vX_N_4, vX_N_2, vX_3N_4;
248 CV vSum1, vSum2, vDiff1, vDiff2;
249 CV vTwX1, vTwX2, vTwX3;
250 CV vX0Temp, vX1Temp, vX2Temp, vX3Temp;
251 CV vX0, vX1, vX2, vX3;
252 CV vX_0_1, vX_N_4_1, vX_N_2_1, vX_3N_4_1;
253 CV vSum1_1, vSum2_1, vDiff1_1, vDiff2_1;
254 CV vX0_1, vX1_1, vX2_1, vX3_1;
255 CV vX0_2PtDft_1, vX0_2PtDft_2;
256 CV vX1_2PtDft_1, vX1_2PtDft_2;
257 CV vX2_2PtDft_1, vX2_2PtDft_2;
258 CV vX3_2PtDft_1, vX3_2PtDft_2;
261 #ifdef FFTLIB_CHECK_PARAMS
263 pX, bufParamsX, pW, bufParamsW, pY, bufParamsY, pBlock);
267 numPoints = bufParamsX->
dim_x >> 1;
268 numPointsPerDft = numPoints;
276 seCnt1 = numPointsPerDft >> 2;
277 seCnt2 = numPointsPerDft >> 4;
280 pXLocal = (cfloat*) pX;
281 pWLocal = (cfloat*) pW;
282 pYLocal = (cfloat*) pY;
284 while (numPointsPerDft >= 16) {
288 se0_param.DIM1 = seCnt1;
289 se0_param.ICNT2 = seCnt2;
292 se0_param.ICNT3 = seCnt3;
293 se0_param.DIM3 = numPointsPerDft;
294 __SE0_OPEN ((
void *) pXLocal, se0_param);
297 se1_param.DIM1 = seCnt1;
298 se1_param.ICNT2 = seCnt2;
300 se1_param.ICNT3 = seCnt3;
302 __SE1_OPEN ((
void *) pWLocal, se1_param);
305 sa0_param.DIM1 = seCnt1;
306 sa0_param.ICNT2 = seCnt2;
309 sa0_param.ICNT3 = seCnt3;
310 sa0_param.DIM3 = numPointsPerDft;
312 __SA0_OPEN (sa0_param);
// Per-stage loop over all points; 2x unrolled (k += 32 — presumably
// 2 iterations x 4 vectors x elementSize complex lanes; confirm elementSize).
315 for (k = 0; k < numPoints; k += 32) {
// Fetch the four radix-4 butterfly legs x[i], x[i+N/4], x[i+N/2], x[i+3N/4]
// from SE0 (stream order was set up by the SE template above).
318 vX_0 = c7x::strm_eng<0, CV>::get_adv ();
319 vX_N_4 = c7x::strm_eng<0, CV>::get_adv ();
320 vX_N_2 = c7x::strm_eng<0, CV>::get_adv ();
321 vX_3N_4 = c7x::strm_eng<0, CV>::get_adv ();
// Stage 1 of the butterfly: pairwise sums/differences.
323 vSum1 = vX_0 + vX_N_2;
324 vSum2 = vX_N_4 + vX_3N_4;
325 vDiff1 = vX_0 - vX_N_2;
326 vDiff2 = vX_N_4 - vX_3N_4;
// Twiddle factors W^k, W^2k, W^3k streamed from SE1.
328 vTwX1 = c7x::strm_eng<1, CV>::get_adv ();
329 vTwX2 = c7x::strm_eng<1, CV>::get_adv ();
330 vTwX3 = c7x::strm_eng<1, CV>::get_adv ();
// Stage 2: combine. __vcrot90sp_vv rotates each complex lane by 90 degrees
// (i.e. multiply by j), giving the classic DIF radix-4 outputs:
//   X0 = (a+c)+(b+d), X1 = (a-c)-j(b-d), X2 = (a+c)-(b+d), X3 = (a-c)+j(b-d).
332 vX0Temp = vSum1 + vSum2;
333 vX1Temp = vDiff1 - __vcrot90sp_vv (vDiff2);
334 vX2Temp = vSum1 - vSum2;
335 vX3Temp = vDiff1 + __vcrot90sp_vv (vDiff2);
// Apply twiddles to X1..X3 (X0 needs none).
338 vX1 = __complex_multiply (vX1Temp, vTwX1);
339 vX2 = __complex_multiply (vX2Temp, vTwX2);
340 vX3 = __complex_multiply (vX3Temp, vTwX3);
// Predicated in-place stores via SA0. Note the store order X0, X2, X1, X3 —
// presumably the stage's digit-reversed ordering expected by the next stage;
// TODO(review): confirm against the SA template dimensions.
349 tmp = c7x::strm_agen<0, CV>::get_vpred ();
350 addr = c7x::strm_agen<0, CV>::get_adv (pXLocal);
351 __vstore_pred (tmp, addr, vX0);
353 tmp = c7x::strm_agen<0, CV>::get_vpred ();
354 addr = c7x::strm_agen<0, CV>::get_adv (pXLocal);
355 __vstore_pred (tmp, addr, vX2);
357 tmp = c7x::strm_agen<0, CV>::get_vpred ();
358 addr = c7x::strm_agen<0, CV>::get_adv (pXLocal);
359 __vstore_pred (tmp, addr, vX1);
361 tmp = c7x::strm_agen<0, CV>::get_vpred ();
362 addr = c7x::strm_agen<0, CV>::get_adv (pXLocal);
363 __vstore_pred (tmp, addr, vX3);
367 vX_0 = c7x::strm_eng<0, CV>::get_adv ();
368 vX_N_4 = c7x::strm_eng<0, CV>::get_adv ();
369 vX_N_2 = c7x::strm_eng<0, CV>::get_adv ();
370 vX_3N_4 = c7x::strm_eng<0, CV>::get_adv ();
372 vSum1 = vX_0 + vX_N_2;
373 vSum2 = vX_N_4 + vX_3N_4;
374 vDiff1 = vX_0 - vX_N_2;
375 vDiff2 = vX_N_4 - vX_3N_4;
377 vTwX1 = c7x::strm_eng<1, CV>::get_adv ();
378 vTwX2 = c7x::strm_eng<1, CV>::get_adv ();
379 vTwX3 = c7x::strm_eng<1, CV>::get_adv ();
381 vX0Temp = vSum1 + vSum2;
382 vX1Temp = vDiff1 - __vcrot90sp_vv (vDiff2);
383 vX2Temp = vSum1 - vSum2;
384 vX3Temp = vDiff1 + __vcrot90sp_vv (vDiff2);
387 vX1 = __complex_multiply (vX1Temp, vTwX1);
388 vX2 = __complex_multiply (vX2Temp, vTwX2);
389 vX3 = __complex_multiply (vX3Temp, vTwX3);
396 tmp = c7x::strm_agen<0, CV>::get_vpred ();
397 addr = c7x::strm_agen<0, CV>::get_adv (pXLocal);
398 __vstore_pred (tmp, addr, vX0);
400 tmp = c7x::strm_agen<0, CV>::get_vpred ();
401 addr = c7x::strm_agen<0, CV>::get_adv (pXLocal);
402 __vstore_pred (tmp, addr, vX2);
404 tmp = c7x::strm_agen<0, CV>::get_vpred ();
405 addr = c7x::strm_agen<0, CV>::get_adv (pXLocal);
406 __vstore_pred (tmp, addr, vX1);
408 tmp = c7x::strm_agen<0, CV>::get_vpred ();
409 addr = c7x::strm_agen<0, CV>::get_adv (pXLocal);
410 __vstore_pred (tmp, addr, vX3);
416 numPointsPerDft >>= 2;
417 pWLocal += numPointsPerDft * 3;
426 if (numPointsPerDft == 4) {
430 __SE0_OPEN ((
void *) pXLocal, se0_param);
432 numLeadingZeros = __norm ((int32_t) (numPoints - 1)) + 1;
439 pY0 = (cfloat*) (pY + 0);
440 pY1 = (cfloat*) (pY + ((0x40000000u >> numLeadingZeros) << 1));
441 pY2 = (cfloat*) (pY + ((0x80000000u >> numLeadingZeros) << 1));
442 pY3 = (cfloat*) (pY + ((0xC0000000u >> numLeadingZeros) << 1));
444 #ifdef LAST_LOOP_UNROLL
450 pY4 = (cfloat*) (pY + ((0x20000000u >> numLeadingZeros) << 1));
451 pY5 = (cfloat*) (pY + ((0x60000000u >> numLeadingZeros) << 1));
452 pY6 = (cfloat*) (pY + ((0xA0000000u >> numLeadingZeros) << 1));
453 pY7 = (cfloat*) (pY + ((0xE0000000u >> numLeadingZeros) << 1));
456 #ifdef LAST_LOOP_UNROLL
457 for (k = 0; k < numPoints >> 2; k += 8)
459 for (k = 0; k < numPoints >> 2; k += 4)
462 offsetBitReverse = __bit_reverse (k) >> numLeadingZeros;
464 vX_0 = c7x::strm_eng<0, CV>::get_adv ();
465 vX_N_4 = c7x::strm_eng<0, CV>::get_adv ();
466 vX_N_2 = c7x::strm_eng<0, CV>::get_adv ();
467 vX_3N_4 = c7x::strm_eng<0, CV>::get_adv ();
469 vSum1 = vX_0 + vX_N_2;
470 vSum2 = vX_N_4 + vX_3N_4;
471 vDiff1 = vX_0 - vX_N_2;
472 vDiff2 = vX_N_4 - vX_3N_4;
475 vX1 = vDiff1 - __vcrot90sp_vv (vDiff2);
477 vX3 = vDiff1 + __vcrot90sp_vv (vDiff2);
479 __vstore_reverse_bit ((
CVP) (pY0 + offsetBitReverse), vX0);
480 __vstore_reverse_bit ((
CVP) (pY1 + offsetBitReverse), vX1);
481 __vstore_reverse_bit ((
CVP) (pY2 + offsetBitReverse), vX2);
482 __vstore_reverse_bit ((
CVP) (pY3 + offsetBitReverse), vX3);
484 #ifdef LAST_LOOP_UNROLL
485 vX_0_1 = c7x::strm_eng<0, CV>::get_adv ();
486 vX_N_4_1 = c7x::strm_eng<0, CV>::get_adv ();
487 vX_N_2_1 = c7x::strm_eng<0, CV>::get_adv ();
488 vX_3N_4_1 = c7x::strm_eng<0, CV>::get_adv ();
490 vSum1_1 = vX_0_1 + vX_N_2_1;
491 vSum2_1 = vX_N_4_1 + vX_3N_4_1;
492 vDiff1_1 = vX_0_1 - vX_N_2_1;
493 vDiff2_1 = vX_N_4_1 - vX_3N_4_1;
495 vX0_1 = vSum1_1 + vSum2_1;
496 vX1_1 = vDiff1_1 - __vcrot90sp_vv (vDiff2_1);
497 vX2_1 = vSum1_1 - vSum2_1;
498 vX3_1 = vDiff1_1 + __vcrot90sp_vv (vDiff2_1);
500 __vstore_reverse_bit ((
CVP) (pY4 + offsetBitReverse), vX0_1);
501 __vstore_reverse_bit ((
CVP) (pY5 + offsetBitReverse), vX1_1);
502 __vstore_reverse_bit ((
CVP) (pY6 + offsetBitReverse), vX2_1);
503 __vstore_reverse_bit ((
CVP) (pY7 + offsetBitReverse), vX3_1);
518 __SE0_OPEN ((
void *) pXLocal, se0_param);
520 numLeadingZeros = __norm ((int32_t) (numPoints - 1)) + 1;
524 vTwX1 =
CV (twTemp, twTemp, twTemp, twTemp);
528 vTwX1 = (
CV) (twTemp, twTemp, twTemp, twTemp);
534 vTwX2 =
CV (twTemp, twTemp, twTemp, twTemp);
538 vTwX2 = (
CV) (twTemp, twTemp, twTemp, twTemp);
543 vTwX3 =
CV (twTemp, twTemp, twTemp, twTemp);
547 vTwX3 = (
CV) (twTemp, twTemp, twTemp, twTemp);
559 pY0 = (cfloat*) (pY + (0x00000000u));
560 pY1 = (cfloat*) (pY + ((0x80000000u >> numLeadingZeros) << 1));
561 pY2 = (cfloat*) (pY + ((0x20000000u >> numLeadingZeros) << 1));
562 pY3 = (cfloat*) (pY + ((0xA0000000u >> numLeadingZeros) << 1));
563 pY4 = (cfloat*) (pY + ((0x40000000u >> numLeadingZeros) << 1));
564 pY5 = (cfloat*) (pY + ((0xC0000000u >> numLeadingZeros) << 1));
565 pY6 = (cfloat*) (pY + ((0x60000000u >> numLeadingZeros) << 1));
566 pY7 = (cfloat*) (pY + ((0xE0000000u >> numLeadingZeros) << 1));
568 for (k = 0; k < numPoints >> 2; k += 8) {
569 offsetBitReverse = __bit_reverse (k) >> numLeadingZeros;
571 vX_0 = c7x::strm_eng<0, CV>::get_adv ();
572 vX_0_1 = c7x::strm_eng<0, CV>::get_adv ();
573 vX_N_4 = c7x::strm_eng<0, CV>::get_adv ();
574 vX_N_4_1 = c7x::strm_eng<0, CV>::get_adv ();
575 vX_N_2 = c7x::strm_eng<0, CV>::get_adv ();
576 vX_N_2_1 = c7x::strm_eng<0, CV>::get_adv ();
577 vX_3N_4 = c7x::strm_eng<0, CV>::get_adv ();
578 vX_3N_4_1 = c7x::strm_eng<0, CV>::get_adv ();
580 vSum1 = vX_0 + vX_N_2;
581 vSum2 = vX_N_4 + vX_3N_4;
582 vDiff1 = vX_0 - vX_N_2;
583 vDiff2 = vX_N_4 - vX_3N_4;
586 vX1 = vDiff1 - __vcrot90sp_vv (vDiff2);
588 vX3 = vDiff1 + __vcrot90sp_vv (vDiff2);
590 vSum1_1 = vX_0_1 + vX_N_2_1;
591 vSum2_1 = vX_N_4_1 + vX_3N_4_1;
592 vDiff1_1 = vX_0_1 - vX_N_2_1;
593 vDiff2_1 = vX_N_4_1 - vX_3N_4_1;
595 vX0Temp = vSum1_1 + vSum2_1;
596 vX1Temp = vDiff1_1 - __vcrot90sp_vv (vDiff2_1);
597 vX2Temp = vSum1_1 - vSum2_1;
598 vX3Temp = vDiff1_1 + __vcrot90sp_vv (vDiff2_1);
601 vX1_1 = __complex_multiply (vX1Temp, vTwX1);
602 vX2_1 = __complex_multiply (vX2Temp, vTwX2);
603 vX3_1 = __complex_multiply (vX3Temp, vTwX3);
605 vX0_2PtDft_1 = vX0 + vX0_1;
606 vX0_2PtDft_2 = vX0 - vX0_1;
607 vX1_2PtDft_1 = vX1 + vX1_1;
608 vX1_2PtDft_2 = vX1 - vX1_1;
609 vX2_2PtDft_1 = vX2 + vX2_1;
610 vX2_2PtDft_2 = vX2 - vX2_1;
611 vX3_2PtDft_1 = vX3 + vX3_1;
612 vX3_2PtDft_2 = vX3 - vX3_1;
614 __vstore_reverse_bit ((
CVP) (pY0 + offsetBitReverse), vX0_2PtDft_1);
615 __vstore_reverse_bit ((
CVP) (pY1 + offsetBitReverse), vX0_2PtDft_2);
616 __vstore_reverse_bit ((
CVP) (pY2 + offsetBitReverse), vX1_2PtDft_1);
617 __vstore_reverse_bit ((
CVP) (pY3 + offsetBitReverse), vX1_2PtDft_2);
618 __vstore_reverse_bit ((
CVP) (pY4 + offsetBitReverse), vX2_2PtDft_1);
619 __vstore_reverse_bit ((
CVP) (pY5 + offsetBitReverse), vX2_2PtDft_2);
620 __vstore_reverse_bit ((
CVP) (pY6 + offsetBitReverse), vX3_2PtDft_1);
621 __vstore_reverse_bit ((
CVP) (pY7 + offsetBitReverse), vX3_2PtDft_2);
630 #if (!defined(FFTLIB_REMOVE_CHECK_PARAMS) && \
631 !defined(FFTLIB_FFT1D_I32FC_C32FC_O32FC_REMOVE_CHECK_PARAMS)) || \
632 (defined(FFTLIB_CHECK_PARAMS)) || \
633 (defined(FFTLIB_FFT1D_I32FC_C32FC_O32FC_CHECK_PARAMS))
646 if ((pX == NULL) || (pW == NULL) || (pY == NULL)) {
649 else if (bufParamsX->
dim_x != bufParamsW->
dim_x ||
653 else if (bufParamsX->
dim_x < 64 * 2) {
661 else if (((uint64_t) pX) & 0xFu) {
669 if (bufParamsX->
dim_x & (1u << k)) {
674 if ((1u << k) != bufParamsX->
dim_x) {
FFTLIB_STATUS_NAME
The enumeration of all status codes.
@ FFTLIB_ERR_INVALID_TYPE
@ FFTLIB_ERR_NULL_POINTER
@ FFTLIB_ERR_INVALID_DIMENSION
@ FFTLIB_ERR_NOT_ALIGNED_PTRS_STRIDES
float FFTLIB_F32
Single precision floating point.
#define SE_LOOP4_PARAM_OFFSET
#define SA_LOOP2_PARAM_OFFSET
#define SE_LOOP1_PARAM_OFFSET
#define SA_LOOP1_PARAM_OFFSET
#define SE_TWID_PARAM_OFFSET
#define SE_LOOP2_PARAM_OFFSET
#define SE_LOOP3_PARAM_OFFSET
#define SA_LOOP3_PARAM_OFFSET
#define SE_LOOP5_PARAM_OFFSET
FFTLIB_STATUS FFTLIB_fft1d_i32fc_c32fc_o32fc_init(FFTLIB_F32 *pX, FFTLIB_bufParams1D_t *bufParamsX, FFTLIB_F32 *pW, FFTLIB_bufParams1D_t *bufParamsW, FFTLIB_F32 *pY, FFTLIB_bufParams1D_t *bufParamsY, void *pBlock)
This function should be called before the FFTLIB_fft1d_i32fc_c32fc_o32fc_kernel function is called....
FFTLIB_STATUS FFTLIB_fft1d_i32fc_c32fc_o32fc_checkParams(FFTLIB_F32 *pX, FFTLIB_bufParams1D_t *bufParamsX, FFTLIB_F32 *pW, FFTLIB_bufParams1D_t *bufParamsW, FFTLIB_F32 *pY, FFTLIB_bufParams1D_t *bufParamsY, void *pBlock)
This function checks the validity of the parameters passed to FFTLIB_fft1d_i32fc_c32fc_o32fc_init and...
FFTLIB_STATUS FFTLIB_fft1d_i32fc_c32fc_o32fc_kernel(FFTLIB_F32 *pX, FFTLIB_bufParams1D_t *bufParamsX, FFTLIB_F32 *pW, FFTLIB_bufParams1D_t *bufParamsW, FFTLIB_F32 *pY, FFTLIB_bufParams1D_t *bufParamsY, void *pBlock)
This function is the main kernel compute function.
A structure for a 1 dimensional buffer descriptor.
uint32_t data_type
Values are of type FFTLIB_data_type_e.
uint32_t dim_x
Width of buffer in X dimension in elements.