#ifndef INCLUDED_volk_32fc_x2_conjugate_dot_prod_32fc_u_H
#define INCLUDED_volk_32fc_x2_conjugate_dot_prod_32fc_u_H

#include <volk/volk_common.h>
#include <volk/volk_complex.h>

#ifdef LV_HAVE_GENERIC

static inline void volk_32fc_x2_conjugate_dot_prod_32fc_generic(lv_32fc_t* result,
                                                                const lv_32fc_t* input,
                                                                const lv_32fc_t* taps,
                                                                unsigned int num_points)
{
    const unsigned int num_bytes = num_points * 8;

    float* res = (float*)result;
    float* in = (float*)input;
    float* tp = (float*)taps;
    unsigned int n_2_ccomplex_blocks = num_bytes >> 4;

    float sum0[2] = { 0, 0 };
    float sum1[2] = { 0, 0 };
    unsigned int i = 0;

    /* Two complex samples (four floats) per iteration. */
    for (i = 0; i < n_2_ccomplex_blocks; ++i) {
        sum0[0] += in[0] * tp[0] + in[1] * tp[1];
        sum0[1] += (-in[0] * tp[1]) + in[1] * tp[0];
        sum1[0] += in[2] * tp[2] + in[3] * tp[3];
        sum1[1] += (-in[2] * tp[3]) + in[3] * tp[2];

        in += 4;
        tp += 4;
    }

    res[0] = sum0[0] + sum1[0];
    res[1] = sum0[1] + sum1[1];

    /* If num_points is odd, fold in the last sample. */
    if (num_bytes >> 3 & 1) {
        *result += input[(num_bytes >> 3) - 1] * lv_conj(taps[(num_bytes >> 3) - 1]);
    }
}

#endif /* LV_HAVE_GENERIC */
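/*
 * Editor's note (illustrative, not part of the original header): callers normally go
 * through the runtime dispatcher volk_32fc_x2_conjugate_dot_prod_32fc(), which selects
 * the fastest implementation below. A minimal usage sketch, assuming the standard VOLK
 * API (volk_get_alignment, volk_malloc, volk_free, lv_cmake):
 *
 *   #include <volk/volk.h>
 *
 *   unsigned int N = 1024;
 *   size_t alignment = volk_get_alignment();
 *   lv_32fc_t* input = (lv_32fc_t*)volk_malloc(N * sizeof(lv_32fc_t), alignment);
 *   lv_32fc_t* taps = (lv_32fc_t*)volk_malloc(N * sizeof(lv_32fc_t), alignment);
 *   lv_32fc_t result = lv_cmake(0.f, 0.f);
 *
 *   // ... fill input[] and taps[] ...
 *
 *   // result = sum over i of input[i] * conj(taps[i])
 *   volk_32fc_x2_conjugate_dot_prod_32fc(&result, input, taps, N);
 *
 *   volk_free(input);
 *   volk_free(taps);
 */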
#if LV_HAVE_AVX

#include <immintrin.h>

static inline void volk_32fc_x2_conjugate_dot_prod_32fc_u_avx(lv_32fc_t* result,
                                                              const lv_32fc_t* input,
                                                              const lv_32fc_t* taps,
                                                              unsigned int num_points)
{
    __m256 sum_a_mult_b_real = _mm256_setzero_ps();
    __m256 sum_a_mult_b_imag = _mm256_setzero_ps();

    for (long unsigned i = 0; i < (num_points & ~3u); i += 4) {
        /* Load four complex samples from each vector. */
        __m256 a = _mm256_loadu_ps((const float*)&input[i]);
        __m256 b = _mm256_loadu_ps((const float*)&taps[i]);
        /* Broadcast the real and imaginary parts of b into separate registers. */
        __m256 b_real = _mm256_moveldup_ps(b);
        __m256 b_imag = _mm256_movehdup_ps(b);

        sum_a_mult_b_real = _mm256_add_ps(sum_a_mult_b_real, _mm256_mul_ps(a, b_real));
        /* addsub flips the sign of the real-lane products of a * b_imag. */
        sum_a_mult_b_imag = _mm256_addsub_ps(sum_a_mult_b_imag, _mm256_mul_ps(a, b_imag));
    }

    /* Swap the real/imaginary lanes of the imaginary-part accumulator. */
    sum_a_mult_b_imag = _mm256_permute_ps(sum_a_mult_b_imag, _MM_SHUFFLE(2, 3, 0, 1));

    __m256 sum = _mm256_add_ps(sum_a_mult_b_real, sum_a_mult_b_imag);

    /* Horizontal reduction: fold the upper 128-bit lane onto the lower one, */
    sum = _mm256_add_ps(sum, _mm256_permute2f128_ps(sum, sum, 0x01));
    /* then fold the two remaining complex values onto each other. */
    sum = _mm256_add_ps(sum, _mm256_permute_ps(sum, _MM_SHUFFLE(1, 0, 3, 2)));

    __m128 lower = _mm256_extractf128_ps(sum, 0);
    _mm_storel_pi((__m64*)result, lower);

    /* Handle the leftover samples. */
    for (long unsigned i = num_points & ~3u; i < num_points; ++i) {
        *result += input[i] * lv_conj(taps[i]);
    }
}

#endif /* LV_HAVE_AVX */
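/*
 * Editor's note (explanatory, not part of the original header): per complex sample the
 * loop above accumulates
 *     re += a_re * b_re + a_im * b_im
 *     im += a_im * b_re - a_re * b_im
 * which is the scalar expansion of a * conj(b). moveldup/movehdup broadcast the real and
 * imaginary parts of b, addsub provides the sign flip on the real lane, and the final
 * permute swaps the lanes so the two partial accumulators line up for the add.
 */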
#if LV_HAVE_SSE3

#include <pmmintrin.h>
#include <xmmintrin.h>

static inline void volk_32fc_x2_conjugate_dot_prod_32fc_u_sse3(lv_32fc_t* result,
                                                               const lv_32fc_t* input,
                                                               const lv_32fc_t* taps,
                                                               unsigned int num_points)
{
    __m128 sum_a_mult_b_real = _mm_setzero_ps();
    __m128 sum_a_mult_b_imag = _mm_setzero_ps();

    for (long unsigned i = 0; i < (num_points & ~1u); i += 2) {
        /* Load two complex samples from each vector. */
        __m128 a = _mm_loadu_ps((const float*)&input[i]);
        __m128 b = _mm_loadu_ps((const float*)&taps[i]);
        /* Broadcast the real and imaginary parts of b into separate registers. */
        __m128 b_real = _mm_moveldup_ps(b);
        __m128 b_imag = _mm_movehdup_ps(b);

        sum_a_mult_b_real = _mm_add_ps(sum_a_mult_b_real, _mm_mul_ps(a, b_real));
        /* addsub flips the sign of the real-lane products of a * b_imag. */
        sum_a_mult_b_imag = _mm_addsub_ps(sum_a_mult_b_imag, _mm_mul_ps(a, b_imag));
    }

    /* Swap the real/imaginary lanes of the imaginary-part accumulator. */
    sum_a_mult_b_imag =
        _mm_shuffle_ps(sum_a_mult_b_imag, sum_a_mult_b_imag, _MM_SHUFFLE(2, 3, 0, 1));

    __m128 sum = _mm_add_ps(sum_a_mult_b_real, sum_a_mult_b_imag);

    /* Fold the two remaining complex values onto each other. */
    sum = _mm_add_ps(sum, _mm_shuffle_ps(sum, sum, _MM_SHUFFLE(1, 0, 3, 2)));

    _mm_storel_pi((__m64*)result, sum);

    /* Handle the last sample if num_points is odd. */
    if (num_points & 1u) {
        *result += input[num_points - 1] * lv_conj(taps[num_points - 1]);
    }
}

#endif /* LV_HAVE_SSE3 */
#ifdef LV_HAVE_NEON

#include <arm_neon.h>

static inline void volk_32fc_x2_conjugate_dot_prod_32fc_neon(lv_32fc_t* result,
                                                             const lv_32fc_t* input,
                                                             const lv_32fc_t* taps,
                                                             unsigned int num_points)
{
    unsigned int quarter_points = num_points / 4;
    unsigned int number;

    lv_32fc_t* a_ptr = (lv_32fc_t*)input;
    lv_32fc_t* b_ptr = (lv_32fc_t*)taps;
    __VOLK_ATTR_ALIGNED(16) lv_32fc_t accum_result[4];

    /* vld2q de-interleaves: val[0] holds the real parts, val[1] the imaginary parts. */
    float32x4x2_t a_val, b_val, accumulator;
    float32x4x2_t tmp_imag;
    accumulator.val[0] = vdupq_n_f32(0);
    accumulator.val[1] = vdupq_n_f32(0);

    for (number = 0; number < quarter_points; ++number) {
        a_val = vld2q_f32((float*)a_ptr);
        b_val = vld2q_f32((float*)b_ptr);
        __VOLK_PREFETCH(a_ptr + 8);
        __VOLK_PREFETCH(b_ptr + 8);

        /* First multiplies: a_im * b_re and a_re * b_re. */
        tmp_imag.val[1] = vmulq_f32(a_val.val[1], b_val.val[0]);
        tmp_imag.val[0] = vmulq_f32(a_val.val[0], b_val.val[0]);

        /* Multiply-subtract/accumulate completes a * conj(b). */
        tmp_imag.val[1] = vmlsq_f32(tmp_imag.val[1], a_val.val[0], b_val.val[1]);
        tmp_imag.val[0] = vmlaq_f32(tmp_imag.val[0], a_val.val[1], b_val.val[1]);

        accumulator.val[0] = vaddq_f32(accumulator.val[0], tmp_imag.val[0]);
        accumulator.val[1] = vaddq_f32(accumulator.val[1], tmp_imag.val[1]);

        a_ptr += 4;
        b_ptr += 4;
    }

    /* Re-interleave the four partial complex sums and reduce them. */
    vst2q_f32((float*)accum_result, accumulator);
    *result = accum_result[0] + accum_result[1] + accum_result[2] + accum_result[3];

    /* Handle the leftover samples. */
    for (number = quarter_points * 4; number < num_points; ++number) {
        *result += (*a_ptr++) * lv_conj(*b_ptr++);
    }
}

#endif /* LV_HAVE_NEON */

#endif /* INCLUDED_volk_32fc_x2_conjugate_dot_prod_32fc_u_H */
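/*
 * Editor's note (not part of the original header): the guard below contains the aligned
 * variants of the same kernels. They differ from the unaligned versions above only in
 * using aligned loads (_mm256_load_ps / _mm_load_ps), so input and taps are expected to
 * be suitably aligned, e.g. buffers obtained from volk_malloc().
 */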
#ifndef INCLUDED_volk_32fc_x2_conjugate_dot_prod_32fc_a_H
#define INCLUDED_volk_32fc_x2_conjugate_dot_prod_32fc_a_H

#include <volk/volk_common.h>
#include <volk/volk_complex.h>

#if LV_HAVE_AVX

#include <immintrin.h>

static inline void volk_32fc_x2_conjugate_dot_prod_32fc_a_avx(lv_32fc_t* result,
                                                              const lv_32fc_t* input,
                                                              const lv_32fc_t* taps,
                                                              unsigned int num_points)
{
    __m256 sum_a_mult_b_real = _mm256_setzero_ps();
    __m256 sum_a_mult_b_imag = _mm256_setzero_ps();

    for (long unsigned i = 0; i < (num_points & ~3u); i += 4) {
        /* Aligned loads of four complex samples from each vector. */
        __m256 a = _mm256_load_ps((const float*)&input[i]);
        __m256 b = _mm256_load_ps((const float*)&taps[i]);
        /* Broadcast the real and imaginary parts of b into separate registers. */
        __m256 b_real = _mm256_moveldup_ps(b);
        __m256 b_imag = _mm256_movehdup_ps(b);

        sum_a_mult_b_real = _mm256_add_ps(sum_a_mult_b_real, _mm256_mul_ps(a, b_real));
        /* addsub flips the sign of the real-lane products of a * b_imag. */
        sum_a_mult_b_imag = _mm256_addsub_ps(sum_a_mult_b_imag, _mm256_mul_ps(a, b_imag));
    }

    /* Swap the real/imaginary lanes of the imaginary-part accumulator. */
    sum_a_mult_b_imag = _mm256_permute_ps(sum_a_mult_b_imag, _MM_SHUFFLE(2, 3, 0, 1));

    __m256 sum = _mm256_add_ps(sum_a_mult_b_real, sum_a_mult_b_imag);

    /* Horizontal reduction: fold the upper 128-bit lane onto the lower one, */
    sum = _mm256_add_ps(sum, _mm256_permute2f128_ps(sum, sum, 0x01));
    /* then fold the two remaining complex values onto each other. */
    sum = _mm256_add_ps(sum, _mm256_permute_ps(sum, _MM_SHUFFLE(1, 0, 3, 2)));

    __m128 lower = _mm256_extractf128_ps(sum, 0);
    _mm_storel_pi((__m64*)result, lower);

    /* Handle the leftover samples. */
    for (long unsigned i = num_points & ~3u; i < num_points; ++i) {
        *result += input[i] * lv_conj(taps[i]);
    }
}

#endif /* LV_HAVE_AVX */
#if LV_HAVE_SSE3

#include <pmmintrin.h>
#include <xmmintrin.h>

static inline void volk_32fc_x2_conjugate_dot_prod_32fc_a_sse3(lv_32fc_t* result,
                                                               const lv_32fc_t* input,
                                                               const lv_32fc_t* taps,
                                                               unsigned int num_points)
{
    __m128 sum_a_mult_b_real = _mm_setzero_ps();
    __m128 sum_a_mult_b_imag = _mm_setzero_ps();

    for (long unsigned i = 0; i < (num_points & ~1u); i += 2) {
        /* Aligned loads of two complex samples from each vector. */
        __m128 a = _mm_load_ps((const float*)&input[i]);
        __m128 b = _mm_load_ps((const float*)&taps[i]);
        /* Broadcast the real and imaginary parts of b into separate registers. */
        __m128 b_real = _mm_moveldup_ps(b);
        __m128 b_imag = _mm_movehdup_ps(b);

        sum_a_mult_b_real = _mm_add_ps(sum_a_mult_b_real, _mm_mul_ps(a, b_real));
        /* addsub flips the sign of the real-lane products of a * b_imag. */
        sum_a_mult_b_imag = _mm_addsub_ps(sum_a_mult_b_imag, _mm_mul_ps(a, b_imag));
    }

    /* Swap the real/imaginary lanes of the imaginary-part accumulator. */
    sum_a_mult_b_imag =
        _mm_shuffle_ps(sum_a_mult_b_imag, sum_a_mult_b_imag, _MM_SHUFFLE(2, 3, 0, 1));

    __m128 sum = _mm_add_ps(sum_a_mult_b_real, sum_a_mult_b_imag);

    /* Fold the two remaining complex values onto each other. */
    sum = _mm_add_ps(sum, _mm_shuffle_ps(sum, sum, _MM_SHUFFLE(1, 0, 3, 2)));

    _mm_storel_pi((__m64*)result, sum);

    /* Handle the last sample if num_points is odd. */
    if (num_points & 1u) {
        *result += input[num_points - 1] * lv_conj(taps[num_points - 1]);
    }
}

#endif /* LV_HAVE_SSE3 */
#ifdef LV_HAVE_GENERIC

static inline void
volk_32fc_x2_conjugate_dot_prod_32fc_a_generic(lv_32fc_t* result,
                                               const lv_32fc_t* input,
                                               const lv_32fc_t* taps,
                                               unsigned int num_points)
{
    const unsigned int num_bytes = num_points * 8;

    float* res = (float*)result;
    float* in = (float*)input;
    float* tp = (float*)taps;
    unsigned int n_2_ccomplex_blocks = num_bytes >> 4;

    float sum0[2] = { 0, 0 };
    float sum1[2] = { 0, 0 };
    unsigned int i = 0;

    /* Two complex samples (four floats) per iteration. */
    for (i = 0; i < n_2_ccomplex_blocks; ++i) {
        sum0[0] += in[0] * tp[0] + in[1] * tp[1];
        sum0[1] += (-in[0] * tp[1]) + in[1] * tp[0];
        sum1[0] += in[2] * tp[2] + in[3] * tp[3];
        sum1[1] += (-in[2] * tp[3]) + in[3] * tp[2];

        in += 4;
        tp += 4;
    }

    res[0] = sum0[0] + sum1[0];
    res[1] = sum0[1] + sum1[1];

    /* If num_points is odd, fold in the last sample. */
    if (num_bytes >> 3 & 1) {
        *result += input[(num_bytes >> 3) - 1] * lv_conj(taps[(num_bytes >> 3) - 1]);
    }
}

#endif /* LV_HAVE_GENERIC */
#if LV_HAVE_SSE && LV_HAVE_64

static inline void volk_32fc_x2_conjugate_dot_prod_32fc_a_sse(lv_32fc_t* result,
                                                              const lv_32fc_t* input,
                                                              const lv_32fc_t* taps,
                                                              unsigned int num_points)
{
    const unsigned int num_bytes = num_points * 8;

    __VOLK_ATTR_ALIGNED(16)
    static const uint32_t conjugator[4] = { 0x00000000, 0x80000000, 0x00000000,
                                            0x80000000 };

    __VOLK_ASM __VOLK_VOLATILE(
464 "# ccomplex_conjugate_dotprod_generic (float* result, const float *input,\n\t" 465 "# const float *taps, unsigned num_bytes)\n\t" 466 "# float sum0 = 0;\n\t" 467 "# float sum1 = 0;\n\t" 468 "# float sum2 = 0;\n\t" 469 "# float sum3 = 0;\n\t" 471 "# sum0 += input[0] * taps[0] - input[1] * taps[1];\n\t" 472 "# sum1 += input[0] * taps[1] + input[1] * taps[0];\n\t" 473 "# sum2 += input[2] * taps[2] - input[3] * taps[3];\n\t" 474 "# sum3 += input[2] * taps[3] + input[3] * taps[2];\n\t" 477 "# } while (--n_2_ccomplex_blocks != 0);\n\t" 478 "# result[0] = sum0 + sum2;\n\t" 479 "# result[1] = sum1 + sum3;\n\t" 480 "# TODO: prefetch and better scheduling\n\t" 481 " xor %%r9, %%r9\n\t" 482 " xor %%r10, %%r10\n\t" 483 " movq %[conjugator], %%r9\n\t" 484 " movq %%rcx, %%rax\n\t" 485 " movaps 0(%%r9), %%xmm8\n\t" 486 " movq %%rcx, %%r8\n\t" 487 " movq %[rsi], %%r9\n\t" 488 " movq %[rdx], %%r10\n\t" 489 " xorps %%xmm6, %%xmm6 # zero accumulators\n\t" 490 " movaps 0(%%r9), %%xmm0\n\t" 491 " xorps %%xmm7, %%xmm7 # zero accumulators\n\t" 492 " movups 0(%%r10), %%xmm2\n\t" 493 " shr $5, %%rax # rax = n_2_ccomplex_blocks / 2\n\t" 495 " xorps %%xmm8, %%xmm2\n\t" 496 " jmp .%=L1_test\n\t" 497 " # 4 taps / loop\n\t" 498 " # something like ?? cycles / loop\n\t" 500 "# complex prod: C += A * B, w/ temp Z & Y (or B), xmmPN=$0x8000000080000000\n\t" 501 "# movaps (%%r9), %%xmmA\n\t" 502 "# movaps (%%r10), %%xmmB\n\t" 503 "# movaps %%xmmA, %%xmmZ\n\t" 504 "# shufps $0xb1, %%xmmZ, %%xmmZ # swap internals\n\t" 505 "# mulps %%xmmB, %%xmmA\n\t" 506 "# mulps %%xmmZ, %%xmmB\n\t" 507 "# # SSE replacement for: pfpnacc %%xmmB, %%xmmA\n\t" 508 "# xorps %%xmmPN, %%xmmA\n\t" 509 "# movaps %%xmmA, %%xmmZ\n\t" 510 "# unpcklps %%xmmB, %%xmmA\n\t" 511 "# unpckhps %%xmmB, %%xmmZ\n\t" 512 "# movaps %%xmmZ, %%xmmY\n\t" 513 "# shufps $0x44, %%xmmA, %%xmmZ # b01000100\n\t" 514 "# shufps $0xee, %%xmmY, %%xmmA # b11101110\n\t" 515 "# addps %%xmmZ, %%xmmA\n\t" 516 "# addps %%xmmA, %%xmmC\n\t" 517 "# A=xmm0, B=xmm2, Z=xmm4\n\t" 518 "# A'=xmm1, B'=xmm3, Z'=xmm5\n\t" 519 " movaps 16(%%r9), %%xmm1\n\t" 520 " movaps %%xmm0, %%xmm4\n\t" 521 " mulps %%xmm2, %%xmm0\n\t" 522 " shufps $0xb1, %%xmm4, %%xmm4 # swap internals\n\t" 523 " movaps 16(%%r10), %%xmm3\n\t" 524 " movaps %%xmm1, %%xmm5\n\t" 525 " xorps %%xmm8, %%xmm3\n\t" 526 " addps %%xmm0, %%xmm6\n\t" 527 " mulps %%xmm3, %%xmm1\n\t" 528 " shufps $0xb1, %%xmm5, %%xmm5 # swap internals\n\t" 529 " addps %%xmm1, %%xmm6\n\t" 530 " mulps %%xmm4, %%xmm2\n\t" 531 " movaps 32(%%r9), %%xmm0\n\t" 532 " addps %%xmm2, %%xmm7\n\t" 533 " mulps %%xmm5, %%xmm3\n\t" 535 " movaps 32(%%r10), %%xmm2\n\t" 536 " addps %%xmm3, %%xmm7\n\t" 537 " add $32, %%r10\n\t" 538 " xorps %%xmm8, %%xmm2\n\t" 542 " # We've handled the bulk of multiplies up to here.\n\t" 543 " # Let's sse if original n_2_ccomplex_blocks was odd.\n\t" 544 " # If so, we've got 2 more taps to do.\n\t" 547 " # The count was odd, do 2 more taps.\n\t" 548 " # Note that we've already got mm0/mm2 preloaded\n\t" 549 " # from the main loop.\n\t" 550 " movaps %%xmm0, %%xmm4\n\t" 551 " mulps %%xmm2, %%xmm0\n\t" 552 " shufps $0xb1, %%xmm4, %%xmm4 # swap internals\n\t" 553 " addps %%xmm0, %%xmm6\n\t" 554 " mulps %%xmm4, %%xmm2\n\t" 555 " addps %%xmm2, %%xmm7\n\t" 557 " # neg inversor\n\t" 558 " xorps %%xmm1, %%xmm1\n\t" 559 " mov $0x80000000, %%r9\n\t" 560 " movd %%r9, %%xmm1\n\t" 561 " shufps $0x11, %%xmm1, %%xmm1 # b00010001 # 0 -0 0 -0\n\t" 563 " xorps %%xmm1, %%xmm6\n\t" 564 " movaps %%xmm6, %%xmm2\n\t" 565 " unpcklps %%xmm7, %%xmm6\n\t" 566 " unpckhps 
%%xmm7, %%xmm2\n\t" 567 " movaps %%xmm2, %%xmm3\n\t" 568 " shufps $0x44, %%xmm6, %%xmm2 # b01000100\n\t" 569 " shufps $0xee, %%xmm3, %%xmm6 # b11101110\n\t" 570 " addps %%xmm2, %%xmm6\n\t" 571 " # xmm6 = r1 i2 r3 i4\n\t" 572 " movhlps %%xmm6, %%xmm4 # xmm4 = r3 i4 ?? ??\n\t" 573 " addps %%xmm4, %%xmm6 # xmm6 = r1+r3 i2+i4 ?? ??\n\t" 574 " movlps %%xmm6, (%[rdi]) # store low 2x32 bits (complex) " 581 [conjugator]
"r"(conjugator)
582 :
"rax",
"r8",
"r9",
"r10");

    /* At most one leftover sample remains (num_bytes % 16 is either 0 or 8). */
    int getem = num_bytes % 16;

    for (; getem > 0; getem -= 8) {
        *result += (input[(num_bytes >> 3) - 1] * lv_conj(taps[(num_bytes >> 3) - 1]));
    }
}

#endif /* LV_HAVE_SSE && LV_HAVE_64 */
#if LV_HAVE_SSE && LV_HAVE_32

static inline void volk_32fc_x2_conjugate_dot_prod_32fc_a_sse_32(lv_32fc_t* result,
                                                                 const lv_32fc_t* input,
                                                                 const lv_32fc_t* taps,
                                                                 unsigned int num_points)
{
    const unsigned int num_bytes = num_points * 8;

    __VOLK_ATTR_ALIGNED(16)
    static const uint32_t conjugator[4] = { 0x00000000, 0x80000000, 0x00000000,
                                            0x80000000 };

    int bound = num_bytes >> 4;
    int leftovers = num_bytes % 16;

    __VOLK_ASM __VOLK_VOLATILE(
611 " #movl %%esp, %%ebp\n\t" 612 " #movl 12(%%ebp), %%eax # input\n\t" 613 " #movl 16(%%ebp), %%edx # taps\n\t" 614 " #movl 20(%%ebp), %%ecx # n_bytes\n\t" 615 " movaps 0(%[conjugator]), %%xmm1\n\t" 616 " xorps %%xmm6, %%xmm6 # zero accumulators\n\t" 617 " movaps 0(%[eax]), %%xmm0\n\t" 618 " xorps %%xmm7, %%xmm7 # zero accumulators\n\t" 619 " movaps 0(%[edx]), %%xmm2\n\t" 620 " movl %[ecx], (%[out])\n\t" 621 " shrl $5, %[ecx] # ecx = n_2_ccomplex_blocks / 2\n\t" 623 " xorps %%xmm1, %%xmm2\n\t" 624 " jmp .%=L1_test\n\t" 625 " # 4 taps / loop\n\t" 626 " # something like ?? cycles / loop\n\t" 628 "# complex prod: C += A * B, w/ temp Z & Y (or B), xmmPN=$0x8000000080000000\n\t" 629 "# movaps (%[eax]), %%xmmA\n\t" 630 "# movaps (%[edx]), %%xmmB\n\t" 631 "# movaps %%xmmA, %%xmmZ\n\t" 632 "# shufps $0xb1, %%xmmZ, %%xmmZ # swap internals\n\t" 633 "# mulps %%xmmB, %%xmmA\n\t" 634 "# mulps %%xmmZ, %%xmmB\n\t" 635 "# # SSE replacement for: pfpnacc %%xmmB, %%xmmA\n\t" 636 "# xorps %%xmmPN, %%xmmA\n\t" 637 "# movaps %%xmmA, %%xmmZ\n\t" 638 "# unpcklps %%xmmB, %%xmmA\n\t" 639 "# unpckhps %%xmmB, %%xmmZ\n\t" 640 "# movaps %%xmmZ, %%xmmY\n\t" 641 "# shufps $0x44, %%xmmA, %%xmmZ # b01000100\n\t" 642 "# shufps $0xee, %%xmmY, %%xmmA # b11101110\n\t" 643 "# addps %%xmmZ, %%xmmA\n\t" 644 "# addps %%xmmA, %%xmmC\n\t" 645 "# A=xmm0, B=xmm2, Z=xmm4\n\t" 646 "# A'=xmm1, B'=xmm3, Z'=xmm5\n\t" 647 " movaps 16(%[edx]), %%xmm3\n\t" 648 " movaps %%xmm0, %%xmm4\n\t" 649 " xorps %%xmm1, %%xmm3\n\t" 650 " mulps %%xmm2, %%xmm0\n\t" 651 " movaps 16(%[eax]), %%xmm1\n\t" 652 " shufps $0xb1, %%xmm4, %%xmm4 # swap internals\n\t" 653 " movaps %%xmm1, %%xmm5\n\t" 654 " addps %%xmm0, %%xmm6\n\t" 655 " mulps %%xmm3, %%xmm1\n\t" 656 " shufps $0xb1, %%xmm5, %%xmm5 # swap internals\n\t" 657 " addps %%xmm1, %%xmm6\n\t" 658 " movaps 0(%[conjugator]), %%xmm1\n\t" 659 " mulps %%xmm4, %%xmm2\n\t" 660 " movaps 32(%[eax]), %%xmm0\n\t" 661 " addps %%xmm2, %%xmm7\n\t" 662 " mulps %%xmm5, %%xmm3\n\t" 663 " addl $32, %[eax]\n\t" 664 " movaps 32(%[edx]), %%xmm2\n\t" 665 " addps %%xmm3, %%xmm7\n\t" 666 " xorps %%xmm1, %%xmm2\n\t" 667 " addl $32, %[edx]\n\t" 671 " # We've handled the bulk of multiplies up to here.\n\t" 672 " # Let's sse if original n_2_ccomplex_blocks was odd.\n\t" 673 " # If so, we've got 2 more taps to do.\n\t" 674 " movl 0(%[out]), %[ecx] # n_2_ccomplex_blocks\n\t" 675 " shrl $4, %[ecx]\n\t" 676 " andl $1, %[ecx]\n\t" 678 " # The count was odd, do 2 more taps.\n\t" 679 " # Note that we've already got mm0/mm2 preloaded\n\t" 680 " # from the main loop.\n\t" 681 " movaps %%xmm0, %%xmm4\n\t" 682 " mulps %%xmm2, %%xmm0\n\t" 683 " shufps $0xb1, %%xmm4, %%xmm4 # swap internals\n\t" 684 " addps %%xmm0, %%xmm6\n\t" 685 " mulps %%xmm4, %%xmm2\n\t" 686 " addps %%xmm2, %%xmm7\n\t" 688 " # neg inversor\n\t" 689 " #movl 8(%%ebp), %[eax] \n\t" 690 " xorps %%xmm1, %%xmm1\n\t" 691 " movl $0x80000000, (%[out])\n\t" 692 " movss (%[out]), %%xmm1\n\t" 693 " shufps $0x11, %%xmm1, %%xmm1 # b00010001 # 0 -0 0 -0\n\t" 695 " xorps %%xmm1, %%xmm6\n\t" 696 " movaps %%xmm6, %%xmm2\n\t" 697 " unpcklps %%xmm7, %%xmm6\n\t" 698 " unpckhps %%xmm7, %%xmm2\n\t" 699 " movaps %%xmm2, %%xmm3\n\t" 700 " shufps $0x44, %%xmm6, %%xmm2 # b01000100\n\t" 701 " shufps $0xee, %%xmm3, %%xmm6 # b11101110\n\t" 702 " addps %%xmm2, %%xmm6\n\t" 703 " # xmm6 = r1 i2 r3 i4\n\t" 704 " #movl 8(%%ebp), %[eax] # @result\n\t" 705 " movhlps %%xmm6, %%xmm4 # xmm4 = r3 i4 ?? ??\n\t" 706 " addps %%xmm4, %%xmm6 # xmm6 = r1+r3 i2+i4 ?? 
??\n\t" 707 " movlps %%xmm6, (%[out]) # store low 2x32 bits (complex) " 713 [ecx]
"r"(num_bytes),
715 [conjugator]
"r"(conjugator));

    /* At most one leftover sample remains (num_bytes % 16 is either 0 or 8). */
    for (; leftovers > 0; leftovers -= 8) {
        *result += (input[(bound << 1)] * lv_conj(taps[(bound << 1)]));
    }
}

#endif /* LV_HAVE_SSE && LV_HAVE_32 */

#endif /* INCLUDED_volk_32fc_x2_conjugate_dot_prod_32fc_a_H */