#ifndef INCLUDED_volk_32fc_x2_conjugate_dot_prod_32fc_u_H
#define INCLUDED_volk_32fc_x2_conjugate_dot_prod_32fc_u_H

#include <volk/volk_common.h>
#include <volk/volk_complex.h>


#ifdef LV_HAVE_GENERIC

static inline void volk_32fc_x2_conjugate_dot_prod_32fc_generic(lv_32fc_t* result,
                                                                const lv_32fc_t* input,
                                                                const lv_32fc_t* taps,
                                                                unsigned int num_points)
{
    lv_32fc_t res = lv_cmake(0.f, 0.f);
    for (unsigned int i = 0; i < num_points; ++i) {
        // accumulate input[i] times the conjugate of taps[i]
        res += (*input++) * lv_conj((*taps++));
    }
    *result = res;
}

#endif /*LV_HAVE_GENERIC*/
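
/*
 * A minimal usage sketch, not part of the kernels in this file: it assumes the
 * usual VOLK public dispatcher volk_32fc_x2_conjugate_dot_prod_32fc() and the
 * volk_malloc()/volk_free()/volk_get_alignment() helpers from <volk/volk.h>.
 *
 *   #include <volk/volk.h>
 *   #include <stdio.h>
 *
 *   void conjugate_dot_prod_example(void)
 *   {
 *       unsigned int N = 1024;
 *       lv_32fc_t* input = (lv_32fc_t*)volk_malloc(N * sizeof(lv_32fc_t), volk_get_alignment());
 *       lv_32fc_t* taps = (lv_32fc_t*)volk_malloc(N * sizeof(lv_32fc_t), volk_get_alignment());
 *       lv_32fc_t result;
 *
 *       // ... fill input[] and taps[] ...
 *
 *       // result = sum over i of input[i] * conj(taps[i])
 *       volk_32fc_x2_conjugate_dot_prod_32fc(&result, input, taps, N);
 *       printf("%f %+fi\n", lv_creal(result), lv_cimag(result));
 *
 *       volk_free(input);
 *       volk_free(taps);
 *   }
 */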
#ifdef LV_HAVE_GENERIC

static inline void volk_32fc_x2_conjugate_dot_prod_32fc_block(lv_32fc_t* result,
                                                              const lv_32fc_t* input,
                                                              const lv_32fc_t* taps,
                                                              unsigned int num_points)
{
    const unsigned int num_bytes = num_points * 8;

    float* res = (float*)result;
    float* in = (float*)input;
    float* tp = (float*)taps;
    unsigned int n_2_ccomplex_blocks = num_bytes >> 4;

    float sum0[2] = { 0, 0 };
    float sum1[2] = { 0, 0 };
    unsigned int i = 0;

    // process two complex samples per iteration; the taps are conjugated, so
    // their imaginary parts enter with a negative sign
    for (i = 0; i < n_2_ccomplex_blocks; ++i) {
        sum0[0] += in[0] * tp[0] + in[1] * tp[1];
        sum0[1] += (-in[0] * tp[1]) + in[1] * tp[0];
        sum1[0] += in[2] * tp[2] + in[3] * tp[3];
        sum1[1] += (-in[2] * tp[3]) + in[3] * tp[2];

        in += 4;
        tp += 4;
    }

    res[0] = sum0[0] + sum1[0];
    res[1] = sum0[1] + sum1[1];

    // handle a possible trailing odd sample
    if (num_bytes >> 3 & 1) {
        *result += input[(num_bytes >> 3) - 1] * lv_conj(taps[(num_bytes >> 3) - 1]);
    }
}

#endif /*LV_HAVE_GENERIC*/
#ifdef LV_HAVE_AVX

#include <immintrin.h>

static inline void volk_32fc_x2_conjugate_dot_prod_32fc_u_avx(lv_32fc_t* result,
                                                              const lv_32fc_t* input,
                                                              const lv_32fc_t* taps,
                                                              unsigned int num_points)
{
    // partial sums of input * conj(taps), split into real- and imaginary-part products
    __m256 sum_a_mult_b_real = _mm256_setzero_ps();
    __m256 sum_a_mult_b_imag = _mm256_setzero_ps();

    for (long unsigned i = 0; i < (num_points & ~3u); i += 4) {
        // unaligned loads of four complex samples (eight floats) from each vector
        __m256 a = _mm256_loadu_ps((const float*)&input[i]);
        __m256 b = _mm256_loadu_ps((const float*)&taps[i]);
        // duplicate the real and imaginary parts of b across each pair of lanes
        __m256 b_real = _mm256_moveldup_ps(b);
        __m256 b_imag = _mm256_movehdup_ps(b);

        sum_a_mult_b_real = _mm256_add_ps(sum_a_mult_b_real, _mm256_mul_ps(a, b_real));
        // addsub subtracts the even lanes, supplying the sign flip from conjugating b
        sum_a_mult_b_imag = _mm256_addsub_ps(sum_a_mult_b_imag, _mm256_mul_ps(a, b_imag));
    }

    // swap real/imag positions in the imaginary-part sums before combining
    sum_a_mult_b_imag = _mm256_permute_ps(sum_a_mult_b_imag, _MM_SHUFFLE(2, 3, 0, 1));

    __m256 sum = _mm256_add_ps(sum_a_mult_b_real, sum_a_mult_b_imag);

    // horizontal reduction: fold the upper 128-bit lane onto the lower one,
    sum = _mm256_add_ps(sum, _mm256_permute2f128_ps(sum, sum, 0x01));
    // then fold the upper complex value onto the lower one
    sum = _mm256_add_ps(sum, _mm256_permute_ps(sum, _MM_SHUFFLE(1, 0, 3, 2)));

    __m128 lower = _mm256_extractf128_ps(sum, 0);
    _mm_storel_pi((__m64*)result, lower);

    // handle the remaining (num_points % 4) samples
    for (long unsigned i = num_points & ~3u; i < num_points; ++i) {
        *result += lv_conj(taps[i]) * input[i];
    }
}
#endif /*LV_HAVE_AVX*/
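
/*
 * Per complex sample, the AVX loop above and the SSE3 loop below implement the
 * scalar conjugate multiply-accumulate sketched here (a is input, b is taps):
 *
 *   sum_re += a_re * b_re + a_im * b_im;   // real part of a * conj(b)
 *   sum_im += a_im * b_re - a_re * b_im;   // imaginary part of a * conj(b)
 *
 * The real-part products accumulate in sum_a_mult_b_real and the imaginary-part
 * products in sum_a_mult_b_imag; the addsub supplies the minus sign that comes
 * from conjugating b, and the later permute/shuffle swaps each (re, im) pair so
 * the two accumulators line up before they are added and horizontally reduced.
 */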
#ifdef LV_HAVE_SSE3

#include <pmmintrin.h>
#include <xmmintrin.h>

static inline void volk_32fc_x2_conjugate_dot_prod_32fc_u_sse3(lv_32fc_t* result,
                                                               const lv_32fc_t* input,
                                                               const lv_32fc_t* taps,
                                                               unsigned int num_points)
{
    __m128 sum_a_mult_b_real = _mm_setzero_ps();
    __m128 sum_a_mult_b_imag = _mm_setzero_ps();

    for (long unsigned i = 0; i < (num_points & ~1u); i += 2) {
        // unaligned loads of two complex samples (four floats) from each vector
        __m128 a = _mm_loadu_ps((const float*)&input[i]);
        __m128 b = _mm_loadu_ps((const float*)&taps[i]);
        __m128 b_real = _mm_moveldup_ps(b);
        __m128 b_imag = _mm_movehdup_ps(b);

        sum_a_mult_b_real = _mm_add_ps(sum_a_mult_b_real, _mm_mul_ps(a, b_real));
        sum_a_mult_b_imag = _mm_addsub_ps(sum_a_mult_b_imag, _mm_mul_ps(a, b_imag));
    }

    // swap real/imag positions in the imaginary-part sums before combining
    sum_a_mult_b_imag =
        _mm_shuffle_ps(sum_a_mult_b_imag, sum_a_mult_b_imag, _MM_SHUFFLE(2, 3, 0, 1));

    __m128 sum = _mm_add_ps(sum_a_mult_b_real, sum_a_mult_b_imag);

    // fold the upper complex value onto the lower one
    sum = _mm_add_ps(sum, _mm_shuffle_ps(sum, sum, _MM_SHUFFLE(1, 0, 3, 2)));

    _mm_storel_pi((__m64*)result, sum);

    // handle a possible trailing odd sample
    if (num_points & 1u) {
        *result += lv_conj(taps[num_points - 1]) * input[num_points - 1];
    }
}
#endif /*LV_HAVE_SSE3*/
#ifdef LV_HAVE_NEON
#include <arm_neon.h>

static inline void volk_32fc_x2_conjugate_dot_prod_32fc_neon(lv_32fc_t* result,
                                                             const lv_32fc_t* input,
                                                             const lv_32fc_t* taps,
                                                             unsigned int num_points)
{
    unsigned int quarter_points = num_points / 4;
    unsigned int number;

    lv_32fc_t* a_ptr = (lv_32fc_t*)input;
    lv_32fc_t* b_ptr = (lv_32fc_t*)taps;
    // for the 2-lane structures, val[0] holds the real parts and val[1] the imaginary parts
    float32x4x2_t a_val, b_val, accumulator;
    float32x4x2_t tmp_imag;
    accumulator.val[0] = vdupq_n_f32(0);
    accumulator.val[1] = vdupq_n_f32(0);

    for (number = 0; number < quarter_points; ++number) {
        a_val = vld2q_f32((float*)a_ptr);
        b_val = vld2q_f32((float*)b_ptr);

        // multiply by the real part of b
        tmp_imag.val[1] = vmulq_f32(a_val.val[1], b_val.val[0]);
        tmp_imag.val[0] = vmulq_f32(a_val.val[0], b_val.val[0]);

        // multiply-accumulate/subtract the imaginary part of b; the subtract
        // supplies the sign flip from conjugating b
        tmp_imag.val[1] = vmlsq_f32(tmp_imag.val[1], a_val.val[0], b_val.val[1]);
        tmp_imag.val[0] = vmlaq_f32(tmp_imag.val[0], a_val.val[1], b_val.val[1]);

        accumulator.val[0] = vaddq_f32(accumulator.val[0], tmp_imag.val[0]);
        accumulator.val[1] = vaddq_f32(accumulator.val[1], tmp_imag.val[1]);

        a_ptr += 4;
        b_ptr += 4;
    }

    lv_32fc_t accum_result[4];
    vst2q_f32((float*)accum_result, accumulator);
    *result = accum_result[0] + accum_result[1] + accum_result[2] + accum_result[3];

    // handle the remaining (num_points % 4) samples
    for (number = quarter_points * 4; number < num_points; ++number) {
        *result += (*a_ptr++) * lv_conj(*b_ptr++);
    }
}
#endif /*LV_HAVE_NEON*/
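
/*
 * A quick consistency check, not part of the kernel: on a target that compiles
 * both paths, the NEON result can be compared against the generic reference
 * (requires <assert.h> and <math.h>; the tolerance is an arbitrary choice for
 * single-precision rounding):
 *
 *   lv_32fc_t r_generic, r_neon;
 *   volk_32fc_x2_conjugate_dot_prod_32fc_generic(&r_generic, input, taps, N);
 *   volk_32fc_x2_conjugate_dot_prod_32fc_neon(&r_neon, input, taps, N);
 *   assert(fabsf(lv_creal(r_generic) - lv_creal(r_neon)) < 1e-3f);
 *   assert(fabsf(lv_cimag(r_generic) - lv_cimag(r_neon)) < 1e-3f);
 */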
#endif /*INCLUDED_volk_32fc_x2_conjugate_dot_prod_32fc_u_H*/

#ifndef INCLUDED_volk_32fc_x2_conjugate_dot_prod_32fc_a_H
#define INCLUDED_volk_32fc_x2_conjugate_dot_prod_32fc_a_H

#include <volk/volk_common.h>
#include <volk/volk_complex.h>


#ifdef LV_HAVE_AVX

#include <immintrin.h>

static inline void volk_32fc_x2_conjugate_dot_prod_32fc_a_avx(lv_32fc_t* result,
                                                              const lv_32fc_t* input,
                                                              const lv_32fc_t* taps,
                                                              unsigned int num_points)
{
    __m256 sum_a_mult_b_real = _mm256_setzero_ps();
    __m256 sum_a_mult_b_imag = _mm256_setzero_ps();

    for (long unsigned i = 0; i < (num_points & ~3u); i += 4) {
        // aligned loads of four complex samples (eight floats) from each vector
        __m256 a = _mm256_load_ps((const float*)&input[i]);
        __m256 b = _mm256_load_ps((const float*)&taps[i]);
        __m256 b_real = _mm256_moveldup_ps(b);
        __m256 b_imag = _mm256_movehdup_ps(b);

        sum_a_mult_b_real = _mm256_add_ps(sum_a_mult_b_real, _mm256_mul_ps(a, b_real));
        sum_a_mult_b_imag = _mm256_addsub_ps(sum_a_mult_b_imag, _mm256_mul_ps(a, b_imag));
    }

    // swap real/imag positions in the imaginary-part sums before combining
    sum_a_mult_b_imag = _mm256_permute_ps(sum_a_mult_b_imag, _MM_SHUFFLE(2, 3, 0, 1));

    __m256 sum = _mm256_add_ps(sum_a_mult_b_real, sum_a_mult_b_imag);

    // horizontal reduction: fold the upper 128-bit lane onto the lower one,
    sum = _mm256_add_ps(sum, _mm256_permute2f128_ps(sum, sum, 0x01));
    // then fold the upper complex value onto the lower one
    sum = _mm256_add_ps(sum, _mm256_permute_ps(sum, _MM_SHUFFLE(1, 0, 3, 2)));

    __m128 lower = _mm256_extractf128_ps(sum, 0);
    _mm_storel_pi((__m64*)result, lower);

    // handle the remaining (num_points % 4) samples
    for (long unsigned i = num_points & ~3u; i < num_points; ++i) {
        *result += lv_conj(taps[i]) * input[i];
    }
}
#endif /*LV_HAVE_AVX*/
#ifdef LV_HAVE_SSE3

#include <pmmintrin.h>
#include <xmmintrin.h>

static inline void volk_32fc_x2_conjugate_dot_prod_32fc_a_sse3(lv_32fc_t* result,
                                                               const lv_32fc_t* input,
                                                               const lv_32fc_t* taps,
                                                               unsigned int num_points)
{
    __m128 sum_a_mult_b_real = _mm_setzero_ps();
    __m128 sum_a_mult_b_imag = _mm_setzero_ps();

    for (long unsigned i = 0; i < (num_points & ~1u); i += 2) {
        // aligned loads of two complex samples (four floats) from each vector
        __m128 a = _mm_load_ps((const float*)&input[i]);
        __m128 b = _mm_load_ps((const float*)&taps[i]);
        __m128 b_real = _mm_moveldup_ps(b);
        __m128 b_imag = _mm_movehdup_ps(b);

        sum_a_mult_b_real = _mm_add_ps(sum_a_mult_b_real, _mm_mul_ps(a, b_real));
        sum_a_mult_b_imag = _mm_addsub_ps(sum_a_mult_b_imag, _mm_mul_ps(a, b_imag));
    }

    // swap real/imag positions in the imaginary-part sums before combining
    sum_a_mult_b_imag =
        _mm_shuffle_ps(sum_a_mult_b_imag, sum_a_mult_b_imag, _MM_SHUFFLE(2, 3, 0, 1));

    __m128 sum = _mm_add_ps(sum_a_mult_b_real, sum_a_mult_b_imag);

    // fold the upper complex value onto the lower one
    sum = _mm_add_ps(sum, _mm_shuffle_ps(sum, sum, _MM_SHUFFLE(1, 0, 3, 2)));

    _mm_storel_pi((__m64*)result, sum);

    // handle a possible trailing odd sample
    if (num_points & 1u) {
        *result += lv_conj(taps[num_points - 1]) * input[num_points - 1];
    }
}
#endif /*LV_HAVE_SSE3*/

#endif /*INCLUDED_volk_32fc_x2_conjugate_dot_prod_32fc_a_H*/