Vector Optimized Library of Kernels 3.2.0
Architecture-tuned implementations of math kernels
volk_16i_permute_and_scalar_add.h
Go to the documentation of this file.
1 /* -*- c++ -*- */
2 /*
3  * Copyright 2012, 2014 Free Software Foundation, Inc.
4  *
5  * This file is part of VOLK
6  *
7  * SPDX-License-Identifier: LGPL-3.0-or-later
8  */
9 
50 #ifndef INCLUDED_volk_16i_permute_and_scalar_add_a_H
51 #define INCLUDED_volk_16i_permute_and_scalar_add_a_H
52 
53 #include <inttypes.h>
54 #include <stdio.h>
55 
56 #ifdef LV_HAVE_SSE2
57 
58 #include <emmintrin.h>
59 #include <xmmintrin.h>
60 
/*!
 * \brief Permute src0 by permute_indexes and add four masked scalars (SSE2).
 *
 * For each i in [0, num_points):
 *   target[i] = src0[permute_indexes[i]]
 *             + (cntl0[i] & scalars[0]) + (cntl1[i] & scalars[1])
 *             + (cntl2[i] & scalars[2]) + (cntl3[i] & scalars[3])
 *
 * The cntlN arrays act as per-element AND masks (e.g. 0 or -1) that
 * conditionally include each scalar in the sum.
 *
 * target, cntl0..cntl3 and scalars are accessed with aligned 128-bit
 * loads/stores (_mm_load_si128/_mm_store_si128), so they must be
 * 16-byte aligned ("_a" kernel).
 * NOTE(review): permute_indexes entries are assumed to be valid indices
 * into src0 -- they are not range-checked here; confirm with callers.
 */
static inline void volk_16i_permute_and_scalar_add_a_sse2(short* target,
                                                          short* src0,
                                                          short* permute_indexes,
                                                          short* cntl0,
                                                          short* cntl1,
                                                          short* cntl2,
                                                          short* cntl3,
                                                          short* scalars,
                                                          unsigned int num_points)
{

    const unsigned int num_bytes = num_points * 2;

    __m128i xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7;

    __m128i *p_target, *p_cntl0, *p_cntl1, *p_cntl2, *p_cntl3, *p_scalars;

    short* p_permute_indexes = permute_indexes;

    p_target = (__m128i*)target;
    p_cntl0 = (__m128i*)cntl0;
    p_cntl1 = (__m128i*)cntl1;
    p_cntl2 = (__m128i*)cntl2;
    p_cntl3 = (__m128i*)cntl3;
    p_scalars = (__m128i*)scalars;

    int i = 0;

    int bound = (num_bytes >> 4);         /* number of full 8-short vectors   */
    int leftovers = (num_bytes >> 1) & 7; /* trailing samples: num_points % 8 */

    xmm0 = _mm_load_si128(p_scalars);

    /* Broadcast scalars[0..3] into all eight 16-bit lanes of xmm1..xmm4:
     * first replicate across the low 4 lanes, then replicate dword 0
     * across the whole register. */
    xmm1 = _mm_shufflelo_epi16(xmm0, 0);
    xmm2 = _mm_shufflelo_epi16(xmm0, 0x55);
    xmm3 = _mm_shufflelo_epi16(xmm0, 0xaa);
    xmm4 = _mm_shufflelo_epi16(xmm0, 0xff);

    xmm1 = _mm_shuffle_epi32(xmm1, 0x00);
    xmm2 = _mm_shuffle_epi32(xmm2, 0x00);
    xmm3 = _mm_shuffle_epi32(xmm3, 0x00);
    xmm4 = _mm_shuffle_epi32(xmm4, 0x00);


    for (; i < bound; ++i) {
        /* Gather the next 8 permuted shorts.  Each insert writes one
         * distinct lane of one of four zeroed registers, so summing the
         * four registers merges all eight lanes without interference
         * (spreading the inserts over registers breaks dependency chains). */
        xmm0 = _mm_setzero_si128();
        xmm5 = _mm_setzero_si128();
        xmm6 = _mm_setzero_si128();
        xmm7 = _mm_setzero_si128();

        xmm0 = _mm_insert_epi16(xmm0, src0[p_permute_indexes[0]], 0);
        xmm5 = _mm_insert_epi16(xmm5, src0[p_permute_indexes[1]], 1);
        xmm6 = _mm_insert_epi16(xmm6, src0[p_permute_indexes[2]], 2);
        xmm7 = _mm_insert_epi16(xmm7, src0[p_permute_indexes[3]], 3);
        xmm0 = _mm_insert_epi16(xmm0, src0[p_permute_indexes[4]], 4);
        xmm5 = _mm_insert_epi16(xmm5, src0[p_permute_indexes[5]], 5);
        xmm6 = _mm_insert_epi16(xmm6, src0[p_permute_indexes[6]], 6);
        xmm7 = _mm_insert_epi16(xmm7, src0[p_permute_indexes[7]], 7);

        xmm0 = _mm_add_epi16(xmm0, xmm5);
        xmm6 = _mm_add_epi16(xmm6, xmm7);

        p_permute_indexes += 8;

        /* xmm0 now holds src0[permute_indexes[i*8 .. i*8+7]]. */
        xmm0 = _mm_add_epi16(xmm0, xmm6);

        /* Mask each broadcast scalar with its control vector and
         * accumulate.  Pointer bumps are interleaved with the arithmetic
         * (hand scheduling from the original author). */
        xmm5 = _mm_load_si128(p_cntl0);
        xmm6 = _mm_load_si128(p_cntl1);
        xmm7 = _mm_load_si128(p_cntl2);

        xmm5 = _mm_and_si128(xmm5, xmm1);
        xmm6 = _mm_and_si128(xmm6, xmm2);
        xmm7 = _mm_and_si128(xmm7, xmm3);

        xmm0 = _mm_add_epi16(xmm0, xmm5);

        xmm5 = _mm_load_si128(p_cntl3);

        xmm6 = _mm_add_epi16(xmm6, xmm7);

        p_cntl0 += 1;

        xmm5 = _mm_and_si128(xmm5, xmm4);

        xmm0 = _mm_add_epi16(xmm0, xmm6);

        p_cntl1 += 1;
        p_cntl2 += 1;

        xmm0 = _mm_add_epi16(xmm0, xmm5);

        p_cntl3 += 1;

        _mm_store_si128(p_target, xmm0);

        p_target += 1;
    }

    /* Scalar tail for the final num_points % 8 samples. */
    for (i = bound * 8; i < (bound * 8) + leftovers; ++i) {
        target[i] = src0[permute_indexes[i]] + (cntl0[i] & scalars[0]) +
                    (cntl1[i] & scalars[1]) + (cntl2[i] & scalars[2]) +
                    (cntl3[i] & scalars[3]);
    }
}
165 #endif /*LV_HAVE_SSE2*/
166 
167 
168 #ifdef LV_HAVE_GENERIC
/*!
 * \brief Permute src0 by permute_indexes and add four masked scalars
 *        (portable reference implementation).
 *
 * For each k in [0, num_points):
 *   target[k] = src0[permute_indexes[k]]
 *             + (cntl0[k] & scalars[0]) + (cntl1[k] & scalars[1])
 *             + (cntl2[k] & scalars[2]) + (cntl3[k] & scalars[3])
 *
 * The cntlN arrays are per-element AND masks (e.g. 0 or -1) selecting
 * which scalars contribute to each output sample.
 */
static inline void volk_16i_permute_and_scalar_add_generic(short* target,
                                                           short* src0,
                                                           short* permute_indexes,
                                                           short* cntl0,
                                                           short* cntl1,
                                                           short* cntl2,
                                                           short* cntl3,
                                                           short* scalars,
                                                           unsigned int num_points)
{
    const unsigned int num_bytes = num_points * 2;
    const int count = num_bytes >> 1; /* equals num_points */

    /* Hoist the four scalar addends; they are loop-invariant. */
    const short s0 = scalars[0];
    const short s1 = scalars[1];
    const short s2 = scalars[2];
    const short s3 = scalars[3];

    int k;
    for (k = 0; k < count; ++k) {
        /* Accumulate in int (the natural promotion width), then narrow
         * once on store -- exactly as the combined expression would. */
        int acc = src0[permute_indexes[k]];
        acc += cntl0[k] & s0;
        acc += cntl1[k] & s1;
        acc += cntl2[k] & s2;
        acc += cntl3[k] & s3;
        target[k] = (short)acc;
    }
}
191 
192 #endif /*LV_HAVE_GENERIC*/
193 
194 #endif /*INCLUDED_volk_16i_permute_and_scalar_add_a_H*/
static void volk_16i_permute_and_scalar_add_a_sse2(short *target, short *src0, short *permute_indexes, short *cntl0, short *cntl1, short *cntl2, short *cntl3, short *scalars, unsigned int num_points)
Definition: volk_16i_permute_and_scalar_add.h:61
static void volk_16i_permute_and_scalar_add_generic(short *target, short *src0, short *permute_indexes, short *cntl0, short *cntl1, short *cntl2, short *cntl3, short *scalars, unsigned int num_points)
Definition: volk_16i_permute_and_scalar_add.h:169
for i
Definition: volk_config_fixed.tmpl.h:13