amiro-blt / Target / Demo / ARMCM4_STM32F405_Power_Management_GCC / Boot / lib / stdperiphlib / CMSIS / Include / core_cm4_simd.h @ 69661903
History | View | Annotate | Download (20.823 KB)
1 | 69661903 | Thomas Schöpping | /**************************************************************************//** |
---|---|---|---|
2 | * @file core_cm4_simd.h
|
||
3 | * @brief CMSIS Cortex-M4 SIMD Header File
|
||
4 | * @version V3.01
|
||
5 | * @date 06. March 2012
|
||
6 | *
|
||
7 | * @note
|
||
8 | * Copyright (C) 2010-2012 ARM Limited. All rights reserved.
|
||
9 | *
|
||
10 | * @par
|
||
11 | * ARM Limited (ARM) is supplying this software for use with Cortex-M
|
||
12 | * processor based microcontrollers. This file can be freely distributed
|
||
13 | * within development tools that are supporting such ARM based processors.
|
||
14 | *
|
||
15 | * @par
|
||
16 | * THIS SOFTWARE IS PROVIDED "AS IS". NO WARRANTIES, WHETHER EXPRESS, IMPLIED
|
||
17 | * OR STATUTORY, INCLUDING, BUT NOT LIMITED TO, IMPLIED WARRANTIES OF
|
||
18 | * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE APPLY TO THIS SOFTWARE.
|
||
19 | * ARM SHALL NOT, IN ANY CIRCUMSTANCES, BE LIABLE FOR SPECIAL, INCIDENTAL, OR
|
||
20 | * CONSEQUENTIAL DAMAGES, FOR ANY REASON WHATSOEVER.
|
||
21 | *
|
||
22 | ******************************************************************************/
|
||
23 | |||
/* The extern "C" linkage block is opened *before* the include guard (and is
   closed after it, at the very bottom of the file).  Because both halves sit
   outside the guard, the brace pair stays balanced even when the header is
   included more than once from C++.                                          */
#ifdef __cplusplus
 extern "C" {
#endif

#ifndef __CORE_CM4_SIMD_H
#define __CORE_CM4_SIMD_H
|
||
30 | |||
31 | |||
/*******************************************************************************
 *                 Hardware Abstraction Layer
 ******************************************************************************/


/* ###################  Compiler specific Intrinsics  ########################### */
/** \defgroup CMSIS_SIMD_intrinsics CMSIS SIMD Intrinsics
  Access to dedicated SIMD instructions
  @{
*/

#if   defined ( __CC_ARM ) /*------------------RealView Compiler -----------------*/
/* ARM armcc specific functions */

/*------ CM4 SIMD Intrinsics -----------------------------------------------------*/
/* armcc already provides every Cortex-M4 SIMD operation as a compiler
   built-in (__sadd8, __qadd8, ...), so the portable CMSIS names are plain
   one-to-one aliases here.  Naming convention of the instruction set:
   S = signed, Q = saturating, SH = signed halving, U = unsigned,
   UQ = unsigned saturating, UH = unsigned halving; a trailing 8 operates on
   the four byte lanes of a 32-bit word, a trailing 16 on the two halfword
   lanes.                                                                    */
#define __SADD8                           __sadd8
#define __QADD8                           __qadd8
#define __SHADD8                          __shadd8
#define __UADD8                           __uadd8
#define __UQADD8                          __uqadd8
#define __UHADD8                          __uhadd8
#define __SSUB8                           __ssub8
#define __QSUB8                           __qsub8
#define __SHSUB8                          __shsub8
#define __USUB8                           __usub8
#define __UQSUB8                          __uqsub8
#define __UHSUB8                          __uhsub8
#define __SADD16                          __sadd16
#define __QADD16                          __qadd16
#define __SHADD16                         __shadd16
#define __UADD16                          __uadd16
#define __UQADD16                         __uqadd16
#define __UHADD16                         __uhadd16
#define __SSUB16                          __ssub16
#define __QSUB16                          __qsub16
#define __SHSUB16                         __shsub16
#define __USUB16                          __usub16
#define __UQSUB16                         __uqsub16
#define __UHSUB16                         __uhsub16
#define __SASX                            __sasx
#define __QASX                            __qasx
#define __SHASX                           __shasx
#define __UASX                            __uasx
#define __UQASX                           __uqasx
#define __UHASX                           __uhasx
#define __SSAX                            __ssax
#define __QSAX                            __qsax
#define __SHSAX                           __shsax
#define __USAX                            __usax
#define __UQSAX                           __uqsax
#define __UHSAX                           __uhsax
#define __USAD8                           __usad8
#define __USADA8                          __usada8
#define __SSAT16                          __ssat16
#define __USAT16                          __usat16
#define __UXTB16                          __uxtb16
#define __UXTAB16                         __uxtab16
#define __SXTB16                          __sxtb16
#define __SXTAB16                         __sxtab16
#define __SMUAD                           __smuad
#define __SMUADX                          __smuadx
#define __SMLAD                           __smlad
#define __SMLADX                          __smladx
#define __SMLALD                          __smlald
#define __SMLALDX                         __smlaldx
#define __SMUSD                           __smusd
#define __SMUSDX                          __smusdx
#define __SMLSD                           __smlsd
#define __SMLSDX                          __smlsdx
#define __SMLSLD                          __smlsld
#define __SMLSLDX                         __smlsldx
#define __SEL                             __sel
#define __QADD                            __qadd
#define __QSUB                            __qsub

/* Pack-halfword emulated in plain C for armcc:
   __PKHBT keeps the bottom halfword of ARG1 and inserts ARG2 << ARG3 into
   the top halfword; __PKHTB keeps the top halfword of ARG1 and inserts
   ARG2 >> ARG3 into the bottom halfword.                                    */
#define __PKHBT(ARG1,ARG2,ARG3)          ( ((((uint32_t)(ARG1))          ) & 0x0000FFFFUL) |  \
                                           ((((uint32_t)(ARG2)) << (ARG3)) & 0xFFFF0000UL)  )

#define __PKHTB(ARG1,ARG2,ARG3)          ( ((((uint32_t)(ARG1))          ) & 0xFFFF0000UL) |  \
                                           ((((uint32_t)(ARG2)) >> (ARG3)) & 0x0000FFFFUL)  )


/*-- End CM4 SIMD Intrinsics -----------------------------------------------------*/
||
115 | |||
116 | |||
117 | |||
#elif defined ( __ICCARM__ ) /*------------------ ICC Compiler -------------------*/
/* IAR iccarm specific functions */

/*------ CM4 SIMD Intrinsics -----------------------------------------------------*/
/* IAR ships its own CMSIS-compatible intrinsics header; just pull it in.    */
#include <cmsis_iar.h>

/*-- End CM4 SIMD Intrinsics -----------------------------------------------------*/



#elif defined ( __TMS470__ ) /*---------------- TI CCS Compiler ------------------*/
/* TI CCS specific functions */

/*------ CM4 SIMD Intrinsics -----------------------------------------------------*/
/* TI Code Composer Studio likewise provides its own intrinsics header.      */
#include <cmsis_ccs.h>

/*-- End CM4 SIMD Intrinsics -----------------------------------------------------*/
||
135 | |||
136 | |||
137 | |||
#elif defined ( __GNUC__ ) /*------------------ GNU Compiler ---------------------*/
/* GNU gcc specific functions */

/*------ CM4 SIMD Intrinsics -----------------------------------------------------*/
/* Each wrapper below emits exactly one Cortex-M4 SIMD instruction through GCC
   extended inline assembly: both operands are register inputs ("r"), the
   32-bit result is a register output ("=r"), and always_inline guarantees
   the wrapper costs nothing beyond the instruction itself.
   Prefix convention (see the ARMv7-M ARM): S = signed, Q = saturating,
   SH = signed halving, U = unsigned, UQ = unsigned saturating,
   UH = unsigned halving; the trailing 8 means the operation acts on the four
   byte lanes of each 32-bit operand.                                        */

/* SADD8: four lane-wise signed 8-bit additions. */
__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SADD8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("sadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

/* QADD8: four signed saturating 8-bit additions. */
__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QADD8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("qadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

/* SHADD8: four signed 8-bit additions, results halved. */
__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHADD8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("shadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

/* UADD8: four lane-wise unsigned 8-bit additions. */
__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UADD8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

/* UQADD8: four unsigned saturating 8-bit additions. */
__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQADD8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uqadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

/* UHADD8: four unsigned 8-bit additions, results halved. */
__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHADD8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uhadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}


/* SSUB8: four lane-wise signed 8-bit subtractions. */
__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SSUB8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("ssub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

/* QSUB8: four signed saturating 8-bit subtractions. */
__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QSUB8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("qsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

/* SHSUB8: four signed 8-bit subtractions, results halved. */
__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHSUB8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("shsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

/* USUB8: four lane-wise unsigned 8-bit subtractions. */
__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __USUB8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("usub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

/* UQSUB8: four unsigned saturating 8-bit subtractions. */
__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQSUB8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uqsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

/* UHSUB8: four unsigned 8-bit subtractions, results halved. */
__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHSUB8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uhsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}
||
238 | |||
239 | |||
/* Halfword (16-bit lane) variants of the add/sub group above: the trailing 16
   means the operation acts on the two halfword lanes of each operand.  Same
   prefix convention (S/Q/SH/U/UQ/UH).                                       */

/* SADD16: two lane-wise signed 16-bit additions. */
__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SADD16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("sadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

/* QADD16: two signed saturating 16-bit additions. */
__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QADD16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("qadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

/* SHADD16: two signed 16-bit additions, results halved. */
__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHADD16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("shadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

/* UADD16: two lane-wise unsigned 16-bit additions. */
__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UADD16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

/* UQADD16: two unsigned saturating 16-bit additions. */
__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQADD16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uqadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

/* UHADD16: two unsigned 16-bit additions, results halved. */
__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHADD16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uhadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

/* SSUB16: two lane-wise signed 16-bit subtractions. */
__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SSUB16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("ssub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

/* QSUB16: two signed saturating 16-bit subtractions. */
__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QSUB16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("qsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

/* SHSUB16: two signed 16-bit subtractions, results halved. */
__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHSUB16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("shsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

/* USUB16: two lane-wise unsigned 16-bit subtractions. */
__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __USUB16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("usub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

/* UQSUB16: two unsigned saturating 16-bit subtractions. */
__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQSUB16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uqsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

/* UHSUB16: two unsigned 16-bit subtractions, results halved. */
__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHSUB16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uhsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}
||
335 | |||
/* Exchange variants: ASX = add on the top halfwords / subtract on the bottom
   halfwords after exchanging the halfwords of op2; SAX is the mirror image
   (subtract top, add bottom).  Same S/Q/SH/U/UQ/UH prefix convention.       */

/* SASX: signed add-and-subtract with halfword exchange. */
__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SASX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("sasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

/* QASX: saturating signed add-and-subtract with halfword exchange. */
__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QASX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("qasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

/* SHASX: signed halving add-and-subtract with halfword exchange. */
__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHASX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("shasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

/* UASX: unsigned add-and-subtract with halfword exchange. */
__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UASX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

/* UQASX: unsigned saturating add-and-subtract with halfword exchange. */
__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQASX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uqasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

/* UHASX: unsigned halving add-and-subtract with halfword exchange. */
__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHASX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uhasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

/* SSAX: signed subtract-and-add with halfword exchange. */
__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SSAX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("ssax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

/* QSAX: saturating signed subtract-and-add with halfword exchange. */
__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QSAX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("qsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

/* SHSAX: signed halving subtract-and-add with halfword exchange. */
__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHSAX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("shsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

/* USAX: unsigned subtract-and-add with halfword exchange. */
__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __USAX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("usax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

/* UQSAX: unsigned saturating subtract-and-add with halfword exchange. */
__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQSAX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uqsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

/* UHSAX: unsigned halving subtract-and-add with halfword exchange. */
__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHSAX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uhsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}
||
431 | |||
/* USAD8: unsigned sum of absolute differences of the four byte lanes. */
__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __USAD8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("usad8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

/* USADA8: as USAD8, but the byte-lane difference sum is accumulated into op3. */
__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __USADA8(uint32_t op1, uint32_t op2, uint32_t op3)
{
  uint32_t result;

  __ASM volatile ("usada8 %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
  return(result);
}

/* SSAT16/USAT16 saturate both halfword lanes of ARG1 to the bit position
   ARG2.  They are macros rather than inline functions because the saturation
   position is encoded in the instruction and must therefore be an
   assemble-time constant ("I" constraint).                                  */
#define __SSAT16(ARG1,ARG2) \
({                          \
  uint32_t __RES, __ARG1 = (ARG1); \
  __ASM ("ssat16 %0, %1, %2" : "=r" (__RES) :  "I" (ARG2), "r" (__ARG1) ); \
  __RES; \
 })

#define __USAT16(ARG1,ARG2) \
({                          \
  uint32_t __RES, __ARG1 = (ARG1); \
  __ASM ("usat16 %0, %1, %2" : "=r" (__RES) :  "I" (ARG2), "r" (__ARG1) ); \
  __RES; \
 })
||
461 | |||
/* UXTB16: zero-extend bytes 0 and 2 of op1 into the two halfword lanes. */
__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UXTB16(uint32_t op1)
{
  uint32_t result;

  __ASM volatile ("uxtb16 %0, %1" : "=r" (result) : "r" (op1));
  return(result);
}

/* UXTAB16: zero-extend bytes of op2 and add them to the halfword lanes of op1. */
__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UXTAB16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uxtab16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

/* SXTB16: sign-extend bytes 0 and 2 of op1 into the two halfword lanes. */
__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SXTB16(uint32_t op1)
{
  uint32_t result;

  __ASM volatile ("sxtb16 %0, %1" : "=r" (result) : "r" (op1));
  return(result);
}

/* SXTAB16: sign-extend bytes of op2 and add them to the halfword lanes of op1. */
__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SXTAB16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("sxtab16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}
||
493 | |||
/* Dual 16x16 signed multiplies: SMUAD adds the two lane products, the X
   suffix exchanges the halfwords of op2 first, and the L.. forms accumulate
   into a value (SMLAD: 32-bit, SMLALD: 64-bit).                             */

/* SMUAD: dual signed 16x16 multiply, products added. */
__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMUAD  (uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("smuad %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

/* SMUADX: dual signed 16x16 multiply with halfword exchange, products added. */
__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMUADX (uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("smuadx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

/* SMLAD: dual signed 16x16 multiply-add, accumulated into op3. */
__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMLAD (uint32_t op1, uint32_t op2, uint32_t op3)
{
  uint32_t result;

  __ASM volatile ("smlad %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
  return(result);
}

/* SMLADX: as SMLAD, with halfword exchange on op2. */
__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMLADX (uint32_t op1, uint32_t op2, uint32_t op3)
{
  uint32_t result;

  __ASM volatile ("smladx %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
  return(result);
}

/* SMLALD/SMLALDX: dual 16x16 multiply with 64-bit accumulation.  The
   instruction works on a register *pair*, so the macro splits the uint64_t
   accumulator ARG3 into high/low words, ties them to the output operands via
   the "0"/"1" matching constraints, and re-joins the pair afterwards.  Note
   the (uint64_t) cast before the >> 32: it guarantees the shift is performed
   at 64-bit width regardless of ARG3's declared type.                       */
#define __SMLALD(ARG1,ARG2,ARG3) \
({ \
  uint32_t __ARG1 = (ARG1), __ARG2 = (ARG2), __ARG3_H = (uint32_t)((uint64_t)(ARG3) >> 32), __ARG3_L = (uint32_t)((uint64_t)(ARG3) & 0xFFFFFFFFUL); \
  __ASM volatile ("smlald %0, %1, %2, %3" : "=r" (__ARG3_L), "=r" (__ARG3_H) : "r" (__ARG1), "r" (__ARG2), "0" (__ARG3_L), "1" (__ARG3_H) ); \
  (uint64_t)(((uint64_t)__ARG3_H << 32) | __ARG3_L); \
 })

#define __SMLALDX(ARG1,ARG2,ARG3) \
({ \
  uint32_t __ARG1 = (ARG1), __ARG2 = (ARG2), __ARG3_H = (uint32_t)((uint64_t)(ARG3) >> 32), __ARG3_L = (uint32_t)((uint64_t)(ARG3) & 0xFFFFFFFFUL); \
  __ASM volatile ("smlaldx %0, %1, %2, %3" : "=r" (__ARG3_L), "=r" (__ARG3_H) : "r" (__ARG1), "r" (__ARG2), "0" (__ARG3_L), "1" (__ARG3_H) ); \
  (uint64_t)(((uint64_t)__ARG3_H << 32) | __ARG3_L); \
 })
||
539 | |||
/* Subtracting duals: as the SMUAD/SMLAD group above, but the two 16x16 lane
   products are subtracted instead of added.                                 */

/* SMUSD: dual signed 16x16 multiply, products subtracted. */
__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMUSD  (uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("smusd %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

/* SMUSDX: as SMUSD, with halfword exchange on op2. */
__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMUSDX (uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("smusdx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

/* SMLSD: dual signed 16x16 multiply-subtract, accumulated into op3. */
__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMLSD (uint32_t op1, uint32_t op2, uint32_t op3)
{
  uint32_t result;

  __ASM volatile ("smlsd %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
  return(result);
}

/* SMLSDX: as SMLSD, with halfword exchange on op2. */
__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMLSDX (uint32_t op1, uint32_t op2, uint32_t op3)
{
  uint32_t result;

  __ASM volatile ("smlsdx %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
  return(result);
}
||
571 | |||
/* SMLSLD/SMLSLDX: dual 16x16 multiply-subtract with 64-bit accumulation.
   The 64-bit accumulator ARG3 is split into high/low words for the register
   pair, tied to the outputs via the "0"/"1" matching constraints, and
   re-joined afterwards.
   FIX: cast ARG3 to uint64_t before shifting/masking, exactly as __SMLALD
   above already does.  Without the cast, passing a 32-bit accumulator shifts
   a 32-bit value by 32 bits (undefined behavior in C) and leaves the high
   word of the register pair garbage.                                        */
#define __SMLSLD(ARG1,ARG2,ARG3) \
({ \
  uint32_t __ARG1 = (ARG1), __ARG2 = (ARG2), __ARG3_H = (uint32_t)((uint64_t)(ARG3) >> 32), __ARG3_L = (uint32_t)((uint64_t)(ARG3) & 0xFFFFFFFFUL); \
  __ASM volatile ("smlsld %0, %1, %2, %3" : "=r" (__ARG3_L), "=r" (__ARG3_H) : "r" (__ARG1), "r" (__ARG2), "0" (__ARG3_L), "1" (__ARG3_H) ); \
  (uint64_t)(((uint64_t)__ARG3_H << 32) | __ARG3_L); \
 })

#define __SMLSLDX(ARG1,ARG2,ARG3) \
({ \
  uint32_t __ARG1 = (ARG1), __ARG2 = (ARG2), __ARG3_H = (uint32_t)((uint64_t)(ARG3) >> 32), __ARG3_L = (uint32_t)((uint64_t)(ARG3) & 0xFFFFFFFFUL); \
  __ASM volatile ("smlsldx %0, %1, %2, %3" : "=r" (__ARG3_L), "=r" (__ARG3_H) : "r" (__ARG1), "r" (__ARG2), "0" (__ARG3_L), "1" (__ARG3_H) ); \
  (uint64_t)(((uint64_t)__ARG3_H << 32) | __ARG3_L); \
 })
||
585 | |||
/* SEL: byte-wise select between op1 and op2 (selection is controlled by the
   processor's GE flags -- see the ARMv7-M ARM for the exact rule).          */
__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SEL  (uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("sel %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

/* QADD: 32-bit signed saturating addition. */
__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QADD(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("qadd %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

/* QSUB: 32-bit signed saturating subtraction. */
__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QSUB(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("qsub %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}
||
609 | |||
/* Pack halfwords.  __PKHBT keeps the bottom halfword of ARG1 and inserts
   ARG2 shifted left by ARG3 into the top halfword; __PKHTB keeps the top
   halfword of ARG1 and inserts ARG2 arithmetically shifted right by ARG3
   into the bottom halfword.  ARG3 must be an assemble-time constant
   ("I" constraint); PKHTB with a shift of 0 has its own encoding, hence the
   compile-time-resolvable if/else.
   FIX: parenthesize every use of ARG3 -- standard macro hygiene so that an
   expression argument (e.g. A+B) is evaluated as a unit in the comparison
   and in the asm constraints.                                               */
#define __PKHBT(ARG1,ARG2,ARG3) \
({                          \
  uint32_t __RES, __ARG1 = (ARG1), __ARG2 = (ARG2); \
  __ASM ("pkhbt %0, %1, %2, lsl %3" : "=r" (__RES) :  "r" (__ARG1), "r" (__ARG2), "I" ((ARG3))  ); \
  __RES; \
 })

#define __PKHTB(ARG1,ARG2,ARG3) \
({                          \
  uint32_t __RES, __ARG1 = (ARG1), __ARG2 = (ARG2); \
  if ((ARG3) == 0) \
    __ASM ("pkhtb %0, %1, %2" : "=r" (__RES) :  "r" (__ARG1), "r" (__ARG2)  ); \
  else \
    __ASM ("pkhtb %0, %1, %2, asr %3" : "=r" (__RES) :  "r" (__ARG1), "r" (__ARG2), "I" ((ARG3))  ); \
  __RES; \
 })

/*-- End CM4 SIMD Intrinsics -----------------------------------------------------*/
|
||
628 | |||
629 | |||
630 | |||
#elif defined ( __TASKING__ ) /*------------------ TASKING Compiler --------------*/
/* TASKING carm specific functions */


/*------ CM4 SIMD Intrinsics -----------------------------------------------------*/
/* not yet supported */
/*-- End CM4 SIMD Intrinsics -----------------------------------------------------*/


#endif

/*@} end of group CMSIS_SIMD_intrinsics */


#endif /* __CORE_CM4_SIMD_H */

/* This closing brace matches the extern "C" opened at the very top of the
   file; both halves deliberately sit outside the include guard so the pair
   stays balanced even when the header is included more than once.           */
#ifdef __cplusplus
}
#endif