/**
 * \file IfxCpu_IntrinsicsGnuc.h
 * \version iLLD_0_1_0_10
 * \copyright Copyright (c) 2012 Infineon Technologies AG. All rights reserved.
 *
 *
 *
 * IMPORTANT NOTICE
 *
 *
 * Infineon Technologies AG (Infineon) is supplying this file for use
 * exclusively with Infineon's microcontroller products. This file can be freely
 * distributed within development tools that are supporting such microcontroller
 * products.
 *
 * THIS SOFTWARE IS PROVIDED "AS IS". NO WARRANTIES, WHETHER EXPRESS, IMPLIED
 * OR STATUTORY, INCLUDING, BUT NOT LIMITED TO, IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE APPLY TO THIS SOFTWARE.
 * INFINEON SHALL NOT, IN ANY CIRCUMSTANCES, BE LIABLE FOR SPECIAL, INCIDENTAL,
 * OR CONSEQUENTIAL DAMAGES, FOR ANY REASON WHATSOEVER.
 *
 * \defgroup IfxLld_Cpu_Intrinsics_Gnuc Intrinsics for GNU compiler
 * \ingroup IfxLld_Cpu_Intrinsics
 *
 */

#ifndef IFXCPU_INTRINSICSGNUC_H
#define IFXCPU_INTRINSICSGNUC_H

/* old style intrinsics handling for AGENtiX environment */
#if defined(SCTB_EMBEDDED)
# define IFXCPU_INTRINSICSGNUC_USE_MACHINE_INTRINSICS 0
#else
# define IFXCPU_INTRINSICSGNUC_USE_MACHINE_INTRINSICS 1
#endif

/******************************************************************************/
#include "Ifx_Types.h"

#if IFXCPU_INTRINSICSGNUC_USE_MACHINE_INTRINSICS
#include "machine/intrinsics.h"
#endif

/******************************************************************************/
/* *INDENT-OFF* */
#define STRINGIFY(x) #x

/** Function call without return
 */
#define __non_return_call(fun) __asm__ volatile ("ja "#fun)

/** Jump and link
 */
IFX_INLINE void __jump_and_link(void (*fun)(void))
{
    __asm__ volatile ("jli %0"::"a"(fun));
}

/** \defgroup IfxLld_Cpu_Intrinsics_Gnuc_any_type Cross type arithmetic operation
 *
 * Macros compatible with float, fix point, signed integer and unsigned integer
 *
 * \ingroup IfxLld_Cpu_Intrinsics_Gnuc
 * \{
 */
#define __minX(X,Y)              ( ((X) < (Y)) ? (X) : (Y) )
#define __maxX(X,Y)              ( ((X) > (Y)) ? (X) : (Y) )
#define __saturateX(X,Min,Max)   ( __minX(__maxX(X, Min), Max) )
#define __checkrangeX(X,Min,Max) (((X) >= (Min)) && ((X) <= (Max)))
/** \} */

/** \defgroup IfxLld_Cpu_Intrinsics_Gnuc_singed_integer Signed integer operation
 * \ingroup IfxLld_Cpu_Intrinsics_Gnuc
 * \{
 */
#define __saturate(X,Min,Max) ( __min(__max(X, Min), Max) )
/** \} */

/** \defgroup IfxLld_Cpu_Intrinsics_Gnuc_unsinged_integer Unsigned integer operation
 * \ingroup IfxLld_Cpu_Intrinsics_Gnuc
 * \{
 */
#define __saturateu(X,Min,Max) ( __minu(__maxu(X, Min), Max) )
/** \} */
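
/* Usage sketch (illustrative, not part of the iLLD API): clamping a raw ADC
 * reading with the saturation macros above. `adcRaw` and `ADC_MAX` are
 * hypothetical names.
 *
 * \code
 * uint32  adcRaw     = 4200U;
 * uint32  adcClamped = __saturateu(adcRaw, 0U, 4095U);   // 12-bit full scale
 * float   ratio      = __saturateX(adcClamped / 4095.0f, 0.0f, 1.0f);
 * boolean inRange    = __checkrangeX(adcRaw, 0U, (uint32)ADC_MAX);
 * \endcode
 */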

/** \defgroup IfxLld_Cpu_Intrinsics_Gnucmin_max Minimum and Maximum of Integers
 These intrinsic functions return the minimum or maximum of sint32, uint32 or sint16 operands
 * \ingroup IfxLld_Cpu_Intrinsics_Gnuc
 * \{
 */

/** Return maximum of two integers
 */
IFX_INLINE sint32 __max(sint32 a, sint32 b)
{
    sint32 res;
    __asm__ volatile ("max %0, %1, %2": "=d" (res) : "d" (a), "d" (b));
    return res;
}

/** Return maximum of two sint16
 */
IFX_INLINE sint16 __maxs(sint16 a, sint16 b)
{
    sint32 res;
    __asm__ volatile ("max.h %0, %1, %2": "=d" (res) : "d" (a), "d" (b));
    return res;
}
/** Return maximum of two unsigned integers
 */
IFX_INLINE uint32 __maxu(uint32 a, uint32 b)
{
    uint32 res;
    __asm__ volatile ("max.u %0, %1, %2": "=d" (res) : "d" (a), "d" (b));
    return res;
}

/** Return minimum of two integers
 */
IFX_INLINE sint32 __min(sint32 a, sint32 b)
{
    sint32 res;
    __asm__ volatile ("min %0, %1, %2": "=d" (res) : "d" (a), "d" (b));
    return res;
}

/** Return minimum of two sint16
 */
IFX_INLINE sint16 __mins(sint16 a, sint16 b)
{
    sint16 res;
    __asm__ volatile ("min.h %0, %1, %2": "=d" (res) : "d" (a), "d" (b));
    return res;
}

/** Return minimum of two unsigned integers
 */
IFX_INLINE uint32 __minu(uint32 a, uint32 b)
{
    uint32 res;
    __asm__ volatile ("min.u %0, %1, %2": "=d" (res) : "d" (a), "d" (b));
    return res;
}

/** \} */

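/* Usage sketch (illustrative): unlike the macro versions, these inline
 * intrinsics evaluate each operand once and compile to a single MIN/MAX
 * instruction; the operands shown are hypothetical.
 *
 * \code
 * sint32 error   = __min(__max(measured - target, -1000), 1000);
 * uint32 bounded = __minu(requestedSize, 256U);
 * \endcode
 */
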
/** \defgroup intrinsicsgnuc_float Floating point operation
 * \ingroup IfxLld_Cpu_Intrinsics_Gnuc
 * \{
 */

#define __sqrf(X)                ((X) * (X))
#define __sqrtf(X)               sqrtf(X)
#define __checkrange(X,Min,Max)  (((X) >= (Min)) && ((X) <= (Max)))

#define __roundf(X)              ((((X) - (sint32)(X)) > 0.5) ? (1 + (sint32)(X)) : ((sint32)(X)))
#define __absf(X)                ( ((X) < 0.0) ? -(X) : (X) )
#define __minf(X,Y)              ( ((X) < (Y)) ? (X) : (Y) )
#define __maxf(X,Y)              ( ((X) > (Y)) ? (X) : (Y) )
#define __saturatef(X,Min,Max)   ( __minf(__maxf(X, Min), Max) )
#define __checkrangef(X,Min,Max) (((X) >= (Min)) && ((X) <= (Max)))

#define __abs_stdreal(X)              ( ((X) > 0.0) ? (X) : -(X) )
#define __min_stdreal(X,Y)            ( ((X) < (Y)) ? (X) : (Y) )
#define __max_stdreal(X,Y)            ( ((X) > (Y)) ? (X) : (Y) )
#define __saturate_stdreal(X,Min,Max) ( __min_stdreal(__max_stdreal(X, Min), Max) )

#define __neqf(X,Y) ( ((X) > (Y)) || ((X) < (Y)) ) /**< X != Y */
#define __leqf(X,Y) ( !((X) > (Y)) )               /**< X <= Y */
#define __geqf(X,Y) ( !((X) < (Y)) )               /**< X >= Y */
/** \} */

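/* Usage sketch (illustrative): __neqf and friends compare floats using only
 * ordered relational operators, which sidesteps compiler warnings about exact
 * floating-point equality; readSensor() and adjust() are hypothetical.
 *
 * \code
 * float setpoint = 1.5f;
 * float actual   = readSensor();
 * if (__neqf(setpoint, actual) && __checkrangef(actual, 0.0f, 10.0f))
 * {
 *     adjust(__saturatef(actual, 0.2f, 9.8f));
 * }
 * \endcode
 */
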
/** \defgroup IfxLld_Cpu_Intrinsics_Gnucfractional Fractional Arithmetic Support
 The next table provides an overview of intrinsic functions to convert fractional values. Note that the
 TASKING VX-toolset C compiler for TriCore fully supports the fractional type, so normally you should not
 need these intrinsic functions (except for __mulfractlong). For compatibility reasons the TASKING C
 compiler does support these functions.
 * \ingroup IfxLld_Cpu_Intrinsics_Gnuc
 * \{
 */

/** Count the consecutive number of bits that have the same value as bit 15 of an sfract
 */
IFX_INLINE sint16 __clssf(sfract a)
{
    sint16 res;
    __asm__ volatile ("cls %0,%1":"=d"(res):"d"(a):"memory");
    return res;
}

/** Convert fract to float
 */
IFX_INLINE float __fract_to_float(fract a)
{
    float res;
    __asm__ volatile ("q31tof %0,%1,%2":"=d"(res):"d"(a), "d"(0):"memory");
    return res;
}

/** Convert float to fract
 */
IFX_INLINE fract __float_to_fract(float a)
{
    fract res;
    __asm__ volatile ("ftoq31 %0,%1,%2":"=d"(res):"d"(a), "d"(0):"memory");
    return res;
}

/** Convert laccum to fract
 */
IFX_INLINE fract __getfract(laccum a)
{
    fract res;
    __asm__ volatile ("dextr %0,%H1,%L1,0x11":"=&d" (res):"d" (a):"memory");
    return res;
}

/** Multiply-add with rounding. Returns the rounded result of ( a + b * c )
 */
IFX_INLINE sfract __mac_r_sf(sfract a, sfract b, sfract c)
{
    sfract res;
    __asm__ volatile ("maddrs.q %0,%1,%2U,%3U,1":"=d"(res):"d"(a), "d"(b), "d"(c):"memory");
    return res;
}

/** Multiply-add sfract. Returns ( a + b * c )
 */
IFX_INLINE sfract __mac_sf(sfract a, sfract b, sfract c)
{
    sfract res;
    __asm__ volatile ("madds.q %0,%1,%2U,%3U,1":"=d"(res):"d"(a), "d"(b), "d"(c):"memory");
    return res;
}

/** Integer part of the multiplication of a fract and a fract
 */
IFX_INLINE long __mulfractfract(fract a, fract b)
{
    long res;
    __asm__ volatile ("mul.q %0,%1,%2,1":"=d"(res):"d"(a), "d"(b):"memory");
    return res;
}

/** Integer part of the multiplication of a fract and a long
 */
IFX_INLINE long __mulfractlong(fract a, long b)
{
    long res;
    __asm__ volatile ("mul.q %0,%1,%2,1":"=d"(res):"d"(a), "d"(b):"memory");
    return res;
}

/** Convert fract to sfract
 */
IFX_INLINE sfract __round16(fract a)
{
    sfract res;
    __asm__ volatile ("mov.u %0,0x8000 \n\
                       adds %0,%1 \n\
                       insert %0,%0,0,0,0x10 "
                      :"=&d"(res):"d"(a):"memory");
    return res;
}

/** Convert signed sint16 to sfract
 */
IFX_INLINE sfract __s16_to_sfract(sint16 a)
{
    sfract res;
    __asm__ volatile ("sh %0,%1,16":"=d"(res):"d"(a):"memory");
    return res;
}

/** Convert sfract to sint16
 */
IFX_INLINE sint16 __sfract_to_s16(sfract a)
{
    sint16 res;
    __asm__ volatile ("sh %0,%1,-16":"=d"(res):"d"(a):"memory");
    return res;
}

/** Convert sfract to uint16
 */
IFX_INLINE uint16 __sfract_to_u16(sfract a)
{
    uint16 res;
    __asm__ volatile ("sh %0,%1,-16":"=d"(res):"d"(a):"memory");
    return res;
}

/** Left/right shift of an laccum
 */
IFX_INLINE laccum __shaaccum(laccum a, sint32 b)
{
    laccum res;
    __asm__ volatile ("jge %2,0,0f \n\
                       sha %H0,%H1,%2 \n\
                       rsub %2,%2,0 \n\
                       dextr %L0,%H1,%L1,%2 \n\
                       j 1f \n\
                       0:dextr %H0,%H1,%L1,%2 \n\
                       sha %L0,%L1,%2 \n\
                       1:"
                      :"=d"(res):"d"(a), "d"(b):"memory");
    return res;
}

/** Left/right shift of a fract
 */
IFX_INLINE fract __shafracts(fract a, sint32 b)
{
    fract res;
    __asm__ volatile ("shas %0,%1,%2":"=d"(res):"d"(a), "d"(b):"memory");
    return res;
}

/** Left/right shift of an sfract
 */
IFX_INLINE sfract __shasfracts(sfract a, sint32 b)
{
    sfract res;
    __asm__ volatile ("shas %0,%1,%2":"=d"(res):"d"(a), "d"(b):"memory");
    return res;
}

/** Convert uint16 to sfract
 */
IFX_INLINE sfract __u16_to_sfract(uint16 a)
{
    sfract res;
    __asm__ volatile ("sh %0,%1,16":"=d"(res):"d"(a):"memory");
    return res;
}

/** \} */
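
/* Usage sketch (illustrative): a Q31/Q15 round trip through the fractional
 * helpers. The function names follow the signatures reconstructed above and
 * should be checked against the shipped header.
 *
 * \code
 * fract  q31  = __float_to_fract(0.25f);    // 0.25 as Q31
 * sfract q15  = __round16(q31);             // narrowed to Q15 with rounding
 * float  back = __fract_to_float(q31);      // approximately 0.25f again
 * long   part = __mulfractlong(q31, 1000);  // integer part of 0.25 * 1000
 * \endcode
 */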

/** \defgroup IfxLld_Cpu_Intrinsics_Gnucinsert Insert / Extract Bit-fields and Bits
 * \ingroup IfxLld_Cpu_Intrinsics_Gnuc
 * \{
 */

/** Extract a bit-field (bit pos to bit pos+width) from value
 */
IFX_INLINE sint32 __extr(sint32 a, uint32 p, uint32 w)
{
    sint32 res;
    __asm__ volatile ("mov %%d14,%2 \n\
                       mov %%d15,%3 \n\
                       extr %0,%1,%%e14"
                      : "=d" (res) : "d" (a), "d" (p), "d" (w):"d14", "d15");
    return res;
}

/** Same as __extr() but return bit-field as unsigned integer
 */
IFX_INLINE uint32 __extru(uint32 a, uint32 p, uint32 w)
{
    uint32 res;
    __asm__ volatile ("mov %%d14,%2 \n\
                       mov %%d15,%3 \n\
                       extr.u %0,%1,%%e14"
                      : "=d" (res) : "d" (a), "d" (p), "d" (w):"d14", "d15");
    return res;
}

/** Load a single bit.
 */
//#define __getbit(address,bitoffset ) __extru( *(address), bitoffset, 1 )
#define __getbit(address, bitoffset) ((*(address) & (1U << (bitoffset))) != 0)

/** Atomic load-modify-store.
 */
#define __imaskldmst(address, value, bitoffset, bits) \
    {long long tmp;\
    __asm("imask %A0,%1,%2,%3":"=d"((long long)tmp):"d"(value),"d"(bitoffset),"i"(bits): "memory");\
    __asm("ldmst %0,%A1"::"i"(address),"d"(tmp));}

/** Return trg but replace trgbit by srcbit in src.
 */
IFX_INLINE sint32 __ins(sint32 trg, const sint32 trgbit, sint32 src, const sint32 srcbit)
{
    sint32 res;
    __asm__ volatile ("ins.t %0,%1,%2,%3,%4":"=d"(res):"d"(trg), "i"(trgbit), "d"(src), "i"(srcbit));
    return res;
}

/** Extract bit-field (width bits starting at bit 0) from src and insert it in trg at pos.
 */
IFX_INLINE sint32 __insert(sint32 a, sint32 b, sint32 p, sint32 w)
{
    sint32 res;
    __asm__ volatile ("mov %%d14,%3 \n\
                       mov %%d15,%4 \n\
                       insert %0,%1,%2,%%e14"
                      :"=d"(res):"d"(a), "d"(b), "d"(p), "d"(w):"d14", "d15");
    return res;
}

/** Return trg but replace trgbit by inverse of srcbit in src.
 */
IFX_INLINE sint32 __insn(sint32 trg, const sint32 trgbit, sint32 src, const sint32 srcbit)
{
    sint32 res;
    __asm__ volatile ("insn.t %0,%1,%2,%3,%4":"=d"(res):"d"(trg), "i"(trgbit), "d"(src), "i"(srcbit));
    return res;
}

/** Store a single bit.
 */
#define __putbit(value,address,bitoffset ) __imaskldmst(address, value, bitoffset,1)

/** \} */

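/* Usage sketch (illustrative): decoding fields of a hypothetical 32-bit
 * status word.
 *
 * \code
 * uint32  status  = 0x1234A5F0U;
 * uint32  channel = __extru(status, 4, 4);         // bits [7:4]
 * sint32  level   = __extr((sint32)status, 8, 8);  // bits [15:8], sign-extended
 * boolean ready   = __getbit(&status, 31);         // single bit test
 * \endcode
 */
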
/** \defgroup IfxLld_Cpu_Intrinsics_Gnucinterrupt_handling Interrupt Handling
 The next table provides an overview of the intrinsic functions to read or set interrupt handling.
 * \ingroup IfxLld_Cpu_Intrinsics_Gnuc
 * \{
 */

#if !IFXCPU_INTRINSICSGNUC_USE_MACHINE_INTRINSICS
/** Set CPU priority number [0..255] (or [0..1023] for TriCore 1.6.x) and enable interrupts immediately at function entry
 */
#define __bisr(intlvl) __asm__ volatile ("bisr "#intlvl : : : "memory")
#endif

/** Disable interrupts. Only supported for TriCore1
 */
#define __disable() __asm__ volatile ("disable" : : : "memory")

/** Disable interrupts and return previous interrupt state (enabled or disabled). Directly supported on TriCore 1.6; emulated on TC1.3.1
 */
IFX_INLINE sint32 __disable_and_save(void)
{
    sint32 res;
    __asm__ volatile("disable %0":"=d"(res));
    return res;
}

/** Enable interrupts immediately at function entry
 */
#define __enable() __asm__ volatile ("enable" : : : "memory")

/** Restore interrupt state. Directly supported on TriCore 1.6; emulated on TC1.3.1
 */
IFX_INLINE void __restore(sint32 ie)
{
    __asm__ volatile ("restore %0"::"d"(ie));
}

#if !IFXCPU_INTRINSICSGNUC_USE_MACHINE_INTRINSICS
/** Call a system call function number
 */
#define __syscall(svcno) __tric_syscall(svcno)
#define __tric_syscall(svcno) __asm__ volatile ("syscall "STRINGIFY(svcno) : : : "memory")
#endif

/** \} */

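/* Usage sketch (illustrative): a short critical section that preserves the
 * caller's interrupt state, the usual pattern for these intrinsics;
 * `sharedCounter` is hypothetical.
 *
 * \code
 * sint32 ie = __disable_and_save();   // mask interrupts, remember old state
 * sharedCounter++;
 * __restore(ie);                      // re-enable only if previously enabled
 * \endcode
 */
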
/** \defgroup IfxLld_Cpu_Intrinsics_Gnucmiscellaneous Miscellaneous Intrinsic Functions
 * \ingroup IfxLld_Cpu_Intrinsics_Gnuc
 * \{
 */

/** Write back and invalidate cache address "p". Generates CACHEA.WI [Ab].
 */
IFX_INLINE void __cacheawi(void *p)
{
    __asm__ volatile("cachea.wi [%0]0"::"a"(p));
}
/** Write back and invalidate cache index "p". Generates CACHEI.WI [Ab].
 */
IFX_INLINE void __cacheiwi(void *p)
{
    __asm__ volatile("cachei.wi [%0]0"::"a"(p));
}

/** Write back and invalidate cache address "p" and return post incremented
 * value of "p". Generates CACHEA.WI [Ab+].
 */
IFX_INLINE void *__cacheawi_bo_post_inc(void *p)
{
    __asm__ volatile("cachea.wi [%0+]0"::"a"(p));
    return p;
}
/** Multiply two 32-bit numbers to an intermediate 64-bit result, and scale
 * back the result to 32 bits. To scale back the result, 32 bits are extracted
 * from the intermediate 64-bit result: bit 63-offset to bit 31-offset.
 */
IFX_INLINE sint32 __mulsc(sint32 a, sint32 b, sint32 offset)
{
    sint32 res;
    __asm__ volatile("mul %%e12,%1,%2 \n\
                      dextr %0,%%d13,%%d12,%3"
                     :"=d"(res):"d"(a), "d"(b), "d"(offset):"d12", "d13");
    return res;
}

/** Rotate operand left count times. The bits that are shifted out are inserted at the right side (bit 31 is shifted to bit 0).
 */
IFX_INLINE uint32 __rol(uint32 operand, sint32 count)
{
    uint32 res;
    __asm__ volatile("dextr %0,%1,%1,%2":"=d"(res):"d"(operand), "d"(count):"memory");
    return res;
}

/** Rotate operand right count times. The bits that are shifted out are inserted at the left side (bit 0 is shifted to bit 31).
 */
IFX_INLINE uint32 __ror(uint32 operand, sint32 count)
{
    uint32 res;
    __asm__ volatile("rsub %2,%2,0 \n\
                      dextr %0,%1,%1,%2"
                     :"=d"(res):"d"(operand), "d"(count):"memory");
    return res;
}
/** \} */

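/* Usage sketch (illustrative): single-instruction rotates via DEXTR, e.g. for
 * a simple hash mix step; the values are hypothetical.
 *
 * \code
 * uint32 h = 0x9E3779B9U;
 * h = __rol(h, 5) ^ __ror(h, 7);
 * \endcode
 */
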
/** \defgroup IfxLld_Cpu_Intrinsics_Gnucpacked Packed Data Type Support
 The next table provides an overview of the intrinsic functions for initialization of packed data types.
 * \ingroup IfxLld_Cpu_Intrinsics_Gnuc
 * \{
 */

/** Absolute value of __packb
 */
IFX_INLINE __packb __absb(__packb a)
{
    __packb res;
    __asm__ volatile ("abs.b %0,%1"
                      :"=d"(res):"d"(a):"memory");
    return res;
}

/** Absolute value of __packhw
 */
IFX_INLINE __packhw __absh(__packhw a)
{
    __packhw res;
    __asm__ volatile ("abs.h %0,%1"
                      :"=d"(res):"d"(a):"memory");
    return res;
}

/** Absolute value of __packhw using saturation
 */
IFX_INLINE __packhw __abssh(__packhw a)
{
    __packhw res;
    __asm__ volatile ("abss.h %0,%1"
                      :"=d"(res):"d"(a):"memory");
    return res;
}

/** Extract first byte from a __packb
 */
IFX_INLINE sint8 __extractbyte1(__packb a)
{
    sint8 res;
    __asm__ volatile ("extr %0,%1,0,8"
                      :"=d"(res):"d"(a):"memory");
    return res;
}

/** Extract second byte from a __packb
 */
IFX_INLINE sint8 __extractbyte2(__packb a)
{
    sint8 res;
    __asm__ volatile ("extr %0,%1,8,8"
                      :"=d"(res):"d"(a):"memory");
    return res;
}

/** Extract third byte from a __packb
 */
IFX_INLINE sint8 __extractbyte3(__packb a)
{
    sint8 res;
    __asm__ volatile ("extr %0,%1,16,8"
                      :"=d"(res):"d"(a):"memory");
    return res;
}

/** Extract fourth byte from a __packb
 */
IFX_INLINE sint8 __extractbyte4(__packb a)
{
    sint8 res;
    __asm__ volatile ("extr %0,%1,24,8"
                      :"=d"(res):"d"(a):"memory");
    return res;
}

/** Extract first sint16 from a __packhw
 */
IFX_INLINE sint16 __extracthw1(__packhw a)
{
    sint16 res;
    __asm__ volatile ("extr %0,%1,0,16"
                      :"=d"(res):"d"(a):"memory");
    return res;
}

/** Extract second sint16 from a __packhw
 */
IFX_INLINE sint16 __extracthw2(__packhw a)
{
    sint16 res;
    __asm__ volatile ("extr %0,%1,16,16"
                      :"=d"(res):"d"(a):"memory");
    return res;
}

/** Extract first uint8 from a __upackb
 */
IFX_INLINE uint8 __extractubyte1(__upackb a)
{
    uint8 res;
    __asm__ volatile ("extr %0,%1,0,8"
                      :"=d"(res):"d"(a):"memory");
    return res;
}

/** Extract second uint8 from a __upackb
 */
IFX_INLINE uint8 __extractubyte2(__upackb a)
{
    uint8 res;
    __asm__ volatile ("extr %0,%1,8,8"
                      :"=d"(res):"d"(a):"memory");
    return res;
}

/** Extract third uint8 from a __upackb
 */
IFX_INLINE uint8 __extractubyte3(__upackb a)
{
    uint8 res;
    __asm__ volatile ("extr %0,%1,16,8"
                      :"=d"(res):"d"(a):"memory");
    return res;
}

/** Extract fourth uint8 from a __upackb
 */
IFX_INLINE uint8 __extractubyte4(__upackb a)
{
    uint8 res;
    __asm__ volatile ("extr %0,%1,24,8"
                      :"=d"(res):"d"(a):"memory");
    return res;
}

/** Extract first uint16 from a __upackhw
 */
IFX_INLINE uint16 __extractuhw1(__upackhw a)
{
    uint16 res;
    __asm__ volatile ("extr %0,%1,0,16"
                      :"=d"(res):"d"(a):"memory");
    return res;
}

/** Extract second uint16 from a __upackhw
 */
IFX_INLINE uint16 __extractuhw2(__upackhw a)
{
    uint16 res;
    __asm__ volatile ("extr %0,%1,16,16"
                      :"=d"(res):"d"(a):"memory");
    return res;
}

/** Extract first byte from a __packb, via a pointer
 */
IFX_INLINE sint8 __getbyte1(__packb *a)
{
    sint8 res;
    __asm__ volatile ("ld.w %0,[%1]0 \n\
                       extr %0,%0,0,8"
                      :"=d"(res):"a"(a):"memory");
    return res;
}

/** Extract second byte from a __packb, via a pointer
 */
IFX_INLINE sint8 __getbyte2(__packb *a)
{
    sint8 res;
    __asm__ volatile ("ld.w %0,[%1]0 \n\
                       extr %0,%0,8,8"
                      :"=d"(res):"a"(a):"memory");
    return res;
}

/** Extract third byte from a __packb, via a pointer
 */
IFX_INLINE sint8 __getbyte3(__packb *a)
{
    sint8 res;
    __asm__ volatile ("ld.w %0,[%1]0 \n\
                       extr %0,%0,16,8"
                      :"=d"(res):"a"(a):"memory");
    return res;
}

/** Extract fourth byte from a __packb, via a pointer
 */
IFX_INLINE sint8 __getbyte4(__packb *a)
{
    sint8 res;
    __asm__ volatile ("ld.w %0,[%1]0 \n\
                       extr %0,%0,24,8"
                      :"=d"(res):"a"(a):"memory");
    return res;
}

/** Extract first sint16 from a __packhw, via a pointer
 */
IFX_INLINE sint16 __gethw1(__packhw *a)
{
    sint16 res;
    __asm__ volatile ("ld.w %0,[%1]0 \n\
                       extr %0,%0,0,16"
                      :"=d"(res):"a"(a):"memory");
    return res;
}
/** Extract second sint16 from a __packhw, via a pointer
 */
IFX_INLINE sint16 __gethw2(__packhw *a)
{
    sint16 res;
    __asm__ volatile ("ld.w %0,[%1]0 \n\
                       extr %0,%0,16,16"
                      :"=d"(res):"a"(a):"memory");
    return res;
}

/** Extract first uint8 from a __upackb, via a pointer
 */
IFX_INLINE uint8 __getubyte1(__upackb *a)
{
    uint8 res;
    __asm__ volatile ("ld.w %0,[%1]0 \n\
                       extr %0,%0,0,8"
                      :"=d"(res):"a"(a):"memory");
    return res;
}

/** Extract second uint8 from a __upackb, via a pointer
 */
IFX_INLINE uint8 __getubyte2(__upackb *a)
{
    uint8 res;
    __asm__ volatile ("ld.w %0,[%1]0 \n\
                       extr %0,%0,8,8"
                      :"=d"(res):"a"(a):"memory");
    return res;
}

/** Extract third uint8 from a __upackb, via a pointer
 */
IFX_INLINE uint8 __getubyte3(__upackb *a)
{
    uint8 res;
    __asm__ volatile ("ld.w %0,[%1]0 \n\
                       extr %0,%0,16,8"
                      :"=d"(res):"a"(a):"memory");
    return res;
}

/** Extract fourth uint8 from a __upackb, via a pointer
 */
IFX_INLINE uint8 __getubyte4(__upackb *a)
{
    uint8 res;
    __asm__ volatile ("ld.w %0,[%1]0 \n\
                       extr %0,%0,24,8"
                      :"=d"(res):"a"(a):"memory");
    return res;
}

/** Extract first uint16 from a __upackhw, via a pointer
 */
IFX_INLINE uint16 __getuhw1(__upackhw *a)
{
    uint16 res;
    __asm__ volatile ("ld.w %0,[%1]0 \n\
                       extr %0,%0,0,16"
                      :"=d"(res):"a"(a):"memory");
    return res;
}

/** Extract second uint16 from a __upackhw, via a pointer
 */
IFX_INLINE uint16 __getuhw2(__upackhw *a)
{
    uint16 res;
    __asm__ volatile ("ld.w %0,[%1]0 \n\
                       extr %0,%0,16,16"
                      :"=d"(res):"a"(a):"memory");
    return res;
}

/** Initialize __packb with four integers
 */
IFX_INLINE __packb __initpackb(sint8 a, sint8 b, sint8 c, sint8 d)
{
    __packb res;
    __asm__ volatile ("insert %3,%3,%4,8,8 \n\
                       insert %4,%1,%2,8,8 \n\
                       insert %0,%4,%3,16,16 "
                      :"=d"(res):"d"(a), "d"(b), "d"(c), "d"(d):"memory");
    return res;
}

/** Initialize __packb with a long integer
 */
IFX_INLINE __packb __initpackbl(long a)
{
    return (__packb)a;
}

/** Initialize __packhw with two integers
 */
IFX_INLINE __packhw __initpackhw(sint16 a, sint16 b)
{
    __packhw res;
    __asm__ volatile ("insert %0,%1,%2,16,16"
                      :"=d"(res):"d"(a), "d"(b):"memory");
    return res;
}

/** Initialize __packhw with a long integer
 */
IFX_INLINE __packhw __initpackhwl(long a)
{
    return a;
}

/** Initialize __upackb with four unsigned integers
 */
IFX_INLINE __upackb __initupackb(uint8 a, uint8 b, uint8 c, uint8 d)
{
    __upackb res;
    __asm__ volatile ("insert %3,%3,%4,8,8 \n\
                       insert %1,%1,%2,8,8 \n\
                       insert %0,%1,%3,16,16"
                      :"=d"(res):"d"(a), "d"(b), "d"(c), "d"(d):"memory");
    return res;
}

/** Initialize __upackhw with two unsigned integers
 */
IFX_INLINE __upackhw __initupackhw(uint16 a, uint16 b)
{
    __upackhw res;
    __asm__ volatile ("insert %0,%1,%2,16,16"
                      :"=d"(res):"d"(a), "d"(b):"memory");
    return res;
}

/** Insert sint8 into first byte of a __packb
 */
IFX_INLINE __packb __insertbyte1(__packb a, sint8 b)
{
    __packb res;
    __asm__ volatile ("insert %0,%1,%2,0,8"
                      :"=d"(res):"d"(a), "d"(b):"memory");
    return res;
}

/** Insert sint8 into second byte of a __packb
 */
IFX_INLINE __packb __insertbyte2(__packb a, sint8 b)
{
    __packb res;
    __asm__ volatile ("insert %0,%1,%2,8,8"
                      :"=d"(res):"d"(a), "d"(b):"memory");
    return res;
}

/** Insert sint8 into third byte of a __packb
 */
IFX_INLINE __packb __insertbyte3(__packb a, sint8 b)
{
    __packb res;
    __asm__ volatile ("insert %0,%1,%2,16,8"
                      :"=d"(res):"d"(a), "d"(b):"memory");
    return res;
}

/** Insert sint8 into fourth byte of a __packb
 */
IFX_INLINE __packb __insertbyte4(__packb a, sint8 b)
{
    __packb res;
    __asm__ volatile ("insert %0,%1,%2,24,8"
                      :"=d"(res):"d"(a), "d"(b):"memory");
    return res;
}

/** Insert uint8 into first byte of a __upackb
 */
IFX_INLINE __upackb __insertubyte1(__upackb a, uint8 b)
{
    __upackb res;
    __asm__ volatile ("insert %0,%1,%2,0,8"
                      :"=d"(res):"d"(a), "d"(b):"memory");
    return res;
}

/** Insert uint8 into second byte of a __upackb
 */
IFX_INLINE __upackb __insertubyte2(__upackb a, uint8 b)
{
    __upackb res;
    __asm__ volatile ("insert %0,%1,%2,8,8"
                      :"=d"(res):"d"(a), "d"(b):"memory");
    return res;
}

/** Insert uint8 into third byte of a __upackb
 */
IFX_INLINE __upackb __insertubyte3(__upackb a, uint8 b)
{
    __upackb res;
    __asm__ volatile ("insert %0,%1,%2,16,8"
                      :"=d"(res):"d"(a), "d"(b):"memory");
    return res;
}

/** Insert uint8 into fourth byte of a __upackb
 */
IFX_INLINE __upackb __insertubyte4(__upackb a, uint8 b)
{
    __upackb res;
    __asm__ volatile ("insert %0,%1,%2,24,8"
                      :"=d"(res):"d"(a), "d"(b):"memory");
    return res;
}

/** Insert sint16 into first halfword of a __packhw
 */
IFX_INLINE __packhw __inserthw1(__packhw a, sint16 b)
{
    __packhw res;
    __asm__ volatile ("insert %0,%1,%2,0,16"
                      :"=d"(res):"d"(a), "d"(b):"memory");
    return res;
}

/** Insert sint16 into second halfword of a __packhw
 */
IFX_INLINE __packhw __inserthw2(__packhw a, sint16 b)
{
    __packhw res;
    __asm__ volatile ("insert %0,%1,%2,16,16"
                      :"=d"(res):"d"(a), "d"(b):"memory");
    return res;
}

/** Insert uint16 into first halfword of a __upackhw
 */
IFX_INLINE __upackhw __insertuhw1(__upackhw a, uint16 b)
{
    __upackhw res;
    __asm__ volatile ("insert %0,%1,%2,0,16"
                      :"=d"(res):"d"(a), "d"(b):"memory");
    return res;
}

/** Insert uint16 into second halfword of a __upackhw
 */
IFX_INLINE __upackhw __insertuhw2(__upackhw a, uint16 b)
{
    __upackhw res;
    __asm__ volatile ("insert %0,%1,%2,16,16"
                      :"=d"(res):"d"(a), "d"(b):"memory");
    return res;
}

/** Minimum of two __packb values
 */
IFX_INLINE __packb __minb(__packb a, __packb b)
{
    __packb res;
    __asm__ volatile ("min.b %0,%1,%2"
                      :"=d"(res):"d"(a), "d"(b):"memory");
    return res;
}

/** Minimum of two __upackb values
 */
IFX_INLINE __upackb __minbu(__upackb a, __upackb b)
{
    __upackb res;
    __asm__ volatile ("min.bu %0,%1,%2"
                      :"=d"(res):"d"(a), "d"(b):"memory");
    return res;
}

/** Minimum of two __packhw values
 */
IFX_INLINE __packhw __minh(__packhw a, __packhw b)
{
    __packhw res;
    __asm__ volatile ("min.h %0,%1,%2"
                      :"=d"(res):"d"(a), "d"(b):"memory");
    return res;
}

/** Minimum of two __upackhw values
 */
IFX_INLINE __upackhw __minhu(__upackhw a, __upackhw b)
{
    __upackhw res;
    __asm__ volatile ("min.hu %0,%1,%2"
                      :"=d"(res):"d"(a), "d"(b):"memory");
    return res;
}

/** Insert sint8 into first byte of a __packb, via a pointer
 */
IFX_INLINE void __setbyte1(__packb *a, sint8 b)
{
    __asm__ volatile ("ld.w %%d15,[%0] \n\
                       insert %%d15,%%d15,%1,0,8 \n\
                       st.w [%0],%%d15"
                      ::"a"(a), "d"(b):"d15", "memory");
}

/** Insert sint8 into second byte of a __packb, via a pointer
 */
IFX_INLINE void __setbyte2(__packb *a, sint8 b)
{
    __asm__ volatile ("ld.w %%d15,[%0] \n\
                       insert %%d15,%%d15,%1,8,8 \n\
                       st.w [%0],%%d15"
                      ::"a"(a), "d"(b):"d15", "memory");
}

/** Insert sint8 into third byte of a __packb, via a pointer
 */
IFX_INLINE void __setbyte3(__packb *a, sint8 b)
{
    __asm__ volatile ("ld.w %%d15,[%0] \n\
                       insert %%d15,%%d15,%1,16,8 \n\
                       st.w [%0],%%d15"
                      ::"a"(a), "d"(b):"d15", "memory");
}

/** Insert sint8 into fourth byte of a __packb, via a pointer
 */
IFX_INLINE void __setbyte4(__packb *a, sint8 b)
{
    __asm__ volatile ("ld.w %%d15,[%0] \n\
                       insert %%d15,%%d15,%1,24,8 \n\
                       st.w [%0],%%d15"
                      ::"a"(a), "d"(b):"d15", "memory");
}

/** Insert sint16 into first halfword of a __packhw, via a pointer
 */
IFX_INLINE void __sethw1(__packhw *a, sint16 b)
{
    __asm__ volatile ("ld.w %%d15,[%0] \n\
                       insert %%d15,%%d15,%1,0,16 \n\
                       st.w [%0],%%d15"
                      ::"a"(a), "d"(b):"d15", "memory");
}

/** Insert sint16 into second halfword of a __packhw, via a pointer
 */
IFX_INLINE void __sethw2(__packhw *a, sint16 b)
{
    __asm__ volatile ("ld.w %%d15,[%0] \n\
                       insert %%d15,%%d15,%1,16,16 \n\
                       st.w [%0],%%d15"
                      ::"a"(a), "d"(b):"d15", "memory");
}

/** Insert uint8 into first byte of a __upackb, via a pointer
 */
IFX_INLINE void __setubyte1(__upackb *a, uint8 b)
{
    __asm__ volatile ("ld.w %%d15,[%0] \n\
                       insert %%d15,%%d15,%1,0,8 \n\
                       st.w [%0],%%d15"
                      ::"a"(a), "d"(b):"d15", "memory");
}

/** Insert uint8 into second byte of a __upackb, via a pointer
 */
IFX_INLINE void __setubyte2(__upackb *a, uint8 b)
{
    __asm__ volatile ("ld.w %%d15,[%0] \n\
                       insert %%d15,%%d15,%1,8,8 \n\
                       st.w [%0],%%d15"
                      ::"a"(a), "d"(b):"d15", "memory");
}

/** Insert uint8 into third byte of a __upackb, via a pointer
 */
IFX_INLINE void __setubyte3(__upackb *a, uint8 b)
{
    __asm__ volatile ("ld.w %%d15,[%0] \n\
                       insert %%d15,%%d15,%1,16,8 \n\
                       st.w [%0],%%d15"
                      ::"a"(a), "d"(b):"d15", "memory");
}

/** Insert uint8 into fourth byte of a __upackb, via a pointer
 */
IFX_INLINE void __setubyte4(__upackb *a, uint8 b)
{
    __asm__ volatile ("ld.w %%d15,[%0] \n\
                       insert %%d15,%%d15,%1,24,8 \n\
                       st.w [%0],%%d15"
                      ::"a"(a), "d"(b):"d15", "memory");
}

/** Insert uint16 into first halfword of a __upackhw, via a pointer
 */
IFX_INLINE void __setuhw1(__upackhw *a, uint16 b)
{
    __asm__ volatile ("ld.w %%d15,[%0] \n\
                       insert %%d15,%%d15,%1,0,16 \n\
                       st.w [%0],%%d15"
                      ::"a"(a), "d"(b):"d15", "memory");
}

/** Insert uint16 into second halfword of a __upackhw, via a pointer
 */
IFX_INLINE void __setuhw2(__upackhw *a, uint16 b)
{
    __asm__ volatile ("ld.w %%d15,[%0] \n\
                       insert %%d15,%%d15,%1,16,16 \n\
                       st.w [%0],%%d15"
                      ::"a"(a), "d"(b):"d15", "memory");
}

/** \} */

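/* Usage sketch (illustrative): packing four byte lanes and taking a SIMD
 * per-byte minimum against a limit vector; the values are hypothetical and
 * the reconstructed names should be checked against the shipped header.
 *
 * \code
 * __packb v   = __initpackb(12, -3, 100, 7);
 * __packb lim = __initpackbl(0x40404040L);   // 64 in every byte lane
 * __packb m   = __minb(v, lim);              // per-byte minimum
 * sint8   b0  = __extractbyte1(m);           // lane 0 -> 12
 * \endcode
 */
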
/** \defgroup IfxLld_Cpu_Intrinsics_Gnucregister Register Handling
 The next table provides an overview of the intrinsic functions that you can use to access control registers.
 * \ingroup IfxLld_Cpu_Intrinsics_Gnuc
 * \{
 */

/** Return absolute value
 */
#define __abs(a) __builtin_abs(a)

/** Return absolute difference of two integers
 */
IFX_INLINE sint32 __absdif(sint32 a, sint32 b)
{
    sint32 res;
    __asm__ volatile ("absdif %0, %1, %2": "=d" (res) : "d" (a), "d" (b));
    return res;
}

/** Return absolute value with saturation
 */
IFX_INLINE sint32 __abss(sint32 a)
{
    sint32 res;
    __asm__ volatile ("abss %0, %1": "=d" (res) : "d" (a));
    return res;
}

/** Count leading ones in int
 */
IFX_INLINE sint32 __clo(sint32 a)
{
    sint32 res;
    __asm__ volatile ("clo %0,%1":"=d"(res):"d"(a));
    return res;
}

/** Count number of redundant sign bits (all consecutive bits with the same value as bit 31)
 */
IFX_INLINE sint32 __cls(sint32 a)
{
    sint32 res;
    __asm__ volatile ("cls %0,%1":"=d"(res):"d"(a));
    return res;
}

/** Count leading zeros in int
 */
#define __clz(a) __builtin_clz(a)

/** Return absolute double precision floating-point value
 */
IFX_INLINE double __fabs(double d)
{
    double res;
    __asm__ volatile ("insert %0,%1,0,31,1": "=d" (res) : "d" (d):"memory");
    return res;
}

/** Return absolute floating-point value
 */
IFX_INLINE float __fabsf(float f)
{
    float res;
    __asm__ volatile ("insert %0,%1,0,31,1": "=d" (res) : "d" (f):"memory");
    return res;
}

#if !IFXCPU_INTRINSICSGNUC_USE_MACHINE_INTRINSICS
/** Move contents of the addressed core SFR into a data register
 */
#define __mfcr(regaddr) \
    ({ sint32 res; __asm__ volatile ("mfcr %0,%1": "=d" (res) :"i"(regaddr): "memory"); res; })

//({ sint32 res; __asm__ volatile ("mfcr %0,"#regaddr : "=d" (res) : : "memory"); res; })

/** Move contents of a data register (second int) to the addressed core SFR (first int)
 */
#define __mtcr(regaddr,val) __asm__ volatile ("mtcr %0,%1\n\tisync"::"i"(regaddr),"d"(val):"memory")
#endif

/** Return parity
 */
IFX_INLINE sint32 __parity(sint32 a)
{
    sint32 res;
    __asm__ volatile ("parity %0,%1": "=d" (res) : "d" (a):"memory");
    return res;
}

/** Return saturated byte
 */
IFX_INLINE sint8 __satb(sint32 a)
{
    sint8 res;
    __asm__ volatile ("sat.b %0,%1":"=d"(res):"d"(a));
    return res;
}

/** Return saturated uint8
 */
IFX_INLINE uint8 __satbu(uint32 a)
{
    uint8 res;
    __asm__ volatile ("sat.bu %0,%1":"=d"(res):"d"(a));
    return res;
}

/** Return saturated halfword
 */
IFX_INLINE sint16 __sath(sint32 a)
{
    sint16 res;
    __asm__ volatile ("sat.h %0,%1":"=d"(res):"d"(a));
    return res;
}

/** Return saturated unsigned halfword
 */
IFX_INLINE uint16 __sathu(uint32 a)
{
    uint16 res;
    __asm__ volatile ("sat.hu %0,%1":"=d"(res):"d"(a));
    return res;
}

/** \} */

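/* Usage sketch (illustrative): reading a core SFR with __mfcr (defined above,
 * or supplied by machine/intrinsics.h). 0xFE1C is the TriCore CORE_ID
 * register; verify the address for the target derivative.
 *
 * \code
 * sint32 coreId = __mfcr(0xFE1C) & 0x7;   // index of the executing core
 * \endcode
 */
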
/** \defgroup IfxLld_Cpu_Intrinsics_Gnucsaturation Saturation Arithmetic Support
 These intrinsics support saturation arithmetic
 * \ingroup IfxLld_Cpu_Intrinsics_Gnuc
 * \{
 */

/** Add signed with saturation
 */
IFX_INLINE sint32 __adds(sint32 a, sint32 b)
{
    sint32 res;
    __asm__ volatile ("adds %0, %1, %2": "=d" (res) : "d" (a), "d" (b));
    return res;
}

/** Add unsigned with saturation
 */
IFX_INLINE uint32 __addsu(uint32 a, uint32 b)
{
    uint32 res;
    __asm__ volatile ("adds.u %0, %1, %2": "=d" (res) : "d" (a), "d" (b));
    return res;
}

/** Subtract signed with saturation
 */
IFX_INLINE sint32 __subs(sint32 a, sint32 b)
{
    sint32 res;
    __asm__ volatile ("subs %0, %1, %2": "=d" (res) : "d" (a), "d" (b));
    return res;
}

/** Subtract unsigned with saturation
 */
IFX_INLINE uint32 __subsu(uint32 a, uint32 b)
{
    uint32 res;
    __asm__ volatile ("subs.u %0, %1, %2": "=d" (res) : "d" (a), "d" (b));
    return res;
}

/** \} */

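/* Usage sketch (illustrative): saturating arithmetic clamps at the type
 * limits instead of wrapping around on overflow.
 *
 * \code
 * sint32 acc = 0x7FFFFFF0;
 * acc = __adds(acc, 100);   // clamps to 0x7FFFFFFF instead of wrapping
 * \endcode
 */
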
/** \defgroup IfxLld_Cpu_Intrinsics_Gnucsingle_assembly Insert Single Assembly Instruction
 The next table provides an overview of the intrinsic functions that you can use to insert a single assembly
 instruction. You can also use inline assembly, but these intrinsics provide a shorthand for frequently used
 assembly instructions.
 * \ingroup IfxLld_Cpu_Intrinsics_Gnuc
 * \{
 */

/** Insert DEBUG instruction
 */
IFX_INLINE void __debug(void)
{
    __asm__ volatile ("debug" : : : "memory");
}

/** Insert DSYNC instruction
 */
IFX_INLINE void __dsync(void)
{
    __asm__ volatile ("dsync" : : : "memory");
}

/** Insert ISYNC instruction
 */
IFX_INLINE void __isync(void)
{
    __asm__ volatile ("isync" : : : "memory");
}

/** Insert LDMST instruction. Note that all operands must be word-aligned.
 */
IFX_INLINE void __ldmst(volatile void *address, uint32 mask, uint32 value)
{
    __asm__ volatile("mov %H2,%1 \n\
                      ldmst [%0]0,%A2"
                     ::"a"(address), "d"(mask), "d"((long long)value));
}

/** Insert NOP instruction
 */
IFX_INLINE void __nop(void)
{
    __asm__ volatile ("nop" : : : "memory");
}

/** Insert a loop of cnt NOP instructions
 */
IFX_INLINE void __nops(void *cnt)
{
    __asm__ volatile ("0: nop \n\
                       loop %0,0b"
                      ::"a"(((sint8 *)cnt) - 1));
}

/** Insert RSLCX instruction
 */
IFX_INLINE void __rslcx(void)
{
    __asm__ volatile ("rslcx" : : : "memory");
}

/** Insert SVLCX instruction
 */
IFX_INLINE void __svlcx(void)
{
    __asm__ volatile ("svlcx" : : : "memory");
}

/** Insert SWAP instruction. Note that all operands must be word-aligned.
 */
IFX_INLINE uint32 __swap(void *place, uint32 value)
{
    uint32 res;
    __asm__ volatile("swap.w [%1]0,%2":"=d"(res):"a"(place), "0"(value));
    return res;
}

/** Insert n NOP instructions
 */
#define NOP(n) __asm(".rept " #n "\n\tnop\n\t.endr\n")

/** \} */

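/* Usage sketch (illustrative): atomic read-modify-write on a shared word.
 * __ldmst rewrites only the bits selected by the mask in one LDMST
 * instruction; __swap atomically exchanges the whole word. `sharedWord` is
 * hypothetical.
 *
 * \code
 * static volatile uint32 sharedWord;
 * __ldmst(&sharedWord, 0x000000FFU, 0x42U);      // replace low byte atomically
 * uint32 old = __swap((void *)&sharedWord, 0U);  // fetch and clear
 * \endcode
 */
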
/* FIXME use inline instead of #define */
#define __extru(src,start,size) \
    ({ sint32 res; asm volatile (" extr.u\t %0,%1,%2,%3" : "=d" (res) : \
                                 "d" (src),"i" (start),"i" (size) : "memory"); res; })

/* FIXME use inline instead of #define */
#define __setareg(areg,val) \
    { uint32 reg_val = (uint32)val; \
      asm volatile (" mov.a\t %%"#areg",%0"::"d"(reg_val)); }

/** Stop the performance counters: equivalent to __mtcr(CPU_CCTRL, 0);
 */
IFX_INLINE void __stopPerfCounters(void)
{
    __asm__ volatile("mov %%d0,0\n\
                      mtcr 0xFC00,%%d0\n\
                      isync\n"
                     : : :"d0"); /* FIXME check that the parameter d0 is understood by the compiler as a register used by the inline */
}

/** \brief This function is an implementation of a binary semaphore using the compare-and-swap instruction
 * \param address address of the resource
 * \param value the value to write to *address if the comparison succeeds
 * \param condition the expected value at address; the swap occurs only if *address matches condition
 *
 */
IFX_INLINE unsigned int __cmpAndSwap(unsigned int volatile *address,
                                     unsigned int value, unsigned int condition)
{
    /* GNU C compiler with TriCore 1.6 support is required to use the cmpswap instruction */
#ifdef IFX_USE_GNUC_TRICORE_1_6
    __extension__ unsigned long long reg64
        = value | (unsigned long long)condition << 32;

    __asm__ __volatile__ ("cmpswap.w [%[addr]]0, %A[reg]"
                          : [reg] "+d" (reg64)
                          : [addr] "a" (address)
                          : "memory");
    return reg64;
#else
    sint32 ie;
    uint32 retval = 1;
    ie = __disable_and_save();
    if (condition == *address)
    {
        __swap((void *)address, value);
        retval = 0;
    }
    __restore(ie);
    return retval;
#endif
}
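
/* Usage sketch (illustrative): a minimal spinlock built on __cmpAndSwap with
 * 0 = free and 1 = taken. On both paths a return value of 0 means the lock
 * word matched the expected 0 and was claimed.
 *
 * \code
 * static volatile unsigned int lock = 0;
 * while (__cmpAndSwap(&lock, 1U, 0U) != 0U)
 * {
 *     ;   // spin until the previous owner releases
 * }
 * lock = 0;   // release
 * \endcode
 */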
/******************************************************************************/
/* *INDENT-ON* */
#endif /* IFXCPU_INTRINSICSGNUC_H */