===================================================================
@@ -174,7 +174,7 @@
vm1 = (__vector signed short) (__vector unsigned long long) { __m2, __m1 };
vresult = vec_vpkshss (vm1, vm1);
- return (__m64) ((vector long long) vresult)[0];
+ return (__m64) ((__vector long long) vresult)[0];
}
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
@@ -194,7 +194,7 @@
vm1 = (__vector signed int) (__vector unsigned long long) { __m2, __m1 };
vresult = vec_vpkswss (vm1, vm1);
- return (__m64) ((vector long long) vresult)[0];
+ return (__m64) ((__vector long long) vresult)[0];
}
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
@@ -214,7 +214,7 @@
vm1 = (__vector signed short) (__vector unsigned long long) { __m2, __m1 };
vresult = vec_vpkshus (vm1, vm1);
- return (__m64) ((vector long long) vresult)[0];
+ return (__m64) ((__vector long long) vresult)[0];
}
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
@@ -235,7 +235,7 @@
a = (__vector unsigned char)vec_splats (__m1);
b = (__vector unsigned char)vec_splats (__m2);
c = vec_mergel (a, b);
- return (__m64) ((vector long long) c)[0];
+ return (__m64) ((__vector long long) c)[0];
#else
__m64_union m1, m2, res;
@@ -316,7 +316,7 @@
a = (__vector unsigned char)vec_splats (__m1);
b = (__vector unsigned char)vec_splats (__m2);
c = vec_mergel (a, b);
- return (__m64) ((vector long long) c)[1];
+ return (__m64) ((__vector long long) c)[1];
#else
__m64_union m1, m2, res;
@@ -397,7 +397,7 @@
a = (__vector signed char)vec_splats (__m1);
b = (__vector signed char)vec_splats (__m2);
c = vec_add (a, b);
- return (__m64) ((vector long long) c)[0];
+ return (__m64) ((__vector long long) c)[0];
#else
__m64_union m1, m2, res;
@@ -433,7 +433,7 @@
a = (__vector signed short)vec_splats (__m1);
b = (__vector signed short)vec_splats (__m2);
c = vec_add (a, b);
- return (__m64) ((vector long long) c)[0];
+ return (__m64) ((__vector long long) c)[0];
#else
__m64_union m1, m2, res;
@@ -465,7 +465,7 @@
a = (__vector signed int)vec_splats (__m1);
b = (__vector signed int)vec_splats (__m2);
c = vec_add (a, b);
- return (__m64) ((vector long long) c)[0];
+ return (__m64) ((__vector long long) c)[0];
#else
__m64_union m1, m2, res;
@@ -495,7 +495,7 @@
a = (__vector signed char)vec_splats (__m1);
b = (__vector signed char)vec_splats (__m2);
c = vec_sub (a, b);
- return (__m64) ((vector long long) c)[0];
+ return (__m64) ((__vector long long) c)[0];
#else
__m64_union m1, m2, res;
@@ -531,7 +531,7 @@
a = (__vector signed short)vec_splats (__m1);
b = (__vector signed short)vec_splats (__m2);
c = vec_sub (a, b);
- return (__m64) ((vector long long) c)[0];
+ return (__m64) ((__vector long long) c)[0];
#else
__m64_union m1, m2, res;
@@ -563,7 +563,7 @@
a = (__vector signed int)vec_splats (__m1);
b = (__vector signed int)vec_splats (__m2);
c = vec_sub (a, b);
- return (__m64) ((vector long long) c)[0];
+ return (__m64) ((__vector long long) c)[0];
#else
__m64_union m1, m2, res;
@@ -753,7 +753,7 @@
a = (__vector signed char)vec_splats (__m1);
b = (__vector signed char)vec_splats (__m2);
c = (__vector signed char)vec_cmpgt (a, b);
- return (__m64) ((vector long long) c)[0];
+ return (__m64) ((__vector long long) c)[0];
#else
__m64_union m1, m2, res;
@@ -790,7 +790,7 @@
a = (__vector signed short)vec_splats (__m1);
b = (__vector signed short)vec_splats (__m2);
c = (__vector signed short)vec_cmpeq (a, b);
- return (__m64) ((vector long long) c)[0];
+ return (__m64) ((__vector long long) c)[0];
#else
__m64_union m1, m2, res;
@@ -821,7 +821,7 @@
a = (__vector signed short)vec_splats (__m1);
b = (__vector signed short)vec_splats (__m2);
c = (__vector signed short)vec_cmpgt (a, b);
- return (__m64) ((vector long long) c)[0];
+ return (__m64) ((__vector long long) c)[0];
#else
__m64_union m1, m2, res;
@@ -854,7 +854,7 @@
a = (__vector signed int)vec_splats (__m1);
b = (__vector signed int)vec_splats (__m2);
c = (__vector signed int)vec_cmpeq (a, b);
- return (__m64) ((vector long long) c)[0];
+ return (__m64) ((__vector long long) c)[0];
#else
__m64_union m1, m2, res;
@@ -883,7 +883,7 @@
a = (__vector signed int)vec_splats (__m1);
b = (__vector signed int)vec_splats (__m2);
c = (__vector signed int)vec_cmpgt (a, b);
- return (__m64) ((vector long long) c)[0];
+ return (__m64) ((__vector long long) c)[0];
#else
__m64_union m1, m2, res;
@@ -914,7 +914,7 @@
a = (__vector signed char)vec_splats (__m1);
b = (__vector signed char)vec_splats (__m2);
c = vec_adds (a, b);
- return (__m64) ((vector long long) c)[0];
+ return (__m64) ((__vector long long) c)[0];
}
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
@@ -932,7 +932,7 @@
a = (__vector signed short)vec_splats (__m1);
b = (__vector signed short)vec_splats (__m2);
c = vec_adds (a, b);
- return (__m64) ((vector long long) c)[0];
+ return (__m64) ((__vector long long) c)[0];
}
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
@@ -950,7 +950,7 @@
a = (__vector unsigned char)vec_splats (__m1);
b = (__vector unsigned char)vec_splats (__m2);
c = vec_adds (a, b);
- return (__m64) ((vector long long) c)[0];
+ return (__m64) ((__vector long long) c)[0];
}
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
@@ -969,7 +969,7 @@
a = (__vector unsigned short)vec_splats (__m1);
b = (__vector unsigned short)vec_splats (__m2);
c = vec_adds (a, b);
- return (__m64) ((vector long long) c)[0];
+ return (__m64) ((__vector long long) c)[0];
}
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
@@ -988,7 +988,7 @@
a = (__vector signed char)vec_splats (__m1);
b = (__vector signed char)vec_splats (__m2);
c = vec_subs (a, b);
- return (__m64) ((vector long long) c)[0];
+ return (__m64) ((__vector long long) c)[0];
}
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
@@ -1007,7 +1007,7 @@
a = (__vector signed short)vec_splats (__m1);
b = (__vector signed short)vec_splats (__m2);
c = vec_subs (a, b);
- return (__m64) ((vector long long) c)[0];
+ return (__m64) ((__vector long long) c)[0];
}
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
@@ -1026,7 +1026,7 @@
a = (__vector unsigned char)vec_splats (__m1);
b = (__vector unsigned char)vec_splats (__m2);
c = vec_subs (a, b);
- return (__m64) ((vector long long) c)[0];
+ return (__m64) ((__vector long long) c)[0];
}
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
@@ -1045,7 +1045,7 @@
a = (__vector unsigned short)vec_splats (__m1);
b = (__vector unsigned short)vec_splats (__m2);
c = vec_subs (a, b);
- return (__m64) ((vector long long) c)[0];
+ return (__m64) ((__vector long long) c)[0];
}
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
@@ -1067,7 +1067,7 @@
a = (__vector signed short)vec_splats (__m1);
b = (__vector signed short)vec_splats (__m2);
c = vec_vmsumshm (a, b, zero);
- return (__m64) ((vector long long) c)[0];
+ return (__m64) ((__vector long long) c)[0];
}
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
@@ -1095,7 +1095,7 @@
w1 = vec_vmulosh (a, b);
c = (__vector signed short)vec_perm (w0, w1, xform1);
- return (__m64) ((vector long long) c)[0];
+ return (__m64) ((__vector long long) c)[0];
}
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
@@ -1114,7 +1114,7 @@
a = (__vector signed short)vec_splats (__m1);
b = (__vector signed short)vec_splats (__m2);
c = a * b;
- return (__m64) ((vector long long) c)[0];
+ return (__m64) ((__vector long long) c)[0];
}
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
@@ -1135,7 +1135,7 @@
m = (__vector signed short)vec_splats (__m);
c = (__vector unsigned short)vec_splats ((unsigned short)__count);
r = vec_sl (m, (__vector unsigned short)c);
- return (__m64) ((vector long long) r)[0];
+ return (__m64) ((__vector long long) r)[0];
}
else
return (0);
@@ -1204,7 +1204,7 @@
m = (__vector signed short)vec_splats (__m);
c = (__vector unsigned short)vec_splats ((unsigned short)__count);
r = vec_sra (m, (__vector unsigned short)c);
- return (__m64) ((vector long long) r)[0];
+ return (__m64) ((__vector long long) r)[0];
}
else
return (0);
@@ -1273,7 +1273,7 @@
m = (__vector unsigned short)vec_splats (__m);
c = (__vector unsigned short)vec_splats ((unsigned short)__count);
r = vec_sr (m, (__vector unsigned short)c);
- return (__m64) ((vector long long) r)[0];
+ return (__m64) ((__vector long long) r)[0];
}
else
return (0);
@@ -1416,7 +1416,7 @@
__vector signed short w;
w = (__vector signed short)vec_splats (__w);
- return (__m64) ((vector long long) w)[0];
+ return (__m64) ((__vector long long) w)[0];
#else
__m64_union res;
@@ -1436,7 +1436,7 @@
__vector signed char b;
b = (__vector signed char)vec_splats (__b);
- return (__m64) ((vector long long) b)[0];
+ return (__m64) ((__vector long long) b)[0];
#else
__m64_union res;
===================================================================
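The change repeated throughout both files is the same one-token fix: every element-extraction cast now spells the AltiVec type qualifier as __vector instead of vector (the hunks below also normalize a few definitions to GNU style, with a space before the parameter list). A plausible motivation, judging from the headers involved rather than any commit message here: GCC's altivec.h defines vector, pixel, and bool as ordinary macros for their double-underscore forms and explicitly permits users to #undef them for C++ compatibility, so a system header that relies on bare vector breaks under exactly that supported configuration, while __vector is always recognized. A minimal sketch of the failure mode the rename guards against (hypothetical user code, not part of this patch; assumes a VSX-enabled 64-bit Power target):

#include <altivec.h>

/* altivec.h: "#define vector __vector", with #undef explicitly
   permitted for C++ compatibility.  */
#undef vector

/* "vector" is now an ordinary identifier, free for user code.  */
typedef int vector;

/* The double-underscore keyword still works; a header cast written
   as (vector long long) would no longer parse in this unit.  */
__vector long long
pass_through (__vector long long v)
{
  return v;
}

After the #undef, only the __vector spelling is reserved, which is why the casts in these headers must use it.
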
@@ -996,7 +996,7 @@
rounded = vec_rint(temp);
result = (__vector unsigned long long) vec_cts (rounded, 0);
- return (__m64) ((vector long long) result)[0];
+ return (__m64) ((__vector long long) result)[0];
}
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
@@ -1053,7 +1053,7 @@
temp = (__v4sf) vec_splat ((__vector long long)__A, 0);
result = (__vector unsigned long long) vec_cts (temp, 0);
- return (__m64) ((vector long long) result)[0];
+ return (__m64) ((__vector long long) result)[0];
}
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
@@ -1190,7 +1190,7 @@
/* Convert the four signed 32-bit values in A and B to SPFP form. */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtpi32x2_ps(__m64 __A, __m64 __B)
+_mm_cvtpi32x2_ps (__m64 __A, __m64 __B)
{
__vector signed int vi4;
__vector float vf4;
@@ -1202,7 +1202,7 @@
/* Convert the four SPFP values in A to four signed 16-bit integers. */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtps_pi16(__m128 __A)
+_mm_cvtps_pi16 (__m128 __A)
{
__v4sf rounded;
__vector signed int temp;
@@ -1212,12 +1212,12 @@
temp = vec_cts (rounded, 0);
result = (__vector unsigned long long) vec_pack (temp, temp);
- return (__m64) ((vector long long) result)[0];
+ return (__m64) ((__vector long long) result)[0];
}
/* Convert the four SPFP values in A to four signed 8-bit integers. */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtps_pi8(__m128 __A)
+_mm_cvtps_pi8 (__m128 __A)
{
__v4sf rounded;
__vector signed int tmp_i;
@@ -1229,7 +1229,7 @@
tmp_i = vec_cts (rounded, 0);
tmp_s = vec_pack (tmp_i, zero);
res_v = vec_pack (tmp_s, tmp_s);
- return (__m64) ((vector long long) res_v)[0];
+ return (__m64) ((__vector long long) res_v)[0];
}
/* Selects four specific SPFP values from A and B based on MASK. */
@@ -1429,7 +1429,7 @@
b = (__vector signed short)vec_splats (__B);
c = (__vector __bool short)vec_cmpgt (a, b);
r = vec_sel (b, a, c);
- return (__m64) ((vector long long) r)[0];
+ return (__m64) ((__vector long long) r)[0];
#else
__m64_union m1, m2, res;
@@ -1467,7 +1467,7 @@
b = (__vector unsigned char)vec_splats (__B);
c = (__vector __bool char)vec_cmpgt (a, b);
r = vec_sel (b, a, c);
- return (__m64) ((vector long long) r)[0];
+ return (__m64) ((__vector long long) r)[0];
#else
__m64_union m1, m2, res;
long i;
@@ -1503,7 +1503,7 @@
b = (__vector signed short)vec_splats (__B);
c = (__vector __bool short)vec_cmplt (a, b);
r = vec_sel (b, a, c);
- return (__m64) ((vector long long) r)[0];
+ return (__m64) ((__vector long long) r)[0];
#else
__m64_union m1, m2, res;
@@ -1541,7 +1541,7 @@
b = (__vector unsigned char)vec_splats (__B);
c = (__vector __bool char)vec_cmplt (a, b);
r = vec_sel (b, a, c);
- return (__m64) ((vector long long) r)[0];
+ return (__m64) ((__vector long long) r)[0];
#else
__m64_union m1, m2, res;
long i;
@@ -1600,7 +1600,7 @@
w1 = vec_vmulouh (a, b);
c = (__vector unsigned short)vec_perm (w0, w1, xform1);
- return (__m64) ((vector long long) c)[0];
+ return (__m64) ((__vector long long) c)[0];
}
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
@@ -1643,7 +1643,7 @@
p = vec_splats (t.as_m64);
a = vec_splats (__A);
r = vec_perm (a, a, (__vector unsigned char)p);
- return (__m64) ((vector long long) r)[0];
+ return (__m64) ((__vector long long) r)[0];
}
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
@@ -1683,7 +1683,7 @@
a = (__vector unsigned char)vec_splats (__A);
b = (__vector unsigned char)vec_splats (__B);
c = vec_avg (a, b);
- return (__m64) ((vector long long) c)[0];
+ return (__m64) ((__vector long long) c)[0];
}
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
@@ -1701,7 +1701,7 @@
a = (__vector unsigned short)vec_splats (__A);
b = (__vector unsigned short)vec_splats (__B);
c = vec_avg (a, b);
- return (__m64) ((vector long long) c)[0];
+ return (__m64) ((__vector long long) c)[0];
}
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
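
For reference, every return statement this patch touches follows one idiom: splat the 64-bit __m64 operands across 128-bit registers, perform the AltiVec/VSX operation once, then pull a doubleword back out through GCC's generic vector subscripting. A self-contained sketch of that round trip (demo names are ours, not the header's; assumes gcc -mvsx on a 64-bit Power target):

#include <altivec.h>

/* Stand-in for the header's __m64: a 64-bit integer scalar.  */
typedef long long m64_demo;

/* Mirrors the shape of the byte-add hunk above: widen, operate,
   narrow back to 64 bits.  */
static inline m64_demo
add_pi8_demo (m64_demo a, m64_demo b)
{
  /* vec_splats replicates the 64-bit scalar into both doublewords
     of a 128-bit register; the cast reinterprets it as sixteen
     signed bytes.  */
  __vector signed char va = (__vector signed char) vec_splats (a);
  __vector signed char vb = (__vector signed char) vec_splats (b);
  __vector signed char vc = vec_add (va, vb);

  /* GCC lets vector types be subscripted like arrays; element 0 of
     the long-long view is one doubleword of the packed result.  */
  return (m64_demo) ((__vector long long) vc)[0];
}

For elementwise operations on splatted inputs, both doublewords of the result are identical, so element 0 is an arbitrary but convenient choice; the merge hunks above are the exception, picking element 0 or 1 depending on which doubleword holds the half the intrinsic needs.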