[rs6000] Consistently use '__vector' instead of 'vector'

Message ID b7fab6e1-0131-d831-baed-256f3f2b9ea4@us.ibm.com

Commit Message

Paul A. Clarke Oct. 29, 2018, 7:39 p.m. UTC
Revision r265535 committed changes that used 'vector' instead of the
preferred '__vector'.  '__vector' is preferred because it cannot
conflict with uses of the identifier 'vector' in C++ (e.g. std::vector).
Indeed, gcc/config/rs6000/xmmintrin.h undefines 'vector', leading to
errors:

  gcc/include/xmmintrin.h:999:20: error: 'vector' undeclared (first use in this function); did you mean 'vec_or'?
  gcc/include/xmmintrin.h:999:20: note: each undeclared identifier is reported only once for each function it appears in
  gcc/include/xmmintrin.h:999:26: error: expected ')' before 'long'
  gcc/include/xmmintrin.h:999:37: error: expected ')' before 'result'
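
To illustrate (a minimal sketch, not part of the patch, assuming a
PowerPC target compiled with -maltivec): altivec.h provides 'vector'
as a convenience macro for the '__vector' keyword, so once a header
undefines the macro, only '__vector' remains usable:

  #include <altivec.h>
  #undef vector  /* as gcc/config/rs6000/xmmintrin.h does, for C++ compatibility */

  __vector signed int good = { 1, 2, 3, 4 };  /* OK: '__vector' is the real keyword */
  /* vector signed int bad = { 1, 2, 3, 4 };     error: 'vector' undeclared */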

Also fixed a few whitespace issues.

(Committing as obvious.)

[gcc]

2018-10-29  Paul A. Clarke  <pc@us.ibm.com>

	* gcc/config/rs6000/mmintrin.h (_mm_packs_pi16, _mm_packs_pi32,
	_mm_packs_pu16, _mm_unpackhi_pi8, _mm_unpacklo_pi8, _mm_add_pi8,
	_mm_add_pi16, _mm_add_pi32, _mm_sub_pi8, _mm_sub_pi16, _mm_sub_pi32,
	_mm_cmpgt_pi8, _mm_cmpeq_pi16, _mm_cmpgt_pi16, _mm_cmpeq_pi32,
	_mm_cmpgt_pi32, _mm_adds_pi8, _mm_adds_pi16, _mm_adds_pu8,
	_mm_adds_pu16, _mm_subs_pi8, _mm_subs_pi16, _mm_subs_pu8,
	_mm_subs_pu16, _mm_madd_pi16, _mm_mulhi_pi16, _mm_mullo_pi16,
	_mm_sll_pi16, _mm_sra_pi16, _mm_srl_pi16, _mm_set1_pi16, _mm_set1_pi8):
	Change 'vector' to '__vector'.
	* gcc/config/rs6000/xmmintrin.h (_mm_cvtps_pi32, _mm_cvttps_pi32,
	_mm_cvtps_pi16, _mm_cvtps_pi8, _mm_max_pi16, _mm_max_pu8, _mm_min_pi16,
	_mm_min_pu8, _mm_mulhi_pu16, _mm_shuffle_pi16, _mm_avg_pu8,
	_mm_avg_pu16): Likewise.  And, whitespace corrections.

--
PC

Patch

Index: gcc/config/rs6000/mmintrin.h
===================================================================
diff --git a/trunk/gcc/config/rs6000/mmintrin.h b/trunk/gcc/config/rs6000/mmintrin.h
--- a/trunk/gcc/config/rs6000/mmintrin.h	(revision 265559)
+++ b/trunk/gcc/config/rs6000/mmintrin.h	(working copy)
@@ -174,7 +174,7 @@ 
 
   vm1 = (__vector signed short) (__vector unsigned long long) { __m2, __m1 };
   vresult = vec_vpkshss (vm1, vm1);
-  return (__m64) ((vector long long) vresult)[0];
+  return (__m64) ((__vector long long) vresult)[0];
 }
 
 extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
@@ -194,7 +194,7 @@ 
 
   vm1 = (__vector signed int) (__vector unsigned long long) { __m2, __m1 };
   vresult = vec_vpkswss (vm1, vm1);
-  return (__m64) ((vector long long) vresult)[0];
+  return (__m64) ((__vector long long) vresult)[0];
 }
 
 extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
@@ -214,7 +214,7 @@ 
 
   vm1 = (__vector signed short) (__vector unsigned long long) { __m2, __m1 };
   vresult = vec_vpkshus (vm1, vm1);
-  return (__m64) ((vector long long) vresult)[0];
+  return (__m64) ((__vector long long) vresult)[0];
 }
 
 extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
@@ -235,7 +235,7 @@ 
   a = (__vector unsigned char)vec_splats (__m1);
   b = (__vector unsigned char)vec_splats (__m2);
   c = vec_mergel (a, b);
-  return (__m64) ((vector long long) c)[0];
+  return (__m64) ((__vector long long) c)[0];
 #else
   __m64_union m1, m2, res;
 
@@ -316,7 +316,7 @@ 
   a = (__vector unsigned char)vec_splats (__m1);
   b = (__vector unsigned char)vec_splats (__m2);
   c = vec_mergel (a, b);
-  return (__m64) ((vector long long) c)[1];
+  return (__m64) ((__vector long long) c)[1];
 #else
   __m64_union m1, m2, res;
 
@@ -397,7 +397,7 @@ 
   a = (__vector signed char)vec_splats (__m1);
   b = (__vector signed char)vec_splats (__m2);
   c = vec_add (a, b);
-  return (__m64) ((vector long long) c)[0];
+  return (__m64) ((__vector long long) c)[0];
 #else
   __m64_union m1, m2, res;
 
@@ -433,7 +433,7 @@ 
   a = (__vector signed short)vec_splats (__m1);
   b = (__vector signed short)vec_splats (__m2);
   c = vec_add (a, b);
-  return (__m64) ((vector long long) c)[0];
+  return (__m64) ((__vector long long) c)[0];
 #else
   __m64_union m1, m2, res;
 
@@ -465,7 +465,7 @@ 
   a = (__vector signed int)vec_splats (__m1);
   b = (__vector signed int)vec_splats (__m2);
   c = vec_add (a, b);
-  return (__m64) ((vector long long) c)[0];
+  return (__m64) ((__vector long long) c)[0];
 #else
   __m64_union m1, m2, res;
 
@@ -495,7 +495,7 @@ 
   a = (__vector signed char)vec_splats (__m1);
   b = (__vector signed char)vec_splats (__m2);
   c = vec_sub (a, b);
-  return (__m64) ((vector long long) c)[0];
+  return (__m64) ((__vector long long) c)[0];
 #else
   __m64_union m1, m2, res;
 
@@ -531,7 +531,7 @@ 
   a = (__vector signed short)vec_splats (__m1);
   b = (__vector signed short)vec_splats (__m2);
   c = vec_sub (a, b);
-  return (__m64) ((vector long long) c)[0];
+  return (__m64) ((__vector long long) c)[0];
 #else
   __m64_union m1, m2, res;
 
@@ -563,7 +563,7 @@ 
   a = (__vector signed int)vec_splats (__m1);
   b = (__vector signed int)vec_splats (__m2);
   c = vec_sub (a, b);
-  return (__m64) ((vector long long) c)[0];
+  return (__m64) ((__vector long long) c)[0];
 #else
   __m64_union m1, m2, res;
 
@@ -753,7 +753,7 @@ 
   a = (__vector signed char)vec_splats (__m1);
   b = (__vector signed char)vec_splats (__m2);
   c = (__vector signed char)vec_cmpgt (a, b);
-  return (__m64) ((vector long long) c)[0];
+  return (__m64) ((__vector long long) c)[0];
 #else
   __m64_union m1, m2, res;
 
@@ -790,7 +790,7 @@ 
   a = (__vector signed short)vec_splats (__m1);
   b = (__vector signed short)vec_splats (__m2);
   c = (__vector signed short)vec_cmpeq (a, b);
-  return (__m64) ((vector long long) c)[0];
+  return (__m64) ((__vector long long) c)[0];
 #else
   __m64_union m1, m2, res;
 
@@ -821,7 +821,7 @@ 
   a = (__vector signed short)vec_splats (__m1);
   b = (__vector signed short)vec_splats (__m2);
   c = (__vector signed short)vec_cmpgt (a, b);
-  return (__m64) ((vector long long) c)[0];
+  return (__m64) ((__vector long long) c)[0];
 #else
   __m64_union m1, m2, res;
 
@@ -854,7 +854,7 @@ 
   a = (__vector signed int)vec_splats (__m1);
   b = (__vector signed int)vec_splats (__m2);
   c = (__vector signed int)vec_cmpeq (a, b);
-  return (__m64) ((vector long long) c)[0];
+  return (__m64) ((__vector long long) c)[0];
 #else
   __m64_union m1, m2, res;
 
@@ -883,7 +883,7 @@ 
   a = (__vector signed int)vec_splats (__m1);
   b = (__vector signed int)vec_splats (__m2);
   c = (__vector signed int)vec_cmpgt (a, b);
-  return (__m64) ((vector long long) c)[0];
+  return (__m64) ((__vector long long) c)[0];
 #else
   __m64_union m1, m2, res;
 
@@ -914,7 +914,7 @@ 
   a = (__vector signed char)vec_splats (__m1);
   b = (__vector signed char)vec_splats (__m2);
   c = vec_adds (a, b);
-  return (__m64) ((vector long long) c)[0];
+  return (__m64) ((__vector long long) c)[0];
 }
 
 extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
@@ -932,7 +932,7 @@ 
   a = (__vector signed short)vec_splats (__m1);
   b = (__vector signed short)vec_splats (__m2);
   c = vec_adds (a, b);
-  return (__m64) ((vector long long) c)[0];
+  return (__m64) ((__vector long long) c)[0];
 }
 
 extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
@@ -950,7 +950,7 @@ 
   a = (__vector unsigned char)vec_splats (__m1);
   b = (__vector unsigned char)vec_splats (__m2);
   c = vec_adds (a, b);
-  return (__m64) ((vector long long) c)[0];
+  return (__m64) ((__vector long long) c)[0];
 }
 
 extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
@@ -969,7 +969,7 @@ 
   a = (__vector unsigned short)vec_splats (__m1);
   b = (__vector unsigned short)vec_splats (__m2);
   c = vec_adds (a, b);
-  return (__m64) ((vector long long) c)[0];
+  return (__m64) ((__vector long long) c)[0];
 }
 
 extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
@@ -988,7 +988,7 @@ 
   a = (__vector signed char)vec_splats (__m1);
   b = (__vector signed char)vec_splats (__m2);
   c = vec_subs (a, b);
-  return (__m64) ((vector long long) c)[0];
+  return (__m64) ((__vector long long) c)[0];
 }
 
 extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
@@ -1007,7 +1007,7 @@ 
   a = (__vector signed short)vec_splats (__m1);
   b = (__vector signed short)vec_splats (__m2);
   c = vec_subs (a, b);
-  return (__m64) ((vector long long) c)[0];
+  return (__m64) ((__vector long long) c)[0];
 }
 
 extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
@@ -1026,7 +1026,7 @@ 
   a = (__vector unsigned char)vec_splats (__m1);
   b = (__vector unsigned char)vec_splats (__m2);
   c = vec_subs (a, b);
-  return (__m64) ((vector long long) c)[0];
+  return (__m64) ((__vector long long) c)[0];
 }
 
 extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
@@ -1045,7 +1045,7 @@ 
   a = (__vector unsigned short)vec_splats (__m1);
   b = (__vector unsigned short)vec_splats (__m2);
   c = vec_subs (a, b);
-  return (__m64) ((vector long long) c)[0];
+  return (__m64) ((__vector long long) c)[0];
 }
 
 extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
@@ -1067,7 +1067,7 @@ 
   a = (__vector signed short)vec_splats (__m1);
   b = (__vector signed short)vec_splats (__m2);
   c = vec_vmsumshm (a, b, zero);
-  return (__m64) ((vector long long) c)[0];
+  return (__m64) ((__vector long long) c)[0];
 }
 
 extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
@@ -1095,7 +1095,7 @@ 
   w1 = vec_vmulosh (a, b);
   c = (__vector signed short)vec_perm (w0, w1, xform1);
 
-  return (__m64) ((vector long long) c)[0];
+  return (__m64) ((__vector long long) c)[0];
 }
 
 extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
@@ -1114,7 +1114,7 @@ 
   a = (__vector signed short)vec_splats (__m1);
   b = (__vector signed short)vec_splats (__m2);
   c = a * b;
-  return (__m64) ((vector long long) c)[0];
+  return (__m64) ((__vector long long) c)[0];
 }
 
 extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
@@ -1135,7 +1135,7 @@ 
       m = (__vector signed short)vec_splats (__m);
       c = (__vector unsigned short)vec_splats ((unsigned short)__count);
       r = vec_sl (m, (__vector unsigned short)c);
-      return (__m64) ((vector long long) r)[0];
+      return (__m64) ((__vector long long) r)[0];
     }
   else
   return (0);
@@ -1204,7 +1204,7 @@ 
 	m = (__vector signed short)vec_splats (__m);
 	c = (__vector unsigned short)vec_splats ((unsigned short)__count);
 	r = vec_sra (m, (__vector unsigned short)c);
-        return (__m64) ((vector long long) r)[0];
+        return (__m64) ((__vector long long) r)[0];
     }
   else
   return (0);
@@ -1273,7 +1273,7 @@ 
 	m = (__vector unsigned short)vec_splats (__m);
 	c = (__vector unsigned short)vec_splats ((unsigned short)__count);
 	r = vec_sr (m, (__vector unsigned short)c);
-        return (__m64) ((vector long long) r)[0];
+        return (__m64) ((__vector long long) r)[0];
     }
   else
     return (0);
@@ -1416,7 +1416,7 @@ 
   __vector signed short w;
 
   w = (__vector signed short)vec_splats (__w);
-  return (__m64) ((vector long long) w)[0];
+  return (__m64) ((__vector long long) w)[0];
 #else
   __m64_union res;
 
@@ -1436,7 +1436,7 @@ 
   __vector signed char b;
 
   b = (__vector signed char)vec_splats (__b);
-  return (__m64) ((vector long long) b)[0];
+  return (__m64) ((__vector long long) b)[0];
 #else
   __m64_union res;
 
Index: gcc/config/rs6000/xmmintrin.h
===================================================================
diff --git a/trunk/gcc/config/rs6000/xmmintrin.h b/trunk/gcc/config/rs6000/xmmintrin.h
--- a/trunk/gcc/config/rs6000/xmmintrin.h	(revision 265559)
+++ b/trunk/gcc/config/rs6000/xmmintrin.h	(working copy)
@@ -996,7 +996,7 @@ 
   rounded = vec_rint(temp);
   result = (__vector unsigned long long) vec_cts (rounded, 0);
 
-  return (__m64) ((vector long long) result)[0];
+  return (__m64) ((__vector long long) result)[0];
 }
 
 extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
@@ -1053,7 +1053,7 @@ 
   temp = (__v4sf) vec_splat ((__vector long long)__A, 0);
   result = (__vector unsigned long long) vec_cts (temp, 0);
 
-  return (__m64) ((vector long long) result)[0];
+  return (__m64) ((__vector long long) result)[0];
 }
 
 extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
@@ -1190,7 +1190,7 @@ 
 
 /* Convert the four signed 32-bit values in A and B to SPFP form.  */
 extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtpi32x2_ps(__m64 __A, __m64 __B)
+_mm_cvtpi32x2_ps (__m64 __A, __m64 __B)
 {
   __vector signed int vi4;
   __vector float vf4;
@@ -1202,7 +1202,7 @@ 
 
 /* Convert the four SPFP values in A to four signed 16-bit integers.  */
 extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtps_pi16(__m128 __A)
+_mm_cvtps_pi16 (__m128 __A)
 {
   __v4sf rounded;
   __vector signed int temp;
@@ -1212,12 +1212,12 @@ 
   temp = vec_cts (rounded, 0);
   result = (__vector unsigned long long) vec_pack (temp, temp);
 
-  return (__m64) ((vector long long) result)[0];
+  return (__m64) ((__vector long long) result)[0];
 }
 
 /* Convert the four SPFP values in A to four signed 8-bit integers.  */
 extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtps_pi8(__m128 __A)
+_mm_cvtps_pi8 (__m128 __A)
 {
   __v4sf rounded;
   __vector signed int tmp_i;
@@ -1229,7 +1229,7 @@ 
   tmp_i = vec_cts (rounded, 0);
   tmp_s = vec_pack (tmp_i, zero);
   res_v = vec_pack (tmp_s, tmp_s);
-  return (__m64) ((vector long long) res_v)[0];
+  return (__m64) ((__vector long long) res_v)[0];
 }
 
 /* Selects four specific SPFP values from A and B based on MASK.  */
@@ -1429,7 +1429,7 @@ 
   b = (__vector signed short)vec_splats (__B);
   c = (__vector __bool short)vec_cmpgt (a, b);
   r = vec_sel (b, a, c);
-  return (__m64) ((vector long long) r)[0];
+  return (__m64) ((__vector long long) r)[0];
 #else
   __m64_union m1, m2, res;
 
@@ -1467,7 +1467,7 @@ 
   b = (__vector unsigned char)vec_splats (__B);
   c = (__vector __bool char)vec_cmpgt (a, b);
   r = vec_sel (b, a, c);
-  return (__m64) ((vector long long) r)[0];
+  return (__m64) ((__vector long long) r)[0];
 #else
   __m64_union m1, m2, res;
   long i;
@@ -1503,7 +1503,7 @@ 
   b = (__vector signed short)vec_splats (__B);
   c = (__vector __bool short)vec_cmplt (a, b);
   r = vec_sel (b, a, c);
-  return (__m64) ((vector long long) r)[0];
+  return (__m64) ((__vector long long) r)[0];
 #else
   __m64_union m1, m2, res;
 
@@ -1541,7 +1541,7 @@ 
   b = (__vector unsigned char)vec_splats (__B);
   c = (__vector __bool char)vec_cmplt (a, b);
   r = vec_sel (b, a, c);
-  return (__m64) ((vector long long) r)[0];
+  return (__m64) ((__vector long long) r)[0];
 #else
   __m64_union m1, m2, res;
   long i;
@@ -1600,7 +1600,7 @@ 
   w1 = vec_vmulouh (a, b);
   c = (__vector unsigned short)vec_perm (w0, w1, xform1);
 
-  return (__m64) ((vector long long) c)[0];
+  return (__m64) ((__vector long long) c)[0];
 }
 
 extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
@@ -1643,7 +1643,7 @@ 
   p = vec_splats (t.as_m64);
   a = vec_splats (__A);
   r = vec_perm (a, a, (__vector unsigned char)p);
-  return (__m64) ((vector long long) r)[0];
+  return (__m64) ((__vector long long) r)[0];
 }
 
 extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
@@ -1683,7 +1683,7 @@ 
   a = (__vector unsigned char)vec_splats (__A);
   b = (__vector unsigned char)vec_splats (__B);
   c = vec_avg (a, b);
-  return (__m64) ((vector long long) c)[0];
+  return (__m64) ((__vector long long) c)[0];
 }
 
 extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
@@ -1701,7 +1701,7 @@ 
   a = (__vector unsigned short)vec_splats (__A);
   b = (__vector unsigned short)vec_splats (__B);
   c = vec_avg (a, b);
-  return (__m64) ((vector long long) c)[0];
+  return (__m64) ((__vector long long) c)[0];
 }
 
 extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))