change specific int128 -> generic intN

Message ID 201405042109.s44L9oDt020216@greed.delorie.com
State New

Commit Message

DJ Delorie May 4, 2014, 9:09 p.m. UTC
> I'm not aware of any reason those macros need to have decimal values.  I'd 
> suggest removing the precomputed table and printing them in hex, which is 
> easy for values of any precision.

Here's an independent change that removes the decimal table and
replaces it with generated hex values.  I've also included the relevant
output of gcc -E -dM.



 $ ./cc1 -quiet -E -dM dj.c | egrep -i '(min|max)' | grep 0x | sort
#define __INT16_MAX__ 0x7fff
#define __INT20_MAX__ 0x7ffffL
#define __INT32_MAX__ 0x7fffffffL
#define __INT64_MAX__ 0x7fffffffffffffffLL
#define __INT8_MAX__ 0x7f
#define __INTMAX_MAX__ 0x7fffffffffffffffLL
#define __INTPTR_MAX__ 0x7fff
#define __INT_FAST16_MAX__ 0x7fff
#define __INT_FAST32_MAX__ 0x7fffffffL
#define __INT_FAST64_MAX__ 0x7fffffffffffffffLL
#define __INT_FAST8_MAX__ 0x7fff
#define __INT_LEAST16_MAX__ 0x7fff
#define __INT_LEAST32_MAX__ 0x7fffffffL
#define __INT_LEAST64_MAX__ 0x7fffffffffffffffLL
#define __INT_LEAST8_MAX__ 0x7f
#define __INT_MAX__ 0x7fff
#define __LONG_LONG_MAX__ 0x7fffffffffffffffLL
#define __LONG_MAX__ 0x7fffffffL
#define __PTRDIFF_MAX__ 0x7fff
#define __SCHAR_MAX__ 0x7f
#define __SHRT_MAX__ 0x7fff
#define __SIG_ATOMIC_MAX__ 0x7fff
#define __SIZE_MAX__ 0xffffU
#define __UINT16_MAX__ 0xffffU
#define __UINT20_MAX__ 0xfffffUL
#define __UINT32_MAX__ 0xffffffffUL
#define __UINT64_MAX__ 0xffffffffffffffffULL
#define __UINT8_MAX__ 0xff
#define __UINTMAX_MAX__ 0xffffffffffffffffULL
#define __UINTPTR_MAX__ 0xffffU
#define __UINT_FAST16_MAX__ 0xffffU
#define __UINT_FAST32_MAX__ 0xffffffffUL
#define __UINT_FAST64_MAX__ 0xffffffffffffffffULL
#define __UINT_FAST8_MAX__ 0xffffU
#define __UINT_LEAST16_MAX__ 0xffffU
#define __UINT_LEAST32_MAX__ 0xffffffffUL
#define __UINT_LEAST64_MAX__ 0xffffffffffffffffULL
#define __UINT_LEAST8_MAX__ 0xff
#define __WCHAR_MAX__ 0x7fffffffL
#define __WINT_MAX__ 0xffffU
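
For reference, a minimal standalone sketch of the technique (names here
are illustrative, not the GCC internals): the maximum of an N-bit
quantity is N/4 'f' digits, preceded by '1', '3', or '7' when N is not
a multiple of four.

  #include <stdio.h>

  /* Render the maximum value of a BITS-bit quantity as a hex literal.
     BITS % 4 picks the leading digit (1 -> "1", 2 -> "3", 3 -> "7");
     each remaining group of four bits contributes an 'f'.  BUF must
     hold at least BITS/4 + 4 bytes.  */
  static void
  hex_max_for_bits (char *buf, int bits)
  {
    static const char lead[] = "?137";
    *buf++ = '0';
    *buf++ = 'x';
    if (bits % 4)
      *buf++ = lead[bits % 4];
    for (bits -= bits % 4; bits > 0; bits -= 4)
      *buf++ = 'f';
    *buf = '\0';
  }

  int
  main (void)
  {
    char buf[40];
    hex_max_for_bits (buf, 19);  /* signed 20-bit max */
    printf ("%s\n", buf);        /* prints 0x7ffff */
    return 0;
  }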

Comments

Mike Stump May 5, 2014, 6:58 p.m. UTC | #1
On May 4, 2014, at 2:09 PM, DJ Delorie <dj@redhat.com> wrote:
> Here's an independent change that removes the decimal table and
> replaces it with generated hex values.  I've also included the relevant
> output of gcc -E -dM.

> +  /* Allows bit sizes up to 128 bits.  */
> +#define PBOH_SZ (128/4+4)

After tomorrow, this should be:

MAX_BITSIZE_MODE_ANY_INT, not 128.

> +  char value[PBOH_SZ];

:-)
DJ Delorie May 5, 2014, 7:01 p.m. UTC | #2
> After tomorrow, this should be:
> 
> MAX_BITSIZE_MODE_ANY_INT, not 128.

Heh.

Ok, after tomorrow, assume my patch has that instead :-)
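
(Concretely, the buffer-size define would then read something like the
sketch below, assuming MAX_BITSIZE_MODE_ANY_INT from the generated
insn-modes.h; this is the presumed revision, not the committed hunk:)

  /* Allow bit sizes up to the widest integer mode the target has.  */
  #define PBOH_SZ (MAX_BITSIZE_MODE_ANY_INT / 4 + 4)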
Joseph Myers May 7, 2014, 4:43 p.m. UTC | #3
On Sun, 4 May 2014, DJ Delorie wrote:

> > I'm not aware of any reason those macros need to have decimal values.  I'd 
> > suggest removing the precomputed table and printing them in hex, which is 
> > easy for values of any precision.
> 
> Here's an independent change that removes the decimal table and
> replaces it with generated hex values.  I've also included the relevant
> output of gcc -E -dM.

OK (presuming the usual bootstrap and regression test, which should 
provide a reasonably thorough test of this code through the <stdint.h> 
tests).
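
(A hypothetical minimal check of the flavor those tests perform, not an
actual testsuite file:)

  #include <stdint.h>

  /* Fails to compile if the predefined maximum renders the wrong
     value, since the array size then becomes negative.  */
  extern char check32[INT32_MAX == 0x7fffffff ? 1 : -1];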
DJ Delorie May 8, 2014, 3:08 a.m. UTC | #4
> OK (presuming the usual bootstrap and regression test, which should 
> provide a reasonably thorough test of this code through the <stdint.h> 
> tests).

Bootstrapped with and without the patch on x86-64, no regressions.
Committed.  Thanks!

Patch

Index: c-family/c-cppbuiltin.c
===================================================================
--- c-family/c-cppbuiltin.c	(revision 209391)
+++ c-family/c-cppbuiltin.c	(working copy)
@@ -1285,45 +1295,71 @@  builtin_define_constants (const char *ma
 static void
 builtin_define_type_max (const char *macro, tree type)
 {
   builtin_define_type_minmax (NULL, macro, type);
 }
 
+static void
+print_bits_of_hex (char *buf, int bufsz, int count)
+{
+  gcc_assert (bufsz > 3);
+  *buf++ = '0';
+  *buf++ = 'x';
+  bufsz -= 2;
+
+  gcc_assert (count > 0);
+
+  switch (count % 4) {
+  case 0:
+    break;
+  case 1:
+    *buf++ = '1';
+    bufsz --;
+    count -= 1;
+    break;
+  case 2:
+    *buf++ = '3';
+    bufsz --;
+    count -= 2;
+    break;
+  case 3:
+    *buf++ = '7';
+    bufsz --;
+    count -= 3;
+    break;
+  }
+  while (count >= 4)
+    {
+      gcc_assert (bufsz > 1);
+      *buf++ = 'f';
+      bufsz --;
+      count -= 4;
+    }
+  gcc_assert (bufsz > 0);
+  *buf++ = 0;
+}
+
 /* Define MIN_MACRO (if not NULL) and MAX_MACRO for TYPE based on the
    precision of the type.  */
 
 static void
 builtin_define_type_minmax (const char *min_macro, const char *max_macro,
 			    tree type)
 {
-  static const char *const values[]
-    = { "127", "255",
-	"32767", "65535",
-	"2147483647", "4294967295",
-	"9223372036854775807", "18446744073709551615",
-	"170141183460469231731687303715884105727",
-	"340282366920938463463374607431768211455" };
+  /* Allows bit sizes up to 128 bits.  */
+#define PBOH_SZ (128/4+4)
+  char value[PBOH_SZ];
 
-  const char *value, *suffix;
+  const char *suffix;
   char *buf;
-  size_t idx;
+  int bits;
 
-  /* Pre-rendering the values mean we don't have to futz with printing a
-     multi-word decimal value.  There are also a very limited number of
-     precisions that we support, so it's really a waste of time.  */
-  switch (TYPE_PRECISION (type))
-    {
-    case 8:	idx = 0; break;
-    case 16:	idx = 2; break;
-    case 32:	idx = 4; break;
-    case 64:	idx = 6; break;
-    case 128:	idx = 8; break;
-    default:    gcc_unreachable ();
-    }
+  bits = TYPE_PRECISION (type) + (TYPE_UNSIGNED (type) ? 0 : -1);
+
+  print_bits_of_hex (value, PBOH_SZ, bits);
 
-  value = values[idx + TYPE_UNSIGNED (type)];
   suffix = type_suffix (type);
 
   buf = (char *) alloca (strlen (max_macro) + 1 + strlen (value)
                          + strlen (suffix) + 1);
   sprintf (buf, "%s=%s%s", max_macro, value, suffix);