Patch Detail
get: Show a patch.
patch: Update a patch.
put: Update a patch.
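As a quick illustration of the read side of this endpoint, the sketch below fetches the patch shown on this page and prints a few fields from the JSON response. It is a minimal sketch using the Python requests library; the URL and field names are taken from the response below, while the explicit `format=json` parameter (to request JSON rather than the browsable HTML view) is an assumption based on standard Django REST Framework behaviour.

```python
import requests

# Fetch the patch detail as JSON ("?format=api" renders the browsable HTML
# view; "?format=json" is assumed to select the JSON renderer).
url = "http://patchwork.ozlabs.org/api/patches/1475724/"
resp = requests.get(url, params={"format": "json"})
resp.raise_for_status()
patch = resp.json()

# A few of the fields visible in the response below.
print(patch["name"])                # [15/72] softfloat: Rename FloatParts to FloatParts64
print(patch["state"])               # new
print(patch["submitter"]["email"])  # richard.henderson@linaro.org
print(patch["series"][0]["name"])   # Convert floatx80 and float128 to FloatParts
print(patch["mbox"])                # mbox URL for the patch, suitable for applying locally
```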
GET /api/patches/1475724/?format=api
{ "id": 1475724, "url": "http://patchwork.ozlabs.org/api/patches/1475724/?format=api", "web_url": "http://patchwork.ozlabs.org/project/qemu-devel/patch/20210508014802.892561-16-richard.henderson@linaro.org/", "project": { "id": 14, "url": "http://patchwork.ozlabs.org/api/projects/14/?format=api", "name": "QEMU Development", "link_name": "qemu-devel", "list_id": "qemu-devel.nongnu.org", "list_email": "qemu-devel@nongnu.org", "web_url": "", "scm_url": "", "webscm_url": "", "list_archive_url": "", "list_archive_url_format": "", "commit_url_format": "" }, "msgid": "<20210508014802.892561-16-richard.henderson@linaro.org>", "list_archive_url": null, "date": "2021-05-08T01:47:05", "name": "[15/72] softfloat: Rename FloatParts to FloatParts64", "commit_ref": null, "pull_url": null, "state": "new", "archived": false, "hash": "c1f979ac43a3744935c6223d0d22acfc3662a334", "submitter": { "id": 72104, "url": "http://patchwork.ozlabs.org/api/people/72104/?format=api", "name": "Richard Henderson", "email": "richard.henderson@linaro.org" }, "delegate": null, "mbox": "http://patchwork.ozlabs.org/project/qemu-devel/patch/20210508014802.892561-16-richard.henderson@linaro.org/mbox/", "series": [ { "id": 242770, "url": "http://patchwork.ozlabs.org/api/series/242770/?format=api", "web_url": "http://patchwork.ozlabs.org/project/qemu-devel/list/?series=242770", "date": "2021-05-08T01:46:53", "name": "Convert floatx80 and float128 to FloatParts", "version": 1, "mbox": "http://patchwork.ozlabs.org/series/242770/mbox/" } ], "comments": "http://patchwork.ozlabs.org/api/patches/1475724/comments/", "check": "pending", "checks": "http://patchwork.ozlabs.org/api/patches/1475724/checks/", "tags": {}, "related": [], "headers": { "Return-Path": "<qemu-devel-bounces+incoming=patchwork.ozlabs.org@nongnu.org>", "X-Original-To": "incoming@patchwork.ozlabs.org", "Delivered-To": "patchwork-incoming@bilbo.ozlabs.org", "Authentication-Results": [ "ozlabs.org;\n spf=pass (sender SPF authorized) smtp.mailfrom=nongnu.org\n (client-ip=209.51.188.17; helo=lists.gnu.org;\n envelope-from=qemu-devel-bounces+incoming=patchwork.ozlabs.org@nongnu.org;\n receiver=<UNKNOWN>)", "ozlabs.org;\n\tdkim=fail reason=\"signature verification failed\" (2048-bit key;\n unprotected) header.d=linaro.org header.i=@linaro.org header.a=rsa-sha256\n header.s=google header.b=u21InNkF;\n\tdkim-atps=neutral" ], "Received": [ "from lists.gnu.org (lists.gnu.org [209.51.188.17])\n\t(using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits))\n\t(No client certificate requested)\n\tby ozlabs.org (Postfix) with ESMTPS id 4FcVyb2txwz9sWp\n\tfor <incoming@patchwork.ozlabs.org>; Sat, 8 May 2021 12:05:27 +1000 (AEST)", "from localhost ([::1]:57860 helo=lists1p.gnu.org)\n\tby lists.gnu.org with esmtp (Exim 4.90_1)\n\t(envelope-from <qemu-devel-bounces+incoming=patchwork.ozlabs.org@nongnu.org>)\n\tid 1lfCLN-0007sy-Fw\n\tfor incoming@patchwork.ozlabs.org; Fri, 07 May 2021 22:05:25 -0400", "from eggs.gnu.org ([2001:470:142:3::10]:40750)\n by lists.gnu.org with esmtps (TLS1.2:ECDHE_RSA_AES_256_GCM_SHA384:256)\n (Exim 4.90_1) (envelope-from <richard.henderson@linaro.org>)\n id 1lfC4x-0004Cs-9X\n for qemu-devel@nongnu.org; Fri, 07 May 2021 21:48:29 -0400", "from mail-pl1-x62b.google.com ([2607:f8b0:4864:20::62b]:38806)\n by eggs.gnu.org with esmtps (TLS1.2:ECDHE_RSA_AES_128_GCM_SHA256:128)\n (Exim 4.90_1) (envelope-from <richard.henderson@linaro.org>)\n id 1lfC4k-0003at-Ur\n for qemu-devel@nongnu.org; Fri, 07 May 2021 21:48:27 -0400", "by mail-pl1-x62b.google.com with 
SMTP id 69so1535915plc.5\n for <qemu-devel@nongnu.org>; Fri, 07 May 2021 18:48:14 -0700 (PDT)", "from localhost.localdomain ([71.212.144.24])\n by smtp.gmail.com with ESMTPSA id t4sm5819681pfq.165.2021.05.07.18.48.13\n (version=TLS1_3 cipher=TLS_AES_256_GCM_SHA384 bits=256/256);\n Fri, 07 May 2021 18:48:13 -0700 (PDT)" ], "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=linaro.org; s=google;\n h=from:to:cc:subject:date:message-id:in-reply-to:references\n :mime-version:content-transfer-encoding;\n bh=CTft5gBrUD6MnXZkZyjYyUchTRH6b2xyA6TvzbAPqzc=;\n b=u21InNkF/bOCTjlyvEKcsibWT0f6bGinlGaZxEyZRwA/FP3WL2Bwr/lu01xOriI/zg\n b9i4h9P3en63XW6qaMcpqramw9DrZtuVLWn3kQqcai9zq7SUSTKIaTioCDZAaWfna9Ct\n GtIKWesRE1fdPZV7LTiS2MbLUEY8y1bXPkUA6eYZMGounQ4Q1vWjHwe5Wszn8g3HYWYB\n dVNo1nYTJi3d2Ty14NBPPdTV5v8H0eMol46VAt4puY55SuD3hnYkZL3Uq1WJssO/J98I\n BZZRz+NS5asUn6peUdktBwqCJyZRA55EooaIfL32l3T5Z8W/i3NueWNNVzy7B7kZPK73\n CUDw==", "X-Google-DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed;\n d=1e100.net; s=20161025;\n h=x-gm-message-state:from:to:cc:subject:date:message-id:in-reply-to\n :references:mime-version:content-transfer-encoding;\n bh=CTft5gBrUD6MnXZkZyjYyUchTRH6b2xyA6TvzbAPqzc=;\n b=qvhkMESUJ652MBePESFodjCNn1/dBox7Mf7Jg58WDhoBSyQWEGRXC7nxsDXQtm++70\n 3Ch3v3Lkw79UYmkWxYNDqI2pS8TIqpFmF8EGtg9aufZv9XoOxnbn9xvZxNt652S7eVcO\n OeAzHK4p42mh07vTy8knpEhxYDSR/k3nYsDheUxnl4/CKKkGtqqKs5soj+G/hXLuk9ef\n 12twAXO2Ktsw6jRhRg12m7RqeQmICePTu6VLpbZbN9A1Rkhsl+QToWbm6u6n8RRjZkKg\n bok0UiNwP3Ty+0oVI1iN7HqZ/JOOLwtHWoYGRX9vBZdg0sLDWq9MxCn8z0vhTxN2HulX\n ggyg==", "X-Gm-Message-State": "AOAM5332OJS03uQ8yPXEqcZVV5Ro8COM5ChSyS+sR/IWN/rqgATGjFHC\n fk0AfhPM0/uUJgpu0KaPQuvsezgSMsTcQw==", "X-Google-Smtp-Source": "\n ABdhPJx9YhT9Cy8h8KdugV78dbVaC2JOMRcaWc0rIKKNDsGFEmoz9JzJ837Y51PtPx+8NhWLVlonng==", "X-Received": "by 2002:a17:90a:ad09:: with SMTP id\n r9mr27177608pjq.2.1620438493514;\n Fri, 07 May 2021 18:48:13 -0700 (PDT)", "From": "Richard Henderson <richard.henderson@linaro.org>", "To": "qemu-devel@nongnu.org", "Subject": "[PATCH 15/72] softfloat: Rename FloatParts to FloatParts64", "Date": "Fri, 7 May 2021 18:47:05 -0700", "Message-Id": "<20210508014802.892561-16-richard.henderson@linaro.org>", "X-Mailer": "git-send-email 2.25.1", "In-Reply-To": "<20210508014802.892561-1-richard.henderson@linaro.org>", "References": "<20210508014802.892561-1-richard.henderson@linaro.org>", "MIME-Version": "1.0", "Content-Transfer-Encoding": "8bit", "Received-SPF": "pass client-ip=2607:f8b0:4864:20::62b;\n envelope-from=richard.henderson@linaro.org; helo=mail-pl1-x62b.google.com", "X-Spam_score_int": "-20", "X-Spam_score": "-2.1", "X-Spam_bar": "--", "X-Spam_report": "(-2.1 / 5.0 requ) BAYES_00=-1.9, DKIM_SIGNED=0.1,\n DKIM_VALID=-0.1, DKIM_VALID_AU=-0.1, DKIM_VALID_EF=-0.1,\n RCVD_IN_DNSWL_NONE=-0.0001, SPF_HELO_NONE=0.001,\n SPF_PASS=-0.001 autolearn=ham autolearn_force=no", "X-Spam_action": "no action", "X-BeenThere": "qemu-devel@nongnu.org", "X-Mailman-Version": "2.1.23", "Precedence": "list", "List-Id": "<qemu-devel.nongnu.org>", "List-Unsubscribe": "<https://lists.nongnu.org/mailman/options/qemu-devel>,\n <mailto:qemu-devel-request@nongnu.org?subject=unsubscribe>", "List-Archive": "<https://lists.nongnu.org/archive/html/qemu-devel>", "List-Post": "<mailto:qemu-devel@nongnu.org>", "List-Help": "<mailto:qemu-devel-request@nongnu.org?subject=help>", "List-Subscribe": "<https://lists.nongnu.org/mailman/listinfo/qemu-devel>,\n <mailto:qemu-devel-request@nongnu.org?subject=subscribe>", "Cc": "alex.bennee@linaro.org, david@redhat.com", 
"Errors-To": "qemu-devel-bounces+incoming=patchwork.ozlabs.org@nongnu.org", "Sender": "\"Qemu-devel\"\n <qemu-devel-bounces+incoming=patchwork.ozlabs.org@nongnu.org>" }, "content": "Signed-off-by: Richard Henderson <richard.henderson@linaro.org>\n---\n fpu/softfloat.c | 362 ++++++++++++++++-----------------\n fpu/softfloat-specialize.c.inc | 6 +-\n 2 files changed, 184 insertions(+), 184 deletions(-)", "diff": "diff --git a/fpu/softfloat.c b/fpu/softfloat.c\nindex 6589f00b23..27b51659c9 100644\n--- a/fpu/softfloat.c\n+++ b/fpu/softfloat.c\n@@ -515,7 +515,7 @@ typedef struct {\n int32_t exp;\n FloatClass cls;\n bool sign;\n-} FloatParts;\n+} FloatParts64;\n \n #define DECOMPOSED_BINARY_POINT 63\n #define DECOMPOSED_IMPLICIT_BIT (1ull << DECOMPOSED_BINARY_POINT)\n@@ -580,11 +580,11 @@ static const FloatFmt float64_params = {\n };\n \n /* Unpack a float to parts, but do not canonicalize. */\n-static inline FloatParts unpack_raw(FloatFmt fmt, uint64_t raw)\n+static inline FloatParts64 unpack_raw(FloatFmt fmt, uint64_t raw)\n {\n const int sign_pos = fmt.frac_size + fmt.exp_size;\n \n- return (FloatParts) {\n+ return (FloatParts64) {\n .cls = float_class_unclassified,\n .sign = extract64(raw, sign_pos, 1),\n .exp = extract64(raw, fmt.frac_size, fmt.exp_size),\n@@ -592,50 +592,50 @@ static inline FloatParts unpack_raw(FloatFmt fmt, uint64_t raw)\n };\n }\n \n-static inline FloatParts float16_unpack_raw(float16 f)\n+static inline FloatParts64 float16_unpack_raw(float16 f)\n {\n return unpack_raw(float16_params, f);\n }\n \n-static inline FloatParts bfloat16_unpack_raw(bfloat16 f)\n+static inline FloatParts64 bfloat16_unpack_raw(bfloat16 f)\n {\n return unpack_raw(bfloat16_params, f);\n }\n \n-static inline FloatParts float32_unpack_raw(float32 f)\n+static inline FloatParts64 float32_unpack_raw(float32 f)\n {\n return unpack_raw(float32_params, f);\n }\n \n-static inline FloatParts float64_unpack_raw(float64 f)\n+static inline FloatParts64 float64_unpack_raw(float64 f)\n {\n return unpack_raw(float64_params, f);\n }\n \n /* Pack a float from parts, but do not canonicalize. */\n-static inline uint64_t pack_raw(FloatFmt fmt, FloatParts p)\n+static inline uint64_t pack_raw(FloatFmt fmt, FloatParts64 p)\n {\n const int sign_pos = fmt.frac_size + fmt.exp_size;\n uint64_t ret = deposit64(p.frac, fmt.frac_size, fmt.exp_size, p.exp);\n return deposit64(ret, sign_pos, 1, p.sign);\n }\n \n-static inline float16 float16_pack_raw(FloatParts p)\n+static inline float16 float16_pack_raw(FloatParts64 p)\n {\n return make_float16(pack_raw(float16_params, p));\n }\n \n-static inline bfloat16 bfloat16_pack_raw(FloatParts p)\n+static inline bfloat16 bfloat16_pack_raw(FloatParts64 p)\n {\n return pack_raw(bfloat16_params, p);\n }\n \n-static inline float32 float32_pack_raw(FloatParts p)\n+static inline float32 float32_pack_raw(FloatParts64 p)\n {\n return make_float32(pack_raw(float32_params, p));\n }\n \n-static inline float64 float64_pack_raw(FloatParts p)\n+static inline float64 float64_pack_raw(FloatParts64 p)\n {\n return make_float64(pack_raw(float64_params, p));\n }\n@@ -651,7 +651,7 @@ static inline float64 float64_pack_raw(FloatParts p)\n #include \"softfloat-specialize.c.inc\"\n \n /* Canonicalize EXP and FRAC, setting CLS. 
*/\n-static FloatParts sf_canonicalize(FloatParts part, const FloatFmt *parm,\n+static FloatParts64 sf_canonicalize(FloatParts64 part, const FloatFmt *parm,\n float_status *status)\n {\n if (part.exp == parm->exp_max && !parm->arm_althp) {\n@@ -689,7 +689,7 @@ static FloatParts sf_canonicalize(FloatParts part, const FloatFmt *parm,\n * by EXP_BIAS and must be bounded by [EXP_MAX-1, 0].\n */\n \n-static FloatParts round_canonical(FloatParts p, float_status *s,\n+static FloatParts64 round_canonical(FloatParts64 p, float_status *s,\n const FloatFmt *parm)\n {\n const uint64_t frac_lsb = parm->frac_lsb;\n@@ -838,59 +838,59 @@ static FloatParts round_canonical(FloatParts p, float_status *s,\n }\n \n /* Explicit FloatFmt version */\n-static FloatParts float16a_unpack_canonical(float16 f, float_status *s,\n+static FloatParts64 float16a_unpack_canonical(float16 f, float_status *s,\n const FloatFmt *params)\n {\n return sf_canonicalize(float16_unpack_raw(f), params, s);\n }\n \n-static FloatParts float16_unpack_canonical(float16 f, float_status *s)\n+static FloatParts64 float16_unpack_canonical(float16 f, float_status *s)\n {\n return float16a_unpack_canonical(f, s, &float16_params);\n }\n \n-static FloatParts bfloat16_unpack_canonical(bfloat16 f, float_status *s)\n+static FloatParts64 bfloat16_unpack_canonical(bfloat16 f, float_status *s)\n {\n return sf_canonicalize(bfloat16_unpack_raw(f), &bfloat16_params, s);\n }\n \n-static float16 float16a_round_pack_canonical(FloatParts p, float_status *s,\n+static float16 float16a_round_pack_canonical(FloatParts64 p, float_status *s,\n const FloatFmt *params)\n {\n return float16_pack_raw(round_canonical(p, s, params));\n }\n \n-static float16 float16_round_pack_canonical(FloatParts p, float_status *s)\n+static float16 float16_round_pack_canonical(FloatParts64 p, float_status *s)\n {\n return float16a_round_pack_canonical(p, s, &float16_params);\n }\n \n-static bfloat16 bfloat16_round_pack_canonical(FloatParts p, float_status *s)\n+static bfloat16 bfloat16_round_pack_canonical(FloatParts64 p, float_status *s)\n {\n return bfloat16_pack_raw(round_canonical(p, s, &bfloat16_params));\n }\n \n-static FloatParts float32_unpack_canonical(float32 f, float_status *s)\n+static FloatParts64 float32_unpack_canonical(float32 f, float_status *s)\n {\n return sf_canonicalize(float32_unpack_raw(f), &float32_params, s);\n }\n \n-static float32 float32_round_pack_canonical(FloatParts p, float_status *s)\n+static float32 float32_round_pack_canonical(FloatParts64 p, float_status *s)\n {\n return float32_pack_raw(round_canonical(p, s, &float32_params));\n }\n \n-static FloatParts float64_unpack_canonical(float64 f, float_status *s)\n+static FloatParts64 float64_unpack_canonical(float64 f, float_status *s)\n {\n return sf_canonicalize(float64_unpack_raw(f), &float64_params, s);\n }\n \n-static float64 float64_round_pack_canonical(FloatParts p, float_status *s)\n+static float64 float64_round_pack_canonical(FloatParts64 p, float_status *s)\n {\n return float64_pack_raw(round_canonical(p, s, &float64_params));\n }\n \n-static FloatParts return_nan(FloatParts a, float_status *s)\n+static FloatParts64 return_nan(FloatParts64 a, float_status *s)\n {\n g_assert(is_nan(a.cls));\n if (is_snan(a.cls)) {\n@@ -904,7 +904,7 @@ static FloatParts return_nan(FloatParts a, float_status *s)\n return parts_default_nan(s);\n }\n \n-static FloatParts pick_nan(FloatParts a, FloatParts b, float_status *s)\n+static FloatParts64 pick_nan(FloatParts64 a, FloatParts64 b, float_status *s)\n {\n if 
(is_snan(a.cls) || is_snan(b.cls)) {\n float_raise(float_flag_invalid, s);\n@@ -925,7 +925,7 @@ static FloatParts pick_nan(FloatParts a, FloatParts b, float_status *s)\n return a;\n }\n \n-static FloatParts pick_nan_muladd(FloatParts a, FloatParts b, FloatParts c,\n+static FloatParts64 pick_nan_muladd(FloatParts64 a, FloatParts64 b, FloatParts64 c,\n bool inf_zero, float_status *s)\n {\n int which;\n@@ -971,7 +971,7 @@ static FloatParts pick_nan_muladd(FloatParts a, FloatParts b, FloatParts c,\n * Arithmetic.\n */\n \n-static FloatParts addsub_floats(FloatParts a, FloatParts b, bool subtract,\n+static FloatParts64 addsub_floats(FloatParts64 a, FloatParts64 b, bool subtract,\n float_status *s)\n {\n bool a_sign = a.sign;\n@@ -1062,18 +1062,18 @@ static FloatParts addsub_floats(FloatParts a, FloatParts b, bool subtract,\n \n float16 QEMU_FLATTEN float16_add(float16 a, float16 b, float_status *status)\n {\n- FloatParts pa = float16_unpack_canonical(a, status);\n- FloatParts pb = float16_unpack_canonical(b, status);\n- FloatParts pr = addsub_floats(pa, pb, false, status);\n+ FloatParts64 pa = float16_unpack_canonical(a, status);\n+ FloatParts64 pb = float16_unpack_canonical(b, status);\n+ FloatParts64 pr = addsub_floats(pa, pb, false, status);\n \n return float16_round_pack_canonical(pr, status);\n }\n \n float16 QEMU_FLATTEN float16_sub(float16 a, float16 b, float_status *status)\n {\n- FloatParts pa = float16_unpack_canonical(a, status);\n- FloatParts pb = float16_unpack_canonical(b, status);\n- FloatParts pr = addsub_floats(pa, pb, true, status);\n+ FloatParts64 pa = float16_unpack_canonical(a, status);\n+ FloatParts64 pb = float16_unpack_canonical(b, status);\n+ FloatParts64 pr = addsub_floats(pa, pb, true, status);\n \n return float16_round_pack_canonical(pr, status);\n }\n@@ -1081,9 +1081,9 @@ float16 QEMU_FLATTEN float16_sub(float16 a, float16 b, float_status *status)\n static float32 QEMU_SOFTFLOAT_ATTR\n soft_f32_addsub(float32 a, float32 b, bool subtract, float_status *status)\n {\n- FloatParts pa = float32_unpack_canonical(a, status);\n- FloatParts pb = float32_unpack_canonical(b, status);\n- FloatParts pr = addsub_floats(pa, pb, subtract, status);\n+ FloatParts64 pa = float32_unpack_canonical(a, status);\n+ FloatParts64 pb = float32_unpack_canonical(b, status);\n+ FloatParts64 pr = addsub_floats(pa, pb, subtract, status);\n \n return float32_round_pack_canonical(pr, status);\n }\n@@ -1101,9 +1101,9 @@ static inline float32 soft_f32_sub(float32 a, float32 b, float_status *status)\n static float64 QEMU_SOFTFLOAT_ATTR\n soft_f64_addsub(float64 a, float64 b, bool subtract, float_status *status)\n {\n- FloatParts pa = float64_unpack_canonical(a, status);\n- FloatParts pb = float64_unpack_canonical(b, status);\n- FloatParts pr = addsub_floats(pa, pb, subtract, status);\n+ FloatParts64 pa = float64_unpack_canonical(a, status);\n+ FloatParts64 pb = float64_unpack_canonical(b, status);\n+ FloatParts64 pr = addsub_floats(pa, pb, subtract, status);\n \n return float64_round_pack_canonical(pr, status);\n }\n@@ -1199,18 +1199,18 @@ float64_sub(float64 a, float64 b, float_status *s)\n */\n bfloat16 QEMU_FLATTEN bfloat16_add(bfloat16 a, bfloat16 b, float_status *status)\n {\n- FloatParts pa = bfloat16_unpack_canonical(a, status);\n- FloatParts pb = bfloat16_unpack_canonical(b, status);\n- FloatParts pr = addsub_floats(pa, pb, false, status);\n+ FloatParts64 pa = bfloat16_unpack_canonical(a, status);\n+ FloatParts64 pb = bfloat16_unpack_canonical(b, status);\n+ FloatParts64 pr = addsub_floats(pa, 
pb, false, status);\n \n return bfloat16_round_pack_canonical(pr, status);\n }\n \n bfloat16 QEMU_FLATTEN bfloat16_sub(bfloat16 a, bfloat16 b, float_status *status)\n {\n- FloatParts pa = bfloat16_unpack_canonical(a, status);\n- FloatParts pb = bfloat16_unpack_canonical(b, status);\n- FloatParts pr = addsub_floats(pa, pb, true, status);\n+ FloatParts64 pa = bfloat16_unpack_canonical(a, status);\n+ FloatParts64 pb = bfloat16_unpack_canonical(b, status);\n+ FloatParts64 pr = addsub_floats(pa, pb, true, status);\n \n return bfloat16_round_pack_canonical(pr, status);\n }\n@@ -1221,7 +1221,7 @@ bfloat16 QEMU_FLATTEN bfloat16_sub(bfloat16 a, bfloat16 b, float_status *status)\n * for Binary Floating-Point Arithmetic.\n */\n \n-static FloatParts mul_floats(FloatParts a, FloatParts b, float_status *s)\n+static FloatParts64 mul_floats(FloatParts64 a, FloatParts64 b, float_status *s)\n {\n bool sign = a.sign ^ b.sign;\n \n@@ -1267,9 +1267,9 @@ static FloatParts mul_floats(FloatParts a, FloatParts b, float_status *s)\n \n float16 QEMU_FLATTEN float16_mul(float16 a, float16 b, float_status *status)\n {\n- FloatParts pa = float16_unpack_canonical(a, status);\n- FloatParts pb = float16_unpack_canonical(b, status);\n- FloatParts pr = mul_floats(pa, pb, status);\n+ FloatParts64 pa = float16_unpack_canonical(a, status);\n+ FloatParts64 pb = float16_unpack_canonical(b, status);\n+ FloatParts64 pr = mul_floats(pa, pb, status);\n \n return float16_round_pack_canonical(pr, status);\n }\n@@ -1277,9 +1277,9 @@ float16 QEMU_FLATTEN float16_mul(float16 a, float16 b, float_status *status)\n static float32 QEMU_SOFTFLOAT_ATTR\n soft_f32_mul(float32 a, float32 b, float_status *status)\n {\n- FloatParts pa = float32_unpack_canonical(a, status);\n- FloatParts pb = float32_unpack_canonical(b, status);\n- FloatParts pr = mul_floats(pa, pb, status);\n+ FloatParts64 pa = float32_unpack_canonical(a, status);\n+ FloatParts64 pb = float32_unpack_canonical(b, status);\n+ FloatParts64 pr = mul_floats(pa, pb, status);\n \n return float32_round_pack_canonical(pr, status);\n }\n@@ -1287,9 +1287,9 @@ soft_f32_mul(float32 a, float32 b, float_status *status)\n static float64 QEMU_SOFTFLOAT_ATTR\n soft_f64_mul(float64 a, float64 b, float_status *status)\n {\n- FloatParts pa = float64_unpack_canonical(a, status);\n- FloatParts pb = float64_unpack_canonical(b, status);\n- FloatParts pr = mul_floats(pa, pb, status);\n+ FloatParts64 pa = float64_unpack_canonical(a, status);\n+ FloatParts64 pb = float64_unpack_canonical(b, status);\n+ FloatParts64 pr = mul_floats(pa, pb, status);\n \n return float64_round_pack_canonical(pr, status);\n }\n@@ -1325,9 +1325,9 @@ float64_mul(float64 a, float64 b, float_status *s)\n \n bfloat16 QEMU_FLATTEN bfloat16_mul(bfloat16 a, bfloat16 b, float_status *status)\n {\n- FloatParts pa = bfloat16_unpack_canonical(a, status);\n- FloatParts pb = bfloat16_unpack_canonical(b, status);\n- FloatParts pr = mul_floats(pa, pb, status);\n+ FloatParts64 pa = bfloat16_unpack_canonical(a, status);\n+ FloatParts64 pb = bfloat16_unpack_canonical(b, status);\n+ FloatParts64 pr = mul_floats(pa, pb, status);\n \n return bfloat16_round_pack_canonical(pr, status);\n }\n@@ -1344,7 +1344,7 @@ bfloat16 QEMU_FLATTEN bfloat16_mul(bfloat16 a, bfloat16 b, float_status *status)\n * NaNs.)\n */\n \n-static FloatParts muladd_floats(FloatParts a, FloatParts b, FloatParts c,\n+static FloatParts64 muladd_floats(FloatParts64 a, FloatParts64 b, FloatParts64 c,\n int flags, float_status *s)\n {\n bool inf_zero, p_sign;\n@@ -1520,10 +1520,10 @@ 
static FloatParts muladd_floats(FloatParts a, FloatParts b, FloatParts c,\n float16 QEMU_FLATTEN float16_muladd(float16 a, float16 b, float16 c,\n int flags, float_status *status)\n {\n- FloatParts pa = float16_unpack_canonical(a, status);\n- FloatParts pb = float16_unpack_canonical(b, status);\n- FloatParts pc = float16_unpack_canonical(c, status);\n- FloatParts pr = muladd_floats(pa, pb, pc, flags, status);\n+ FloatParts64 pa = float16_unpack_canonical(a, status);\n+ FloatParts64 pb = float16_unpack_canonical(b, status);\n+ FloatParts64 pc = float16_unpack_canonical(c, status);\n+ FloatParts64 pr = muladd_floats(pa, pb, pc, flags, status);\n \n return float16_round_pack_canonical(pr, status);\n }\n@@ -1532,10 +1532,10 @@ static float32 QEMU_SOFTFLOAT_ATTR\n soft_f32_muladd(float32 a, float32 b, float32 c, int flags,\n float_status *status)\n {\n- FloatParts pa = float32_unpack_canonical(a, status);\n- FloatParts pb = float32_unpack_canonical(b, status);\n- FloatParts pc = float32_unpack_canonical(c, status);\n- FloatParts pr = muladd_floats(pa, pb, pc, flags, status);\n+ FloatParts64 pa = float32_unpack_canonical(a, status);\n+ FloatParts64 pb = float32_unpack_canonical(b, status);\n+ FloatParts64 pc = float32_unpack_canonical(c, status);\n+ FloatParts64 pr = muladd_floats(pa, pb, pc, flags, status);\n \n return float32_round_pack_canonical(pr, status);\n }\n@@ -1544,10 +1544,10 @@ static float64 QEMU_SOFTFLOAT_ATTR\n soft_f64_muladd(float64 a, float64 b, float64 c, int flags,\n float_status *status)\n {\n- FloatParts pa = float64_unpack_canonical(a, status);\n- FloatParts pb = float64_unpack_canonical(b, status);\n- FloatParts pc = float64_unpack_canonical(c, status);\n- FloatParts pr = muladd_floats(pa, pb, pc, flags, status);\n+ FloatParts64 pa = float64_unpack_canonical(a, status);\n+ FloatParts64 pb = float64_unpack_canonical(b, status);\n+ FloatParts64 pc = float64_unpack_canonical(c, status);\n+ FloatParts64 pr = muladd_floats(pa, pb, pc, flags, status);\n \n return float64_round_pack_canonical(pr, status);\n }\n@@ -1705,10 +1705,10 @@ float64_muladd(float64 xa, float64 xb, float64 xc, int flags, float_status *s)\n bfloat16 QEMU_FLATTEN bfloat16_muladd(bfloat16 a, bfloat16 b, bfloat16 c,\n int flags, float_status *status)\n {\n- FloatParts pa = bfloat16_unpack_canonical(a, status);\n- FloatParts pb = bfloat16_unpack_canonical(b, status);\n- FloatParts pc = bfloat16_unpack_canonical(c, status);\n- FloatParts pr = muladd_floats(pa, pb, pc, flags, status);\n+ FloatParts64 pa = bfloat16_unpack_canonical(a, status);\n+ FloatParts64 pb = bfloat16_unpack_canonical(b, status);\n+ FloatParts64 pc = bfloat16_unpack_canonical(c, status);\n+ FloatParts64 pr = muladd_floats(pa, pb, pc, flags, status);\n \n return bfloat16_round_pack_canonical(pr, status);\n }\n@@ -1719,7 +1719,7 @@ bfloat16 QEMU_FLATTEN bfloat16_muladd(bfloat16 a, bfloat16 b, bfloat16 c,\n * the IEC/IEEE Standard for Binary Floating-Point Arithmetic.\n */\n \n-static FloatParts div_floats(FloatParts a, FloatParts b, float_status *s)\n+static FloatParts64 div_floats(FloatParts64 a, FloatParts64 b, float_status *s)\n {\n bool sign = a.sign ^ b.sign;\n \n@@ -1786,9 +1786,9 @@ static FloatParts div_floats(FloatParts a, FloatParts b, float_status *s)\n \n float16 float16_div(float16 a, float16 b, float_status *status)\n {\n- FloatParts pa = float16_unpack_canonical(a, status);\n- FloatParts pb = float16_unpack_canonical(b, status);\n- FloatParts pr = div_floats(pa, pb, status);\n+ FloatParts64 pa = float16_unpack_canonical(a, 
status);\n+ FloatParts64 pb = float16_unpack_canonical(b, status);\n+ FloatParts64 pr = div_floats(pa, pb, status);\n \n return float16_round_pack_canonical(pr, status);\n }\n@@ -1796,9 +1796,9 @@ float16 float16_div(float16 a, float16 b, float_status *status)\n static float32 QEMU_SOFTFLOAT_ATTR\n soft_f32_div(float32 a, float32 b, float_status *status)\n {\n- FloatParts pa = float32_unpack_canonical(a, status);\n- FloatParts pb = float32_unpack_canonical(b, status);\n- FloatParts pr = div_floats(pa, pb, status);\n+ FloatParts64 pa = float32_unpack_canonical(a, status);\n+ FloatParts64 pb = float32_unpack_canonical(b, status);\n+ FloatParts64 pr = div_floats(pa, pb, status);\n \n return float32_round_pack_canonical(pr, status);\n }\n@@ -1806,9 +1806,9 @@ soft_f32_div(float32 a, float32 b, float_status *status)\n static float64 QEMU_SOFTFLOAT_ATTR\n soft_f64_div(float64 a, float64 b, float_status *status)\n {\n- FloatParts pa = float64_unpack_canonical(a, status);\n- FloatParts pb = float64_unpack_canonical(b, status);\n- FloatParts pr = div_floats(pa, pb, status);\n+ FloatParts64 pa = float64_unpack_canonical(a, status);\n+ FloatParts64 pb = float64_unpack_canonical(b, status);\n+ FloatParts64 pr = div_floats(pa, pb, status);\n \n return float64_round_pack_canonical(pr, status);\n }\n@@ -1878,9 +1878,9 @@ float64_div(float64 a, float64 b, float_status *s)\n \n bfloat16 bfloat16_div(bfloat16 a, bfloat16 b, float_status *status)\n {\n- FloatParts pa = bfloat16_unpack_canonical(a, status);\n- FloatParts pb = bfloat16_unpack_canonical(b, status);\n- FloatParts pr = div_floats(pa, pb, status);\n+ FloatParts64 pa = bfloat16_unpack_canonical(a, status);\n+ FloatParts64 pb = bfloat16_unpack_canonical(b, status);\n+ FloatParts64 pr = div_floats(pa, pb, status);\n \n return bfloat16_round_pack_canonical(pr, status);\n }\n@@ -1896,7 +1896,7 @@ bfloat16 bfloat16_div(bfloat16 a, bfloat16 b, float_status *status)\n * invalid exceptions and handling the conversion on NaNs.\n */\n \n-static FloatParts float_to_float(FloatParts a, const FloatFmt *dstf,\n+static FloatParts64 float_to_float(FloatParts64 a, const FloatFmt *dstf,\n float_status *s)\n {\n if (dstf->arm_althp) {\n@@ -1934,32 +1934,32 @@ static FloatParts float_to_float(FloatParts a, const FloatFmt *dstf,\n float32 float16_to_float32(float16 a, bool ieee, float_status *s)\n {\n const FloatFmt *fmt16 = ieee ? &float16_params : &float16_params_ahp;\n- FloatParts p = float16a_unpack_canonical(a, s, fmt16);\n- FloatParts pr = float_to_float(p, &float32_params, s);\n+ FloatParts64 p = float16a_unpack_canonical(a, s, fmt16);\n+ FloatParts64 pr = float_to_float(p, &float32_params, s);\n return float32_round_pack_canonical(pr, s);\n }\n \n float64 float16_to_float64(float16 a, bool ieee, float_status *s)\n {\n const FloatFmt *fmt16 = ieee ? &float16_params : &float16_params_ahp;\n- FloatParts p = float16a_unpack_canonical(a, s, fmt16);\n- FloatParts pr = float_to_float(p, &float64_params, s);\n+ FloatParts64 p = float16a_unpack_canonical(a, s, fmt16);\n+ FloatParts64 pr = float_to_float(p, &float64_params, s);\n return float64_round_pack_canonical(pr, s);\n }\n \n float16 float32_to_float16(float32 a, bool ieee, float_status *s)\n {\n const FloatFmt *fmt16 = ieee ? 
&float16_params : &float16_params_ahp;\n- FloatParts p = float32_unpack_canonical(a, s);\n- FloatParts pr = float_to_float(p, fmt16, s);\n+ FloatParts64 p = float32_unpack_canonical(a, s);\n+ FloatParts64 pr = float_to_float(p, fmt16, s);\n return float16a_round_pack_canonical(pr, s, fmt16);\n }\n \n static float64 QEMU_SOFTFLOAT_ATTR\n soft_float32_to_float64(float32 a, float_status *s)\n {\n- FloatParts p = float32_unpack_canonical(a, s);\n- FloatParts pr = float_to_float(p, &float64_params, s);\n+ FloatParts64 p = float32_unpack_canonical(a, s);\n+ FloatParts64 pr = float_to_float(p, &float64_params, s);\n return float64_round_pack_canonical(pr, s);\n }\n \n@@ -1982,43 +1982,43 @@ float64 float32_to_float64(float32 a, float_status *s)\n float16 float64_to_float16(float64 a, bool ieee, float_status *s)\n {\n const FloatFmt *fmt16 = ieee ? &float16_params : &float16_params_ahp;\n- FloatParts p = float64_unpack_canonical(a, s);\n- FloatParts pr = float_to_float(p, fmt16, s);\n+ FloatParts64 p = float64_unpack_canonical(a, s);\n+ FloatParts64 pr = float_to_float(p, fmt16, s);\n return float16a_round_pack_canonical(pr, s, fmt16);\n }\n \n float32 float64_to_float32(float64 a, float_status *s)\n {\n- FloatParts p = float64_unpack_canonical(a, s);\n- FloatParts pr = float_to_float(p, &float32_params, s);\n+ FloatParts64 p = float64_unpack_canonical(a, s);\n+ FloatParts64 pr = float_to_float(p, &float32_params, s);\n return float32_round_pack_canonical(pr, s);\n }\n \n float32 bfloat16_to_float32(bfloat16 a, float_status *s)\n {\n- FloatParts p = bfloat16_unpack_canonical(a, s);\n- FloatParts pr = float_to_float(p, &float32_params, s);\n+ FloatParts64 p = bfloat16_unpack_canonical(a, s);\n+ FloatParts64 pr = float_to_float(p, &float32_params, s);\n return float32_round_pack_canonical(pr, s);\n }\n \n float64 bfloat16_to_float64(bfloat16 a, float_status *s)\n {\n- FloatParts p = bfloat16_unpack_canonical(a, s);\n- FloatParts pr = float_to_float(p, &float64_params, s);\n+ FloatParts64 p = bfloat16_unpack_canonical(a, s);\n+ FloatParts64 pr = float_to_float(p, &float64_params, s);\n return float64_round_pack_canonical(pr, s);\n }\n \n bfloat16 float32_to_bfloat16(float32 a, float_status *s)\n {\n- FloatParts p = float32_unpack_canonical(a, s);\n- FloatParts pr = float_to_float(p, &bfloat16_params, s);\n+ FloatParts64 p = float32_unpack_canonical(a, s);\n+ FloatParts64 pr = float_to_float(p, &bfloat16_params, s);\n return bfloat16_round_pack_canonical(pr, s);\n }\n \n bfloat16 float64_to_bfloat16(float64 a, float_status *s)\n {\n- FloatParts p = float64_unpack_canonical(a, s);\n- FloatParts pr = float_to_float(p, &bfloat16_params, s);\n+ FloatParts64 p = float64_unpack_canonical(a, s);\n+ FloatParts64 pr = float_to_float(p, &bfloat16_params, s);\n return bfloat16_round_pack_canonical(pr, s);\n }\n \n@@ -2029,7 +2029,7 @@ bfloat16 float64_to_bfloat16(float64 a, float_status *s)\n * Arithmetic.\n */\n \n-static FloatParts round_to_int(FloatParts a, FloatRoundMode rmode,\n+static FloatParts64 round_to_int(FloatParts64 a, FloatRoundMode rmode,\n int scale, float_status *s)\n {\n switch (a.cls) {\n@@ -2132,22 +2132,22 @@ static FloatParts round_to_int(FloatParts a, FloatRoundMode rmode,\n \n float16 float16_round_to_int(float16 a, float_status *s)\n {\n- FloatParts pa = float16_unpack_canonical(a, s);\n- FloatParts pr = round_to_int(pa, s->float_rounding_mode, 0, s);\n+ FloatParts64 pa = float16_unpack_canonical(a, s);\n+ FloatParts64 pr = round_to_int(pa, s->float_rounding_mode, 0, s);\n return 
float16_round_pack_canonical(pr, s);\n }\n \n float32 float32_round_to_int(float32 a, float_status *s)\n {\n- FloatParts pa = float32_unpack_canonical(a, s);\n- FloatParts pr = round_to_int(pa, s->float_rounding_mode, 0, s);\n+ FloatParts64 pa = float32_unpack_canonical(a, s);\n+ FloatParts64 pr = round_to_int(pa, s->float_rounding_mode, 0, s);\n return float32_round_pack_canonical(pr, s);\n }\n \n float64 float64_round_to_int(float64 a, float_status *s)\n {\n- FloatParts pa = float64_unpack_canonical(a, s);\n- FloatParts pr = round_to_int(pa, s->float_rounding_mode, 0, s);\n+ FloatParts64 pa = float64_unpack_canonical(a, s);\n+ FloatParts64 pr = round_to_int(pa, s->float_rounding_mode, 0, s);\n return float64_round_pack_canonical(pr, s);\n }\n \n@@ -2158,8 +2158,8 @@ float64 float64_round_to_int(float64 a, float_status *s)\n \n bfloat16 bfloat16_round_to_int(bfloat16 a, float_status *s)\n {\n- FloatParts pa = bfloat16_unpack_canonical(a, s);\n- FloatParts pr = round_to_int(pa, s->float_rounding_mode, 0, s);\n+ FloatParts64 pa = bfloat16_unpack_canonical(a, s);\n+ FloatParts64 pr = round_to_int(pa, s->float_rounding_mode, 0, s);\n return bfloat16_round_pack_canonical(pr, s);\n }\n \n@@ -2174,13 +2174,13 @@ bfloat16 bfloat16_round_to_int(bfloat16 a, float_status *s)\n * is returned.\n */\n \n-static int64_t round_to_int_and_pack(FloatParts in, FloatRoundMode rmode,\n+static int64_t round_to_int_and_pack(FloatParts64 in, FloatRoundMode rmode,\n int scale, int64_t min, int64_t max,\n float_status *s)\n {\n uint64_t r;\n int orig_flags = get_float_exception_flags(s);\n- FloatParts p = round_to_int(in, rmode, scale, s);\n+ FloatParts64 p = round_to_int(in, rmode, scale, s);\n \n switch (p.cls) {\n case float_class_snan:\n@@ -2452,12 +2452,12 @@ int64_t bfloat16_to_int64_round_to_zero(bfloat16 a, float_status *s)\n * flag.\n */\n \n-static uint64_t round_to_uint_and_pack(FloatParts in, FloatRoundMode rmode,\n+static uint64_t round_to_uint_and_pack(FloatParts64 in, FloatRoundMode rmode,\n int scale, uint64_t max,\n float_status *s)\n {\n int orig_flags = get_float_exception_flags(s);\n- FloatParts p = round_to_int(in, rmode, scale, s);\n+ FloatParts64 p = round_to_int(in, rmode, scale, s);\n uint64_t r;\n \n switch (p.cls) {\n@@ -2726,9 +2726,9 @@ uint64_t bfloat16_to_uint64_round_to_zero(bfloat16 a, float_status *s)\n * to the IEC/IEEE Standard for Binary Floating-Point Arithmetic.\n */\n \n-static FloatParts int_to_float(int64_t a, int scale, float_status *status)\n+static FloatParts64 int_to_float(int64_t a, int scale, float_status *status)\n {\n- FloatParts r = { .sign = false };\n+ FloatParts64 r = { .sign = false };\n \n if (a == 0) {\n r.cls = float_class_zero;\n@@ -2753,7 +2753,7 @@ static FloatParts int_to_float(int64_t a, int scale, float_status *status)\n \n float16 int64_to_float16_scalbn(int64_t a, int scale, float_status *status)\n {\n- FloatParts pa = int_to_float(a, scale, status);\n+ FloatParts64 pa = int_to_float(a, scale, status);\n return float16_round_pack_canonical(pa, status);\n }\n \n@@ -2789,7 +2789,7 @@ float16 int8_to_float16(int8_t a, float_status *status)\n \n float32 int64_to_float32_scalbn(int64_t a, int scale, float_status *status)\n {\n- FloatParts pa = int_to_float(a, scale, status);\n+ FloatParts64 pa = int_to_float(a, scale, status);\n return float32_round_pack_canonical(pa, status);\n }\n \n@@ -2820,7 +2820,7 @@ float32 int16_to_float32(int16_t a, float_status *status)\n \n float64 int64_to_float64_scalbn(int64_t a, int scale, float_status *status)\n {\n- 
FloatParts pa = int_to_float(a, scale, status);\n+ FloatParts64 pa = int_to_float(a, scale, status);\n return float64_round_pack_canonical(pa, status);\n }\n \n@@ -2856,7 +2856,7 @@ float64 int16_to_float64(int16_t a, float_status *status)\n \n bfloat16 int64_to_bfloat16_scalbn(int64_t a, int scale, float_status *status)\n {\n- FloatParts pa = int_to_float(a, scale, status);\n+ FloatParts64 pa = int_to_float(a, scale, status);\n return bfloat16_round_pack_canonical(pa, status);\n }\n \n@@ -2893,9 +2893,9 @@ bfloat16 int16_to_bfloat16(int16_t a, float_status *status)\n * IEC/IEEE Standard for Binary Floating-Point Arithmetic.\n */\n \n-static FloatParts uint_to_float(uint64_t a, int scale, float_status *status)\n+static FloatParts64 uint_to_float(uint64_t a, int scale, float_status *status)\n {\n- FloatParts r = { .sign = false };\n+ FloatParts64 r = { .sign = false };\n int shift;\n \n if (a == 0) {\n@@ -2913,7 +2913,7 @@ static FloatParts uint_to_float(uint64_t a, int scale, float_status *status)\n \n float16 uint64_to_float16_scalbn(uint64_t a, int scale, float_status *status)\n {\n- FloatParts pa = uint_to_float(a, scale, status);\n+ FloatParts64 pa = uint_to_float(a, scale, status);\n return float16_round_pack_canonical(pa, status);\n }\n \n@@ -2949,7 +2949,7 @@ float16 uint8_to_float16(uint8_t a, float_status *status)\n \n float32 uint64_to_float32_scalbn(uint64_t a, int scale, float_status *status)\n {\n- FloatParts pa = uint_to_float(a, scale, status);\n+ FloatParts64 pa = uint_to_float(a, scale, status);\n return float32_round_pack_canonical(pa, status);\n }\n \n@@ -2980,7 +2980,7 @@ float32 uint16_to_float32(uint16_t a, float_status *status)\n \n float64 uint64_to_float64_scalbn(uint64_t a, int scale, float_status *status)\n {\n- FloatParts pa = uint_to_float(a, scale, status);\n+ FloatParts64 pa = uint_to_float(a, scale, status);\n return float64_round_pack_canonical(pa, status);\n }\n \n@@ -3016,7 +3016,7 @@ float64 uint16_to_float64(uint16_t a, float_status *status)\n \n bfloat16 uint64_to_bfloat16_scalbn(uint64_t a, int scale, float_status *status)\n {\n- FloatParts pa = uint_to_float(a, scale, status);\n+ FloatParts64 pa = uint_to_float(a, scale, status);\n return bfloat16_round_pack_canonical(pa, status);\n }\n \n@@ -3061,7 +3061,7 @@ bfloat16 uint16_to_bfloat16(uint16_t a, float_status *status)\n * minnummag() and maxnummag() functions correspond to minNumMag()\n * and minNumMag() from the IEEE-754 2008.\n */\n-static FloatParts minmax_floats(FloatParts a, FloatParts b, bool ismin,\n+static FloatParts64 minmax_floats(FloatParts64 a, FloatParts64 b, bool ismin,\n bool ieee, bool ismag, float_status *s)\n {\n if (unlikely(is_nan(a.cls) || is_nan(b.cls))) {\n@@ -3136,9 +3136,9 @@ static FloatParts minmax_floats(FloatParts a, FloatParts b, bool ismin,\n float ## sz float ## sz ## _ ## name(float ## sz a, float ## sz b, \\\n float_status *s) \\\n { \\\n- FloatParts pa = float ## sz ## _unpack_canonical(a, s); \\\n- FloatParts pb = float ## sz ## _unpack_canonical(b, s); \\\n- FloatParts pr = minmax_floats(pa, pb, ismin, isiee, ismag, s); \\\n+ FloatParts64 pa = float ## sz ## _unpack_canonical(a, s); \\\n+ FloatParts64 pb = float ## sz ## _unpack_canonical(b, s); \\\n+ FloatParts64 pr = minmax_floats(pa, pb, ismin, isiee, ismag, s); \\\n \\\n return float ## sz ## _round_pack_canonical(pr, s); \\\n }\n@@ -3169,9 +3169,9 @@ MINMAX(64, maxnummag, false, true, true)\n #define BF16_MINMAX(name, ismin, isiee, ismag) \\\n bfloat16 bfloat16_ ## name(bfloat16 a, bfloat16 b, float_status 
*s) \\\n { \\\n- FloatParts pa = bfloat16_unpack_canonical(a, s); \\\n- FloatParts pb = bfloat16_unpack_canonical(b, s); \\\n- FloatParts pr = minmax_floats(pa, pb, ismin, isiee, ismag, s); \\\n+ FloatParts64 pa = bfloat16_unpack_canonical(a, s); \\\n+ FloatParts64 pb = bfloat16_unpack_canonical(b, s); \\\n+ FloatParts64 pr = minmax_floats(pa, pb, ismin, isiee, ismag, s); \\\n \\\n return bfloat16_round_pack_canonical(pr, s); \\\n }\n@@ -3186,7 +3186,7 @@ BF16_MINMAX(maxnummag, false, true, true)\n #undef BF16_MINMAX\n \n /* Floating point compare */\n-static FloatRelation compare_floats(FloatParts a, FloatParts b, bool is_quiet,\n+static FloatRelation compare_floats(FloatParts64 a, FloatParts64 b, bool is_quiet,\n float_status *s)\n {\n if (is_nan(a.cls) || is_nan(b.cls)) {\n@@ -3247,8 +3247,8 @@ static FloatRelation compare_floats(FloatParts a, FloatParts b, bool is_quiet,\n static int attr \\\n name(float ## sz a, float ## sz b, bool is_quiet, float_status *s) \\\n { \\\n- FloatParts pa = float ## sz ## _unpack_canonical(a, s); \\\n- FloatParts pb = float ## sz ## _unpack_canonical(b, s); \\\n+ FloatParts64 pa = float ## sz ## _unpack_canonical(a, s); \\\n+ FloatParts64 pb = float ## sz ## _unpack_canonical(b, s); \\\n return compare_floats(pa, pb, is_quiet, s); \\\n }\n \n@@ -3349,8 +3349,8 @@ FloatRelation float64_compare_quiet(float64 a, float64 b, float_status *s)\n static FloatRelation QEMU_FLATTEN\n soft_bf16_compare(bfloat16 a, bfloat16 b, bool is_quiet, float_status *s)\n {\n- FloatParts pa = bfloat16_unpack_canonical(a, s);\n- FloatParts pb = bfloat16_unpack_canonical(b, s);\n+ FloatParts64 pa = bfloat16_unpack_canonical(a, s);\n+ FloatParts64 pb = bfloat16_unpack_canonical(b, s);\n return compare_floats(pa, pb, is_quiet, s);\n }\n \n@@ -3365,16 +3365,16 @@ FloatRelation bfloat16_compare_quiet(bfloat16 a, bfloat16 b, float_status *s)\n }\n \n /* Multiply A by 2 raised to the power N. */\n-static FloatParts scalbn_decomposed(FloatParts a, int n, float_status *s)\n+static FloatParts64 scalbn_decomposed(FloatParts64 a, int n, float_status *s)\n {\n if (unlikely(is_nan(a.cls))) {\n return return_nan(a, s);\n }\n if (a.cls == float_class_normal) {\n- /* The largest float type (even though not supported by FloatParts)\n+ /* The largest float type (even though not supported by FloatParts64)\n * is float128, which has a 15 bit exponent. 
Bounding N to 16 bits\n * still allows rounding to infinity, without allowing overflow\n- * within the int32_t that backs FloatParts.exp.\n+ * within the int32_t that backs FloatParts64.exp.\n */\n n = MIN(MAX(n, -0x10000), 0x10000);\n a.exp += n;\n@@ -3384,29 +3384,29 @@ static FloatParts scalbn_decomposed(FloatParts a, int n, float_status *s)\n \n float16 float16_scalbn(float16 a, int n, float_status *status)\n {\n- FloatParts pa = float16_unpack_canonical(a, status);\n- FloatParts pr = scalbn_decomposed(pa, n, status);\n+ FloatParts64 pa = float16_unpack_canonical(a, status);\n+ FloatParts64 pr = scalbn_decomposed(pa, n, status);\n return float16_round_pack_canonical(pr, status);\n }\n \n float32 float32_scalbn(float32 a, int n, float_status *status)\n {\n- FloatParts pa = float32_unpack_canonical(a, status);\n- FloatParts pr = scalbn_decomposed(pa, n, status);\n+ FloatParts64 pa = float32_unpack_canonical(a, status);\n+ FloatParts64 pr = scalbn_decomposed(pa, n, status);\n return float32_round_pack_canonical(pr, status);\n }\n \n float64 float64_scalbn(float64 a, int n, float_status *status)\n {\n- FloatParts pa = float64_unpack_canonical(a, status);\n- FloatParts pr = scalbn_decomposed(pa, n, status);\n+ FloatParts64 pa = float64_unpack_canonical(a, status);\n+ FloatParts64 pr = scalbn_decomposed(pa, n, status);\n return float64_round_pack_canonical(pr, status);\n }\n \n bfloat16 bfloat16_scalbn(bfloat16 a, int n, float_status *status)\n {\n- FloatParts pa = bfloat16_unpack_canonical(a, status);\n- FloatParts pr = scalbn_decomposed(pa, n, status);\n+ FloatParts64 pa = bfloat16_unpack_canonical(a, status);\n+ FloatParts64 pr = scalbn_decomposed(pa, n, status);\n return bfloat16_round_pack_canonical(pr, status);\n }\n \n@@ -3422,7 +3422,7 @@ bfloat16 bfloat16_scalbn(bfloat16 a, int n, float_status *status)\n * especially for 64 bit floats.\n */\n \n-static FloatParts sqrt_float(FloatParts a, float_status *s, const FloatFmt *p)\n+static FloatParts64 sqrt_float(FloatParts64 a, float_status *s, const FloatFmt *p)\n {\n uint64_t a_frac, r_frac, s_frac;\n int bit, last_bit;\n@@ -3482,24 +3482,24 @@ static FloatParts sqrt_float(FloatParts a, float_status *s, const FloatFmt *p)\n \n float16 QEMU_FLATTEN float16_sqrt(float16 a, float_status *status)\n {\n- FloatParts pa = float16_unpack_canonical(a, status);\n- FloatParts pr = sqrt_float(pa, status, &float16_params);\n+ FloatParts64 pa = float16_unpack_canonical(a, status);\n+ FloatParts64 pr = sqrt_float(pa, status, &float16_params);\n return float16_round_pack_canonical(pr, status);\n }\n \n static float32 QEMU_SOFTFLOAT_ATTR\n soft_f32_sqrt(float32 a, float_status *status)\n {\n- FloatParts pa = float32_unpack_canonical(a, status);\n- FloatParts pr = sqrt_float(pa, status, &float32_params);\n+ FloatParts64 pa = float32_unpack_canonical(a, status);\n+ FloatParts64 pr = sqrt_float(pa, status, &float32_params);\n return float32_round_pack_canonical(pr, status);\n }\n \n static float64 QEMU_SOFTFLOAT_ATTR\n soft_f64_sqrt(float64 a, float_status *status)\n {\n- FloatParts pa = float64_unpack_canonical(a, status);\n- FloatParts pr = sqrt_float(pa, status, &float64_params);\n+ FloatParts64 pa = float64_unpack_canonical(a, status);\n+ FloatParts64 pr = sqrt_float(pa, status, &float64_params);\n return float64_round_pack_canonical(pr, status);\n }\n \n@@ -3559,8 +3559,8 @@ float64 QEMU_FLATTEN float64_sqrt(float64 xa, float_status *s)\n \n bfloat16 QEMU_FLATTEN bfloat16_sqrt(bfloat16 a, float_status *status)\n {\n- FloatParts pa = 
bfloat16_unpack_canonical(a, status);\n- FloatParts pr = sqrt_float(pa, status, &bfloat16_params);\n+ FloatParts64 pa = bfloat16_unpack_canonical(a, status);\n+ FloatParts64 pr = sqrt_float(pa, status, &bfloat16_params);\n return bfloat16_round_pack_canonical(pr, status);\n }\n \n@@ -3570,28 +3570,28 @@ bfloat16 QEMU_FLATTEN bfloat16_sqrt(bfloat16 a, float_status *status)\n \n float16 float16_default_nan(float_status *status)\n {\n- FloatParts p = parts_default_nan(status);\n+ FloatParts64 p = parts_default_nan(status);\n p.frac >>= float16_params.frac_shift;\n return float16_pack_raw(p);\n }\n \n float32 float32_default_nan(float_status *status)\n {\n- FloatParts p = parts_default_nan(status);\n+ FloatParts64 p = parts_default_nan(status);\n p.frac >>= float32_params.frac_shift;\n return float32_pack_raw(p);\n }\n \n float64 float64_default_nan(float_status *status)\n {\n- FloatParts p = parts_default_nan(status);\n+ FloatParts64 p = parts_default_nan(status);\n p.frac >>= float64_params.frac_shift;\n return float64_pack_raw(p);\n }\n \n float128 float128_default_nan(float_status *status)\n {\n- FloatParts p = parts_default_nan(status);\n+ FloatParts64 p = parts_default_nan(status);\n float128 r;\n \n /* Extrapolate from the choices made by parts_default_nan to fill\n@@ -3608,7 +3608,7 @@ float128 float128_default_nan(float_status *status)\n \n bfloat16 bfloat16_default_nan(float_status *status)\n {\n- FloatParts p = parts_default_nan(status);\n+ FloatParts64 p = parts_default_nan(status);\n p.frac >>= bfloat16_params.frac_shift;\n return bfloat16_pack_raw(p);\n }\n@@ -3619,7 +3619,7 @@ bfloat16 bfloat16_default_nan(float_status *status)\n \n float16 float16_silence_nan(float16 a, float_status *status)\n {\n- FloatParts p = float16_unpack_raw(a);\n+ FloatParts64 p = float16_unpack_raw(a);\n p.frac <<= float16_params.frac_shift;\n p = parts_silence_nan(p, status);\n p.frac >>= float16_params.frac_shift;\n@@ -3628,7 +3628,7 @@ float16 float16_silence_nan(float16 a, float_status *status)\n \n float32 float32_silence_nan(float32 a, float_status *status)\n {\n- FloatParts p = float32_unpack_raw(a);\n+ FloatParts64 p = float32_unpack_raw(a);\n p.frac <<= float32_params.frac_shift;\n p = parts_silence_nan(p, status);\n p.frac >>= float32_params.frac_shift;\n@@ -3637,7 +3637,7 @@ float32 float32_silence_nan(float32 a, float_status *status)\n \n float64 float64_silence_nan(float64 a, float_status *status)\n {\n- FloatParts p = float64_unpack_raw(a);\n+ FloatParts64 p = float64_unpack_raw(a);\n p.frac <<= float64_params.frac_shift;\n p = parts_silence_nan(p, status);\n p.frac >>= float64_params.frac_shift;\n@@ -3646,7 +3646,7 @@ float64 float64_silence_nan(float64 a, float_status *status)\n \n bfloat16 bfloat16_silence_nan(bfloat16 a, float_status *status)\n {\n- FloatParts p = bfloat16_unpack_raw(a);\n+ FloatParts64 p = bfloat16_unpack_raw(a);\n p.frac <<= bfloat16_params.frac_shift;\n p = parts_silence_nan(p, status);\n p.frac >>= bfloat16_params.frac_shift;\n@@ -3658,7 +3658,7 @@ bfloat16 bfloat16_silence_nan(bfloat16 a, float_status *status)\n | input-denormal exception and return zero. 
Otherwise just return the value.\n *----------------------------------------------------------------------------*/\n \n-static bool parts_squash_denormal(FloatParts p, float_status *status)\n+static bool parts_squash_denormal(FloatParts64 p, float_status *status)\n {\n if (p.exp == 0 && p.frac != 0) {\n float_raise(float_flag_input_denormal, status);\n@@ -3671,7 +3671,7 @@ static bool parts_squash_denormal(FloatParts p, float_status *status)\n float16 float16_squash_input_denormal(float16 a, float_status *status)\n {\n if (status->flush_inputs_to_zero) {\n- FloatParts p = float16_unpack_raw(a);\n+ FloatParts64 p = float16_unpack_raw(a);\n if (parts_squash_denormal(p, status)) {\n return float16_set_sign(float16_zero, p.sign);\n }\n@@ -3682,7 +3682,7 @@ float16 float16_squash_input_denormal(float16 a, float_status *status)\n float32 float32_squash_input_denormal(float32 a, float_status *status)\n {\n if (status->flush_inputs_to_zero) {\n- FloatParts p = float32_unpack_raw(a);\n+ FloatParts64 p = float32_unpack_raw(a);\n if (parts_squash_denormal(p, status)) {\n return float32_set_sign(float32_zero, p.sign);\n }\n@@ -3693,7 +3693,7 @@ float32 float32_squash_input_denormal(float32 a, float_status *status)\n float64 float64_squash_input_denormal(float64 a, float_status *status)\n {\n if (status->flush_inputs_to_zero) {\n- FloatParts p = float64_unpack_raw(a);\n+ FloatParts64 p = float64_unpack_raw(a);\n if (parts_squash_denormal(p, status)) {\n return float64_set_sign(float64_zero, p.sign);\n }\n@@ -3704,7 +3704,7 @@ float64 float64_squash_input_denormal(float64 a, float_status *status)\n bfloat16 bfloat16_squash_input_denormal(bfloat16 a, float_status *status)\n {\n if (status->flush_inputs_to_zero) {\n- FloatParts p = bfloat16_unpack_raw(a);\n+ FloatParts64 p = bfloat16_unpack_raw(a);\n if (parts_squash_denormal(p, status)) {\n return bfloat16_set_sign(bfloat16_zero, p.sign);\n }\ndiff --git a/fpu/softfloat-specialize.c.inc b/fpu/softfloat-specialize.c.inc\nindex 5988830c16..52fc76d800 100644\n--- a/fpu/softfloat-specialize.c.inc\n+++ b/fpu/softfloat-specialize.c.inc\n@@ -129,7 +129,7 @@ static bool parts_is_snan_frac(uint64_t frac, float_status *status)\n | The pattern for a default generated deconstructed floating-point NaN.\n *----------------------------------------------------------------------------*/\n \n-static FloatParts parts_default_nan(float_status *status)\n+static FloatParts64 parts_default_nan(float_status *status)\n {\n bool sign = 0;\n uint64_t frac;\n@@ -164,7 +164,7 @@ static FloatParts parts_default_nan(float_status *status)\n }\n #endif\n \n- return (FloatParts) {\n+ return (FloatParts64) {\n .cls = float_class_qnan,\n .sign = sign,\n .exp = INT_MAX,\n@@ -177,7 +177,7 @@ static FloatParts parts_default_nan(float_status *status)\n | floating-point parts.\n *----------------------------------------------------------------------------*/\n \n-static FloatParts parts_silence_nan(FloatParts a, float_status *status)\n+static FloatParts64 parts_silence_nan(FloatParts64 a, float_status *status)\n {\n g_assert(!no_signaling_nans(status));\n g_assert(!status->default_nan_mode);\n", "prefixes": [ "15/72" ] }
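The `patch` and `put` methods listed at the top of the page update this same resource. Below is a minimal sketch of such an update, assuming token-based authentication via the Authorization header and assuming that `state` is writable for an account with maintainer rights; the token value and the chosen state string are illustrative assumptions, not taken from this page.

```python
import requests

url = "http://patchwork.ozlabs.org/api/patches/1475724/"

# Hypothetical API token for a maintainer account; token auth through the
# Authorization header is an assumption here.
headers = {"Authorization": "Token 0123456789abcdef"}

# PATCH performs a partial update, so only the changed fields are sent;
# PUT would instead replace the whole writable representation.
resp = requests.patch(url, headers=headers, json={"state": "accepted"})
resp.raise_for_status()
print(resp.json()["state"])
```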