
Fix all the misspellings -- BZ 25337

Message ID 20230522025528.2222670-1-ppluzhnikov@google.com
State New
Series Fix all the misspellings -- BZ 25337

Commit Message

Paul Pluzhnikov May 22, 2023, 2:55 a.m. UTC
On Sun, May 21, 2023 at 4:27 PM Noah Goldstein <goldstein.w.n@gmail.com> wrote:

> I thought you only intended to modify the SVML functions.

Here are all the fixes for sysdeps/x86_64/fpu/multiarch/

Thanks,

---
 sysdeps/x86_64/fpu/multiarch/svml_d_acos2_core_sse4.S     | 2 +-
 sysdeps/x86_64/fpu/multiarch/svml_d_acos4_core_avx2.S     | 2 +-
 sysdeps/x86_64/fpu/multiarch/svml_d_acos8_core_avx512.S   | 2 +-
 sysdeps/x86_64/fpu/multiarch/svml_d_acosh2_core_sse4.S    | 2 +-
 sysdeps/x86_64/fpu/multiarch/svml_d_acosh4_core_avx2.S    | 2 +-
 sysdeps/x86_64/fpu/multiarch/svml_d_acosh8_core_avx512.S  | 2 +-
 sysdeps/x86_64/fpu/multiarch/svml_d_asin2_core_sse4.S     | 2 +-
 sysdeps/x86_64/fpu/multiarch/svml_d_asin4_core_avx2.S     | 2 +-
 sysdeps/x86_64/fpu/multiarch/svml_d_asin8_core_avx512.S   | 2 +-
 sysdeps/x86_64/fpu/multiarch/svml_d_asinh2_core_sse4.S    | 2 +-
 sysdeps/x86_64/fpu/multiarch/svml_d_asinh4_core_avx2.S    | 2 +-
 sysdeps/x86_64/fpu/multiarch/svml_d_asinh8_core_avx512.S  | 2 +-
 sysdeps/x86_64/fpu/multiarch/svml_d_atan22_core_sse4.S    | 8 ++++----
 sysdeps/x86_64/fpu/multiarch/svml_d_atan24_core_avx2.S    | 8 ++++----
 sysdeps/x86_64/fpu/multiarch/svml_d_atan28_core_avx512.S  | 8 ++++----
 sysdeps/x86_64/fpu/multiarch/svml_d_atanh2_core_sse4.S    | 2 +-
 sysdeps/x86_64/fpu/multiarch/svml_d_atanh4_core_avx2.S    | 2 +-
 sysdeps/x86_64/fpu/multiarch/svml_d_atanh8_core_avx512.S  | 2 +-
 sysdeps/x86_64/fpu/multiarch/svml_d_cbrt2_core_sse4.S     | 2 +-
 sysdeps/x86_64/fpu/multiarch/svml_d_cbrt4_core_avx2.S     | 2 +-
 sysdeps/x86_64/fpu/multiarch/svml_d_cosh2_core_sse4.S     | 2 +-
 sysdeps/x86_64/fpu/multiarch/svml_d_cosh4_core_avx2.S     | 2 +-
 sysdeps/x86_64/fpu/multiarch/svml_d_cosh8_core_avx512.S   | 2 +-
 sysdeps/x86_64/fpu/multiarch/svml_d_erfc2_core_sse4.S     | 2 +-
 sysdeps/x86_64/fpu/multiarch/svml_d_erfc4_core_avx2.S     | 2 +-
 sysdeps/x86_64/fpu/multiarch/svml_d_erfc8_core_avx512.S   | 2 +-
 sysdeps/x86_64/fpu/multiarch/svml_d_exp102_core_sse4.S    | 4 ++--
 sysdeps/x86_64/fpu/multiarch/svml_d_exp104_core_avx2.S    | 4 ++--
 sysdeps/x86_64/fpu/multiarch/svml_d_exp108_core_avx512.S  | 4 ++--
 sysdeps/x86_64/fpu/multiarch/svml_d_exp22_core_sse4.S     | 4 ++--
 sysdeps/x86_64/fpu/multiarch/svml_d_exp24_core_avx2.S     | 4 ++--
 sysdeps/x86_64/fpu/multiarch/svml_d_exp28_core_avx512.S   | 2 +-
 sysdeps/x86_64/fpu/multiarch/svml_d_expm12_core_sse4.S    | 2 +-
 sysdeps/x86_64/fpu/multiarch/svml_d_expm14_core_avx2.S    | 2 +-
 sysdeps/x86_64/fpu/multiarch/svml_d_expm18_core_avx512.S  | 4 ++--
 sysdeps/x86_64/fpu/multiarch/svml_d_hypot2_core_sse4.S    | 6 +++---
 sysdeps/x86_64/fpu/multiarch/svml_d_hypot4_core_avx2.S    | 6 +++---
 sysdeps/x86_64/fpu/multiarch/svml_d_hypot8_core_avx512.S  | 4 ++--
 sysdeps/x86_64/fpu/multiarch/svml_d_log102_core_sse4.S    | 2 +-
 sysdeps/x86_64/fpu/multiarch/svml_d_log104_core_avx2.S    | 2 +-
 sysdeps/x86_64/fpu/multiarch/svml_d_log108_core_avx512.S  | 2 +-
 sysdeps/x86_64/fpu/multiarch/svml_d_log1p2_core_sse4.S    | 2 +-
 sysdeps/x86_64/fpu/multiarch/svml_d_log1p4_core_avx2.S    | 2 +-
 sysdeps/x86_64/fpu/multiarch/svml_d_log1p8_core_avx512.S  | 2 +-
 sysdeps/x86_64/fpu/multiarch/svml_d_log22_core_sse4.S     | 2 +-
 sysdeps/x86_64/fpu/multiarch/svml_d_log24_core_avx2.S     | 2 +-
 sysdeps/x86_64/fpu/multiarch/svml_d_log28_core_avx512.S   | 2 +-
 sysdeps/x86_64/fpu/multiarch/svml_d_sinh2_core_sse4.S     | 2 +-
 sysdeps/x86_64/fpu/multiarch/svml_d_sinh4_core_avx2.S     | 2 +-
 sysdeps/x86_64/fpu/multiarch/svml_d_sinh8_core_avx512.S   | 2 +-
 sysdeps/x86_64/fpu/multiarch/svml_d_tan2_core_sse4.S      | 8 ++++----
 sysdeps/x86_64/fpu/multiarch/svml_d_tan4_core_avx2.S      | 8 ++++----
 sysdeps/x86_64/fpu/multiarch/svml_d_tan8_core_avx512.S    | 8 ++++----
 sysdeps/x86_64/fpu/multiarch/svml_d_tanh2_core_sse4.S     | 2 +-
 sysdeps/x86_64/fpu/multiarch/svml_d_tanh4_core_avx2.S     | 2 +-
 sysdeps/x86_64/fpu/multiarch/svml_d_tanh8_core_avx512.S   | 2 +-
 sysdeps/x86_64/fpu/multiarch/svml_s_acosf16_core_avx512.S | 2 +-
 sysdeps/x86_64/fpu/multiarch/svml_s_acosf4_core_sse4.S    | 2 +-
 sysdeps/x86_64/fpu/multiarch/svml_s_acosf8_core_avx2.S    | 2 +-
 .../x86_64/fpu/multiarch/svml_s_acoshf16_core_avx512.S    | 2 +-
 sysdeps/x86_64/fpu/multiarch/svml_s_acoshf4_core_sse4.S   | 2 +-
 sysdeps/x86_64/fpu/multiarch/svml_s_acoshf8_core_avx2.S   | 2 +-
 sysdeps/x86_64/fpu/multiarch/svml_s_asinf16_core_avx512.S | 2 +-
 sysdeps/x86_64/fpu/multiarch/svml_s_asinf4_core_sse4.S    | 2 +-
 sysdeps/x86_64/fpu/multiarch/svml_s_asinf8_core_avx2.S    | 2 +-
 .../x86_64/fpu/multiarch/svml_s_asinhf16_core_avx512.S    | 2 +-
 sysdeps/x86_64/fpu/multiarch/svml_s_asinhf4_core_sse4.S   | 2 +-
 sysdeps/x86_64/fpu/multiarch/svml_s_asinhf8_core_avx2.S   | 2 +-
 .../x86_64/fpu/multiarch/svml_s_atan2f16_core_avx512.S    | 8 ++++----
 sysdeps/x86_64/fpu/multiarch/svml_s_atan2f4_core_sse4.S   | 8 ++++----
 sysdeps/x86_64/fpu/multiarch/svml_s_atan2f8_core_avx2.S   | 8 ++++----
 .../x86_64/fpu/multiarch/svml_s_atanhf16_core_avx512.S    | 2 +-
 sysdeps/x86_64/fpu/multiarch/svml_s_atanhf4_core_sse4.S   | 2 +-
 sysdeps/x86_64/fpu/multiarch/svml_s_atanhf8_core_avx2.S   | 2 +-
 sysdeps/x86_64/fpu/multiarch/svml_s_cbrtf4_core_sse4.S    | 2 +-
 sysdeps/x86_64/fpu/multiarch/svml_s_cbrtf8_core_avx2.S    | 2 +-
 sysdeps/x86_64/fpu/multiarch/svml_s_coshf16_core_avx512.S | 2 +-
 sysdeps/x86_64/fpu/multiarch/svml_s_coshf4_core_sse4.S    | 2 +-
 sysdeps/x86_64/fpu/multiarch/svml_s_coshf8_core_avx2.S    | 2 +-
 sysdeps/x86_64/fpu/multiarch/svml_s_erfcf16_core_avx512.S | 2 +-
 sysdeps/x86_64/fpu/multiarch/svml_s_erfcf4_core_sse4.S    | 2 +-
 sysdeps/x86_64/fpu/multiarch/svml_s_erfcf8_core_avx2.S    | 2 +-
 .../x86_64/fpu/multiarch/svml_s_exp10f16_core_avx512.S    | 4 ++--
 sysdeps/x86_64/fpu/multiarch/svml_s_exp10f4_core_sse4.S   | 4 ++--
 sysdeps/x86_64/fpu/multiarch/svml_s_exp10f8_core_avx2.S   | 4 ++--
 sysdeps/x86_64/fpu/multiarch/svml_s_exp2f16_core_avx512.S | 2 +-
 sysdeps/x86_64/fpu/multiarch/svml_s_exp2f4_core_sse4.S    | 2 +-
 sysdeps/x86_64/fpu/multiarch/svml_s_exp2f8_core_avx2.S    | 2 +-
 .../x86_64/fpu/multiarch/svml_s_expm1f16_core_avx512.S    | 4 ++--
 sysdeps/x86_64/fpu/multiarch/svml_s_expm1f4_core_sse4.S   | 2 +-
 sysdeps/x86_64/fpu/multiarch/svml_s_expm1f8_core_avx2.S   | 2 +-
 .../x86_64/fpu/multiarch/svml_s_hypotf16_core_avx512.S    | 4 ++--
 sysdeps/x86_64/fpu/multiarch/svml_s_hypotf4_core_sse4.S   | 6 +++---
 sysdeps/x86_64/fpu/multiarch/svml_s_hypotf8_core_avx2.S   | 6 +++---
 .../x86_64/fpu/multiarch/svml_s_log10f16_core_avx512.S    | 2 +-
 sysdeps/x86_64/fpu/multiarch/svml_s_log10f4_core_sse4.S   | 2 +-
 sysdeps/x86_64/fpu/multiarch/svml_s_log10f8_core_avx2.S   | 2 +-
 .../x86_64/fpu/multiarch/svml_s_log1pf16_core_avx512.S    | 2 +-
 sysdeps/x86_64/fpu/multiarch/svml_s_log1pf4_core_sse4.S   | 2 +-
 sysdeps/x86_64/fpu/multiarch/svml_s_log1pf8_core_avx2.S   | 2 +-
 sysdeps/x86_64/fpu/multiarch/svml_s_log2f16_core_avx512.S | 2 +-
 sysdeps/x86_64/fpu/multiarch/svml_s_log2f4_core_sse4.S    | 2 +-
 sysdeps/x86_64/fpu/multiarch/svml_s_log2f8_core_avx2.S    | 2 +-
 sysdeps/x86_64/fpu/multiarch/svml_s_sinhf16_core_avx512.S | 2 +-
 sysdeps/x86_64/fpu/multiarch/svml_s_sinhf4_core_sse4.S    | 2 +-
 sysdeps/x86_64/fpu/multiarch/svml_s_sinhf8_core_avx2.S    | 2 +-
 sysdeps/x86_64/fpu/multiarch/svml_s_tanf16_core_avx512.S  | 8 ++++----
 sysdeps/x86_64/fpu/multiarch/svml_s_tanf4_core_sse4.S     | 8 ++++----
 sysdeps/x86_64/fpu/multiarch/svml_s_tanf8_core_avx2.S     | 8 ++++----
 sysdeps/x86_64/fpu/multiarch/svml_s_tanhf16_core_avx512.S | 2 +-
 sysdeps/x86_64/fpu/multiarch/svml_s_tanhf4_core_sse4.S    | 4 ++--
 sysdeps/x86_64/fpu/multiarch/svml_s_tanhf8_core_avx2.S    | 2 +-
 112 files changed, 169 insertions(+), 169 deletions(-)
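
All of the corrections are to comment text; the misspellings involved (e.g. "fucntion", "auxilary", "arument", "withing", "oveflow", each visible in the hunks below) can be found with a short scan of the tree. A minimal sketch in Python, assuming a glibc checkout as the working directory and using only the word list taken from this patch:

#!/usr/bin/env python3
"""Minimal sketch: locate the misspellings this patch corrects.

Assumes it is run from the top of a glibc source tree; the word list
below is taken from the hunks in this patch and is not exhaustive.
"""
import pathlib

# Misspelling -> correction, as seen in the diff hunks below.
TYPOS = {
    "fucntion": "function",
    "auxilary": "auxiliary",
    "arument": "argument",
    "withing": "within",
    "oveflow": "overflow",
}

root = pathlib.Path("sysdeps/x86_64/fpu/multiarch")
for path in sorted(root.glob("*.S")):
    for lineno, line in enumerate(path.read_text(errors="replace").splitlines(), 1):
        for bad, good in TYPOS.items():
            if bad in line:
                print(f"{path}:{lineno}: {bad} -> {good}")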

Comments

Noah Goldstein May 22, 2023, 3:22 a.m. UTC | #1
On Sun, May 21, 2023 at 9:57 PM Paul Pluzhnikov <ppluzhnikov@google.com> wrote:
>
> On Sun, May 21, 2023 at 4:27 PM Noah Goldstein <goldstein.w.n@gmail.com> wrote:
>
> > I thought you only intended to modify the SVML functions.
>
> Here are all the fixes for sysdeps/x86_64/fpu/multiarch/
>
> Thanks,

This looks good.
Can you make it a proper patch with a commit message, etc.? Likewise
for any other directory you want to do.
Just verify there is no change to the object files (and say as much in
the commit message for testing) and I'll approve.
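
One way to run that object-file check (a minimal sketch; build-before/ and build-after/ are hypothetical paths standing for builds of the tree without and with the patch applied) is to hash every object file in both build trees and compare:

#!/usr/bin/env python3
"""Minimal sketch of the "no change to obj files" check suggested above.

Assumes two finished glibc builds in build-before/ and build-after/
(hypothetical paths); a comment-only patch should leave every object
file byte-identical.
"""
import hashlib
import pathlib
import sys

def digests(build_dir: str) -> dict[str, str]:
    """Map each object file's path (relative to the build dir) to its SHA-256."""
    root = pathlib.Path(build_dir)
    return {
        str(p.relative_to(root)): hashlib.sha256(p.read_bytes()).hexdigest()
        for p in sorted(root.rglob("*.o*"))
        if p.suffix in {".o", ".os", ".oS"}
    }

before = digests("build-before")
after = digests("build-after")
changed = [f for f in before.keys() & after.keys() if before[f] != after[f]]
for f in changed:
    print(f"changed: {f}")
sys.exit(1 if changed else 0)

For a comment-only patch like this one, the script should print nothing and exit 0.
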
>
> diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_acos2_core_sse4.S b/sysdeps/x86_64/fpu/multiarch/svml_d_acos2_core_sse4.S
> index 840c3d6a17..a46ddc136e 100644
> --- a/sysdeps/x86_64/fpu/multiarch/svml_d_acos2_core_sse4.S
> +++ b/sysdeps/x86_64/fpu/multiarch/svml_d_acos2_core_sse4.S
> @@ -222,7 +222,7 @@ L(SPECIAL_VALUES_LOOP):
>         cfi_offset(14, -80)
>         # LOE rbx rbp r12 r13 r14 r15 xmm0
>
> -       /* Scalar math fucntion call
> +       /* Scalar math function call
>          * to process special input
>          */
>
> diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_acos4_core_avx2.S b/sysdeps/x86_64/fpu/multiarch/svml_d_acos4_core_avx2.S
> index 3c75200578..808ea2fe95 100644
> --- a/sysdeps/x86_64/fpu/multiarch/svml_d_acos4_core_avx2.S
> +++ b/sysdeps/x86_64/fpu/multiarch/svml_d_acos4_core_avx2.S
> @@ -204,7 +204,7 @@ L(SPECIAL_VALUES_LOOP):
>         .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xa0, 0xff, 0xff, 0xff, 0x22
>         # LOE rbx r12 r13 r14 r15 ymm0
>
> -       /* Scalar math fucntion call
> +       /* Scalar math function call
>          * to process special input
>          */
>
> diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_acos8_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_d_acos8_core_avx512.S
> index 0647a2e1f7..878d1454c6 100644
> --- a/sysdeps/x86_64/fpu/multiarch/svml_d_acos8_core_avx512.S
> +++ b/sysdeps/x86_64/fpu/multiarch/svml_d_acos8_core_avx512.S
> @@ -226,7 +226,7 @@ L(SPECIAL_VALUES_LOOP):
>         .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x40, 0xff, 0xff, 0xff, 0x22
>         # LOE rbx r12 r13 r14 r15 zmm0
>
> -       /* Scalar math fucntion call
> +       /* Scalar math function call
>          * to process special input
>          */
>
> diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_acosh2_core_sse4.S b/sysdeps/x86_64/fpu/multiarch/svml_d_acosh2_core_sse4.S
> index 8a56813ff0..b69e5cef8b 100644
> --- a/sysdeps/x86_64/fpu/multiarch/svml_d_acosh2_core_sse4.S
> +++ b/sysdeps/x86_64/fpu/multiarch/svml_d_acosh2_core_sse4.S
> @@ -321,7 +321,7 @@ L(SPECIAL_VALUES_LOOP):
>         .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x22
>         # LOE rbx r12 r13 r14 r15 xmm0
>
> -       /* Scalar math fucntion call
> +       /* Scalar math function call
>          * to process special input
>          */
>
> diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_acosh4_core_avx2.S b/sysdeps/x86_64/fpu/multiarch/svml_d_acosh4_core_avx2.S
> index f16f539fb6..825b231173 100644
> --- a/sysdeps/x86_64/fpu/multiarch/svml_d_acosh4_core_avx2.S
> +++ b/sysdeps/x86_64/fpu/multiarch/svml_d_acosh4_core_avx2.S
> @@ -366,7 +366,7 @@ L(SPECIAL_VALUES_LOOP):
>         .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xa0, 0xff, 0xff, 0xff, 0x22
>         # LOE rbx r12 r13 r14 r15 ymm0
>
> -       /* Scalar math fucntion call
> +       /* Scalar math function call
>          * to process special input
>          */
>
> diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_acosh8_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_d_acosh8_core_avx512.S
> index 1a3211bf43..32ed85e368 100644
> --- a/sysdeps/x86_64/fpu/multiarch/svml_d_acosh8_core_avx512.S
> +++ b/sysdeps/x86_64/fpu/multiarch/svml_d_acosh8_core_avx512.S
> @@ -311,7 +311,7 @@ L(SPECIAL_VALUES_LOOP):
>         .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x40, 0xff, 0xff, 0xff, 0x22
>         # LOE rbx r12 r13 r14 r15 zmm0
>
> -       /* Scalar math fucntion call
> +       /* Scalar math function call
>          * to process special input
>          */
>
> diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_asin2_core_sse4.S b/sysdeps/x86_64/fpu/multiarch/svml_d_asin2_core_sse4.S
> index 9fb9ddcf3d..7bba3b5272 100644
> --- a/sysdeps/x86_64/fpu/multiarch/svml_d_asin2_core_sse4.S
> +++ b/sysdeps/x86_64/fpu/multiarch/svml_d_asin2_core_sse4.S
> @@ -211,7 +211,7 @@ L(SPECIAL_VALUES_LOOP):
>         cfi_offset(14, -80)
>         # LOE rbx rbp r12 r13 r14 r15 xmm0
>
> -       /* Scalar math fucntion call
> +       /* Scalar math function call
>          * to process special input
>          */
>
> diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_asin4_core_avx2.S b/sysdeps/x86_64/fpu/multiarch/svml_d_asin4_core_avx2.S
> index af6fa771c5..c7dbb727e3 100644
> --- a/sysdeps/x86_64/fpu/multiarch/svml_d_asin4_core_avx2.S
> +++ b/sysdeps/x86_64/fpu/multiarch/svml_d_asin4_core_avx2.S
> @@ -196,7 +196,7 @@ L(SPECIAL_VALUES_LOOP):
>         .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xa0, 0xff, 0xff, 0xff, 0x22
>         # LOE rbx r12 r13 r14 r15 ymm0
>
> -       /* Scalar math fucntion call
> +       /* Scalar math function call
>          * to process special input
>          */
>
> diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_asin8_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_d_asin8_core_avx512.S
> index 2a0f6d4378..c23665b9b2 100644
> --- a/sysdeps/x86_64/fpu/multiarch/svml_d_asin8_core_avx512.S
> +++ b/sysdeps/x86_64/fpu/multiarch/svml_d_asin8_core_avx512.S
> @@ -218,7 +218,7 @@ L(SPECIAL_VALUES_LOOP):
>         .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x40, 0xff, 0xff, 0xff, 0x22
>         # LOE rbx r12 r13 r14 r15 zmm0
>
> -       /* Scalar math fucntion call
> +       /* Scalar math function call
>          * to process special input
>          */
>
> diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_asinh2_core_sse4.S b/sysdeps/x86_64/fpu/multiarch/svml_d_asinh2_core_sse4.S
> index a3630b1145..f4da4b2c32 100644
> --- a/sysdeps/x86_64/fpu/multiarch/svml_d_asinh2_core_sse4.S
> +++ b/sysdeps/x86_64/fpu/multiarch/svml_d_asinh2_core_sse4.S
> @@ -474,7 +474,7 @@ L(SPECIAL_VALUES_LOOP):
>         .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x22
>         # LOE rbx r12 r13 r14 r15 xmm0
>
> -       /* Scalar math fucntion call
> +       /* Scalar math function call
>          * to process special input
>          */
>
> diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_asinh4_core_avx2.S b/sysdeps/x86_64/fpu/multiarch/svml_d_asinh4_core_avx2.S
> index d97a5f845f..3ecec43c66 100644
> --- a/sysdeps/x86_64/fpu/multiarch/svml_d_asinh4_core_avx2.S
> +++ b/sysdeps/x86_64/fpu/multiarch/svml_d_asinh4_core_avx2.S
> @@ -423,7 +423,7 @@ L(SPECIAL_VALUES_LOOP):
>         .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xa0, 0xff, 0xff, 0xff, 0x22
>         # LOE rbx r12 r13 r14 r15 ymm0
>
> -       /* Scalar math fucntion call
> +       /* Scalar math function call
>          * to process special input
>          */
>
> diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_asinh8_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_d_asinh8_core_avx512.S
> index b4d88848b5..82bd52407d 100644
> --- a/sysdeps/x86_64/fpu/multiarch/svml_d_asinh8_core_avx512.S
> +++ b/sysdeps/x86_64/fpu/multiarch/svml_d_asinh8_core_avx512.S
> @@ -337,7 +337,7 @@ L(SPECIAL_VALUES_LOOP):
>         .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x40, 0xff, 0xff, 0xff, 0x22
>         # LOE rbx r12 r13 r14 r15 zmm0
>
> -       /* Scalar math fucntion call
> +       /* Scalar math function call
>          * to process special input
>          */
>
> diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_atan22_core_sse4.S b/sysdeps/x86_64/fpu/multiarch/svml_d_atan22_core_sse4.S
> index 7d14cb8cb4..39d86480e4 100644
> --- a/sysdeps/x86_64/fpu/multiarch/svml_d_atan22_core_sse4.S
> +++ b/sysdeps/x86_64/fpu/multiarch/svml_d_atan22_core_sse4.S
> @@ -229,11 +229,11 @@ ENTRY(_ZGVbN2vv_atan2_sse4)
>         /*  Special branch for fast (vector) processing of zero arguments  */
>         testb   $3, %cl
>
> -       /* Go to auxilary branch */
> +       /* Go to auxiliary branch */
>         jne     L(AUX_BRANCH)
>         # LOE rbx rbp r12 r13 r14 r15 eax edx xmm0 xmm1 xmm2 xmm3 xmm4 xmm5 xmm6 xmm7 xmm8 xmm9 xmm10 xmm11
>
> -       /* Return from auxilary branch
> +       /* Return from auxiliary branch
>          * for out of main path inputs
>          */
>
> @@ -316,7 +316,7 @@ L(SPECIAL_VALUES_LOOP):
>         cfi_offset(14, -96)
>         # LOE rbx rbp r12 r13 r14 r15 xmm0
>
> -       /* Scalar math fucntion call
> +       /* Scalar math function call
>          * to process special input
>          */
>
> @@ -336,7 +336,7 @@ L(SCALAR_MATH_CALL):
>         cfi_restore(14)
>         # LOE rbx rbp r15 r12d r13d
>
> -       /* Auxilary branch
> +       /* Auxiliary branch
>          * for out of main path inputs
>          */
>
> diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_atan24_core_avx2.S b/sysdeps/x86_64/fpu/multiarch/svml_d_atan24_core_avx2.S
> index 35b635dac7..a4bcf9c375 100644
> --- a/sysdeps/x86_64/fpu/multiarch/svml_d_atan24_core_avx2.S
> +++ b/sysdeps/x86_64/fpu/multiarch/svml_d_atan24_core_avx2.S
> @@ -170,11 +170,11 @@ ENTRY(_ZGVdN4vv_atan2_avx2)
>         /*  Special branch for fast (vector) processing of zero arguments  */
>         testl   %eax, %eax
>
> -       /* Go to auxilary branch */
> +       /* Go to auxiliary branch */
>         jne     L(AUX_BRANCH)
>         # LOE rbx r12 r13 r14 r15 edx xmm3 ymm0 ymm1 ymm2 ymm4 ymm5 ymm6 ymm7 ymm8
>
> -       /* Return from auxilary branch
> +       /* Return from auxiliary branch
>          * for out of main path inputs
>          */
>
> @@ -271,7 +271,7 @@ L(SPECIAL_VALUES_LOOP):
>         .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x80, 0xff, 0xff, 0xff, 0x22
>         # LOE rbx r12 r13 r14 r15 ymm0
>
> -       /* Scalar math fucntion call
> +       /* Scalar math function call
>          * to process special input
>          */
>
> @@ -291,7 +291,7 @@ L(SCALAR_MATH_CALL):
>         cfi_restore(14)
>         # LOE rbx r15 r12d r13d
>
> -       /* Auxilary branch
> +       /* Auxiliary branch
>          * for out of main path inputs
>          */
>
> diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_atan28_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_d_atan28_core_avx512.S
> index 49662bc8c9..def7af38dc 100644
> --- a/sysdeps/x86_64/fpu/multiarch/svml_d_atan28_core_avx512.S
> +++ b/sysdeps/x86_64/fpu/multiarch/svml_d_atan28_core_avx512.S
> @@ -188,11 +188,11 @@ ENTRY(_ZGVeN8vv_atan2_skx)
>         vmovups 64(%rsp), %zmm9
>         testl   %eax, %eax
>
> -       /* Go to auxilary branch */
> +       /* Go to auxiliary branch */
>         jne     L(AUX_BRANCH)
>         # LOE rbx r12 r13 r14 r15 edx ymm6 zmm0 zmm2 zmm3 zmm4 zmm5 zmm7 zmm9 zmm11
>
> -       /* Return from auxilary branch
> +       /* Return from auxiliary branch
>          * for out of main path inputs
>          */
>
> @@ -289,7 +289,7 @@ L(SPECIAL_VALUES_LOOP):
>         .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x00, 0xff, 0xff, 0xff, 0x22
>         # LOE rbx r12 r13 r14 r15 zmm11
>
> -       /* Scalar math fucntion call
> +       /* Scalar math function call
>          * to process special input
>          */
>
> @@ -309,7 +309,7 @@ L(SCALAR_MATH_CALL):
>         cfi_restore(14)
>         # LOE rbx r15 r12d r13d
>
> -       /* Auxilary branch
> +       /* Auxiliary branch
>          * for out of main path inputs
>          */
>
> diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_atanh2_core_sse4.S b/sysdeps/x86_64/fpu/multiarch/svml_d_atanh2_core_sse4.S
> index 50345f026d..0a87c8cd81 100644
> --- a/sysdeps/x86_64/fpu/multiarch/svml_d_atanh2_core_sse4.S
> +++ b/sysdeps/x86_64/fpu/multiarch/svml_d_atanh2_core_sse4.S
> @@ -367,7 +367,7 @@ L(SPECIAL_VALUES_LOOP):
>         .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x22
>         # LOE rbx r12 r13 r14 r15 xmm0
>
> -       /* Scalar math fucntion call
> +       /* Scalar math function call
>          * to process special input
>          */
>
> diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_atanh4_core_avx2.S b/sysdeps/x86_64/fpu/multiarch/svml_d_atanh4_core_avx2.S
> index 0e2f6cadae..44517bea88 100644
> --- a/sysdeps/x86_64/fpu/multiarch/svml_d_atanh4_core_avx2.S
> +++ b/sysdeps/x86_64/fpu/multiarch/svml_d_atanh4_core_avx2.S
> @@ -333,7 +333,7 @@ L(SPECIAL_VALUES_LOOP):
>         .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xa0, 0xff, 0xff, 0xff, 0x22
>         # LOE rbx r12 r13 r14 r15 ymm0
>
> -       /* Scalar math fucntion call
> +       /* Scalar math function call
>          * to process special input
>          */
>
> diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_atanh8_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_d_atanh8_core_avx512.S
> index 7ba45c0056..99141c1f39 100644
> --- a/sysdeps/x86_64/fpu/multiarch/svml_d_atanh8_core_avx512.S
> +++ b/sysdeps/x86_64/fpu/multiarch/svml_d_atanh8_core_avx512.S
> @@ -268,7 +268,7 @@ L(SPECIAL_VALUES_LOOP):
>         .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x40, 0xff, 0xff, 0xff, 0x22
>         # LOE rbx r12 r13 r14 r15 zmm0
>
> -       /* Scalar math fucntion call
> +       /* Scalar math function call
>          * to process special input
>          */
>
> diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_cbrt2_core_sse4.S b/sysdeps/x86_64/fpu/multiarch/svml_d_cbrt2_core_sse4.S
> index aa90322722..98b276f2e2 100644
> --- a/sysdeps/x86_64/fpu/multiarch/svml_d_cbrt2_core_sse4.S
> +++ b/sysdeps/x86_64/fpu/multiarch/svml_d_cbrt2_core_sse4.S
> @@ -241,7 +241,7 @@ L(SPECIAL_VALUES_LOOP):
>         cfi_offset(14, -80)
>         # LOE rbx rbp r12 r13 r14 r15 xmm6
>
> -       /* Scalar math fucntion call
> +       /* Scalar math function call
>          * to process special input
>          */
>
> diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_cbrt4_core_avx2.S b/sysdeps/x86_64/fpu/multiarch/svml_d_cbrt4_core_avx2.S
> index d0de65fde8..45f395dccb 100644
> --- a/sysdeps/x86_64/fpu/multiarch/svml_d_cbrt4_core_avx2.S
> +++ b/sysdeps/x86_64/fpu/multiarch/svml_d_cbrt4_core_avx2.S
> @@ -256,7 +256,7 @@ L(SPECIAL_VALUES_LOOP):
>         .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xa0, 0xff, 0xff, 0xff, 0x22
>         # LOE rbx r12 r13 r14 r15 ymm0
>
> -       /* Scalar math fucntion call
> +       /* Scalar math function call
>          * to process special input
>          */
>
> diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_cosh2_core_sse4.S b/sysdeps/x86_64/fpu/multiarch/svml_d_cosh2_core_sse4.S
> index c2a13245a8..dd89de036f 100644
> --- a/sysdeps/x86_64/fpu/multiarch/svml_d_cosh2_core_sse4.S
> +++ b/sysdeps/x86_64/fpu/multiarch/svml_d_cosh2_core_sse4.S
> @@ -260,7 +260,7 @@ L(SPECIAL_VALUES_LOOP):
>         cfi_offset(14, -80)
>         # LOE rbx rbp r12 r13 r14 r15 xmm0
>
> -       /* Scalar math fucntion call
> +       /* Scalar math function call
>          * to process special input
>          */
>
> diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_cosh4_core_avx2.S b/sysdeps/x86_64/fpu/multiarch/svml_d_cosh4_core_avx2.S
> index c152307a25..8330968063 100644
> --- a/sysdeps/x86_64/fpu/multiarch/svml_d_cosh4_core_avx2.S
> +++ b/sysdeps/x86_64/fpu/multiarch/svml_d_cosh4_core_avx2.S
> @@ -276,7 +276,7 @@ L(SPECIAL_VALUES_LOOP):
>         .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xa0, 0xff, 0xff, 0xff, 0x22
>         # LOE rbx r12 r13 r14 r15 ymm0
>
> -       /* Scalar math fucntion call
> +       /* Scalar math function call
>          * to process special input
>          */
>
> diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_cosh8_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_d_cosh8_core_avx512.S
> index b4b2284a16..3e2aa620b2 100644
> --- a/sysdeps/x86_64/fpu/multiarch/svml_d_cosh8_core_avx512.S
> +++ b/sysdeps/x86_64/fpu/multiarch/svml_d_cosh8_core_avx512.S
> @@ -225,7 +225,7 @@ L(SPECIAL_VALUES_LOOP):
>         .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x40, 0xff, 0xff, 0xff, 0x22
>         # LOE rbx r12 r13 r14 r15 zmm0
>
> -       /* Scalar math fucntion call
> +       /* Scalar math function call
>          * to process special input
>          */
>
> diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_erfc2_core_sse4.S b/sysdeps/x86_64/fpu/multiarch/svml_d_erfc2_core_sse4.S
> index 5934986b52..a5f2f11508 100644
> --- a/sysdeps/x86_64/fpu/multiarch/svml_d_erfc2_core_sse4.S
> +++ b/sysdeps/x86_64/fpu/multiarch/svml_d_erfc2_core_sse4.S
> @@ -251,7 +251,7 @@ L(SPECIAL_VALUES_LOOP):
>         cfi_offset(14, -80)
>         # LOE rbx rbp r12 r13 r14 r15 xmm0
>
> -       /* Scalar math fucntion call
> +       /* Scalar math function call
>          * to process special input
>          */
>
> diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_erfc4_core_avx2.S b/sysdeps/x86_64/fpu/multiarch/svml_d_erfc4_core_avx2.S
> index 2948e6b3c3..376be17b34 100644
> --- a/sysdeps/x86_64/fpu/multiarch/svml_d_erfc4_core_avx2.S
> +++ b/sysdeps/x86_64/fpu/multiarch/svml_d_erfc4_core_avx2.S
> @@ -252,7 +252,7 @@ L(SPECIAL_VALUES_LOOP):
>         .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xa0, 0xff, 0xff, 0xff, 0x22
>         # LOE rbx r12 r13 r14 r15 ymm0
>
> -       /* Scalar math fucntion call
> +       /* Scalar math function call
>          * to process special input
>          */
>
> diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_erfc8_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_d_erfc8_core_avx512.S
> index 5c92653e20..debba0c365 100644
> --- a/sysdeps/x86_64/fpu/multiarch/svml_d_erfc8_core_avx512.S
> +++ b/sysdeps/x86_64/fpu/multiarch/svml_d_erfc8_core_avx512.S
> @@ -255,7 +255,7 @@ L(SPECIAL_VALUES_LOOP):
>         .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x40, 0xff, 0xff, 0xff, 0x22
>         # LOE rbx r12 r13 r14 r15 zmm0
>
> -       /* Scalar math fucntion call
> +       /* Scalar math function call
>          * to process special input
>          */
>
> diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_exp102_core_sse4.S b/sysdeps/x86_64/fpu/multiarch/svml_d_exp102_core_sse4.S
> index 65abd70168..db25e5b14d 100644
> --- a/sysdeps/x86_64/fpu/multiarch/svml_d_exp102_core_sse4.S
> +++ b/sysdeps/x86_64/fpu/multiarch/svml_d_exp102_core_sse4.S
> @@ -68,7 +68,7 @@ ENTRY(_ZGVbN2v_exp10_sse4)
>         /*  R  */
>         movaps  %xmm0, %xmm12
>
> -       /*  Load arument  */
> +       /*  Load argument  */
>         movups  _dbLg2_10+__svml_dexp10_data_internal(%rip), %xmm13
>         lea     __svml_dexp10_data_internal(%rip), %rsi
>         mulpd   %xmm0, %xmm13
> @@ -214,7 +214,7 @@ L(SPECIAL_VALUES_LOOP):
>         cfi_offset(14, -80)
>         # LOE rbx rbp r12 r13 r14 r15 xmm1
>
> -       /* Scalar math fucntion call
> +       /* Scalar math function call
>          * to process special input
>          */
>
> diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_exp104_core_avx2.S b/sysdeps/x86_64/fpu/multiarch/svml_d_exp104_core_avx2.S
> index 1c7c8e2db8..c5cec289a7 100644
> --- a/sysdeps/x86_64/fpu/multiarch/svml_d_exp104_core_avx2.S
> +++ b/sysdeps/x86_64/fpu/multiarch/svml_d_exp104_core_avx2.S
> @@ -73,7 +73,7 @@ ENTRY(_ZGVdN4v_exp10_avx2)
>         vmovapd %ymm0, %ymm2
>         vmovupd _dbShifter+__svml_dexp10_data_internal(%rip), %ymm3
>
> -       /*  Load arument  */
> +       /*  Load argument  */
>         vmovupd _dbLg2_10+__svml_dexp10_data_internal(%rip), %ymm0
>         vfmadd213pd %ymm3, %ymm2, %ymm0
>         vsubpd  %ymm3, %ymm0, %ymm1
> @@ -225,7 +225,7 @@ L(SPECIAL_VALUES_LOOP):
>         .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xa0, 0xff, 0xff, 0xff, 0x22
>         # LOE rbx r12 r13 r14 r15 ymm0
>
> -       /* Scalar math fucntion call
> +       /* Scalar math function call
>          * to process special input
>          */
>
> diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_exp108_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_d_exp108_core_avx512.S
> index 2f45c9292d..9ea6a3d204 100644
> --- a/sysdeps/x86_64/fpu/multiarch/svml_d_exp108_core_avx512.S
> +++ b/sysdeps/x86_64/fpu/multiarch/svml_d_exp108_core_avx512.S
> @@ -23,7 +23,7 @@
>   *    - all arguments processed in the main path
>   *        - final VSCALEF assists branch-free design (correct overflow/underflow and special case responses)
>   *        - a VAND is used to ensure the reduced argument |R|<2, even for large inputs
> - *        - RZ mode used to avoid oveflow to +/-Inf for x*log2(e); helps with special case handling
> + *        - RZ mode used to avoid overflow to +/-Inf for x*log2(e); helps with special case handling
>   *        - SAE used to avoid spurious flag settings
>   *
>   */
> @@ -185,7 +185,7 @@ L(SPECIAL_VALUES_LOOP):
>         .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x40, 0xff, 0xff, 0xff, 0x22
>         # LOE rbx r12 r13 r14 r15 zmm0
>
> -       /* Scalar math fucntion call
> +       /* Scalar math function call
>          * to process special input
>          */
>
> diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_exp22_core_sse4.S b/sysdeps/x86_64/fpu/multiarch/svml_d_exp22_core_sse4.S
> index 0ffb56d9d4..4c24aa8a2e 100644
> --- a/sysdeps/x86_64/fpu/multiarch/svml_d_exp22_core_sse4.S
> +++ b/sysdeps/x86_64/fpu/multiarch/svml_d_exp22_core_sse4.S
> @@ -67,7 +67,7 @@ ENTRY(_ZGVbN2v_exp2_sse4)
>         /* out, basePtr, iIndex, iBaseOfs, iSize, iGran, iOfs */
>         lea     __svml_dexp2_data_internal(%rip), %rsi
>
> -       /*  Load arument  */
> +       /*  Load argument  */
>         movaps  %xmm1, %xmm10
>         addpd   %xmm0, %xmm10
>         movaps  %xmm10, %xmm6
> @@ -201,7 +201,7 @@ L(SPECIAL_VALUES_LOOP):
>         cfi_offset(14, -80)
>         # LOE rbx rbp r12 r13 r14 r15 xmm1
>
> -       /* Scalar math fucntion call
> +       /* Scalar math function call
>          * to process special input
>          */
>
> diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_exp24_core_avx2.S b/sysdeps/x86_64/fpu/multiarch/svml_d_exp24_core_avx2.S
> index 9337921c63..1e55f3db85 100644
> --- a/sysdeps/x86_64/fpu/multiarch/svml_d_exp24_core_avx2.S
> +++ b/sysdeps/x86_64/fpu/multiarch/svml_d_exp24_core_avx2.S
> @@ -71,7 +71,7 @@ ENTRY(_ZGVdN4v_exp2_avx2)
>         vmovupd _lIndexMask+__svml_dexp2_data_internal(%rip), %ymm3
>         vmovapd %ymm0, %ymm1
>
> -       /*  Load arument  */
> +       /*  Load argument  */
>         vaddpd  %ymm4, %ymm1, %ymm2
>         vsubpd  %ymm4, %ymm2, %ymm0
>
> @@ -217,7 +217,7 @@ L(SPECIAL_VALUES_LOOP):
>         .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xa0, 0xff, 0xff, 0xff, 0x22
>         # LOE rbx r12 r13 r14 r15 ymm0
>
> -       /* Scalar math fucntion call
> +       /* Scalar math function call
>          * to process special input
>          */
>
> diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_exp28_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_d_exp28_core_avx512.S
> index ab3db00910..7e759c445f 100644
> --- a/sysdeps/x86_64/fpu/multiarch/svml_d_exp28_core_avx512.S
> +++ b/sysdeps/x86_64/fpu/multiarch/svml_d_exp28_core_avx512.S
> @@ -221,7 +221,7 @@ L(SPECIAL_VALUES_LOOP):
>         .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x40, 0xff, 0xff, 0xff, 0x22
>         # LOE rbx r12 r13 r14 r15 zmm1
>
> -       /* Scalar math fucntion call
> +       /* Scalar math function call
>          * to process special input
>          */
>
> diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_expm12_core_sse4.S b/sysdeps/x86_64/fpu/multiarch/svml_d_expm12_core_sse4.S
> index 7e1df110e4..05be9079f5 100644
> --- a/sysdeps/x86_64/fpu/multiarch/svml_d_expm12_core_sse4.S
> +++ b/sysdeps/x86_64/fpu/multiarch/svml_d_expm12_core_sse4.S
> @@ -206,7 +206,7 @@ L(SPECIAL_VALUES_LOOP):
>         .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x22
>         # LOE rbx r12 r13 r14 r15 xmm6
>
> -       /* Scalar math fucntion call
> +       /* Scalar math function call
>          * to process special input
>          */
>
> diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_expm14_core_avx2.S b/sysdeps/x86_64/fpu/multiarch/svml_d_expm14_core_avx2.S
> index 815ef34935..ad0b49978c 100644
> --- a/sysdeps/x86_64/fpu/multiarch/svml_d_expm14_core_avx2.S
> +++ b/sysdeps/x86_64/fpu/multiarch/svml_d_expm14_core_avx2.S
> @@ -199,7 +199,7 @@ L(SPECIAL_VALUES_LOOP):
>         .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xa0, 0xff, 0xff, 0xff, 0x22
>         # LOE rbx r12 r13 r14 r15 ymm0
>
> -       /* Scalar math fucntion call
> +       /* Scalar math function call
>          * to process special input
>          */
>
> diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_expm18_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_d_expm18_core_avx512.S
> index f38c694eb1..968801ab00 100644
> --- a/sysdeps/x86_64/fpu/multiarch/svml_d_expm18_core_avx512.S
> +++ b/sysdeps/x86_64/fpu/multiarch/svml_d_expm18_core_avx512.S
> @@ -24,7 +24,7 @@
>   *    - all arguments processed in the main path
>   *        - final VSCALEF assists branch-free design (correct overflow/underflow and special case responses)
>   *        - a VAND is used to ensure the reduced argument |R|<2, even for large inputs
> - *        - RZ mode used to avoid oveflow to +/-Inf for x*log2(e); helps with special case handling
> + *        - RZ mode used to avoid overflow to +/-Inf for x*log2(e); helps with special case handling
>   *
>   *
>   */
> @@ -205,7 +205,7 @@ L(SPECIAL_VALUES_LOOP):
>         .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x40, 0xff, 0xff, 0xff, 0x22
>         # LOE rbx r12 r13 r14 r15 zmm0
>
> -       /* Scalar math fucntion call
> +       /* Scalar math function call
>          * to process special input
>          */
>
> diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_hypot2_core_sse4.S b/sysdeps/x86_64/fpu/multiarch/svml_d_hypot2_core_sse4.S
> index 136f5ebd8d..07c3156cf7 100644
> --- a/sysdeps/x86_64/fpu/multiarch/svml_d_hypot2_core_sse4.S
> +++ b/sysdeps/x86_64/fpu/multiarch/svml_d_hypot2_core_sse4.S
> @@ -47,7 +47,7 @@
>   *    No multiprecision branch for _LA_ and _EP_
>   *      _z = _VARG1 * _VARG1 + _VARG2 * _VARG2
>   *
> - *    Check _z exponent to be withing borders [3BC ; 441] else goto Callout
> + *    Check _z exponent to be within borders [3BC ; 441] else goto Callout
>   *
>   *    _s  ~ 1.0/sqrt(_z)
>   *    _s2 ~ 1.0/(sqrt(_z)*sqrt(_z)) ~ 1.0/_z = (1.0/_z + O)
> @@ -127,7 +127,7 @@ ENTRY(_ZGVbN2vv_hypot_sse4)
>         mulpd   %xmm10, %xmm11
>         mulpd   %xmm10, %xmm2
>
> -       /* Check _z exponent to be withing borders [3BC ; 441] else goto Callout */
> +       /* Check _z exponent to be within borders [3BC ; 441] else goto Callout */
>         movq    _LowBoundary+__svml_dhypot_data_internal(%rip), %xmm5
>         movq    _HighBoundary+__svml_dhypot_data_internal(%rip), %xmm3
>         pshufd  $221, %xmm10, %xmm4
> @@ -215,7 +215,7 @@ L(SPECIAL_VALUES_LOOP):
>         cfi_offset(14, -96)
>         # LOE rbx rbp r12 r13 r14 r15 xmm2
>
> -       /* Scalar math fucntion call
> +       /* Scalar math function call
>          * to process special input
>          */
>
> diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_hypot4_core_avx2.S b/sysdeps/x86_64/fpu/multiarch/svml_d_hypot4_core_avx2.S
> index 61d12c9795..d8c6a3ac43 100644
> --- a/sysdeps/x86_64/fpu/multiarch/svml_d_hypot4_core_avx2.S
> +++ b/sysdeps/x86_64/fpu/multiarch/svml_d_hypot4_core_avx2.S
> @@ -47,7 +47,7 @@
>   *    No multiprecision branch for _LA_ and _EP_
>   *      _z = _VARG1 * _VARG1 + _VARG2 * _VARG2
>   *
> - *    Check _z exponent to be withing borders [3BC ; 441] else goto Callout
> + *    Check _z exponent to be within borders [3BC ; 441] else goto Callout
>   *
>   *    _s  ~ 1.0/sqrt(_z)
>   *    _s2 ~ 1.0/(sqrt(_z)*sqrt(_z)) ~ 1.0/_z = (1.0/_z + O)
> @@ -111,7 +111,7 @@ ENTRY(_ZGVdN4vv_hypot_avx2)
>          */
>         vcvtpd2ps %ymm0, %xmm12
>
> -       /* Check _z exponent to be withing borders [3BC ; 441] else goto Callout */
> +       /* Check _z exponent to be within borders [3BC ; 441] else goto Callout */
>         vextractf128 $1, %ymm0, %xmm3
>         vrsqrtps %xmm12, %xmm13
>         vshufps $221, %xmm3, %xmm0, %xmm5
> @@ -225,7 +225,7 @@ L(SPECIAL_VALUES_LOOP):
>         .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x80, 0xff, 0xff, 0xff, 0x22
>         # LOE rbx r12 r13 r14 r15 ymm0
>
> -       /* Scalar math fucntion call
> +       /* Scalar math function call
>          * to process special input
>          */
>
> diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_hypot8_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_d_hypot8_core_avx512.S
> index fb53d5dbd7..24ab764b7a 100644
> --- a/sysdeps/x86_64/fpu/multiarch/svml_d_hypot8_core_avx512.S
> +++ b/sysdeps/x86_64/fpu/multiarch/svml_d_hypot8_core_avx512.S
> @@ -47,7 +47,7 @@
>   *    No multiprecision branch for _LA_ and _EP_
>   *      _z = _VARG1 * _VARG1 + _VARG2 * _VARG2
>   *
> - *    Check _z exponent to be withing borders [3BC ; 441] else goto Callout
> + *    Check _z exponent to be within borders [3BC ; 441] else goto Callout
>   *
>   *    _s  ~ 1.0/sqrt(_z)
>   *    _s2 ~ 1.0/(sqrt(_z)*sqrt(_z)) ~ 1.0/_z = (1.0/_z + O)
> @@ -188,7 +188,7 @@ L(SPECIAL_VALUES_LOOP):
>         .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x00, 0xff, 0xff, 0xff, 0x22
>         # LOE rbx r12 r13 r14 r15 zmm2
>
> -       /* Scalar math fucntion call
> +       /* Scalar math function call
>          * to process special input
>          */
>
> diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_log102_core_sse4.S b/sysdeps/x86_64/fpu/multiarch/svml_d_log102_core_sse4.S
> index b2e75c1f23..de1583b394 100644
> --- a/sysdeps/x86_64/fpu/multiarch/svml_d_log102_core_sse4.S
> +++ b/sysdeps/x86_64/fpu/multiarch/svml_d_log102_core_sse4.S
> @@ -227,7 +227,7 @@ L(SPECIAL_VALUES_LOOP):
>         .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x22
>         # LOE rbx r12 r13 r14 r15 xmm3
>
> -       /* Scalar math fucntion call
> +       /* Scalar math function call
>          * to process special input
>          */
>
> diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_log104_core_avx2.S b/sysdeps/x86_64/fpu/multiarch/svml_d_log104_core_avx2.S
> index 2e6ebac0ee..8a9b8a84fb 100644
> --- a/sysdeps/x86_64/fpu/multiarch/svml_d_log104_core_avx2.S
> +++ b/sysdeps/x86_64/fpu/multiarch/svml_d_log104_core_avx2.S
> @@ -219,7 +219,7 @@ L(SPECIAL_VALUES_LOOP):
>         .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xa0, 0xff, 0xff, 0xff, 0x22
>         # LOE rbx r12 r13 r14 r15 ymm0
>
> -       /* Scalar math fucntion call
> +       /* Scalar math function call
>          * to process special input
>          */
>
> diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_log108_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_d_log108_core_avx512.S
> index b7593067c0..b4e5a9ccea 100644
> --- a/sysdeps/x86_64/fpu/multiarch/svml_d_log108_core_avx512.S
> +++ b/sysdeps/x86_64/fpu/multiarch/svml_d_log108_core_avx512.S
> @@ -201,7 +201,7 @@ L(SPECIAL_VALUES_LOOP):
>         .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x40, 0xff, 0xff, 0xff, 0x22
>         # LOE rbx r12 r13 r14 r15 zmm0
>
> -       /* Scalar math fucntion call
> +       /* Scalar math function call
>          * to process special input
>          */
>
> diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_log1p2_core_sse4.S b/sysdeps/x86_64/fpu/multiarch/svml_d_log1p2_core_sse4.S
> index d0372e82c6..618b7e1e09 100644
> --- a/sysdeps/x86_64/fpu/multiarch/svml_d_log1p2_core_sse4.S
> +++ b/sysdeps/x86_64/fpu/multiarch/svml_d_log1p2_core_sse4.S
> @@ -265,7 +265,7 @@ L(SPECIAL_VALUES_LOOP):
>         .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x22
>         # LOE rbx r12 r13 r14 r15 xmm0
>
> -       /* Scalar math fucntion call
> +       /* Scalar math function call
>          * to process special input
>          */
>
> diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_log1p4_core_avx2.S b/sysdeps/x86_64/fpu/multiarch/svml_d_log1p4_core_avx2.S
> index d114653c71..dc2ccb3255 100644
> --- a/sysdeps/x86_64/fpu/multiarch/svml_d_log1p4_core_avx2.S
> +++ b/sysdeps/x86_64/fpu/multiarch/svml_d_log1p4_core_avx2.S
> @@ -257,7 +257,7 @@ L(SPECIAL_VALUES_LOOP):
>         .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xa0, 0xff, 0xff, 0xff, 0x22
>         # LOE rbx r12 r13 r14 r15 ymm0
>
> -       /* Scalar math fucntion call
> +       /* Scalar math function call
>          * to process special input
>          */
>
> diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_log1p8_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_d_log1p8_core_avx512.S
> index 283c40b689..f5ec27ddb1 100644
> --- a/sysdeps/x86_64/fpu/multiarch/svml_d_log1p8_core_avx512.S
> +++ b/sysdeps/x86_64/fpu/multiarch/svml_d_log1p8_core_avx512.S
> @@ -219,7 +219,7 @@ L(SPECIAL_VALUES_LOOP):
>         .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x40, 0xff, 0xff, 0xff, 0x22
>         # LOE rbx r12 r13 r14 r15 zmm0
>
> -       /* Scalar math fucntion call
> +       /* Scalar math function call
>          * to process special input
>          */
>
> diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_log22_core_sse4.S b/sysdeps/x86_64/fpu/multiarch/svml_d_log22_core_sse4.S
> index 93bf27092d..29465643cd 100644
> --- a/sysdeps/x86_64/fpu/multiarch/svml_d_log22_core_sse4.S
> +++ b/sysdeps/x86_64/fpu/multiarch/svml_d_log22_core_sse4.S
> @@ -225,7 +225,7 @@ L(SPECIAL_VALUES_LOOP):
>         .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x22
>         # LOE rbx r12 r13 r14 r15 xmm3
>
> -       /* Scalar math fucntion call
> +       /* Scalar math function call
>          * to process special input
>          */
>
> diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_log24_core_avx2.S b/sysdeps/x86_64/fpu/multiarch/svml_d_log24_core_avx2.S
> index 83d8d4c462..30fa3e4473 100644
> --- a/sysdeps/x86_64/fpu/multiarch/svml_d_log24_core_avx2.S
> +++ b/sysdeps/x86_64/fpu/multiarch/svml_d_log24_core_avx2.S
> @@ -217,7 +217,7 @@ L(SPECIAL_VALUES_LOOP):
>         .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xa0, 0xff, 0xff, 0xff, 0x22
>         # LOE rbx r12 r13 r14 r15 ymm0
>
> -       /* Scalar math fucntion call
> +       /* Scalar math function call
>          * to process special input
>          */
>
> diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_log28_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_d_log28_core_avx512.S
> index bc9db384e9..351e00d182 100644
> --- a/sysdeps/x86_64/fpu/multiarch/svml_d_log28_core_avx512.S
> +++ b/sysdeps/x86_64/fpu/multiarch/svml_d_log28_core_avx512.S
> @@ -199,7 +199,7 @@ L(SPECIAL_VALUES_LOOP):
>         .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x40, 0xff, 0xff, 0xff, 0x22
>         # LOE rbx r12 r13 r14 r15 zmm0
>
> -       /* Scalar math fucntion call
> +       /* Scalar math function call
>          * to process special input
>          */
>
> diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_sinh2_core_sse4.S b/sysdeps/x86_64/fpu/multiarch/svml_d_sinh2_core_sse4.S
> index 03a703f5f1..3b01840d73 100644
> --- a/sysdeps/x86_64/fpu/multiarch/svml_d_sinh2_core_sse4.S
> +++ b/sysdeps/x86_64/fpu/multiarch/svml_d_sinh2_core_sse4.S
> @@ -260,7 +260,7 @@ L(SPECIAL_VALUES_LOOP):
>         cfi_offset(14, -80)
>         # LOE rbx rbp r12 r13 r14 r15 xmm0
>
> -       /* Scalar math fucntion call
> +       /* Scalar math function call
>          * to process special input
>          */
>
> diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_sinh4_core_avx2.S b/sysdeps/x86_64/fpu/multiarch/svml_d_sinh4_core_avx2.S
> index 26075187ae..585e2e51bf 100644
> --- a/sysdeps/x86_64/fpu/multiarch/svml_d_sinh4_core_avx2.S
> +++ b/sysdeps/x86_64/fpu/multiarch/svml_d_sinh4_core_avx2.S
> @@ -274,7 +274,7 @@ L(SPECIAL_VALUES_LOOP):
>         .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xa0, 0xff, 0xff, 0xff, 0x22
>         # LOE rbx r12 r13 r14 r15 ymm0
>
> -       /* Scalar math fucntion call
> +       /* Scalar math function call
>          * to process special input
>          */
>
> diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_sinh8_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_d_sinh8_core_avx512.S
> index ce08de9dd0..8158d1455f 100644
> --- a/sysdeps/x86_64/fpu/multiarch/svml_d_sinh8_core_avx512.S
> +++ b/sysdeps/x86_64/fpu/multiarch/svml_d_sinh8_core_avx512.S
> @@ -265,7 +265,7 @@ L(SPECIAL_VALUES_LOOP):
>         .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x40, 0xff, 0xff, 0xff, 0x22
>         # LOE rbx r12 r13 r14 r15 zmm0
>
> -       /* Scalar math fucntion call
> +       /* Scalar math function call
>          * to process special input
>          */
>
> diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_tan2_core_sse4.S b/sysdeps/x86_64/fpu/multiarch/svml_d_tan2_core_sse4.S
> index 9fac5fa4bc..9c208765af 100644
> --- a/sysdeps/x86_64/fpu/multiarch/svml_d_tan2_core_sse4.S
> +++ b/sysdeps/x86_64/fpu/multiarch/svml_d_tan2_core_sse4.S
> @@ -181,11 +181,11 @@ ENTRY(_ZGVbN2v_tan_sse4)
>         movmskpd %xmm4, %edx
>         testl   %edx, %edx
>
> -       /* Go to auxilary branch */
> +       /* Go to auxiliary branch */
>         jne     L(AUX_BRANCH)
>         # LOE rbx rbp r12 r13 r14 r15 eax xmm0 xmm1 xmm4 xmm5
>
> -       /* Return from auxilary branch
> +       /* Return from auxiliary branch
>          * for out of main path inputs
>          */
>
> @@ -264,7 +264,7 @@ L(SPECIAL_VALUES_LOOP):
>         cfi_offset(14, -80)
>         # LOE rbx rbp r12 r13 r14 r15 xmm0
>
> -       /* Scalar math fucntion call
> +       /* Scalar math function call
>          * to process special input
>          */
>
> @@ -283,7 +283,7 @@ L(SCALAR_MATH_CALL):
>         cfi_restore(14)
>         # LOE rbx rbp r15 r12d r13d
>
> -       /* Auxilary branch
> +       /* Auxiliary branch
>          * for out of main path inputs
>          */
>
> diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_tan4_core_avx2.S b/sysdeps/x86_64/fpu/multiarch/svml_d_tan4_core_avx2.S
> index 8586565ddb..82d2ceff9a 100644
> --- a/sysdeps/x86_64/fpu/multiarch/svml_d_tan4_core_avx2.S
> +++ b/sysdeps/x86_64/fpu/multiarch/svml_d_tan4_core_avx2.S
> @@ -166,11 +166,11 @@ ENTRY(_ZGVdN4v_tan_avx2)
>         vxorpd  %ymm0, %ymm8, %ymm0
>         testl   %eax, %eax
>
> -       /* Go to auxilary branch */
> +       /* Go to auxiliary branch */
>         jne     L(AUX_BRANCH)
>         # LOE rbx r12 r13 r14 r15 r9d ymm0 ymm1 ymm14 ymm15
>
> -       /* Return from auxilary branch
> +       /* Return from auxiliary branch
>          * for out of main path inputs
>          */
>
> @@ -261,7 +261,7 @@ L(SPECIAL_VALUES_LOOP):
>         .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xa0, 0xff, 0xff, 0xff, 0x22
>         # LOE rbx r12 r13 r14 r15 ymm0
>
> -       /* Scalar math fucntion call
> +       /* Scalar math function call
>          * to process special input
>          */
>
> @@ -280,7 +280,7 @@ L(SCALAR_MATH_CALL):
>         cfi_restore(14)
>         # LOE rbx r15 r12d r13d
>
> -       /* Auxilary branch
> +       /* Auxiliary branch
>          * for out of main path inputs
>          */
>
> diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_tan8_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_d_tan8_core_avx512.S
> index 79deb21b2a..c5738cef99 100644
> --- a/sysdeps/x86_64/fpu/multiarch/svml_d_tan8_core_avx512.S
> +++ b/sysdeps/x86_64/fpu/multiarch/svml_d_tan8_core_avx512.S
> @@ -96,11 +96,11 @@ ENTRY(_ZGVeN8v_tan_skx)
>         vfnmadd231pd {rn-sae}, %zmm8, %zmm3, %zmm5
>         vfnmadd213pd {rn-sae}, %zmm5, %zmm4, %zmm8
>
> -       /* Go to auxilary branch */
> +       /* Go to auxiliary branch */
>         jne     L(AUX_BRANCH)
>         # LOE rbx r12 r13 r14 r15 edx zmm0 zmm1 zmm8 zmm11 k1
>
> -       /* Return from auxilary branch
> +       /* Return from auxiliary branch
>          * for out of main path inputs
>          */
>
> @@ -233,7 +233,7 @@ L(SPECIAL_VALUES_LOOP):
>         .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x40, 0xff, 0xff, 0xff, 0x22
>         # LOE rbx r12 r13 r14 r15 zmm0
>
> -       /* Scalar math fucntion call
> +       /* Scalar math function call
>          * to process special input
>          */
>
> @@ -252,7 +252,7 @@ L(SCALAR_MATH_CALL):
>         cfi_restore(14)
>         # LOE rbx r15 r12d r13d
>
> -       /* Auxilary branch
> +       /* Auxiliary branch
>          * for out of main path inputs
>          */
>
> diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_tanh2_core_sse4.S b/sysdeps/x86_64/fpu/multiarch/svml_d_tanh2_core_sse4.S
> index 6fef5f0856..cbcb0d6a43 100644
> --- a/sysdeps/x86_64/fpu/multiarch/svml_d_tanh2_core_sse4.S
> +++ b/sysdeps/x86_64/fpu/multiarch/svml_d_tanh2_core_sse4.S
> @@ -259,7 +259,7 @@ L(SPECIAL_VALUES_LOOP):
>         cfi_offset(14, -80)
>         # LOE rbx rbp r12 r13 r14 r15 xmm0
>
> -       /* Scalar math fucntion call
> +       /* Scalar math function call
>          * to process special input
>          */
>
> diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_tanh4_core_avx2.S b/sysdeps/x86_64/fpu/multiarch/svml_d_tanh4_core_avx2.S
> index c05f4c2079..cf0182bf8a 100644
> --- a/sysdeps/x86_64/fpu/multiarch/svml_d_tanh4_core_avx2.S
> +++ b/sysdeps/x86_64/fpu/multiarch/svml_d_tanh4_core_avx2.S
> @@ -266,7 +266,7 @@ L(SPECIAL_VALUES_LOOP):
>         .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xa0, 0xff, 0xff, 0xff, 0x22
>         # LOE rbx r12 r13 r14 r15 ymm0
>
> -       /* Scalar math fucntion call
> +       /* Scalar math function call
>          * to process special input
>          */
>
> diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_tanh8_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_d_tanh8_core_avx512.S
> index 70f0880049..b3477a346b 100644
> --- a/sysdeps/x86_64/fpu/multiarch/svml_d_tanh8_core_avx512.S
> +++ b/sysdeps/x86_64/fpu/multiarch/svml_d_tanh8_core_avx512.S
> @@ -280,7 +280,7 @@ L(SPECIAL_VALUES_LOOP):
>         .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xc0, 0xfe, 0xff, 0xff, 0x22
>         # LOE rbx r12 r13 r14 r15 zmm0
>
> -       /* Scalar math fucntion call
> +       /* Scalar math function call
>          * to process special input
>          */
>
> diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_acosf16_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_s_acosf16_core_avx512.S
> index 1c68130a87..5bdc356429 100644
> --- a/sysdeps/x86_64/fpu/multiarch/svml_s_acosf16_core_avx512.S
> +++ b/sysdeps/x86_64/fpu/multiarch/svml_s_acosf16_core_avx512.S
> @@ -199,7 +199,7 @@ L(SPECIAL_VALUES_LOOP):
>         .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x40, 0xff, 0xff, 0xff, 0x22
>         # LOE rbx r12 r13 r14 r15 zmm0
>
> -       /* Scalar math fucntion call
> +       /* Scalar math function call
>          * to process special input
>          */
>
> diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_acosf4_core_sse4.S b/sysdeps/x86_64/fpu/multiarch/svml_s_acosf4_core_sse4.S
> index 372beff631..ac099d38c5 100644
> --- a/sysdeps/x86_64/fpu/multiarch/svml_s_acosf4_core_sse4.S
> +++ b/sysdeps/x86_64/fpu/multiarch/svml_s_acosf4_core_sse4.S
> @@ -198,7 +198,7 @@ L(SPECIAL_VALUES_LOOP):
>         cfi_offset(14, -80)
>         # LOE rbx rbp r12 r13 r14 r15 xmm7
>
> -       /* Scalar math fucntion call
> +       /* Scalar math function call
>          * to process special input
>          */
>
> diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_acosf8_core_avx2.S b/sysdeps/x86_64/fpu/multiarch/svml_s_acosf8_core_avx2.S
> index 9e2f3b0dfe..76296d91c0 100644
> --- a/sysdeps/x86_64/fpu/multiarch/svml_s_acosf8_core_avx2.S
> +++ b/sysdeps/x86_64/fpu/multiarch/svml_s_acosf8_core_avx2.S
> @@ -192,7 +192,7 @@ L(SPECIAL_VALUES_LOOP):
>         .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xa0, 0xff, 0xff, 0xff, 0x22
>         # LOE rbx r12 r13 r14 r15 ymm0
>
> -       /* Scalar math fucntion call
> +       /* Scalar math function call
>          * to process special input
>          */
>
> diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_acoshf16_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_s_acoshf16_core_avx512.S
> index 9ba81506ca..ff7063499f 100644
> --- a/sysdeps/x86_64/fpu/multiarch/svml_s_acoshf16_core_avx512.S
> +++ b/sysdeps/x86_64/fpu/multiarch/svml_s_acoshf16_core_avx512.S
> @@ -284,7 +284,7 @@ L(SPECIAL_VALUES_LOOP):
>         .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x40, 0xff, 0xff, 0xff, 0x22
>         # LOE rbx r12 r13 r14 r15 zmm0
>
> -       /* Scalar math fucntion call
> +       /* Scalar math function call
>          * to process special input
>          */
>
> diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_acoshf4_core_sse4.S b/sysdeps/x86_64/fpu/multiarch/svml_s_acoshf4_core_sse4.S
> index 6c3cbf0c3b..6a213dc2e4 100644
> --- a/sysdeps/x86_64/fpu/multiarch/svml_s_acoshf4_core_sse4.S
> +++ b/sysdeps/x86_64/fpu/multiarch/svml_s_acoshf4_core_sse4.S
> @@ -299,7 +299,7 @@ L(SPECIAL_VALUES_LOOP):
>         cfi_offset(14, -80)
>         # LOE rbx rbp r12 r13 r14 r15 xmm9
>
> -       /* Scalar math fucntion call
> +       /* Scalar math function call
>          * to process special input
>          */
>
> diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_acoshf8_core_avx2.S b/sysdeps/x86_64/fpu/multiarch/svml_s_acoshf8_core_avx2.S
> index 45aede28ea..17f6a19b3d 100644
> --- a/sysdeps/x86_64/fpu/multiarch/svml_s_acoshf8_core_avx2.S
> +++ b/sysdeps/x86_64/fpu/multiarch/svml_s_acoshf8_core_avx2.S
> @@ -280,7 +280,7 @@ L(SPECIAL_VALUES_LOOP):
>         .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xa0, 0xff, 0xff, 0xff, 0x22
>         # LOE rbx r12 r13 r14 r15 ymm0
>
> -       /* Scalar math fucntion call
> +       /* Scalar math function call
>          * to process special input
>          */
>
> diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_asinf16_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_s_asinf16_core_avx512.S
> index daa5cfa91e..2ffe24e1ff 100644
> --- a/sysdeps/x86_64/fpu/multiarch/svml_s_asinf16_core_avx512.S
> +++ b/sysdeps/x86_64/fpu/multiarch/svml_s_asinf16_core_avx512.S
> @@ -192,7 +192,7 @@ L(SPECIAL_VALUES_LOOP):
>         .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x40, 0xff, 0xff, 0xff, 0x22
>         # LOE rbx r12 r13 r14 r15 zmm0
>
> -       /* Scalar math fucntion call
> +       /* Scalar math function call
>          * to process special input
>          */
>
> diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_asinf4_core_sse4.S b/sysdeps/x86_64/fpu/multiarch/svml_s_asinf4_core_sse4.S
> index 0718fa09b4..bc3e2f8340 100644
> --- a/sysdeps/x86_64/fpu/multiarch/svml_s_asinf4_core_sse4.S
> +++ b/sysdeps/x86_64/fpu/multiarch/svml_s_asinf4_core_sse4.S
> @@ -184,7 +184,7 @@ L(SPECIAL_VALUES_LOOP):
>         cfi_offset(14, -80)
>         # LOE rbx rbp r12 r13 r14 r15 xmm0
>
> -       /* Scalar math fucntion call
> +       /* Scalar math function call
>          * to process special input
>          */
>
> diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_asinf8_core_avx2.S b/sysdeps/x86_64/fpu/multiarch/svml_s_asinf8_core_avx2.S
> index 2199ed35d1..41e015c490 100644
> --- a/sysdeps/x86_64/fpu/multiarch/svml_s_asinf8_core_avx2.S
> +++ b/sysdeps/x86_64/fpu/multiarch/svml_s_asinf8_core_avx2.S
> @@ -181,7 +181,7 @@ L(SPECIAL_VALUES_LOOP):
>         .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xa0, 0xff, 0xff, 0xff, 0x22
>         # LOE rbx r12 r13 r14 r15 ymm0
>
> -       /* Scalar math fucntion call
> +       /* Scalar math function call
>          * to process special input
>          */
>
> diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_asinhf16_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_s_asinhf16_core_avx512.S
> index 720b58f956..592caa85da 100644
> --- a/sysdeps/x86_64/fpu/multiarch/svml_s_asinhf16_core_avx512.S
> +++ b/sysdeps/x86_64/fpu/multiarch/svml_s_asinhf16_core_avx512.S
> @@ -307,7 +307,7 @@ L(SPECIAL_VALUES_LOOP):
>         .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x40, 0xff, 0xff, 0xff, 0x22
>         # LOE rbx r12 r13 r14 r15 zmm0
>
> -       /* Scalar math fucntion call
> +       /* Scalar math function call
>          * to process special input
>          */
>
> diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_asinhf4_core_sse4.S b/sysdeps/x86_64/fpu/multiarch/svml_s_asinhf4_core_sse4.S
> index c78550ec22..e5996b3346 100644
> --- a/sysdeps/x86_64/fpu/multiarch/svml_s_asinhf4_core_sse4.S
> +++ b/sysdeps/x86_64/fpu/multiarch/svml_s_asinhf4_core_sse4.S
> @@ -403,7 +403,7 @@ L(SPECIAL_VALUES_LOOP):
>         cfi_offset(14, -80)
>         # LOE rbx rbp r12 r13 r14 r15 xmm0
>
> -       /* Scalar math fucntion call
> +       /* Scalar math function call
>          * to process special input
>          */
>
> diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_asinhf8_core_avx2.S b/sysdeps/x86_64/fpu/multiarch/svml_s_asinhf8_core_avx2.S
> index f9aeea6c85..1e8fc22910 100644
> --- a/sysdeps/x86_64/fpu/multiarch/svml_s_asinhf8_core_avx2.S
> +++ b/sysdeps/x86_64/fpu/multiarch/svml_s_asinhf8_core_avx2.S
> @@ -355,7 +355,7 @@ L(SPECIAL_VALUES_LOOP):
>         .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xa0, 0xff, 0xff, 0xff, 0x22
>         # LOE rbx r12 r13 r14 r15 ymm0
>
> -       /* Scalar math fucntion call
> +       /* Scalar math function call
>          * to process special input
>          */
>
> diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_atan2f16_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_s_atan2f16_core_avx512.S
> index e031dadf19..08c193e273 100644
> --- a/sysdeps/x86_64/fpu/multiarch/svml_s_atan2f16_core_avx512.S
> +++ b/sysdeps/x86_64/fpu/multiarch/svml_s_atan2f16_core_avx512.S
> @@ -150,11 +150,11 @@ ENTRY(_ZGVeN16vv_atan2f_skx)
>         vaddps  {rn-sae}, %zmm11, %zmm9, %zmm9{%k4}
>         vorps   %zmm6, %zmm9, %zmm10
>
> -       /* Go to auxilary branch */
> +       /* Go to auxiliary branch */
>         jne     L(AUX_BRANCH)
>         # LOE rbx r12 r13 r14 r15 edx zmm0 zmm1 zmm2 zmm3 zmm4 zmm5 zmm6 zmm7 zmm8 zmm10 zmm11
>
> -       /* Return from auxilary branch
> +       /* Return from auxiliary branch
>          * for out of main path inputs
>          */
>
> @@ -251,7 +251,7 @@ L(SPECIAL_VALUES_LOOP):
>         .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x00, 0xff, 0xff, 0xff, 0x22
>         # LOE rbx r12 r13 r14 r15 zmm10
>
> -       /* Scalar math fucntion call
> +       /* Scalar math function call
>          * to process special input
>          */
>
> @@ -271,7 +271,7 @@ L(SCALAR_MATH_CALL):
>         cfi_restore(14)
>         # LOE rbx r15 r12d r13d
>
> -       /* Auxilary branch
> +       /* Auxiliary branch
>          * for out of main path inputs
>          */
>
> diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_atan2f4_core_sse4.S b/sysdeps/x86_64/fpu/multiarch/svml_s_atan2f4_core_sse4.S
> index 60426108b1..0ec9b19590 100644
> --- a/sysdeps/x86_64/fpu/multiarch/svml_s_atan2f4_core_sse4.S
> +++ b/sysdeps/x86_64/fpu/multiarch/svml_s_atan2f4_core_sse4.S
> @@ -157,11 +157,11 @@ ENTRY(_ZGVbN4vv_atan2f_sse4)
>         /*  Special branch for fast (vector) processing of zero arguments  */
>         testl   %ecx, %ecx
>
> -       /* Go to auxilary branch */
> +       /* Go to auxiliary branch */
>         jne     L(AUX_BRANCH)
>         # LOE rbx rbp r12 r13 r14 r15 eax edx xmm0 xmm1 xmm4 xmm5 xmm6 xmm7 xmm8 xmm9 xmm10 xmm11 xmm12 xmm13
>
> -       /* Return from auxilary branch
> +       /* Return from auxiliary branch
>          * for out of main path inputs
>          */
>
> @@ -244,7 +244,7 @@ L(SPECIAL_VALUES_LOOP):
>         cfi_offset(14, -96)
>         # LOE rbx rbp r12 r13 r14 r15 xmm0
>
> -       /* Scalar math fucntion call
> +       /* Scalar math function call
>          * to process special input
>          */
>
> @@ -264,7 +264,7 @@ L(SCALAR_MATH_CALL):
>         cfi_restore(14)
>         # LOE rbx rbp r15 r12d r13d
>
> -       /* Auxilary branch
> +       /* Auxiliary branch
>          * for out of main path inputs
>          */
>
> diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_atan2f8_core_avx2.S b/sysdeps/x86_64/fpu/multiarch/svml_s_atan2f8_core_avx2.S
> index bf632c8a99..69619cb4d8 100644
> --- a/sysdeps/x86_64/fpu/multiarch/svml_s_atan2f8_core_avx2.S
> +++ b/sysdeps/x86_64/fpu/multiarch/svml_s_atan2f8_core_avx2.S
> @@ -131,11 +131,11 @@ ENTRY(_ZGVdN8vv_atan2f_avx2)
>         /*  Special branch for fast (vector) processing of zero arguments  */
>         testl   %eax, %eax
>
> -       /* Go to auxilary branch */
> +       /* Go to auxiliary branch */
>         jne     L(AUX_BRANCH)
>         # LOE rbx r12 r13 r14 r15 edx ymm0 ymm1 ymm2 ymm3 ymm4 ymm5 ymm6 ymm7 ymm9 ymm10 ymm12 ymm13
>
> -       /* Return from auxilary branch
> +       /* Return from auxiliary branch
>          * for out of main path inputs
>          */
>
> @@ -232,7 +232,7 @@ L(SPECIAL_VALUES_LOOP):
>         .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x80, 0xff, 0xff, 0xff, 0x22
>         # LOE rbx r12 r13 r14 r15 ymm9
>
> -       /* Scalar math fucntion call
> +       /* Scalar math function call
>          * to process special input
>          */
>
> @@ -252,7 +252,7 @@ L(SCALAR_MATH_CALL):
>         cfi_restore(14)
>         # LOE rbx r15 r12d r13d
>
> -       /* Auxilary branch
> +       /* Auxiliary branch
>          * for out of main path inputs
>          */
>
> diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_atanhf16_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_s_atanhf16_core_avx512.S
> index f733c7a1b5..6c3d40d676 100644
> --- a/sysdeps/x86_64/fpu/multiarch/svml_s_atanhf16_core_avx512.S
> +++ b/sysdeps/x86_64/fpu/multiarch/svml_s_atanhf16_core_avx512.S
> @@ -221,7 +221,7 @@ L(SPECIAL_VALUES_LOOP):
>         xorl    %ebp, %ebp
>         tzcntl  %ebx, %ebp
>
> -       /* Scalar math fucntion call to process special input.  */
> +       /* Scalar math function call to process special input.  */
>         vmovss  64(%rsp, %rbp, 4), %xmm0
>         call    atanhf@PLT
>
> diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_atanhf4_core_sse4.S b/sysdeps/x86_64/fpu/multiarch/svml_s_atanhf4_core_sse4.S
> index 055484bfb2..ab2ef46b9d 100644
> --- a/sysdeps/x86_64/fpu/multiarch/svml_s_atanhf4_core_sse4.S
> +++ b/sysdeps/x86_64/fpu/multiarch/svml_s_atanhf4_core_sse4.S
> @@ -242,7 +242,7 @@ L(SPECIAL_VALUES_LOOP):
>         xorl    %ebp, %ebp
>         bsfl    %ebx, %ebp
>
> -       /* Scalar math fucntion call to process special input.  */
> +       /* Scalar math function call to process special input.  */
>         movss   40(%rsp, %rbp, 4), %xmm0
>         call    atanhf@PLT
>         /* No good way to avoid the store-forwarding fault this will cause on
> diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_atanhf8_core_avx2.S b/sysdeps/x86_64/fpu/multiarch/svml_s_atanhf8_core_avx2.S
> index 8ffe98cfe1..e70085b051 100644
> --- a/sysdeps/x86_64/fpu/multiarch/svml_s_atanhf8_core_avx2.S
> +++ b/sysdeps/x86_64/fpu/multiarch/svml_s_atanhf8_core_avx2.S
> @@ -230,7 +230,7 @@ L(SPECIAL_VALUES_LOOP):
>         xorl    %ebp, %ebp
>         tzcntl  %ebx, %ebp
>
> -       /* Scalar math fucntion call to process special input.  */
> +       /* Scalar math function call to process special input.  */
>         vmovss  32(%rsp, %rbp, 4), %xmm0
>         call    atanhf@PLT
>
> diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_cbrtf4_core_sse4.S b/sysdeps/x86_64/fpu/multiarch/svml_s_cbrtf4_core_sse4.S
> index f5331db13b..270e620d61 100644
> --- a/sysdeps/x86_64/fpu/multiarch/svml_s_cbrtf4_core_sse4.S
> +++ b/sysdeps/x86_64/fpu/multiarch/svml_s_cbrtf4_core_sse4.S
> @@ -273,7 +273,7 @@ L(SPECIAL_VALUES_LOOP):
>         cfi_offset(14, -80)
>         # LOE rbx rbp r12 r13 r14 r15 xmm12
>
> -       /* Scalar math fucntion call
> +       /* Scalar math function call
>          * to process special input
>          */
>
> diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_cbrtf8_core_avx2.S b/sysdeps/x86_64/fpu/multiarch/svml_s_cbrtf8_core_avx2.S
> index 76db762fe8..292eb5a93f 100644
> --- a/sysdeps/x86_64/fpu/multiarch/svml_s_cbrtf8_core_avx2.S
> +++ b/sysdeps/x86_64/fpu/multiarch/svml_s_cbrtf8_core_avx2.S
> @@ -298,7 +298,7 @@ L(SPECIAL_VALUES_LOOP):
>         .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xa0, 0xff, 0xff, 0xff, 0x22
>         # LOE rbx r12 r13 r14 r15 ymm0
>
> -       /* Scalar math fucntion call
> +       /* Scalar math function call
>          * to process special input
>          */
>
> diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_coshf16_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_s_coshf16_core_avx512.S
> index 14696eeff4..773594d4e0 100644
> --- a/sysdeps/x86_64/fpu/multiarch/svml_s_coshf16_core_avx512.S
> +++ b/sysdeps/x86_64/fpu/multiarch/svml_s_coshf16_core_avx512.S
> @@ -222,7 +222,7 @@ L(SPECIAL_VALUES_LOOP):
>         .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x40, 0xff, 0xff, 0xff, 0x22
>         # LOE rbx r12 r13 r14 r15 zmm6
>
> -       /* Scalar math fucntion call
> +       /* Scalar math function call
>          * to process special input
>          */
>
> diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_coshf4_core_sse4.S b/sysdeps/x86_64/fpu/multiarch/svml_s_coshf4_core_sse4.S
> index 654ac65916..ee987dd10f 100644
> --- a/sysdeps/x86_64/fpu/multiarch/svml_s_coshf4_core_sse4.S
> +++ b/sysdeps/x86_64/fpu/multiarch/svml_s_coshf4_core_sse4.S
> @@ -233,7 +233,7 @@ L(SPECIAL_VALUES_LOOP):
>         cfi_offset(14, -80)
>         # LOE rbx rbp r12 r13 r14 r15 xmm2
>
> -       /* Scalar math fucntion call
> +       /* Scalar math function call
>          * to process special input
>          */
>
> diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_coshf8_core_avx2.S b/sysdeps/x86_64/fpu/multiarch/svml_s_coshf8_core_avx2.S
> index 474cb05473..24692722eb 100644
> --- a/sysdeps/x86_64/fpu/multiarch/svml_s_coshf8_core_avx2.S
> +++ b/sysdeps/x86_64/fpu/multiarch/svml_s_coshf8_core_avx2.S
> @@ -236,7 +236,7 @@ L(SPECIAL_VALUES_LOOP):
>         .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xa0, 0xff, 0xff, 0xff, 0x22
>         # LOE rbx r12 r13 r14 r15 ymm0
>
> -       /* Scalar math fucntion call
> +       /* Scalar math function call
>          * to process special input
>          */
>
> diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_erfcf16_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_s_erfcf16_core_avx512.S
> index 03b7e4adc1..3d19dbd58a 100644
> --- a/sysdeps/x86_64/fpu/multiarch/svml_s_erfcf16_core_avx512.S
> +++ b/sysdeps/x86_64/fpu/multiarch/svml_s_erfcf16_core_avx512.S
> @@ -212,7 +212,7 @@ L(SPECIAL_VALUES_LOOP):
>         .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x40, 0xff, 0xff, 0xff, 0x22
>         # LOE rbx r12 r13 r14 r15 zmm0
>
> -       /* Scalar math fucntion call
> +       /* Scalar math function call
>          * to process special input
>          */
>
> diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_erfcf4_core_sse4.S b/sysdeps/x86_64/fpu/multiarch/svml_s_erfcf4_core_sse4.S
> index 02aa2b4f76..e7cae80579 100644
> --- a/sysdeps/x86_64/fpu/multiarch/svml_s_erfcf4_core_sse4.S
> +++ b/sysdeps/x86_64/fpu/multiarch/svml_s_erfcf4_core_sse4.S
> @@ -219,7 +219,7 @@ L(SPECIAL_VALUES_LOOP):
>         cfi_offset(14, -80)
>         # LOE rbx rbp r12 r13 r14 r15 xmm0
>
> -       /* Scalar math fucntion call
> +       /* Scalar math function call
>          * to process special input
>          */
>
> diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_erfcf8_core_avx2.S b/sysdeps/x86_64/fpu/multiarch/svml_s_erfcf8_core_avx2.S
> index c3e8e399db..958b46dbfe 100644
> --- a/sysdeps/x86_64/fpu/multiarch/svml_s_erfcf8_core_avx2.S
> +++ b/sysdeps/x86_64/fpu/multiarch/svml_s_erfcf8_core_avx2.S
> @@ -237,7 +237,7 @@ L(SPECIAL_VALUES_LOOP):
>         .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xa0, 0xff, 0xff, 0xff, 0x22
>         # LOE rbx r12 r13 r14 r15 ymm0
>
> -       /* Scalar math fucntion call
> +       /* Scalar math function call
>          * to process special input
>          */
>
> diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_exp10f16_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_s_exp10f16_core_avx512.S
> index e70e8c52ca..f2d8130ee4 100644
> --- a/sysdeps/x86_64/fpu/multiarch/svml_s_exp10f16_core_avx512.S
> +++ b/sysdeps/x86_64/fpu/multiarch/svml_s_exp10f16_core_avx512.S
> @@ -23,7 +23,7 @@
>   *    - all arguments processed in the main path
>   *        - final VSCALEF assists branch-free design (correct overflow/underflow and special case responses)
>   *        - a VAND is used to ensure the reduced argument |R|<2, even for large inputs
> - *        - RZ mode used to avoid oveflow to +/-Inf for x*log2(e); helps with special case handling
> + *        - RZ mode used to avoid overflow to +/-Inf for x*log2(e); helps with special case handling
>   *        - SAE used to avoid spurious flag settings
>   *
>   */
> @@ -180,7 +180,7 @@ L(SPECIAL_VALUES_LOOP):
>         .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x40, 0xff, 0xff, 0xff, 0x22
>         # LOE rbx r12 r13 r14 r15 zmm1
>
> -       /* Scalar math fucntion call
> +       /* Scalar math function call
>          * to process special input
>          */
>
> diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_exp10f4_core_sse4.S b/sysdeps/x86_64/fpu/multiarch/svml_s_exp10f4_core_sse4.S
> index 9de39a62c2..9eb215a40f 100644
> --- a/sysdeps/x86_64/fpu/multiarch/svml_s_exp10f4_core_sse4.S
> +++ b/sysdeps/x86_64/fpu/multiarch/svml_s_exp10f4_core_sse4.S
> @@ -63,7 +63,7 @@ ENTRY(_ZGVbN4v_exp10f_sse4)
>         cfi_def_cfa_offset(80)
>         movaps  %xmm0, %xmm4
>
> -       /*  Load arument  */
> +       /*  Load argument  */
>         movups  _sLg2_10+__svml_sexp10_data_internal(%rip), %xmm2
>         lea     __svml_sexp10_data_internal(%rip), %r8
>         mulps   %xmm4, %xmm2
> @@ -212,7 +212,7 @@ L(SPECIAL_VALUES_LOOP):
>         cfi_offset(14, -80)
>         # LOE rbx rbp r12 r13 r14 r15 xmm0
>
> -       /* Scalar math fucntion call
> +       /* Scalar math function call
>          * to process special input
>          */
>
> diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_exp10f8_core_avx2.S b/sysdeps/x86_64/fpu/multiarch/svml_s_exp10f8_core_avx2.S
> index e3087a75dc..79563cc353 100644
> --- a/sysdeps/x86_64/fpu/multiarch/svml_s_exp10f8_core_avx2.S
> +++ b/sysdeps/x86_64/fpu/multiarch/svml_s_exp10f8_core_avx2.S
> @@ -69,7 +69,7 @@ ENTRY(_ZGVdN8v_exp10f_avx2)
>         lea     __svml_sexp10_data_internal(%rip), %rax
>         vmovups _sShifter+__svml_sexp10_data_internal(%rip), %ymm4
>
> -       /*  Load arument  */
> +       /*  Load argument  */
>         vmovups _sLg2_10+__svml_sexp10_data_internal(%rip), %ymm1
>         vmovups _iIndexMask+__svml_sexp10_data_internal(%rip), %ymm2
>         vmovaps %ymm0, %ymm3
> @@ -232,7 +232,7 @@ L(SPECIAL_VALUES_LOOP):
>         .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xa0, 0xff, 0xff, 0xff, 0x22
>         # LOE rbx r12 r13 r14 r15 ymm0
>
> -       /* Scalar math fucntion call
> +       /* Scalar math function call
>          * to process special input
>          */
>
> diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_exp2f16_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_s_exp2f16_core_avx512.S
> index 1911c06bcf..ce983b297b 100644
> --- a/sysdeps/x86_64/fpu/multiarch/svml_s_exp2f16_core_avx512.S
> +++ b/sysdeps/x86_64/fpu/multiarch/svml_s_exp2f16_core_avx512.S
> @@ -203,7 +203,7 @@ L(SPECIAL_VALUES_LOOP):
>         .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x40, 0xff, 0xff, 0xff, 0x22
>         # LOE rbx r12 r13 r14 r15 zmm1
>
> -       /* Scalar math fucntion call
> +       /* Scalar math function call
>          * to process special input
>          */
>
> diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_exp2f4_core_sse4.S b/sysdeps/x86_64/fpu/multiarch/svml_s_exp2f4_core_sse4.S
> index f4ddfbe932..512ea5c5bf 100644
> --- a/sysdeps/x86_64/fpu/multiarch/svml_s_exp2f4_core_sse4.S
> +++ b/sysdeps/x86_64/fpu/multiarch/svml_s_exp2f4_core_sse4.S
> @@ -175,7 +175,7 @@ L(SPECIAL_VALUES_LOOP):
>         cfi_offset(14, -80)
>         # LOE rbx rbp r12 r13 r14 r15 xmm1
>
> -       /* Scalar math fucntion call
> +       /* Scalar math function call
>          * to process special input
>          */
>
> diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_exp2f8_core_avx2.S b/sysdeps/x86_64/fpu/multiarch/svml_s_exp2f8_core_avx2.S
> index 277508b8ef..47592985c1 100644
> --- a/sysdeps/x86_64/fpu/multiarch/svml_s_exp2f8_core_avx2.S
> +++ b/sysdeps/x86_64/fpu/multiarch/svml_s_exp2f8_core_avx2.S
> @@ -182,7 +182,7 @@ L(SPECIAL_VALUES_LOOP):
>         .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xa0, 0xff, 0xff, 0xff, 0x22
>         # LOE rbx r12 r13 r14 r15 ymm1
>
> -       /* Scalar math fucntion call
> +       /* Scalar math function call
>          * to process special input
>          */
>
> diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_expm1f16_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_s_expm1f16_core_avx512.S
> index 7aa1e3c417..4683e546de 100644
> --- a/sysdeps/x86_64/fpu/multiarch/svml_s_expm1f16_core_avx512.S
> +++ b/sysdeps/x86_64/fpu/multiarch/svml_s_expm1f16_core_avx512.S
> @@ -24,7 +24,7 @@
>   *    - all arguments processed in the main path
>   *        - final VSCALEF assists branch-free design (correct overflow/underflow and special case responses)
>   *        - a VAND is used to ensure the reduced argument |R|<2, even for large inputs
> - *        - RZ mode used to avoid oveflow to +/-Inf for x*log2(e); helps with special case handling
> + *        - RZ mode used to avoid overflow to +/-Inf for x*log2(e); helps with special case handling
>   *
>   *
>   */
> @@ -188,7 +188,7 @@ L(SPECIAL_VALUES_LOOP):
>         .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x40, 0xff, 0xff, 0xff, 0x22
>         # LOE rbx r12 r13 r14 r15 zmm0
>
> -       /* Scalar math fucntion call
> +       /* Scalar math function call
>          * to process special input
>          */
>
> diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_expm1f4_core_sse4.S b/sysdeps/x86_64/fpu/multiarch/svml_s_expm1f4_core_sse4.S
> index 6a3a9d266c..5159b0785a 100644
> --- a/sysdeps/x86_64/fpu/multiarch/svml_s_expm1f4_core_sse4.S
> +++ b/sysdeps/x86_64/fpu/multiarch/svml_s_expm1f4_core_sse4.S
> @@ -207,7 +207,7 @@ L(SPECIAL_VALUES_LOOP):
>         .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x22
>         # LOE rbx r12 r13 r14 r15 xmm10
>
> -       /* Scalar math fucntion call
> +       /* Scalar math function call
>          * to process special input
>          */
>
> diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_expm1f8_core_avx2.S b/sysdeps/x86_64/fpu/multiarch/svml_s_expm1f8_core_avx2.S
> index ee442d8c4a..aae9068cc9 100644
> --- a/sysdeps/x86_64/fpu/multiarch/svml_s_expm1f8_core_avx2.S
> +++ b/sysdeps/x86_64/fpu/multiarch/svml_s_expm1f8_core_avx2.S
> @@ -206,7 +206,7 @@ L(SPECIAL_VALUES_LOOP):
>         .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xa0, 0xff, 0xff, 0xff, 0x22
>         # LOE rbx r12 r13 r14 r15 ymm0
>
> -       /* Scalar math fucntion call
> +       /* Scalar math function call
>          * to process special input
>          */
>
> diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_hypotf16_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_s_hypotf16_core_avx512.S
> index 06c6903df2..749deb0833 100644
> --- a/sysdeps/x86_64/fpu/multiarch/svml_s_hypotf16_core_avx512.S
> +++ b/sysdeps/x86_64/fpu/multiarch/svml_s_hypotf16_core_avx512.S
> @@ -45,7 +45,7 @@
>   *    No multiprecision branch for _LA_ and _EP_
>   *      _z = _VARG1 * _VARG1 + _VARG2 * _VARG2
>   *
> - *    Check _z exponent to be withing borders [1E3 ; 60A] else goto Callout
> + *    Check _z exponent to be within borders [1E3 ; 60A] else goto Callout
>   *
>   *    Compute resciplicle sqrt s0 ~ 1.0/sqrt(_z),
>   *      that multiplied by _z, is final result for _EP_ version.
> @@ -196,7 +196,7 @@ L(SPECIAL_VALUES_LOOP):
>         .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x00, 0xff, 0xff, 0xff, 0x22
>         # LOE rbx r12 r13 r14 r15 zmm2
>
> -       /* Scalar math fucntion call
> +       /* Scalar math function call
>          * to process special input
>          */
>
> diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_hypotf4_core_sse4.S b/sysdeps/x86_64/fpu/multiarch/svml_s_hypotf4_core_sse4.S
> index c5a94d7b5b..38ab12b1e2 100644
> --- a/sysdeps/x86_64/fpu/multiarch/svml_s_hypotf4_core_sse4.S
> +++ b/sysdeps/x86_64/fpu/multiarch/svml_s_hypotf4_core_sse4.S
> @@ -45,7 +45,7 @@
>   *    No multiprecision branch for _LA_ and _EP_
>   *      _z = _VARG1 * _VARG1 + _VARG2 * _VARG2
>   *
> - *    Check _z exponent to be withing borders [1E3 ; 60A] else goto Callout
> + *    Check _z exponent to be within borders [1E3 ; 60A] else goto Callout
>   *
>   *    Compute resciplicle sqrt s0 ~ 1.0/sqrt(_z),
>   *      that multiplied by _z, is final result for _EP_ version.
> @@ -117,7 +117,7 @@ ENTRY(_ZGVbN4vv_hypotf_sse4)
>         movaps  %xmm2, %xmm6
>         mulps   %xmm10, %xmm6
>
> -       /* Check _z exponent to be withing borders [1E3 ; 60A] else goto Callout */
> +       /* Check _z exponent to be within borders [1E3 ; 60A] else goto Callout */
>         movdqu  _LowBoundary+__svml_shypot_data_internal(%rip), %xmm4
>         subps   %xmm6, %xmm5
>
> @@ -216,7 +216,7 @@ L(SPECIAL_VALUES_LOOP):
>         cfi_offset(14, -96)
>         # LOE rbx rbp r12 r13 r14 r15 xmm2
>
> -       /* Scalar math fucntion call
> +       /* Scalar math function call
>          * to process special input
>          */
>
> diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_hypotf8_core_avx2.S b/sysdeps/x86_64/fpu/multiarch/svml_s_hypotf8_core_avx2.S
> index fe87678ae6..80f1081201 100644
> --- a/sysdeps/x86_64/fpu/multiarch/svml_s_hypotf8_core_avx2.S
> +++ b/sysdeps/x86_64/fpu/multiarch/svml_s_hypotf8_core_avx2.S
> @@ -45,7 +45,7 @@
>   *    No multiprecision branch for _LA_ and _EP_
>   *      _z = _VARG1 * _VARG1 + _VARG2 * _VARG2
>   *
> - *    Check _z exponent to be withing borders [1E3 ; 60A] else goto Callout
> + *    Check _z exponent to be within borders [1E3 ; 60A] else goto Callout
>   *
>   *    Compute resciplicle sqrt s0 ~ 1.0/sqrt(_z),
>   *      that multiplied by _z, is final result for _EP_ version.
> @@ -107,7 +107,7 @@ ENTRY(_ZGVdN8vv_hypotf_avx2)
>          */
>         vmovups _sHalf+__svml_shypot_data_internal(%rip), %ymm7
>
> -       /* Check _z exponent to be withing borders [1E3 ; 60A] else goto Callout */
> +       /* Check _z exponent to be within borders [1E3 ; 60A] else goto Callout */
>         vmovups _LowBoundary+__svml_shypot_data_internal(%rip), %ymm2
>         vfmadd231ps %ymm1, %ymm1, %ymm8
>
> @@ -220,7 +220,7 @@ L(SPECIAL_VALUES_LOOP):
>         .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x80, 0xff, 0xff, 0xff, 0x22
>         # LOE rbx r12 r13 r14 r15 ymm2
>
> -       /* Scalar math fucntion call
> +       /* Scalar math function call
>          * to process special input
>          */
>
> diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_log10f16_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_s_log10f16_core_avx512.S
> index 87a1694a6f..0deb96997a 100644
> --- a/sysdeps/x86_64/fpu/multiarch/svml_s_log10f16_core_avx512.S
> +++ b/sysdeps/x86_64/fpu/multiarch/svml_s_log10f16_core_avx512.S
> @@ -155,7 +155,7 @@ L(SPECIAL_VALUES_LOOP):
>         .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x40, 0xff, 0xff, 0xff, 0x22
>         # LOE rbx r12 r13 r14 r15 zmm1
>
> -       /* Scalar math fucntion call
> +       /* Scalar math function call
>          * to process special input
>          */
>
> diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_log10f4_core_sse4.S b/sysdeps/x86_64/fpu/multiarch/svml_s_log10f4_core_sse4.S
> index 80ded85293..6baff562f5 100644
> --- a/sysdeps/x86_64/fpu/multiarch/svml_s_log10f4_core_sse4.S
> +++ b/sysdeps/x86_64/fpu/multiarch/svml_s_log10f4_core_sse4.S
> @@ -168,7 +168,7 @@ L(SPECIAL_VALUES_LOOP):
>         cfi_offset(14, -80)
>         # LOE rbx rbp r12 r13 r14 r15 xmm1
>
> -       /* Scalar math fucntion call
> +       /* Scalar math function call
>          * to process special input
>          */
>
> diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_log10f8_core_avx2.S b/sysdeps/x86_64/fpu/multiarch/svml_s_log10f8_core_avx2.S
> index 480495037f..54ff0b1e4d 100644
> --- a/sysdeps/x86_64/fpu/multiarch/svml_s_log10f8_core_avx2.S
> +++ b/sysdeps/x86_64/fpu/multiarch/svml_s_log10f8_core_avx2.S
> @@ -168,7 +168,7 @@ L(SPECIAL_VALUES_LOOP):
>         .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xa0, 0xff, 0xff, 0xff, 0x22
>         # LOE rbx r12 r13 r14 r15 ymm1
>
> -       /* Scalar math fucntion call
> +       /* Scalar math function call
>          * to process special input
>          */
>
> diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_log1pf16_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_s_log1pf16_core_avx512.S
> index d629dc44f3..e4f8a603ff 100644
> --- a/sysdeps/x86_64/fpu/multiarch/svml_s_log1pf16_core_avx512.S
> +++ b/sysdeps/x86_64/fpu/multiarch/svml_s_log1pf16_core_avx512.S
> @@ -201,7 +201,7 @@ L(SPECIAL_VALUES_LOOP):
>         .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x40, 0xff, 0xff, 0xff, 0x22
>         # LOE rbx r12 r13 r14 r15 zmm0
>
> -       /* Scalar math fucntion call
> +       /* Scalar math function call
>          * to process special input
>          */
>
> diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_log1pf4_core_sse4.S b/sysdeps/x86_64/fpu/multiarch/svml_s_log1pf4_core_sse4.S
> index 511e064a3d..4a10457eb8 100644
> --- a/sysdeps/x86_64/fpu/multiarch/svml_s_log1pf4_core_sse4.S
> +++ b/sysdeps/x86_64/fpu/multiarch/svml_s_log1pf4_core_sse4.S
> @@ -182,7 +182,7 @@ L(SPECIAL_VALUES_LOOP):
>         cfi_offset(14, -80)
>         # LOE rbx rbp r12 r13 r14 r15 xmm1
>
> -       /* Scalar math fucntion call
> +       /* Scalar math function call
>          * to process special input
>          */
>
> diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_log1pf8_core_avx2.S b/sysdeps/x86_64/fpu/multiarch/svml_s_log1pf8_core_avx2.S
> index ea39f66d22..672c91e07e 100644
> --- a/sysdeps/x86_64/fpu/multiarch/svml_s_log1pf8_core_avx2.S
> +++ b/sysdeps/x86_64/fpu/multiarch/svml_s_log1pf8_core_avx2.S
> @@ -184,7 +184,7 @@ L(SPECIAL_VALUES_LOOP):
>         .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xa0, 0xff, 0xff, 0xff, 0x22
>         # LOE rbx r12 r13 r14 r15 ymm0
>
> -       /* Scalar math fucntion call
> +       /* Scalar math function call
>          * to process special input
>          */
>
> diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_log2f16_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_s_log2f16_core_avx512.S
> index c14fd3d918..04288956c4 100644
> --- a/sysdeps/x86_64/fpu/multiarch/svml_s_log2f16_core_avx512.S
> +++ b/sysdeps/x86_64/fpu/multiarch/svml_s_log2f16_core_avx512.S
> @@ -152,7 +152,7 @@ L(SPECIAL_VALUES_LOOP):
>         .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x40, 0xff, 0xff, 0xff, 0x22
>         # LOE rbx r12 r13 r14 r15 zmm1
>
> -       /* Scalar math fucntion call
> +       /* Scalar math function call
>          * to process special input
>          */
>
> diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_log2f4_core_sse4.S b/sysdeps/x86_64/fpu/multiarch/svml_s_log2f4_core_sse4.S
> index f4aa9481ca..93ed64254e 100644
> --- a/sysdeps/x86_64/fpu/multiarch/svml_s_log2f4_core_sse4.S
> +++ b/sysdeps/x86_64/fpu/multiarch/svml_s_log2f4_core_sse4.S
> @@ -160,7 +160,7 @@ L(SPECIAL_VALUES_LOOP):
>         cfi_offset(14, -80)
>         # LOE rbx rbp r12 r13 r14 r15 xmm1
>
> -       /* Scalar math fucntion call
> +       /* Scalar math function call
>          * to process special input
>          */
>
> diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_log2f8_core_avx2.S b/sysdeps/x86_64/fpu/multiarch/svml_s_log2f8_core_avx2.S
> index d2441c3581..02360e57ee 100644
> --- a/sysdeps/x86_64/fpu/multiarch/svml_s_log2f8_core_avx2.S
> +++ b/sysdeps/x86_64/fpu/multiarch/svml_s_log2f8_core_avx2.S
> @@ -163,7 +163,7 @@ L(SPECIAL_VALUES_LOOP):
>         .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xa0, 0xff, 0xff, 0xff, 0x22
>         # LOE rbx r12 r13 r14 r15 ymm1
>
> -       /* Scalar math fucntion call
> +       /* Scalar math function call
>          * to process special input
>          */
>
> diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_sinhf16_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_s_sinhf16_core_avx512.S
> index dda1a0531b..03e7f345b0 100644
> --- a/sysdeps/x86_64/fpu/multiarch/svml_s_sinhf16_core_avx512.S
> +++ b/sysdeps/x86_64/fpu/multiarch/svml_s_sinhf16_core_avx512.S
> @@ -246,7 +246,7 @@ L(SPECIAL_VALUES_LOOP):
>         .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x40, 0xff, 0xff, 0xff, 0x22
>         # LOE rbx r12 r13 r14 r15 zmm0
>
> -       /* Scalar math fucntion call
> +       /* Scalar math function call
>          * to process special input
>          */
>
> diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_sinhf4_core_sse4.S b/sysdeps/x86_64/fpu/multiarch/svml_s_sinhf4_core_sse4.S
> index 34ec276ac0..59d6329126 100644
> --- a/sysdeps/x86_64/fpu/multiarch/svml_s_sinhf4_core_sse4.S
> +++ b/sysdeps/x86_64/fpu/multiarch/svml_s_sinhf4_core_sse4.S
> @@ -236,7 +236,7 @@ L(SPECIAL_VALUES_LOOP):
>         cfi_offset(14, -80)
>         # LOE rbx rbp r12 r13 r14 r15 xmm14
>
> -       /* Scalar math fucntion call
> +       /* Scalar math function call
>          * to process special input
>          */
>
> diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_sinhf8_core_avx2.S b/sysdeps/x86_64/fpu/multiarch/svml_s_sinhf8_core_avx2.S
> index abf8d658ab..81e1f19e26 100644
> --- a/sysdeps/x86_64/fpu/multiarch/svml_s_sinhf8_core_avx2.S
> +++ b/sysdeps/x86_64/fpu/multiarch/svml_s_sinhf8_core_avx2.S
> @@ -237,7 +237,7 @@ L(SPECIAL_VALUES_LOOP):
>         .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xa0, 0xff, 0xff, 0xff, 0x22
>         # LOE rbx r12 r13 r14 r15 ymm0
>
> -       /* Scalar math fucntion call
> +       /* Scalar math function call
>          * to process special input
>          */
>
> diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_tanf16_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_s_tanf16_core_avx512.S
> index 3d4dba3fab..ae95fbae91 100644
> --- a/sysdeps/x86_64/fpu/multiarch/svml_s_tanf16_core_avx512.S
> +++ b/sysdeps/x86_64/fpu/multiarch/svml_s_tanf16_core_avx512.S
> @@ -94,11 +94,11 @@ ENTRY(_ZGVeN16v_tanf_skx)
>         vfnmadd231ps {rn-sae}, %zmm5, %zmm2, %zmm4
>         vfnmadd213ps {rn-sae}, %zmm4, %zmm3, %zmm5
>
> -       /* Go to auxilary branch */
> +       /* Go to auxiliary branch */
>         jne     L(AUX_BRANCH)
>         # LOE rbx r12 r13 r14 r15 edx zmm0 zmm5 zmm10 zmm11 k6
>
> -       /* Return from auxilary branch
> +       /* Return from auxiliary branch
>          * for out of main path inputs
>          */
>
> @@ -229,7 +229,7 @@ L(SPECIAL_VALUES_LOOP):
>         .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x40, 0xff, 0xff, 0xff, 0x22
>         # LOE rbx r12 r13 r14 r15 zmm0
>
> -       /* Scalar math fucntion call
> +       /* Scalar math function call
>          * to process special input
>          */
>
> @@ -248,7 +248,7 @@ L(SCALAR_MATH_CALL):
>         cfi_restore(14)
>         # LOE rbx r15 r12d r13d
>
> -       /* Auxilary branch
> +       /* Auxiliary branch
>          * for out of main path inputs
>          */
>
> diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_tanf4_core_sse4.S b/sysdeps/x86_64/fpu/multiarch/svml_s_tanf4_core_sse4.S
> index 1292e88cf9..fab86645b6 100644
> --- a/sysdeps/x86_64/fpu/multiarch/svml_s_tanf4_core_sse4.S
> +++ b/sysdeps/x86_64/fpu/multiarch/svml_s_tanf4_core_sse4.S
> @@ -175,11 +175,11 @@ ENTRY(_ZGVbN4v_tanf_sse4)
>
>         testl   %edx, %edx
>
> -       /* Go to auxilary branch */
> +       /* Go to auxiliary branch */
>         jne     L(AUX_BRANCH)
>         # LOE rbx rbp r12 r13 r14 r15 eax xmm0 xmm4 xmm11 xmm12 xmm13
>
> -       /* Return from auxilary branch
> +       /* Return from auxiliary branch
>          * for out of main path inputs
>          */
>
> @@ -258,7 +258,7 @@ L(SPECIAL_VALUES_LOOP):
>         cfi_offset(14, -240)
>         # LOE rbx rbp r12 r13 r14 r15 xmm0
>
> -       /* Scalar math fucntion call
> +       /* Scalar math function call
>          * to process special input
>          */
>
> @@ -277,7 +277,7 @@ L(SCALAR_MATH_CALL):
>         cfi_restore(14)
>         # LOE rbx rbp r15 r12d r13d
>
> -       /* Auxilary branch
> +       /* Auxiliary branch
>          * for out of main path inputs
>          */
>
> diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_tanf8_core_avx2.S b/sysdeps/x86_64/fpu/multiarch/svml_s_tanf8_core_avx2.S
> index ab52321220..30585a77b4 100644
> --- a/sysdeps/x86_64/fpu/multiarch/svml_s_tanf8_core_avx2.S
> +++ b/sysdeps/x86_64/fpu/multiarch/svml_s_tanf8_core_avx2.S
> @@ -161,13 +161,13 @@ ENTRY(_ZGVdN8v_tanf_avx2)
>
>         testl   %edx, %edx
>
> -       /* Go to auxilary branch */
> +       /* Go to auxiliary branch */
>         jne     L(AUX_BRANCH)
>         /*  DW_CFA_expression: r3 (rbx) (DW_OP_lit8; DW_OP_minus; DW_OP_const4s: -32; DW_OP_and; DW_OP_const4s: -8; DW_OP_plus)  */
>         .cfi_escape 0x10, 0x03, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xf8, 0xff, 0xff, 0xff, 0x22
>         # LOE r12 r13 r14 r15 eax ymm0 ymm1 ymm10 ymm11 ymm12
>
> -       /* Return from auxilary branch
> +       /* Return from auxiliary branch
>          * for out of main path inputs
>          */
>
> @@ -255,7 +255,7 @@ L(SPECIAL_VALUES_LOOP):
>         .cfi_escape 0x10, 0x0d, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x40, 0xff, 0xff, 0xff, 0x22
>         # LOE r12 r13 r14 r15 ymm0
>
> -       /* Scalar math fucntion call
> +       /* Scalar math function call
>          * to process special input
>          */
>
> @@ -273,7 +273,7 @@ L(SCALAR_MATH_CALL):
>         cfi_restore(13)
>         # LOE r14 r15 ebx r12d
>
> -       /* Auxilary branch
> +       /* Auxiliary branch
>          * for out of main path inputs
>          */
>
> diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_tanhf16_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_s_tanhf16_core_avx512.S
> index d72a88924c..e639c48524 100644
> --- a/sysdeps/x86_64/fpu/multiarch/svml_s_tanhf16_core_avx512.S
> +++ b/sysdeps/x86_64/fpu/multiarch/svml_s_tanhf16_core_avx512.S
> @@ -220,7 +220,7 @@ L(SPECIAL_VALUES_LOOP):
>         xorl    %ebp, %ebp
>         tzcntl  %ebx, %ebp
>
> -       /* Scalar math fucntion call to process special input.  */
> +       /* Scalar math function call to process special input.  */
>         vmovss  64(%rsp, %rbp, 4), %xmm0
>         call    tanhf@PLT
>
> diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_tanhf4_core_sse4.S b/sysdeps/x86_64/fpu/multiarch/svml_s_tanhf4_core_sse4.S
> index dcbb1886d0..357ad375b3 100644
> --- a/sysdeps/x86_64/fpu/multiarch/svml_s_tanhf4_core_sse4.S
> +++ b/sysdeps/x86_64/fpu/multiarch/svml_s_tanhf4_core_sse4.S
> @@ -73,7 +73,7 @@
>
>  #include <sysdep.h>
>
> -/* tanhf data tables for avx2 and sse4 implementatins defined here.
> +/* tanhf data tables for avx2 and sse4 implementations defined here.
>   */
>  #define ONLY_DECL_OFFSET
>  #include "svml_s_tanhf_rodata.S"
> @@ -217,7 +217,7 @@ L(SPECIAL_VALUES_LOOP):
>         xorl    %ebp, %ebp
>         bsfl    %ebx, %ebp
>
> -       /* Scalar math fucntion call to process special input.  */
> +       /* Scalar math function call to process special input.  */
>         movss   40(%rsp, %rbp, 4), %xmm0
>         call    tanhf@PLT
>         /* No good way to avoid the store-forwarding fault this will cause on
> diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_tanhf8_core_avx2.S b/sysdeps/x86_64/fpu/multiarch/svml_s_tanhf8_core_avx2.S
> index b8d828e081..ea19903d9d 100644
> --- a/sysdeps/x86_64/fpu/multiarch/svml_s_tanhf8_core_avx2.S
> +++ b/sysdeps/x86_64/fpu/multiarch/svml_s_tanhf8_core_avx2.S
> @@ -72,7 +72,7 @@
>
>  #include <sysdep.h>
>
> -/* tanhf data tables for avx2 and sse4 implementatins defined here.
> +/* tanhf data tables for avx2 and sse4 implementations defined here.
>   */
>  #include "svml_s_tanhf_rodata.S"
>
> --
> 2.40.1.698.g37aff9b760-goog
>

Patch

diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_acos2_core_sse4.S b/sysdeps/x86_64/fpu/multiarch/svml_d_acos2_core_sse4.S
index 840c3d6a17..a46ddc136e 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_d_acos2_core_sse4.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_d_acos2_core_sse4.S
@@ -222,7 +222,7 @@  L(SPECIAL_VALUES_LOOP):
 	cfi_offset(14, -80)
 	# LOE rbx rbp r12 r13 r14 r15 xmm0
 
-	/* Scalar math fucntion call
+	/* Scalar math function call
 	 * to process special input
 	 */
 
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_acos4_core_avx2.S b/sysdeps/x86_64/fpu/multiarch/svml_d_acos4_core_avx2.S
index 3c75200578..808ea2fe95 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_d_acos4_core_avx2.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_d_acos4_core_avx2.S
@@ -204,7 +204,7 @@  L(SPECIAL_VALUES_LOOP):
 	.cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xa0, 0xff, 0xff, 0xff, 0x22
 	# LOE rbx r12 r13 r14 r15 ymm0
 
-	/* Scalar math fucntion call
+	/* Scalar math function call
 	 * to process special input
 	 */
 
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_acos8_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_d_acos8_core_avx512.S
index 0647a2e1f7..878d1454c6 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_d_acos8_core_avx512.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_d_acos8_core_avx512.S
@@ -226,7 +226,7 @@  L(SPECIAL_VALUES_LOOP):
 	.cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x40, 0xff, 0xff, 0xff, 0x22
 	# LOE rbx r12 r13 r14 r15 zmm0
 
-	/* Scalar math fucntion call
+	/* Scalar math function call
 	 * to process special input
 	 */
 
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_acosh2_core_sse4.S b/sysdeps/x86_64/fpu/multiarch/svml_d_acosh2_core_sse4.S
index 8a56813ff0..b69e5cef8b 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_d_acosh2_core_sse4.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_d_acosh2_core_sse4.S
@@ -321,7 +321,7 @@  L(SPECIAL_VALUES_LOOP):
 	.cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x22
 	# LOE rbx r12 r13 r14 r15 xmm0
 
-	/* Scalar math fucntion call
+	/* Scalar math function call
 	 * to process special input
 	 */
 
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_acosh4_core_avx2.S b/sysdeps/x86_64/fpu/multiarch/svml_d_acosh4_core_avx2.S
index f16f539fb6..825b231173 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_d_acosh4_core_avx2.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_d_acosh4_core_avx2.S
@@ -366,7 +366,7 @@  L(SPECIAL_VALUES_LOOP):
 	.cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xa0, 0xff, 0xff, 0xff, 0x22
 	# LOE rbx r12 r13 r14 r15 ymm0
 
-	/* Scalar math fucntion call
+	/* Scalar math function call
 	 * to process special input
 	 */
 
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_acosh8_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_d_acosh8_core_avx512.S
index 1a3211bf43..32ed85e368 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_d_acosh8_core_avx512.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_d_acosh8_core_avx512.S
@@ -311,7 +311,7 @@  L(SPECIAL_VALUES_LOOP):
 	.cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x40, 0xff, 0xff, 0xff, 0x22
 	# LOE rbx r12 r13 r14 r15 zmm0
 
-	/* Scalar math fucntion call
+	/* Scalar math function call
 	 * to process special input
 	 */
 
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_asin2_core_sse4.S b/sysdeps/x86_64/fpu/multiarch/svml_d_asin2_core_sse4.S
index 9fb9ddcf3d..7bba3b5272 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_d_asin2_core_sse4.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_d_asin2_core_sse4.S
@@ -211,7 +211,7 @@  L(SPECIAL_VALUES_LOOP):
 	cfi_offset(14, -80)
 	# LOE rbx rbp r12 r13 r14 r15 xmm0
 
-	/* Scalar math fucntion call
+	/* Scalar math function call
 	 * to process special input
 	 */
 
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_asin4_core_avx2.S b/sysdeps/x86_64/fpu/multiarch/svml_d_asin4_core_avx2.S
index af6fa771c5..c7dbb727e3 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_d_asin4_core_avx2.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_d_asin4_core_avx2.S
@@ -196,7 +196,7 @@  L(SPECIAL_VALUES_LOOP):
 	.cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xa0, 0xff, 0xff, 0xff, 0x22
 	# LOE rbx r12 r13 r14 r15 ymm0
 
-	/* Scalar math fucntion call
+	/* Scalar math function call
 	 * to process special input
 	 */
 
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_asin8_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_d_asin8_core_avx512.S
index 2a0f6d4378..c23665b9b2 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_d_asin8_core_avx512.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_d_asin8_core_avx512.S
@@ -218,7 +218,7 @@  L(SPECIAL_VALUES_LOOP):
 	.cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x40, 0xff, 0xff, 0xff, 0x22
 	# LOE rbx r12 r13 r14 r15 zmm0
 
-	/* Scalar math fucntion call
+	/* Scalar math function call
 	 * to process special input
 	 */
 
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_asinh2_core_sse4.S b/sysdeps/x86_64/fpu/multiarch/svml_d_asinh2_core_sse4.S
index a3630b1145..f4da4b2c32 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_d_asinh2_core_sse4.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_d_asinh2_core_sse4.S
@@ -474,7 +474,7 @@  L(SPECIAL_VALUES_LOOP):
 	.cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x22
 	# LOE rbx r12 r13 r14 r15 xmm0
 
-	/* Scalar math fucntion call
+	/* Scalar math function call
 	 * to process special input
 	 */
 
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_asinh4_core_avx2.S b/sysdeps/x86_64/fpu/multiarch/svml_d_asinh4_core_avx2.S
index d97a5f845f..3ecec43c66 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_d_asinh4_core_avx2.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_d_asinh4_core_avx2.S
@@ -423,7 +423,7 @@  L(SPECIAL_VALUES_LOOP):
 	.cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xa0, 0xff, 0xff, 0xff, 0x22
 	# LOE rbx r12 r13 r14 r15 ymm0
 
-	/* Scalar math fucntion call
+	/* Scalar math function call
 	 * to process special input
 	 */
 
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_asinh8_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_d_asinh8_core_avx512.S
index b4d88848b5..82bd52407d 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_d_asinh8_core_avx512.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_d_asinh8_core_avx512.S
@@ -337,7 +337,7 @@  L(SPECIAL_VALUES_LOOP):
 	.cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x40, 0xff, 0xff, 0xff, 0x22
 	# LOE rbx r12 r13 r14 r15 zmm0
 
-	/* Scalar math fucntion call
+	/* Scalar math function call
 	 * to process special input
 	 */
 
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_atan22_core_sse4.S b/sysdeps/x86_64/fpu/multiarch/svml_d_atan22_core_sse4.S
index 7d14cb8cb4..39d86480e4 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_d_atan22_core_sse4.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_d_atan22_core_sse4.S
@@ -229,11 +229,11 @@  ENTRY(_ZGVbN2vv_atan2_sse4)
 	/*  Special branch for fast (vector) processing of zero arguments  */
 	testb	$3, %cl
 
-	/* Go to auxilary branch */
+	/* Go to auxiliary branch */
 	jne	L(AUX_BRANCH)
 	# LOE rbx rbp r12 r13 r14 r15 eax edx xmm0 xmm1 xmm2 xmm3 xmm4 xmm5 xmm6 xmm7 xmm8 xmm9 xmm10 xmm11
 
-	/* Return from auxilary branch
+	/* Return from auxiliary branch
 	 * for out of main path inputs
 	 */
 
@@ -316,7 +316,7 @@  L(SPECIAL_VALUES_LOOP):
 	cfi_offset(14, -96)
 	# LOE rbx rbp r12 r13 r14 r15 xmm0
 
-	/* Scalar math fucntion call
+	/* Scalar math function call
 	 * to process special input
 	 */
 
@@ -336,7 +336,7 @@  L(SCALAR_MATH_CALL):
 	cfi_restore(14)
 	# LOE rbx rbp r15 r12d r13d
 
-	/* Auxilary branch
+	/* Auxiliary branch
 	 * for out of main path inputs
 	 */
 
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_atan24_core_avx2.S b/sysdeps/x86_64/fpu/multiarch/svml_d_atan24_core_avx2.S
index 35b635dac7..a4bcf9c375 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_d_atan24_core_avx2.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_d_atan24_core_avx2.S
@@ -170,11 +170,11 @@  ENTRY(_ZGVdN4vv_atan2_avx2)
 	/*  Special branch for fast (vector) processing of zero arguments  */
 	testl	%eax, %eax
 
-	/* Go to auxilary branch */
+	/* Go to auxiliary branch */
 	jne	L(AUX_BRANCH)
 	# LOE rbx r12 r13 r14 r15 edx xmm3 ymm0 ymm1 ymm2 ymm4 ymm5 ymm6 ymm7 ymm8
 
-	/* Return from auxilary branch
+	/* Return from auxiliary branch
 	 * for out of main path inputs
 	 */
 
@@ -271,7 +271,7 @@  L(SPECIAL_VALUES_LOOP):
 	.cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x80, 0xff, 0xff, 0xff, 0x22
 	# LOE rbx r12 r13 r14 r15 ymm0
 
-	/* Scalar math fucntion call
+	/* Scalar math function call
 	 * to process special input
 	 */
 
@@ -291,7 +291,7 @@  L(SCALAR_MATH_CALL):
 	cfi_restore(14)
 	# LOE rbx r15 r12d r13d
 
-	/* Auxilary branch
+	/* Auxiliary branch
 	 * for out of main path inputs
 	 */
 
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_atan28_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_d_atan28_core_avx512.S
index 49662bc8c9..def7af38dc 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_d_atan28_core_avx512.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_d_atan28_core_avx512.S
@@ -188,11 +188,11 @@  ENTRY(_ZGVeN8vv_atan2_skx)
 	vmovups	64(%rsp), %zmm9
 	testl	%eax, %eax
 
-	/* Go to auxilary branch */
+	/* Go to auxiliary branch */
 	jne	L(AUX_BRANCH)
 	# LOE rbx r12 r13 r14 r15 edx ymm6 zmm0 zmm2 zmm3 zmm4 zmm5 zmm7 zmm9 zmm11
 
-	/* Return from auxilary branch
+	/* Return from auxiliary branch
 	 * for out of main path inputs
 	 */
 
@@ -289,7 +289,7 @@  L(SPECIAL_VALUES_LOOP):
 	.cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x00, 0xff, 0xff, 0xff, 0x22
 	# LOE rbx r12 r13 r14 r15 zmm11
 
-	/* Scalar math fucntion call
+	/* Scalar math function call
 	 * to process special input
 	 */
 
@@ -309,7 +309,7 @@  L(SCALAR_MATH_CALL):
 	cfi_restore(14)
 	# LOE rbx r15 r12d r13d
 
-	/* Auxilary branch
+	/* Auxiliary branch
 	 * for out of main path inputs
 	 */
 
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_atanh2_core_sse4.S b/sysdeps/x86_64/fpu/multiarch/svml_d_atanh2_core_sse4.S
index 50345f026d..0a87c8cd81 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_d_atanh2_core_sse4.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_d_atanh2_core_sse4.S
@@ -367,7 +367,7 @@  L(SPECIAL_VALUES_LOOP):
 	.cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x22
 	# LOE rbx r12 r13 r14 r15 xmm0
 
-	/* Scalar math fucntion call
+	/* Scalar math function call
 	 * to process special input
 	 */
 
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_atanh4_core_avx2.S b/sysdeps/x86_64/fpu/multiarch/svml_d_atanh4_core_avx2.S
index 0e2f6cadae..44517bea88 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_d_atanh4_core_avx2.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_d_atanh4_core_avx2.S
@@ -333,7 +333,7 @@  L(SPECIAL_VALUES_LOOP):
 	.cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xa0, 0xff, 0xff, 0xff, 0x22
 	# LOE rbx r12 r13 r14 r15 ymm0
 
-	/* Scalar math fucntion call
+	/* Scalar math function call
 	 * to process special input
 	 */
 
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_atanh8_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_d_atanh8_core_avx512.S
index 7ba45c0056..99141c1f39 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_d_atanh8_core_avx512.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_d_atanh8_core_avx512.S
@@ -268,7 +268,7 @@  L(SPECIAL_VALUES_LOOP):
 	.cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x40, 0xff, 0xff, 0xff, 0x22
 	# LOE rbx r12 r13 r14 r15 zmm0
 
-	/* Scalar math fucntion call
+	/* Scalar math function call
 	 * to process special input
 	 */
 
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_cbrt2_core_sse4.S b/sysdeps/x86_64/fpu/multiarch/svml_d_cbrt2_core_sse4.S
index aa90322722..98b276f2e2 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_d_cbrt2_core_sse4.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_d_cbrt2_core_sse4.S
@@ -241,7 +241,7 @@  L(SPECIAL_VALUES_LOOP):
 	cfi_offset(14, -80)
 	# LOE rbx rbp r12 r13 r14 r15 xmm6
 
-	/* Scalar math fucntion call
+	/* Scalar math function call
 	 * to process special input
 	 */
 
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_cbrt4_core_avx2.S b/sysdeps/x86_64/fpu/multiarch/svml_d_cbrt4_core_avx2.S
index d0de65fde8..45f395dccb 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_d_cbrt4_core_avx2.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_d_cbrt4_core_avx2.S
@@ -256,7 +256,7 @@  L(SPECIAL_VALUES_LOOP):
 	.cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xa0, 0xff, 0xff, 0xff, 0x22
 	# LOE rbx r12 r13 r14 r15 ymm0
 
-	/* Scalar math fucntion call
+	/* Scalar math function call
 	 * to process special input
 	 */
 
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_cosh2_core_sse4.S b/sysdeps/x86_64/fpu/multiarch/svml_d_cosh2_core_sse4.S
index c2a13245a8..dd89de036f 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_d_cosh2_core_sse4.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_d_cosh2_core_sse4.S
@@ -260,7 +260,7 @@  L(SPECIAL_VALUES_LOOP):
 	cfi_offset(14, -80)
 	# LOE rbx rbp r12 r13 r14 r15 xmm0
 
-	/* Scalar math fucntion call
+	/* Scalar math function call
 	 * to process special input
 	 */
 
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_cosh4_core_avx2.S b/sysdeps/x86_64/fpu/multiarch/svml_d_cosh4_core_avx2.S
index c152307a25..8330968063 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_d_cosh4_core_avx2.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_d_cosh4_core_avx2.S
@@ -276,7 +276,7 @@  L(SPECIAL_VALUES_LOOP):
 	.cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xa0, 0xff, 0xff, 0xff, 0x22
 	# LOE rbx r12 r13 r14 r15 ymm0
 
-	/* Scalar math fucntion call
+	/* Scalar math function call
 	 * to process special input
 	 */
 
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_cosh8_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_d_cosh8_core_avx512.S
index b4b2284a16..3e2aa620b2 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_d_cosh8_core_avx512.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_d_cosh8_core_avx512.S
@@ -225,7 +225,7 @@  L(SPECIAL_VALUES_LOOP):
 	.cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x40, 0xff, 0xff, 0xff, 0x22
 	# LOE rbx r12 r13 r14 r15 zmm0
 
-	/* Scalar math fucntion call
+	/* Scalar math function call
 	 * to process special input
 	 */
 
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_erfc2_core_sse4.S b/sysdeps/x86_64/fpu/multiarch/svml_d_erfc2_core_sse4.S
index 5934986b52..a5f2f11508 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_d_erfc2_core_sse4.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_d_erfc2_core_sse4.S
@@ -251,7 +251,7 @@  L(SPECIAL_VALUES_LOOP):
 	cfi_offset(14, -80)
 	# LOE rbx rbp r12 r13 r14 r15 xmm0
 
-	/* Scalar math fucntion call
+	/* Scalar math function call
 	 * to process special input
 	 */
 
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_erfc4_core_avx2.S b/sysdeps/x86_64/fpu/multiarch/svml_d_erfc4_core_avx2.S
index 2948e6b3c3..376be17b34 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_d_erfc4_core_avx2.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_d_erfc4_core_avx2.S
@@ -252,7 +252,7 @@  L(SPECIAL_VALUES_LOOP):
 	.cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xa0, 0xff, 0xff, 0xff, 0x22
 	# LOE rbx r12 r13 r14 r15 ymm0
 
-	/* Scalar math fucntion call
+	/* Scalar math function call
 	 * to process special input
 	 */
 
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_erfc8_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_d_erfc8_core_avx512.S
index 5c92653e20..debba0c365 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_d_erfc8_core_avx512.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_d_erfc8_core_avx512.S
@@ -255,7 +255,7 @@  L(SPECIAL_VALUES_LOOP):
 	.cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x40, 0xff, 0xff, 0xff, 0x22
 	# LOE rbx r12 r13 r14 r15 zmm0
 
-	/* Scalar math fucntion call
+	/* Scalar math function call
 	 * to process special input
 	 */
 
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_exp102_core_sse4.S b/sysdeps/x86_64/fpu/multiarch/svml_d_exp102_core_sse4.S
index 65abd70168..db25e5b14d 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_d_exp102_core_sse4.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_d_exp102_core_sse4.S
@@ -68,7 +68,7 @@  ENTRY(_ZGVbN2v_exp10_sse4)
 	/*  R  */
 	movaps	%xmm0, %xmm12
 
-	/*  Load arument  */
+	/*  Load argument  */
 	movups	_dbLg2_10+__svml_dexp10_data_internal(%rip), %xmm13
 	lea	__svml_dexp10_data_internal(%rip), %rsi
 	mulpd	%xmm0, %xmm13
@@ -214,7 +214,7 @@  L(SPECIAL_VALUES_LOOP):
 	cfi_offset(14, -80)
 	# LOE rbx rbp r12 r13 r14 r15 xmm1
 
-	/* Scalar math fucntion call
+	/* Scalar math function call
 	 * to process special input
 	 */
 
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_exp104_core_avx2.S b/sysdeps/x86_64/fpu/multiarch/svml_d_exp104_core_avx2.S
index 1c7c8e2db8..c5cec289a7 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_d_exp104_core_avx2.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_d_exp104_core_avx2.S
@@ -73,7 +73,7 @@  ENTRY(_ZGVdN4v_exp10_avx2)
 	vmovapd	%ymm0, %ymm2
 	vmovupd	_dbShifter+__svml_dexp10_data_internal(%rip), %ymm3
 
-	/*  Load arument  */
+	/*  Load argument  */
 	vmovupd	_dbLg2_10+__svml_dexp10_data_internal(%rip), %ymm0
 	vfmadd213pd %ymm3, %ymm2, %ymm0
 	vsubpd	%ymm3, %ymm0, %ymm1
@@ -225,7 +225,7 @@  L(SPECIAL_VALUES_LOOP):
 	.cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xa0, 0xff, 0xff, 0xff, 0x22
 	# LOE rbx r12 r13 r14 r15 ymm0
 
-	/* Scalar math fucntion call
+	/* Scalar math function call
 	 * to process special input
 	 */
 
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_exp108_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_d_exp108_core_avx512.S
index 2f45c9292d..9ea6a3d204 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_d_exp108_core_avx512.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_d_exp108_core_avx512.S
@@ -23,7 +23,7 @@ 
  *    - all arguments processed in the main path
  *        - final VSCALEF assists branch-free design (correct overflow/underflow and special case responses)
  *        - a VAND is used to ensure the reduced argument |R|<2, even for large inputs
- *        - RZ mode used to avoid oveflow to +/-Inf for x*log2(e); helps with special case handling
+ *        - RZ mode used to avoid overflow to +/-Inf for x*log2(e); helps with special case handling
  *        - SAE used to avoid spurious flag settings
  *
  */
@@ -185,7 +185,7 @@  L(SPECIAL_VALUES_LOOP):
 	.cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x40, 0xff, 0xff, 0xff, 0x22
 	# LOE rbx r12 r13 r14 r15 zmm0
 
-	/* Scalar math fucntion call
+	/* Scalar math function call
 	 * to process special input
 	 */
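
(For readers who do not work on these kernels: the header comment touched in the first hunk of this file describes the usual 2^N * 2^R split. The argument is scaled by the log2 constant, the integer part N is peeled off so that |R| stays small, 2^R comes from a short polynomial, and 2^N is applied branch-free with VSCALEFPD; as the comment notes, RZ rounding keeps the scaled argument from rounding up to +/-Inf for huge inputs, so even those stay on the main path. Below is a minimal scalar sketch of that split. The helper name, the nearbyint/scalbn calls (the kernel uses the add-a-large-shifter-and-subtract trick plus VSCALEFPD instead), and the plain Taylor polynomial are illustrative assumptions, not the kernel's actual constants.)

  #include <math.h>

  /* Illustrative scalar sketch of the 2^N * 2^R split the header
     comment describes (hypothetical helper, not the glibc kernel):
     exp10(x) = 2^(x*log2(10)) = 2^N * 2^R, N = nearbyint(x*log2(10)),
     |R| <= 0.5.  Assumes finite, moderate x; the real kernel keeps
     huge inputs on the main path via RZ rounding plus VSCALEFPD/SAE.  */
  static double
  exp10_sketch (double x)
  {
    double t = x * 3.32192809488736234787;  /* x * log2(10) */
    double n = nearbyint (t);               /* integer power of two */
    double r = t - n;                       /* reduced argument */
    /* 2^r = e^(r*ln2), here via a short Taylor polynomial; the kernel
       uses a tuned polynomial of similar degree.  */
    double q = r * 0.6931471805599453;
    double p = 1.0 + q * (1.0 + q * (0.5 + q * (1.0/6
               + q * (1.0/24 + q * (1.0/120)))));
    return scalbn (p, (int) n);             /* 2^N * 2^R; VSCALEFPD in the kernel */
  }

The expm1 and single-precision exp10f/expm1f kernels further down carry the same header note and follow the same scheme.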
 
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_exp22_core_sse4.S b/sysdeps/x86_64/fpu/multiarch/svml_d_exp22_core_sse4.S
index 0ffb56d9d4..4c24aa8a2e 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_d_exp22_core_sse4.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_d_exp22_core_sse4.S
@@ -67,7 +67,7 @@  ENTRY(_ZGVbN2v_exp2_sse4)
 	/* out, basePtr, iIndex, iBaseOfs, iSize, iGran, iOfs */
 	lea	__svml_dexp2_data_internal(%rip), %rsi
 
-	/*  Load arument  */
+	/*  Load argument  */
 	movaps	%xmm1, %xmm10
 	addpd	%xmm0, %xmm10
 	movaps	%xmm10, %xmm6
@@ -201,7 +201,7 @@  L(SPECIAL_VALUES_LOOP):
 	cfi_offset(14, -80)
 	# LOE rbx rbp r12 r13 r14 r15 xmm1
 
-	/* Scalar math fucntion call
+	/* Scalar math function call
 	 * to process special input
 	 */
 
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_exp24_core_avx2.S b/sysdeps/x86_64/fpu/multiarch/svml_d_exp24_core_avx2.S
index 9337921c63..1e55f3db85 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_d_exp24_core_avx2.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_d_exp24_core_avx2.S
@@ -71,7 +71,7 @@  ENTRY(_ZGVdN4v_exp2_avx2)
 	vmovupd	_lIndexMask+__svml_dexp2_data_internal(%rip), %ymm3
 	vmovapd	%ymm0, %ymm1
 
-	/*  Load arument  */
+	/*  Load argument  */
 	vaddpd	%ymm4, %ymm1, %ymm2
 	vsubpd	%ymm4, %ymm2, %ymm0
 
@@ -217,7 +217,7 @@  L(SPECIAL_VALUES_LOOP):
 	.cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xa0, 0xff, 0xff, 0xff, 0x22
 	# LOE rbx r12 r13 r14 r15 ymm0
 
-	/* Scalar math fucntion call
+	/* Scalar math function call
 	 * to process special input
 	 */
 
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_exp28_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_d_exp28_core_avx512.S
index ab3db00910..7e759c445f 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_d_exp28_core_avx512.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_d_exp28_core_avx512.S
@@ -221,7 +221,7 @@  L(SPECIAL_VALUES_LOOP):
 	.cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x40, 0xff, 0xff, 0xff, 0x22
 	# LOE rbx r12 r13 r14 r15 zmm1
 
-	/* Scalar math fucntion call
+	/* Scalar math function call
 	 * to process special input
 	 */
 
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_expm12_core_sse4.S b/sysdeps/x86_64/fpu/multiarch/svml_d_expm12_core_sse4.S
index 7e1df110e4..05be9079f5 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_d_expm12_core_sse4.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_d_expm12_core_sse4.S
@@ -206,7 +206,7 @@  L(SPECIAL_VALUES_LOOP):
 	.cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x22
 	# LOE rbx r12 r13 r14 r15 xmm6
 
-	/* Scalar math fucntion call
+	/* Scalar math function call
 	 * to process special input
 	 */
 
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_expm14_core_avx2.S b/sysdeps/x86_64/fpu/multiarch/svml_d_expm14_core_avx2.S
index 815ef34935..ad0b49978c 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_d_expm14_core_avx2.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_d_expm14_core_avx2.S
@@ -199,7 +199,7 @@  L(SPECIAL_VALUES_LOOP):
 	.cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xa0, 0xff, 0xff, 0xff, 0x22
 	# LOE rbx r12 r13 r14 r15 ymm0
 
-	/* Scalar math fucntion call
+	/* Scalar math function call
 	 * to process special input
 	 */
 
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_expm18_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_d_expm18_core_avx512.S
index f38c694eb1..968801ab00 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_d_expm18_core_avx512.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_d_expm18_core_avx512.S
@@ -24,7 +24,7 @@ 
  *    - all arguments processed in the main path
  *        - final VSCALEF assists branch-free design (correct overflow/underflow and special case responses)
  *        - a VAND is used to ensure the reduced argument |R|<2, even for large inputs
- *        - RZ mode used to avoid oveflow to +/-Inf for x*log2(e); helps with special case handling
+ *        - RZ mode used to avoid overflow to +/-Inf for x*log2(e); helps with special case handling
  *
  *
  */
@@ -205,7 +205,7 @@  L(SPECIAL_VALUES_LOOP):
 	.cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x40, 0xff, 0xff, 0xff, 0x22
 	# LOE rbx r12 r13 r14 r15 zmm0
 
-	/* Scalar math fucntion call
+	/* Scalar math function call
 	 * to process special input
 	 */
 
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_hypot2_core_sse4.S b/sysdeps/x86_64/fpu/multiarch/svml_d_hypot2_core_sse4.S
index 136f5ebd8d..07c3156cf7 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_d_hypot2_core_sse4.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_d_hypot2_core_sse4.S
@@ -47,7 +47,7 @@ 
  *    No multiprecision branch for _LA_ and _EP_
  *      _z = _VARG1 * _VARG1 + _VARG2 * _VARG2
  *
- *    Check _z exponent to be withing borders [3BC ; 441] else goto Callout
+ *    Check _z exponent to be within borders [3BC ; 441] else goto Callout
  *
  *    _s  ~ 1.0/sqrt(_z)
  *    _s2 ~ 1.0/(sqrt(_z)*sqrt(_z)) ~ 1.0/_z = (1.0/_z + O)
@@ -127,7 +127,7 @@  ENTRY(_ZGVbN2vv_hypot_sse4)
 	mulpd	%xmm10, %xmm11
 	mulpd	%xmm10, %xmm2
 
-	/* Check _z exponent to be withing borders [3BC ; 441] else goto Callout */
+	/* Check _z exponent to be within borders [3BC ; 441] else goto Callout */
 	movq	_LowBoundary+__svml_dhypot_data_internal(%rip), %xmm5
 	movq	_HighBoundary+__svml_dhypot_data_internal(%rip), %xmm3
 	pshufd	$221, %xmm10, %xmm4
@@ -215,7 +215,7 @@  L(SPECIAL_VALUES_LOOP):
 	cfi_offset(14, -96)
 	# LOE rbx rbp r12 r13 r14 r15 xmm2
 
-	/* Scalar math fucntion call
+	/* Scalar math function call
 	 * to process special input
 	 */
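
(The header comment at the top of these hypot kernels spells out the fast path: form _z = x*x + y*y, take the Callout unless the biased exponent of _z lies within [0x3BC, 0x441], which for doubles is roughly 2^-67 <= _z < 2^67, i.e. the sum neither overflowed nor sank toward underflow, then refine a reciprocal-square-root estimate _s and return _z*_s ~= sqrt(_z). A scalar sketch under those assumptions follows; the function name, the memcpy-based exponent extraction, and the use of 1.0/sqrt() in place of the kernel's low-precision rsqrtps estimate are illustrative only.)

  #include <math.h>
  #include <stdint.h>
  #include <string.h>

  /* Illustrative scalar sketch of the hypot fast path (hypothetical
     helper, not the glibc kernel).  The vector code does the same on
     2/4/8 lanes at once and branches to the Callout if any lane fails
     the exponent test.  */
  static double
  hypot_fast_sketch (double x, double y)
  {
    double z = x * x + y * y;

    /* "Check _z exponent to be within borders [3BC ; 441]".  */
    uint64_t bits;
    memcpy (&bits, &z, sizeof bits);
    unsigned biased_exp = (bits >> 52) & 0x7ff;
    if (biased_exp < 0x3BC || biased_exp > 0x441)
      return hypot (x, y);               /* Callout: accurate scalar path */

    /* _s ~ 1/sqrt(_z).  Here 1.0/sqrt() is already accurate; the kernel
       starts from a lower-precision estimate and applies a comparable
       Newton-style correction, mirrored below.  */
    double s = 1.0 / sqrt (z);
    s = s * (1.5 - 0.5 * z * s * s);
    return z * s;                        /* z / sqrt(z) == sqrt(z) */
  }

The hypotf kernels further down do the same in single precision with their own boundary constants ([1E3 ; 60A]).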
 
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_hypot4_core_avx2.S b/sysdeps/x86_64/fpu/multiarch/svml_d_hypot4_core_avx2.S
index 61d12c9795..d8c6a3ac43 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_d_hypot4_core_avx2.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_d_hypot4_core_avx2.S
@@ -47,7 +47,7 @@ 
  *    No multiprecision branch for _LA_ and _EP_
  *      _z = _VARG1 * _VARG1 + _VARG2 * _VARG2
  *
- *    Check _z exponent to be withing borders [3BC ; 441] else goto Callout
+ *    Check _z exponent to be within borders [3BC ; 441] else goto Callout
  *
  *    _s  ~ 1.0/sqrt(_z)
  *    _s2 ~ 1.0/(sqrt(_z)*sqrt(_z)) ~ 1.0/_z = (1.0/_z + O)
@@ -111,7 +111,7 @@  ENTRY(_ZGVdN4vv_hypot_avx2)
 	 */
 	vcvtpd2ps %ymm0, %xmm12
 
-	/* Check _z exponent to be withing borders [3BC ; 441] else goto Callout */
+	/* Check _z exponent to be within borders [3BC ; 441] else goto Callout */
 	vextractf128 $1, %ymm0, %xmm3
 	vrsqrtps %xmm12, %xmm13
 	vshufps	$221, %xmm3, %xmm0, %xmm5
@@ -225,7 +225,7 @@  L(SPECIAL_VALUES_LOOP):
 	.cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x80, 0xff, 0xff, 0xff, 0x22
 	# LOE rbx r12 r13 r14 r15 ymm0
 
-	/* Scalar math fucntion call
+	/* Scalar math function call
 	 * to process special input
 	 */
 
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_hypot8_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_d_hypot8_core_avx512.S
index fb53d5dbd7..24ab764b7a 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_d_hypot8_core_avx512.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_d_hypot8_core_avx512.S
@@ -47,7 +47,7 @@ 
  *    No multiprecision branch for _LA_ and _EP_
  *      _z = _VARG1 * _VARG1 + _VARG2 * _VARG2
  *
- *    Check _z exponent to be withing borders [3BC ; 441] else goto Callout
+ *    Check _z exponent to be within borders [3BC ; 441] else goto Callout
  *
  *    _s  ~ 1.0/sqrt(_z)
  *    _s2 ~ 1.0/(sqrt(_z)*sqrt(_z)) ~ 1.0/_z = (1.0/_z + O)
@@ -188,7 +188,7 @@  L(SPECIAL_VALUES_LOOP):
 	.cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x00, 0xff, 0xff, 0xff, 0x22
 	# LOE rbx r12 r13 r14 r15 zmm2
 
-	/* Scalar math fucntion call
+	/* Scalar math function call
 	 * to process special input
 	 */
 
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_log102_core_sse4.S b/sysdeps/x86_64/fpu/multiarch/svml_d_log102_core_sse4.S
index b2e75c1f23..de1583b394 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_d_log102_core_sse4.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_d_log102_core_sse4.S
@@ -227,7 +227,7 @@  L(SPECIAL_VALUES_LOOP):
 	.cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x22
 	# LOE rbx r12 r13 r14 r15 xmm3
 
-	/* Scalar math fucntion call
+	/* Scalar math function call
 	 * to process special input
 	 */
 
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_log104_core_avx2.S b/sysdeps/x86_64/fpu/multiarch/svml_d_log104_core_avx2.S
index 2e6ebac0ee..8a9b8a84fb 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_d_log104_core_avx2.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_d_log104_core_avx2.S
@@ -219,7 +219,7 @@  L(SPECIAL_VALUES_LOOP):
 	.cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xa0, 0xff, 0xff, 0xff, 0x22
 	# LOE rbx r12 r13 r14 r15 ymm0
 
-	/* Scalar math fucntion call
+	/* Scalar math function call
 	 * to process special input
 	 */
 
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_log108_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_d_log108_core_avx512.S
index b7593067c0..b4e5a9ccea 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_d_log108_core_avx512.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_d_log108_core_avx512.S
@@ -201,7 +201,7 @@  L(SPECIAL_VALUES_LOOP):
 	.cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x40, 0xff, 0xff, 0xff, 0x22
 	# LOE rbx r12 r13 r14 r15 zmm0
 
-	/* Scalar math fucntion call
+	/* Scalar math function call
 	 * to process special input
 	 */
 
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_log1p2_core_sse4.S b/sysdeps/x86_64/fpu/multiarch/svml_d_log1p2_core_sse4.S
index d0372e82c6..618b7e1e09 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_d_log1p2_core_sse4.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_d_log1p2_core_sse4.S
@@ -265,7 +265,7 @@  L(SPECIAL_VALUES_LOOP):
 	.cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x22
 	# LOE rbx r12 r13 r14 r15 xmm0
 
-	/* Scalar math fucntion call
+	/* Scalar math function call
 	 * to process special input
 	 */
 
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_log1p4_core_avx2.S b/sysdeps/x86_64/fpu/multiarch/svml_d_log1p4_core_avx2.S
index d114653c71..dc2ccb3255 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_d_log1p4_core_avx2.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_d_log1p4_core_avx2.S
@@ -257,7 +257,7 @@  L(SPECIAL_VALUES_LOOP):
 	.cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xa0, 0xff, 0xff, 0xff, 0x22
 	# LOE rbx r12 r13 r14 r15 ymm0
 
-	/* Scalar math fucntion call
+	/* Scalar math function call
 	 * to process special input
 	 */
 
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_log1p8_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_d_log1p8_core_avx512.S
index 283c40b689..f5ec27ddb1 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_d_log1p8_core_avx512.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_d_log1p8_core_avx512.S
@@ -219,7 +219,7 @@  L(SPECIAL_VALUES_LOOP):
 	.cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x40, 0xff, 0xff, 0xff, 0x22
 	# LOE rbx r12 r13 r14 r15 zmm0
 
-	/* Scalar math fucntion call
+	/* Scalar math function call
 	 * to process special input
 	 */
 
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_log22_core_sse4.S b/sysdeps/x86_64/fpu/multiarch/svml_d_log22_core_sse4.S
index 93bf27092d..29465643cd 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_d_log22_core_sse4.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_d_log22_core_sse4.S
@@ -225,7 +225,7 @@  L(SPECIAL_VALUES_LOOP):
 	.cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x22
 	# LOE rbx r12 r13 r14 r15 xmm3
 
-	/* Scalar math fucntion call
+	/* Scalar math function call
 	 * to process special input
 	 */
 
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_log24_core_avx2.S b/sysdeps/x86_64/fpu/multiarch/svml_d_log24_core_avx2.S
index 83d8d4c462..30fa3e4473 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_d_log24_core_avx2.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_d_log24_core_avx2.S
@@ -217,7 +217,7 @@  L(SPECIAL_VALUES_LOOP):
 	.cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xa0, 0xff, 0xff, 0xff, 0x22
 	# LOE rbx r12 r13 r14 r15 ymm0
 
-	/* Scalar math fucntion call
+	/* Scalar math function call
 	 * to process special input
 	 */
 
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_log28_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_d_log28_core_avx512.S
index bc9db384e9..351e00d182 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_d_log28_core_avx512.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_d_log28_core_avx512.S
@@ -199,7 +199,7 @@  L(SPECIAL_VALUES_LOOP):
 	.cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x40, 0xff, 0xff, 0xff, 0x22
 	# LOE rbx r12 r13 r14 r15 zmm0
 
-	/* Scalar math fucntion call
+	/* Scalar math function call
 	 * to process special input
 	 */
 
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_sinh2_core_sse4.S b/sysdeps/x86_64/fpu/multiarch/svml_d_sinh2_core_sse4.S
index 03a703f5f1..3b01840d73 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_d_sinh2_core_sse4.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_d_sinh2_core_sse4.S
@@ -260,7 +260,7 @@  L(SPECIAL_VALUES_LOOP):
 	cfi_offset(14, -80)
 	# LOE rbx rbp r12 r13 r14 r15 xmm0
 
-	/* Scalar math fucntion call
+	/* Scalar math function call
 	 * to process special input
 	 */
 
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_sinh4_core_avx2.S b/sysdeps/x86_64/fpu/multiarch/svml_d_sinh4_core_avx2.S
index 26075187ae..585e2e51bf 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_d_sinh4_core_avx2.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_d_sinh4_core_avx2.S
@@ -274,7 +274,7 @@  L(SPECIAL_VALUES_LOOP):
 	.cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xa0, 0xff, 0xff, 0xff, 0x22
 	# LOE rbx r12 r13 r14 r15 ymm0
 
-	/* Scalar math fucntion call
+	/* Scalar math function call
 	 * to process special input
 	 */
 
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_sinh8_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_d_sinh8_core_avx512.S
index ce08de9dd0..8158d1455f 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_d_sinh8_core_avx512.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_d_sinh8_core_avx512.S
@@ -265,7 +265,7 @@  L(SPECIAL_VALUES_LOOP):
 	.cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x40, 0xff, 0xff, 0xff, 0x22
 	# LOE rbx r12 r13 r14 r15 zmm0
 
-	/* Scalar math fucntion call
+	/* Scalar math function call
 	 * to process special input
 	 */
 
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_tan2_core_sse4.S b/sysdeps/x86_64/fpu/multiarch/svml_d_tan2_core_sse4.S
index 9fac5fa4bc..9c208765af 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_d_tan2_core_sse4.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_d_tan2_core_sse4.S
@@ -181,11 +181,11 @@  ENTRY(_ZGVbN2v_tan_sse4)
 	movmskpd %xmm4, %edx
 	testl	%edx, %edx
 
-	/* Go to auxilary branch */
+	/* Go to auxiliary branch */
 	jne	L(AUX_BRANCH)
 	# LOE rbx rbp r12 r13 r14 r15 eax xmm0 xmm1 xmm4 xmm5
 
-	/* Return from auxilary branch
+	/* Return from auxiliary branch
 	 * for out of main path inputs
 	 */
 
@@ -264,7 +264,7 @@  L(SPECIAL_VALUES_LOOP):
 	cfi_offset(14, -80)
 	# LOE rbx rbp r12 r13 r14 r15 xmm0
 
-	/* Scalar math fucntion call
+	/* Scalar math function call
 	 * to process special input
 	 */
 
@@ -283,7 +283,7 @@  L(SCALAR_MATH_CALL):
 	cfi_restore(14)
 	# LOE rbx rbp r15 r12d r13d
 
-	/* Auxilary branch
+	/* Auxiliary branch
 	 * for out of main path inputs
 	 */
 
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_tan4_core_avx2.S b/sysdeps/x86_64/fpu/multiarch/svml_d_tan4_core_avx2.S
index 8586565ddb..82d2ceff9a 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_d_tan4_core_avx2.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_d_tan4_core_avx2.S
@@ -166,11 +166,11 @@  ENTRY(_ZGVdN4v_tan_avx2)
 	vxorpd	%ymm0, %ymm8, %ymm0
 	testl	%eax, %eax
 
-	/* Go to auxilary branch */
+	/* Go to auxiliary branch */
 	jne	L(AUX_BRANCH)
 	# LOE rbx r12 r13 r14 r15 r9d ymm0 ymm1 ymm14 ymm15
 
-	/* Return from auxilary branch
+	/* Return from auxiliary branch
 	 * for out of main path inputs
 	 */
 
@@ -261,7 +261,7 @@  L(SPECIAL_VALUES_LOOP):
 	.cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xa0, 0xff, 0xff, 0xff, 0x22
 	# LOE rbx r12 r13 r14 r15 ymm0
 
-	/* Scalar math fucntion call
+	/* Scalar math function call
 	 * to process special input
 	 */
 
@@ -280,7 +280,7 @@  L(SCALAR_MATH_CALL):
 	cfi_restore(14)
 	# LOE rbx r15 r12d r13d
 
-	/* Auxilary branch
+	/* Auxiliary branch
 	 * for out of main path inputs
 	 */
 
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_tan8_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_d_tan8_core_avx512.S
index 79deb21b2a..c5738cef99 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_d_tan8_core_avx512.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_d_tan8_core_avx512.S
@@ -96,11 +96,11 @@  ENTRY(_ZGVeN8v_tan_skx)
 	vfnmadd231pd {rn-sae}, %zmm8, %zmm3, %zmm5
 	vfnmadd213pd {rn-sae}, %zmm5, %zmm4, %zmm8
 
-	/* Go to auxilary branch */
+	/* Go to auxiliary branch */
 	jne	L(AUX_BRANCH)
 	# LOE rbx r12 r13 r14 r15 edx zmm0 zmm1 zmm8 zmm11 k1
 
-	/* Return from auxilary branch
+	/* Return from auxiliary branch
 	 * for out of main path inputs
 	 */
 
@@ -233,7 +233,7 @@  L(SPECIAL_VALUES_LOOP):
 	.cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x40, 0xff, 0xff, 0xff, 0x22
 	# LOE rbx r12 r13 r14 r15 zmm0
 
-	/* Scalar math fucntion call
+	/* Scalar math function call
 	 * to process special input
 	 */
 
@@ -252,7 +252,7 @@  L(SCALAR_MATH_CALL):
 	cfi_restore(14)
 	# LOE rbx r15 r12d r13d
 
-	/* Auxilary branch
+	/* Auxiliary branch
 	 * for out of main path inputs
 	 */
 
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_tanh2_core_sse4.S b/sysdeps/x86_64/fpu/multiarch/svml_d_tanh2_core_sse4.S
index 6fef5f0856..cbcb0d6a43 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_d_tanh2_core_sse4.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_d_tanh2_core_sse4.S
@@ -259,7 +259,7 @@  L(SPECIAL_VALUES_LOOP):
 	cfi_offset(14, -80)
 	# LOE rbx rbp r12 r13 r14 r15 xmm0
 
-	/* Scalar math fucntion call
+	/* Scalar math function call
 	 * to process special input
 	 */
 
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_tanh4_core_avx2.S b/sysdeps/x86_64/fpu/multiarch/svml_d_tanh4_core_avx2.S
index c05f4c2079..cf0182bf8a 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_d_tanh4_core_avx2.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_d_tanh4_core_avx2.S
@@ -266,7 +266,7 @@  L(SPECIAL_VALUES_LOOP):
 	.cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xa0, 0xff, 0xff, 0xff, 0x22
 	# LOE rbx r12 r13 r14 r15 ymm0
 
-	/* Scalar math fucntion call
+	/* Scalar math function call
 	 * to process special input
 	 */
 
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_tanh8_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_d_tanh8_core_avx512.S
index 70f0880049..b3477a346b 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_d_tanh8_core_avx512.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_d_tanh8_core_avx512.S
@@ -280,7 +280,7 @@  L(SPECIAL_VALUES_LOOP):
 	.cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xc0, 0xfe, 0xff, 0xff, 0x22
 	# LOE rbx r12 r13 r14 r15 zmm0
 
-	/* Scalar math fucntion call
+	/* Scalar math function call
 	 * to process special input
 	 */
 
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_acosf16_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_s_acosf16_core_avx512.S
index 1c68130a87..5bdc356429 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_s_acosf16_core_avx512.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_s_acosf16_core_avx512.S
@@ -199,7 +199,7 @@  L(SPECIAL_VALUES_LOOP):
 	.cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x40, 0xff, 0xff, 0xff, 0x22
 	# LOE rbx r12 r13 r14 r15 zmm0
 
-	/* Scalar math fucntion call
+	/* Scalar math function call
 	 * to process special input
 	 */
 
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_acosf4_core_sse4.S b/sysdeps/x86_64/fpu/multiarch/svml_s_acosf4_core_sse4.S
index 372beff631..ac099d38c5 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_s_acosf4_core_sse4.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_s_acosf4_core_sse4.S
@@ -198,7 +198,7 @@  L(SPECIAL_VALUES_LOOP):
 	cfi_offset(14, -80)
 	# LOE rbx rbp r12 r13 r14 r15 xmm7
 
-	/* Scalar math fucntion call
+	/* Scalar math function call
 	 * to process special input
 	 */
 
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_acosf8_core_avx2.S b/sysdeps/x86_64/fpu/multiarch/svml_s_acosf8_core_avx2.S
index 9e2f3b0dfe..76296d91c0 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_s_acosf8_core_avx2.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_s_acosf8_core_avx2.S
@@ -192,7 +192,7 @@  L(SPECIAL_VALUES_LOOP):
 	.cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xa0, 0xff, 0xff, 0xff, 0x22
 	# LOE rbx r12 r13 r14 r15 ymm0
 
-	/* Scalar math fucntion call
+	/* Scalar math function call
 	 * to process special input
 	 */
 
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_acoshf16_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_s_acoshf16_core_avx512.S
index 9ba81506ca..ff7063499f 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_s_acoshf16_core_avx512.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_s_acoshf16_core_avx512.S
@@ -284,7 +284,7 @@  L(SPECIAL_VALUES_LOOP):
 	.cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x40, 0xff, 0xff, 0xff, 0x22
 	# LOE rbx r12 r13 r14 r15 zmm0
 
-	/* Scalar math fucntion call
+	/* Scalar math function call
 	 * to process special input
 	 */
 
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_acoshf4_core_sse4.S b/sysdeps/x86_64/fpu/multiarch/svml_s_acoshf4_core_sse4.S
index 6c3cbf0c3b..6a213dc2e4 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_s_acoshf4_core_sse4.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_s_acoshf4_core_sse4.S
@@ -299,7 +299,7 @@  L(SPECIAL_VALUES_LOOP):
 	cfi_offset(14, -80)
 	# LOE rbx rbp r12 r13 r14 r15 xmm9
 
-	/* Scalar math fucntion call
+	/* Scalar math function call
 	 * to process special input
 	 */
 
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_acoshf8_core_avx2.S b/sysdeps/x86_64/fpu/multiarch/svml_s_acoshf8_core_avx2.S
index 45aede28ea..17f6a19b3d 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_s_acoshf8_core_avx2.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_s_acoshf8_core_avx2.S
@@ -280,7 +280,7 @@  L(SPECIAL_VALUES_LOOP):
 	.cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xa0, 0xff, 0xff, 0xff, 0x22
 	# LOE rbx r12 r13 r14 r15 ymm0
 
-	/* Scalar math fucntion call
+	/* Scalar math function call
 	 * to process special input
 	 */
 
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_asinf16_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_s_asinf16_core_avx512.S
index daa5cfa91e..2ffe24e1ff 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_s_asinf16_core_avx512.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_s_asinf16_core_avx512.S
@@ -192,7 +192,7 @@  L(SPECIAL_VALUES_LOOP):
 	.cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x40, 0xff, 0xff, 0xff, 0x22
 	# LOE rbx r12 r13 r14 r15 zmm0
 
-	/* Scalar math fucntion call
+	/* Scalar math function call
 	 * to process special input
 	 */
 
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_asinf4_core_sse4.S b/sysdeps/x86_64/fpu/multiarch/svml_s_asinf4_core_sse4.S
index 0718fa09b4..bc3e2f8340 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_s_asinf4_core_sse4.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_s_asinf4_core_sse4.S
@@ -184,7 +184,7 @@  L(SPECIAL_VALUES_LOOP):
 	cfi_offset(14, -80)
 	# LOE rbx rbp r12 r13 r14 r15 xmm0
 
-	/* Scalar math fucntion call
+	/* Scalar math function call
 	 * to process special input
 	 */
 
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_asinf8_core_avx2.S b/sysdeps/x86_64/fpu/multiarch/svml_s_asinf8_core_avx2.S
index 2199ed35d1..41e015c490 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_s_asinf8_core_avx2.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_s_asinf8_core_avx2.S
@@ -181,7 +181,7 @@  L(SPECIAL_VALUES_LOOP):
 	.cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xa0, 0xff, 0xff, 0xff, 0x22
 	# LOE rbx r12 r13 r14 r15 ymm0
 
-	/* Scalar math fucntion call
+	/* Scalar math function call
 	 * to process special input
 	 */
 
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_asinhf16_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_s_asinhf16_core_avx512.S
index 720b58f956..592caa85da 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_s_asinhf16_core_avx512.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_s_asinhf16_core_avx512.S
@@ -307,7 +307,7 @@  L(SPECIAL_VALUES_LOOP):
 	.cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x40, 0xff, 0xff, 0xff, 0x22
 	# LOE rbx r12 r13 r14 r15 zmm0
 
-	/* Scalar math fucntion call
+	/* Scalar math function call
 	 * to process special input
 	 */
 
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_asinhf4_core_sse4.S b/sysdeps/x86_64/fpu/multiarch/svml_s_asinhf4_core_sse4.S
index c78550ec22..e5996b3346 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_s_asinhf4_core_sse4.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_s_asinhf4_core_sse4.S
@@ -403,7 +403,7 @@  L(SPECIAL_VALUES_LOOP):
 	cfi_offset(14, -80)
 	# LOE rbx rbp r12 r13 r14 r15 xmm0
 
-	/* Scalar math fucntion call
+	/* Scalar math function call
 	 * to process special input
 	 */
 
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_asinhf8_core_avx2.S b/sysdeps/x86_64/fpu/multiarch/svml_s_asinhf8_core_avx2.S
index f9aeea6c85..1e8fc22910 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_s_asinhf8_core_avx2.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_s_asinhf8_core_avx2.S
@@ -355,7 +355,7 @@  L(SPECIAL_VALUES_LOOP):
 	.cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xa0, 0xff, 0xff, 0xff, 0x22
 	# LOE rbx r12 r13 r14 r15 ymm0
 
-	/* Scalar math fucntion call
+	/* Scalar math function call
 	 * to process special input
 	 */
 
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_atan2f16_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_s_atan2f16_core_avx512.S
index e031dadf19..08c193e273 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_s_atan2f16_core_avx512.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_s_atan2f16_core_avx512.S
@@ -150,11 +150,11 @@  ENTRY(_ZGVeN16vv_atan2f_skx)
 	vaddps	{rn-sae}, %zmm11, %zmm9, %zmm9{%k4}
 	vorps	%zmm6, %zmm9, %zmm10
 
-	/* Go to auxilary branch */
+	/* Go to auxiliary branch */
 	jne	L(AUX_BRANCH)
 	# LOE rbx r12 r13 r14 r15 edx zmm0 zmm1 zmm2 zmm3 zmm4 zmm5 zmm6 zmm7 zmm8 zmm10 zmm11
 
-	/* Return from auxilary branch
+	/* Return from auxiliary branch
 	 * for out of main path inputs
 	 */
 
@@ -251,7 +251,7 @@  L(SPECIAL_VALUES_LOOP):
 	.cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x00, 0xff, 0xff, 0xff, 0x22
 	# LOE rbx r12 r13 r14 r15 zmm10
 
-	/* Scalar math fucntion call
+	/* Scalar math function call
 	 * to process special input
 	 */
 
@@ -271,7 +271,7 @@  L(SCALAR_MATH_CALL):
 	cfi_restore(14)
 	# LOE rbx r15 r12d r13d
 
-	/* Auxilary branch
+	/* Auxiliary branch
 	 * for out of main path inputs
 	 */
 
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_atan2f4_core_sse4.S b/sysdeps/x86_64/fpu/multiarch/svml_s_atan2f4_core_sse4.S
index 60426108b1..0ec9b19590 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_s_atan2f4_core_sse4.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_s_atan2f4_core_sse4.S
@@ -157,11 +157,11 @@  ENTRY(_ZGVbN4vv_atan2f_sse4)
 	/*  Special branch for fast (vector) processing of zero arguments  */
 	testl	%ecx, %ecx
 
-	/* Go to auxilary branch */
+	/* Go to auxiliary branch */
 	jne	L(AUX_BRANCH)
 	# LOE rbx rbp r12 r13 r14 r15 eax edx xmm0 xmm1 xmm4 xmm5 xmm6 xmm7 xmm8 xmm9 xmm10 xmm11 xmm12 xmm13
 
-	/* Return from auxilary branch
+	/* Return from auxiliary branch
 	 * for out of main path inputs
 	 */
 
@@ -244,7 +244,7 @@  L(SPECIAL_VALUES_LOOP):
 	cfi_offset(14, -96)
 	# LOE rbx rbp r12 r13 r14 r15 xmm0
 
-	/* Scalar math fucntion call
+	/* Scalar math function call
 	 * to process special input
 	 */
 
@@ -264,7 +264,7 @@  L(SCALAR_MATH_CALL):
 	cfi_restore(14)
 	# LOE rbx rbp r15 r12d r13d
 
-	/* Auxilary branch
+	/* Auxiliary branch
 	 * for out of main path inputs
 	 */
 
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_atan2f8_core_avx2.S b/sysdeps/x86_64/fpu/multiarch/svml_s_atan2f8_core_avx2.S
index bf632c8a99..69619cb4d8 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_s_atan2f8_core_avx2.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_s_atan2f8_core_avx2.S
@@ -131,11 +131,11 @@  ENTRY(_ZGVdN8vv_atan2f_avx2)
 	/*  Special branch for fast (vector) processing of zero arguments  */
 	testl	%eax, %eax
 
-	/* Go to auxilary branch */
+	/* Go to auxiliary branch */
 	jne	L(AUX_BRANCH)
 	# LOE rbx r12 r13 r14 r15 edx ymm0 ymm1 ymm2 ymm3 ymm4 ymm5 ymm6 ymm7 ymm9 ymm10 ymm12 ymm13
 
-	/* Return from auxilary branch
+	/* Return from auxiliary branch
 	 * for out of main path inputs
 	 */
 
@@ -232,7 +232,7 @@  L(SPECIAL_VALUES_LOOP):
 	.cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x80, 0xff, 0xff, 0xff, 0x22
 	# LOE rbx r12 r13 r14 r15 ymm9
 
-	/* Scalar math fucntion call
+	/* Scalar math function call
 	 * to process special input
 	 */
 
@@ -252,7 +252,7 @@  L(SCALAR_MATH_CALL):
 	cfi_restore(14)
 	# LOE rbx r15 r12d r13d
 
-	/* Auxilary branch
+	/* Auxiliary branch
 	 * for out of main path inputs
 	 */
 
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_atanhf16_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_s_atanhf16_core_avx512.S
index f733c7a1b5..6c3d40d676 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_s_atanhf16_core_avx512.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_s_atanhf16_core_avx512.S
@@ -221,7 +221,7 @@  L(SPECIAL_VALUES_LOOP):
 	xorl	%ebp, %ebp
 	tzcntl	%ebx, %ebp
 
-	/* Scalar math fucntion call to process special input.  */
+	/* Scalar math function call to process special input.  */
 	vmovss	64(%rsp, %rbp, 4), %xmm0
 	call	atanhf@PLT
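
(This hunk shows the pattern behind the "Scalar math function call to process special input" comment that recurs across the series: the mask of lanes needing special handling is walked with tzcnt/bsf, and each flagged lane is recomputed by the ordinary scalar libm routine, whose result then replaces that lane's entry in the saved results. A C sketch of that loop follows; the function and parameter names are illustrative, not glibc interfaces.)

  /* Illustrative sketch of the SPECIAL_VALUES_LOOP fallback: for every
     set bit in MASK, recompute that lane with the scalar routine.  The
     assembly above does "tzcntl %ebx, %ebp", loads the saved input from
     that lane's stack slot, calls e.g. atanhf@PLT and stores the result
     back.  */
  static void
  fixup_special_lanes (const float *lanes_in, float *lanes_out,
                       unsigned mask, float (*scalar_fn) (float))
  {
    while (mask != 0)
      {
        int lane = __builtin_ctz (mask);              /* tzcntl / bsfl */
        lanes_out[lane] = scalar_fn (lanes_in[lane]);
        mask &= mask - 1;                             /* clear that lane */
      }
  }

With atanhf from <math.h> as the callback this mirrors what these atanhf kernels do; the other kernels in the patch differ only in which scalar routine they call and how many lanes they spill.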
 
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_atanhf4_core_sse4.S b/sysdeps/x86_64/fpu/multiarch/svml_s_atanhf4_core_sse4.S
index 055484bfb2..ab2ef46b9d 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_s_atanhf4_core_sse4.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_s_atanhf4_core_sse4.S
@@ -242,7 +242,7 @@  L(SPECIAL_VALUES_LOOP):
 	xorl	%ebp, %ebp
 	bsfl	%ebx, %ebp
 
-	/* Scalar math fucntion call to process special input.  */
+	/* Scalar math function call to process special input.  */
 	movss	40(%rsp, %rbp, 4), %xmm0
 	call	atanhf@PLT
 	/* No good way to avoid the store-forwarding fault this will cause on
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_atanhf8_core_avx2.S b/sysdeps/x86_64/fpu/multiarch/svml_s_atanhf8_core_avx2.S
index 8ffe98cfe1..e70085b051 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_s_atanhf8_core_avx2.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_s_atanhf8_core_avx2.S
@@ -230,7 +230,7 @@  L(SPECIAL_VALUES_LOOP):
 	xorl	%ebp, %ebp
 	tzcntl	%ebx, %ebp
 
-	/* Scalar math fucntion call to process special input.  */
+	/* Scalar math function call to process special input.  */
 	vmovss	32(%rsp, %rbp, 4), %xmm0
 	call	atanhf@PLT
 
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_cbrtf4_core_sse4.S b/sysdeps/x86_64/fpu/multiarch/svml_s_cbrtf4_core_sse4.S
index f5331db13b..270e620d61 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_s_cbrtf4_core_sse4.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_s_cbrtf4_core_sse4.S
@@ -273,7 +273,7 @@  L(SPECIAL_VALUES_LOOP):
 	cfi_offset(14, -80)
 	# LOE rbx rbp r12 r13 r14 r15 xmm12
 
-	/* Scalar math fucntion call
+	/* Scalar math function call
 	 * to process special input
 	 */
 
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_cbrtf8_core_avx2.S b/sysdeps/x86_64/fpu/multiarch/svml_s_cbrtf8_core_avx2.S
index 76db762fe8..292eb5a93f 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_s_cbrtf8_core_avx2.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_s_cbrtf8_core_avx2.S
@@ -298,7 +298,7 @@  L(SPECIAL_VALUES_LOOP):
 	.cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xa0, 0xff, 0xff, 0xff, 0x22
 	# LOE rbx r12 r13 r14 r15 ymm0
 
-	/* Scalar math fucntion call
+	/* Scalar math function call
 	 * to process special input
 	 */
 
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_coshf16_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_s_coshf16_core_avx512.S
index 14696eeff4..773594d4e0 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_s_coshf16_core_avx512.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_s_coshf16_core_avx512.S
@@ -222,7 +222,7 @@  L(SPECIAL_VALUES_LOOP):
 	.cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x40, 0xff, 0xff, 0xff, 0x22
 	# LOE rbx r12 r13 r14 r15 zmm6
 
-	/* Scalar math fucntion call
+	/* Scalar math function call
 	 * to process special input
 	 */
 
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_coshf4_core_sse4.S b/sysdeps/x86_64/fpu/multiarch/svml_s_coshf4_core_sse4.S
index 654ac65916..ee987dd10f 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_s_coshf4_core_sse4.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_s_coshf4_core_sse4.S
@@ -233,7 +233,7 @@  L(SPECIAL_VALUES_LOOP):
 	cfi_offset(14, -80)
 	# LOE rbx rbp r12 r13 r14 r15 xmm2
 
-	/* Scalar math fucntion call
+	/* Scalar math function call
 	 * to process special input
 	 */
 
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_coshf8_core_avx2.S b/sysdeps/x86_64/fpu/multiarch/svml_s_coshf8_core_avx2.S
index 474cb05473..24692722eb 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_s_coshf8_core_avx2.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_s_coshf8_core_avx2.S
@@ -236,7 +236,7 @@  L(SPECIAL_VALUES_LOOP):
 	.cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xa0, 0xff, 0xff, 0xff, 0x22
 	# LOE rbx r12 r13 r14 r15 ymm0
 
-	/* Scalar math fucntion call
+	/* Scalar math function call
 	 * to process special input
 	 */
 
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_erfcf16_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_s_erfcf16_core_avx512.S
index 03b7e4adc1..3d19dbd58a 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_s_erfcf16_core_avx512.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_s_erfcf16_core_avx512.S
@@ -212,7 +212,7 @@  L(SPECIAL_VALUES_LOOP):
 	.cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x40, 0xff, 0xff, 0xff, 0x22
 	# LOE rbx r12 r13 r14 r15 zmm0
 
-	/* Scalar math fucntion call
+	/* Scalar math function call
 	 * to process special input
 	 */
 
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_erfcf4_core_sse4.S b/sysdeps/x86_64/fpu/multiarch/svml_s_erfcf4_core_sse4.S
index 02aa2b4f76..e7cae80579 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_s_erfcf4_core_sse4.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_s_erfcf4_core_sse4.S
@@ -219,7 +219,7 @@  L(SPECIAL_VALUES_LOOP):
 	cfi_offset(14, -80)
 	# LOE rbx rbp r12 r13 r14 r15 xmm0
 
-	/* Scalar math fucntion call
+	/* Scalar math function call
 	 * to process special input
 	 */
 
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_erfcf8_core_avx2.S b/sysdeps/x86_64/fpu/multiarch/svml_s_erfcf8_core_avx2.S
index c3e8e399db..958b46dbfe 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_s_erfcf8_core_avx2.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_s_erfcf8_core_avx2.S
@@ -237,7 +237,7 @@  L(SPECIAL_VALUES_LOOP):
 	.cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xa0, 0xff, 0xff, 0xff, 0x22
 	# LOE rbx r12 r13 r14 r15 ymm0
 
-	/* Scalar math fucntion call
+	/* Scalar math function call
 	 * to process special input
 	 */
 
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_exp10f16_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_s_exp10f16_core_avx512.S
index e70e8c52ca..f2d8130ee4 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_s_exp10f16_core_avx512.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_s_exp10f16_core_avx512.S
@@ -23,7 +23,7 @@ 
  *    - all arguments processed in the main path
  *        - final VSCALEF assists branch-free design (correct overflow/underflow and special case responses)
  *        - a VAND is used to ensure the reduced argument |R|<2, even for large inputs
- *        - RZ mode used to avoid oveflow to +/-Inf for x*log2(e); helps with special case handling
+ *        - RZ mode used to avoid overflow to +/-Inf for x*log2(e); helps with special case handling
  *        - SAE used to avoid spurious flag settings
  *
  */
@@ -180,7 +180,7 @@  L(SPECIAL_VALUES_LOOP):
 	.cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x40, 0xff, 0xff, 0xff, 0x22
 	# LOE rbx r12 r13 r14 r15 zmm1
 
-	/* Scalar math fucntion call
+	/* Scalar math function call
 	 * to process special input
 	 */
 
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_exp10f4_core_sse4.S b/sysdeps/x86_64/fpu/multiarch/svml_s_exp10f4_core_sse4.S
index 9de39a62c2..9eb215a40f 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_s_exp10f4_core_sse4.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_s_exp10f4_core_sse4.S
@@ -63,7 +63,7 @@  ENTRY(_ZGVbN4v_exp10f_sse4)
 	cfi_def_cfa_offset(80)
 	movaps	%xmm0, %xmm4
 
-	/*  Load arument  */
+	/*  Load argument  */
 	movups	_sLg2_10+__svml_sexp10_data_internal(%rip), %xmm2
 	lea	__svml_sexp10_data_internal(%rip), %r8
 	mulps	%xmm4, %xmm2
@@ -212,7 +212,7 @@  L(SPECIAL_VALUES_LOOP):
 	cfi_offset(14, -80)
 	# LOE rbx rbp r12 r13 r14 r15 xmm0
 
-	/* Scalar math fucntion call
+	/* Scalar math function call
 	 * to process special input
 	 */
 
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_exp10f8_core_avx2.S b/sysdeps/x86_64/fpu/multiarch/svml_s_exp10f8_core_avx2.S
index e3087a75dc..79563cc353 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_s_exp10f8_core_avx2.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_s_exp10f8_core_avx2.S
@@ -69,7 +69,7 @@  ENTRY(_ZGVdN8v_exp10f_avx2)
 	lea	__svml_sexp10_data_internal(%rip), %rax
 	vmovups	_sShifter+__svml_sexp10_data_internal(%rip), %ymm4
 
-	/*  Load arument  */
+	/*  Load argument  */
 	vmovups	_sLg2_10+__svml_sexp10_data_internal(%rip), %ymm1
 	vmovups	_iIndexMask+__svml_sexp10_data_internal(%rip), %ymm2
 	vmovaps	%ymm0, %ymm3
@@ -232,7 +232,7 @@  L(SPECIAL_VALUES_LOOP):
 	.cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xa0, 0xff, 0xff, 0xff, 0x22
 	# LOE rbx r12 r13 r14 r15 ymm0
 
-	/* Scalar math fucntion call
+	/* Scalar math function call
 	 * to process special input
 	 */
 
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_exp2f16_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_s_exp2f16_core_avx512.S
index 1911c06bcf..ce983b297b 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_s_exp2f16_core_avx512.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_s_exp2f16_core_avx512.S
@@ -203,7 +203,7 @@  L(SPECIAL_VALUES_LOOP):
 	.cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x40, 0xff, 0xff, 0xff, 0x22
 	# LOE rbx r12 r13 r14 r15 zmm1
 
-	/* Scalar math fucntion call
+	/* Scalar math function call
 	 * to process special input
 	 */
 
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_exp2f4_core_sse4.S b/sysdeps/x86_64/fpu/multiarch/svml_s_exp2f4_core_sse4.S
index f4ddfbe932..512ea5c5bf 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_s_exp2f4_core_sse4.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_s_exp2f4_core_sse4.S
@@ -175,7 +175,7 @@  L(SPECIAL_VALUES_LOOP):
 	cfi_offset(14, -80)
 	# LOE rbx rbp r12 r13 r14 r15 xmm1
 
-	/* Scalar math fucntion call
+	/* Scalar math function call
 	 * to process special input
 	 */
 
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_exp2f8_core_avx2.S b/sysdeps/x86_64/fpu/multiarch/svml_s_exp2f8_core_avx2.S
index 277508b8ef..47592985c1 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_s_exp2f8_core_avx2.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_s_exp2f8_core_avx2.S
@@ -182,7 +182,7 @@  L(SPECIAL_VALUES_LOOP):
 	.cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xa0, 0xff, 0xff, 0xff, 0x22
 	# LOE rbx r12 r13 r14 r15 ymm1
 
-	/* Scalar math fucntion call
+	/* Scalar math function call
 	 * to process special input
 	 */
 
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_expm1f16_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_s_expm1f16_core_avx512.S
index 7aa1e3c417..4683e546de 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_s_expm1f16_core_avx512.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_s_expm1f16_core_avx512.S
@@ -24,7 +24,7 @@ 
  *    - all arguments processed in the main path
  *        - final VSCALEF assists branch-free design (correct overflow/underflow and special case responses)
  *        - a VAND is used to ensure the reduced argument |R|<2, even for large inputs
- *        - RZ mode used to avoid oveflow to +/-Inf for x*log2(e); helps with special case handling
+ *        - RZ mode used to avoid overflow to +/-Inf for x*log2(e); helps with special case handling
  *
  *
  */
@@ -188,7 +188,7 @@  L(SPECIAL_VALUES_LOOP):
 	.cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x40, 0xff, 0xff, 0xff, 0x22
 	# LOE rbx r12 r13 r14 r15 zmm0
 
-	/* Scalar math fucntion call
+	/* Scalar math function call
 	 * to process special input
 	 */
 
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_expm1f4_core_sse4.S b/sysdeps/x86_64/fpu/multiarch/svml_s_expm1f4_core_sse4.S
index 6a3a9d266c..5159b0785a 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_s_expm1f4_core_sse4.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_s_expm1f4_core_sse4.S
@@ -207,7 +207,7 @@  L(SPECIAL_VALUES_LOOP):
 	.cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x22
 	# LOE rbx r12 r13 r14 r15 xmm10
 
-	/* Scalar math fucntion call
+	/* Scalar math function call
 	 * to process special input
 	 */
 
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_expm1f8_core_avx2.S b/sysdeps/x86_64/fpu/multiarch/svml_s_expm1f8_core_avx2.S
index ee442d8c4a..aae9068cc9 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_s_expm1f8_core_avx2.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_s_expm1f8_core_avx2.S
@@ -206,7 +206,7 @@  L(SPECIAL_VALUES_LOOP):
 	.cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xa0, 0xff, 0xff, 0xff, 0x22
 	# LOE rbx r12 r13 r14 r15 ymm0
 
-	/* Scalar math fucntion call
+	/* Scalar math function call
 	 * to process special input
 	 */
 
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_hypotf16_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_s_hypotf16_core_avx512.S
index 06c6903df2..749deb0833 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_s_hypotf16_core_avx512.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_s_hypotf16_core_avx512.S
@@ -45,7 +45,7 @@ 
  *    No multiprecision branch for _LA_ and _EP_
  *      _z = _VARG1 * _VARG1 + _VARG2 * _VARG2
  *
- *    Check _z exponent to be withing borders [1E3 ; 60A] else goto Callout
+ *    Check _z exponent to be within borders [1E3 ; 60A] else goto Callout
  *
  *    Compute resciplicle sqrt s0 ~ 1.0/sqrt(_z),
  *      that multiplied by _z, is final result for _EP_ version.
@@ -196,7 +196,7 @@  L(SPECIAL_VALUES_LOOP):
 	.cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x00, 0xff, 0xff, 0xff, 0x22
 	# LOE rbx r12 r13 r14 r15 zmm2
 
-	/* Scalar math fucntion call
+	/* Scalar math function call
 	 * to process special input
 	 */
 
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_hypotf4_core_sse4.S b/sysdeps/x86_64/fpu/multiarch/svml_s_hypotf4_core_sse4.S
index c5a94d7b5b..38ab12b1e2 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_s_hypotf4_core_sse4.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_s_hypotf4_core_sse4.S
@@ -45,7 +45,7 @@ 
  *    No multiprecision branch for _LA_ and _EP_
  *      _z = _VARG1 * _VARG1 + _VARG2 * _VARG2
  *
- *    Check _z exponent to be withing borders [1E3 ; 60A] else goto Callout
+ *    Check _z exponent to be within borders [1E3 ; 60A] else goto Callout
  *
  *    Compute resciplicle sqrt s0 ~ 1.0/sqrt(_z),
  *      that multiplied by _z, is final result for _EP_ version.
@@ -117,7 +117,7 @@  ENTRY(_ZGVbN4vv_hypotf_sse4)
 	movaps	%xmm2, %xmm6
 	mulps	%xmm10, %xmm6
 
-	/* Check _z exponent to be withing borders [1E3 ; 60A] else goto Callout */
+	/* Check _z exponent to be within borders [1E3 ; 60A] else goto Callout */
 	movdqu	_LowBoundary+__svml_shypot_data_internal(%rip), %xmm4
 	subps	%xmm6, %xmm5
 
@@ -216,7 +216,7 @@  L(SPECIAL_VALUES_LOOP):
 	cfi_offset(14, -96)
 	# LOE rbx rbp r12 r13 r14 r15 xmm2
 
-	/* Scalar math fucntion call
+	/* Scalar math function call
 	 * to process special input
 	 */
 
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_hypotf8_core_avx2.S b/sysdeps/x86_64/fpu/multiarch/svml_s_hypotf8_core_avx2.S
index fe87678ae6..80f1081201 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_s_hypotf8_core_avx2.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_s_hypotf8_core_avx2.S
@@ -45,7 +45,7 @@ 
  *    No multiprecision branch for _LA_ and _EP_
  *      _z = _VARG1 * _VARG1 + _VARG2 * _VARG2
  *
- *    Check _z exponent to be withing borders [1E3 ; 60A] else goto Callout
+ *    Check _z exponent to be within borders [1E3 ; 60A] else goto Callout
  *
  *    Compute resciplicle sqrt s0 ~ 1.0/sqrt(_z),
  *      that multiplied by _z, is final result for _EP_ version.
@@ -107,7 +107,7 @@  ENTRY(_ZGVdN8vv_hypotf_avx2)
 	 */
 	vmovups	_sHalf+__svml_shypot_data_internal(%rip), %ymm7
 
-	/* Check _z exponent to be withing borders [1E3 ; 60A] else goto Callout */
+	/* Check _z exponent to be within borders [1E3 ; 60A] else goto Callout */
 	vmovups	_LowBoundary+__svml_shypot_data_internal(%rip), %ymm2
 	vfmadd231ps %ymm1, %ymm1, %ymm8
 
@@ -220,7 +220,7 @@  L(SPECIAL_VALUES_LOOP):
 	.cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x80, 0xff, 0xff, 0xff, 0x22
 	# LOE rbx r12 r13 r14 r15 ymm2
 
-	/* Scalar math fucntion call
+	/* Scalar math function call
 	 * to process special input
 	 */
 
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_log10f16_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_s_log10f16_core_avx512.S
index 87a1694a6f..0deb96997a 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_s_log10f16_core_avx512.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_s_log10f16_core_avx512.S
@@ -155,7 +155,7 @@  L(SPECIAL_VALUES_LOOP):
 	.cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x40, 0xff, 0xff, 0xff, 0x22
 	# LOE rbx r12 r13 r14 r15 zmm1
 
-	/* Scalar math fucntion call
+	/* Scalar math function call
 	 * to process special input
 	 */
 
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_log10f4_core_sse4.S b/sysdeps/x86_64/fpu/multiarch/svml_s_log10f4_core_sse4.S
index 80ded85293..6baff562f5 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_s_log10f4_core_sse4.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_s_log10f4_core_sse4.S
@@ -168,7 +168,7 @@  L(SPECIAL_VALUES_LOOP):
 	cfi_offset(14, -80)
 	# LOE rbx rbp r12 r13 r14 r15 xmm1
 
-	/* Scalar math fucntion call
+	/* Scalar math function call
 	 * to process special input
 	 */
 
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_log10f8_core_avx2.S b/sysdeps/x86_64/fpu/multiarch/svml_s_log10f8_core_avx2.S
index 480495037f..54ff0b1e4d 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_s_log10f8_core_avx2.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_s_log10f8_core_avx2.S
@@ -168,7 +168,7 @@  L(SPECIAL_VALUES_LOOP):
 	.cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xa0, 0xff, 0xff, 0xff, 0x22
 	# LOE rbx r12 r13 r14 r15 ymm1
 
-	/* Scalar math fucntion call
+	/* Scalar math function call
 	 * to process special input
 	 */
 
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_log1pf16_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_s_log1pf16_core_avx512.S
index d629dc44f3..e4f8a603ff 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_s_log1pf16_core_avx512.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_s_log1pf16_core_avx512.S
@@ -201,7 +201,7 @@  L(SPECIAL_VALUES_LOOP):
 	.cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x40, 0xff, 0xff, 0xff, 0x22
 	# LOE rbx r12 r13 r14 r15 zmm0
 
-	/* Scalar math fucntion call
+	/* Scalar math function call
 	 * to process special input
 	 */
 
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_log1pf4_core_sse4.S b/sysdeps/x86_64/fpu/multiarch/svml_s_log1pf4_core_sse4.S
index 511e064a3d..4a10457eb8 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_s_log1pf4_core_sse4.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_s_log1pf4_core_sse4.S
@@ -182,7 +182,7 @@  L(SPECIAL_VALUES_LOOP):
 	cfi_offset(14, -80)
 	# LOE rbx rbp r12 r13 r14 r15 xmm1
 
-	/* Scalar math fucntion call
+	/* Scalar math function call
 	 * to process special input
 	 */
 
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_log1pf8_core_avx2.S b/sysdeps/x86_64/fpu/multiarch/svml_s_log1pf8_core_avx2.S
index ea39f66d22..672c91e07e 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_s_log1pf8_core_avx2.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_s_log1pf8_core_avx2.S
@@ -184,7 +184,7 @@  L(SPECIAL_VALUES_LOOP):
 	.cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xa0, 0xff, 0xff, 0xff, 0x22
 	# LOE rbx r12 r13 r14 r15 ymm0
 
-	/* Scalar math fucntion call
+	/* Scalar math function call
 	 * to process special input
 	 */
 
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_log2f16_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_s_log2f16_core_avx512.S
index c14fd3d918..04288956c4 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_s_log2f16_core_avx512.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_s_log2f16_core_avx512.S
@@ -152,7 +152,7 @@  L(SPECIAL_VALUES_LOOP):
 	.cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x40, 0xff, 0xff, 0xff, 0x22
 	# LOE rbx r12 r13 r14 r15 zmm1
 
-	/* Scalar math fucntion call
+	/* Scalar math function call
 	 * to process special input
 	 */
 
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_log2f4_core_sse4.S b/sysdeps/x86_64/fpu/multiarch/svml_s_log2f4_core_sse4.S
index f4aa9481ca..93ed64254e 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_s_log2f4_core_sse4.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_s_log2f4_core_sse4.S
@@ -160,7 +160,7 @@  L(SPECIAL_VALUES_LOOP):
 	cfi_offset(14, -80)
 	# LOE rbx rbp r12 r13 r14 r15 xmm1
 
-	/* Scalar math fucntion call
+	/* Scalar math function call
 	 * to process special input
 	 */
 
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_log2f8_core_avx2.S b/sysdeps/x86_64/fpu/multiarch/svml_s_log2f8_core_avx2.S
index d2441c3581..02360e57ee 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_s_log2f8_core_avx2.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_s_log2f8_core_avx2.S
@@ -163,7 +163,7 @@  L(SPECIAL_VALUES_LOOP):
 	.cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xa0, 0xff, 0xff, 0xff, 0x22
 	# LOE rbx r12 r13 r14 r15 ymm1
 
-	/* Scalar math fucntion call
+	/* Scalar math function call
 	 * to process special input
 	 */
 
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_sinhf16_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_s_sinhf16_core_avx512.S
index dda1a0531b..03e7f345b0 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_s_sinhf16_core_avx512.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_s_sinhf16_core_avx512.S
@@ -246,7 +246,7 @@  L(SPECIAL_VALUES_LOOP):
 	.cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x40, 0xff, 0xff, 0xff, 0x22
 	# LOE rbx r12 r13 r14 r15 zmm0
 
-	/* Scalar math fucntion call
+	/* Scalar math function call
 	 * to process special input
 	 */
 
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_sinhf4_core_sse4.S b/sysdeps/x86_64/fpu/multiarch/svml_s_sinhf4_core_sse4.S
index 34ec276ac0..59d6329126 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_s_sinhf4_core_sse4.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_s_sinhf4_core_sse4.S
@@ -236,7 +236,7 @@  L(SPECIAL_VALUES_LOOP):
 	cfi_offset(14, -80)
 	# LOE rbx rbp r12 r13 r14 r15 xmm14
 
-	/* Scalar math fucntion call
+	/* Scalar math function call
 	 * to process special input
 	 */
 
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_sinhf8_core_avx2.S b/sysdeps/x86_64/fpu/multiarch/svml_s_sinhf8_core_avx2.S
index abf8d658ab..81e1f19e26 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_s_sinhf8_core_avx2.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_s_sinhf8_core_avx2.S
@@ -237,7 +237,7 @@  L(SPECIAL_VALUES_LOOP):
 	.cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xa0, 0xff, 0xff, 0xff, 0x22
 	# LOE rbx r12 r13 r14 r15 ymm0
 
-	/* Scalar math fucntion call
+	/* Scalar math function call
 	 * to process special input
 	 */
 
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_tanf16_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_s_tanf16_core_avx512.S
index 3d4dba3fab..ae95fbae91 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_s_tanf16_core_avx512.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_s_tanf16_core_avx512.S
@@ -94,11 +94,11 @@  ENTRY(_ZGVeN16v_tanf_skx)
 	vfnmadd231ps {rn-sae}, %zmm5, %zmm2, %zmm4
 	vfnmadd213ps {rn-sae}, %zmm4, %zmm3, %zmm5
 
-	/* Go to auxilary branch */
+	/* Go to auxiliary branch */
 	jne	L(AUX_BRANCH)
 	# LOE rbx r12 r13 r14 r15 edx zmm0 zmm5 zmm10 zmm11 k6
 
-	/* Return from auxilary branch
+	/* Return from auxiliary branch
 	 * for out of main path inputs
 	 */
 
@@ -229,7 +229,7 @@  L(SPECIAL_VALUES_LOOP):
 	.cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x40, 0xff, 0xff, 0xff, 0x22
 	# LOE rbx r12 r13 r14 r15 zmm0
 
-	/* Scalar math fucntion call
+	/* Scalar math function call
 	 * to process special input
 	 */
 
@@ -248,7 +248,7 @@  L(SCALAR_MATH_CALL):
 	cfi_restore(14)
 	# LOE rbx r15 r12d r13d
 
-	/* Auxilary branch
+	/* Auxiliary branch
 	 * for out of main path inputs
 	 */
 
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_tanf4_core_sse4.S b/sysdeps/x86_64/fpu/multiarch/svml_s_tanf4_core_sse4.S
index 1292e88cf9..fab86645b6 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_s_tanf4_core_sse4.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_s_tanf4_core_sse4.S
@@ -175,11 +175,11 @@  ENTRY(_ZGVbN4v_tanf_sse4)
 
 	testl	%edx, %edx
 
-	/* Go to auxilary branch */
+	/* Go to auxiliary branch */
 	jne	L(AUX_BRANCH)
 	# LOE rbx rbp r12 r13 r14 r15 eax xmm0 xmm4 xmm11 xmm12 xmm13
 
-	/* Return from auxilary branch
+	/* Return from auxiliary branch
 	 * for out of main path inputs
 	 */
 
@@ -258,7 +258,7 @@  L(SPECIAL_VALUES_LOOP):
 	cfi_offset(14, -240)
 	# LOE rbx rbp r12 r13 r14 r15 xmm0
 
-	/* Scalar math fucntion call
+	/* Scalar math function call
 	 * to process special input
 	 */
 
@@ -277,7 +277,7 @@  L(SCALAR_MATH_CALL):
 	cfi_restore(14)
 	# LOE rbx rbp r15 r12d r13d
 
-	/* Auxilary branch
+	/* Auxiliary branch
 	 * for out of main path inputs
 	 */
 
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_tanf8_core_avx2.S b/sysdeps/x86_64/fpu/multiarch/svml_s_tanf8_core_avx2.S
index ab52321220..30585a77b4 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_s_tanf8_core_avx2.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_s_tanf8_core_avx2.S
@@ -161,13 +161,13 @@  ENTRY(_ZGVdN8v_tanf_avx2)
 
 	testl	%edx, %edx
 
-	/* Go to auxilary branch */
+	/* Go to auxiliary branch */
 	jne	L(AUX_BRANCH)
 	/*  DW_CFA_expression: r3 (rbx) (DW_OP_lit8; DW_OP_minus; DW_OP_const4s: -32; DW_OP_and; DW_OP_const4s: -8; DW_OP_plus)  */
 	.cfi_escape 0x10, 0x03, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xf8, 0xff, 0xff, 0xff, 0x22
 	# LOE r12 r13 r14 r15 eax ymm0 ymm1 ymm10 ymm11 ymm12
 
-	/* Return from auxilary branch
+	/* Return from auxiliary branch
 	 * for out of main path inputs
 	 */
 
@@ -255,7 +255,7 @@  L(SPECIAL_VALUES_LOOP):
 	.cfi_escape 0x10, 0x0d, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x40, 0xff, 0xff, 0xff, 0x22
 	# LOE r12 r13 r14 r15 ymm0
 
-	/* Scalar math fucntion call
+	/* Scalar math function call
 	 * to process special input
 	 */
 
@@ -273,7 +273,7 @@  L(SCALAR_MATH_CALL):
 	cfi_restore(13)
 	# LOE r14 r15 ebx r12d
 
-	/* Auxilary branch
+	/* Auxiliary branch
 	 * for out of main path inputs
 	 */
 
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_tanhf16_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_s_tanhf16_core_avx512.S
index d72a88924c..e639c48524 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_s_tanhf16_core_avx512.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_s_tanhf16_core_avx512.S
@@ -220,7 +220,7 @@  L(SPECIAL_VALUES_LOOP):
 	xorl	%ebp, %ebp
 	tzcntl	%ebx, %ebp
 
-	/* Scalar math fucntion call to process special input.  */
+	/* Scalar math function call to process special input.  */
 	vmovss	64(%rsp, %rbp, 4), %xmm0
 	call	tanhf@PLT
 
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_tanhf4_core_sse4.S b/sysdeps/x86_64/fpu/multiarch/svml_s_tanhf4_core_sse4.S
index dcbb1886d0..357ad375b3 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_s_tanhf4_core_sse4.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_s_tanhf4_core_sse4.S
@@ -73,7 +73,7 @@ 
 
 #include <sysdep.h>
 
-/* tanhf data tables for avx2 and sse4 implementatins defined here.
+/* tanhf data tables for avx2 and sse4 implementations defined here.
  */
 #define ONLY_DECL_OFFSET
 #include "svml_s_tanhf_rodata.S"
@@ -217,7 +217,7 @@  L(SPECIAL_VALUES_LOOP):
 	xorl	%ebp, %ebp
 	bsfl	%ebx, %ebp
 
-	/* Scalar math fucntion call to process special input.  */
+	/* Scalar math function call to process special input.  */
 	movss	40(%rsp, %rbp, 4), %xmm0
 	call	tanhf@PLT
 	/* No good way to avoid the store-forwarding fault this will cause on
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_tanhf8_core_avx2.S b/sysdeps/x86_64/fpu/multiarch/svml_s_tanhf8_core_avx2.S
index b8d828e081..ea19903d9d 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_s_tanhf8_core_avx2.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_s_tanhf8_core_avx2.S
@@ -72,7 +72,7 @@ 
 
 #include <sysdep.h>
 
-/* tanhf data tables for avx2 and sse4 implementatins defined here.
+/* tanhf data tables for avx2 and sse4 implementations defined here.
  */
 #include "svml_s_tanhf_rodata.S"