
x86: Replace vmovdqu with movdqu in BF16 XMM ABI tests

Message ID 20220823205455.43845-1-hjl.tools@gmail.com
State: New

Commit Message

H.J. Lu Aug. 23, 2022, 8:54 p.m. UTC
I am checking this in as an obvious fix.

H.J.
---
Since the BF16 XMM ABI tests require only SSE2, replace vmovdqu with its
SSE2 equivalent, movdqu, so that the tests also run on SSE2 machines
without AVX.
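
For reference, a minimal standalone sketch of the difference (hypothetical,
not part of the patch; the buf symbol is made up for illustration): movdqu
is the baseline-SSE2 unaligned 128-bit store, available on every x86-64
CPU, while vmovdqu is its VEX-encoded AVX form and faults with #UD
(SIGILL) on CPUs without AVX.

	.text
	.globl	main
main:
	movdqu	%xmm0, buf(%rip)	# SSE2: runs on any x86-64 CPU
	# vmovdqu %xmm0, buf(%rip)	# AVX VEX encoding: SIGILL without AVX
	xorl	%eax, %eax		# return 0
	ret
	.comm	buf, 16, 16		# 16 bytes, 16-byte aligned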

Tested on x86-64 machines with and without AVX.

	* gcc.target/x86_64/abi/bf16/asm-support.S: Replace vmovdqu with
	movdqu.
---
 .../gcc.target/x86_64/abi/bf16/asm-support.S  | 36 +++++++++----------
 1 file changed, 18 insertions(+), 18 deletions(-)

Patch

diff --git a/gcc/testsuite/gcc.target/x86_64/abi/bf16/asm-support.S b/gcc/testsuite/gcc.target/x86_64/abi/bf16/asm-support.S
index a8165d86317..7559aa910c4 100644
--- a/gcc/testsuite/gcc.target/x86_64/abi/bf16/asm-support.S
+++ b/gcc/testsuite/gcc.target/x86_64/abi/bf16/asm-support.S
@@ -20,22 +20,22 @@ snapshot:
 	movq	%r13, r13(%rip)
 	movq	%r14, r14(%rip)
 	movq	%r15, r15(%rip)
-	vmovdqu	%xmm0, xmm_regs+0(%rip)
-	vmovdqu	%xmm1, xmm_regs+16(%rip)
-	vmovdqu	%xmm2, xmm_regs+32(%rip)
-	vmovdqu	%xmm3, xmm_regs+48(%rip)
-	vmovdqu	%xmm4, xmm_regs+64(%rip)
-	vmovdqu	%xmm5, xmm_regs+80(%rip)
-	vmovdqu	%xmm6, xmm_regs+96(%rip)
-	vmovdqu	%xmm7, xmm_regs+112(%rip)
-	vmovdqu	%xmm8, xmm_regs+128(%rip)
-	vmovdqu	%xmm9, xmm_regs+144(%rip)
-	vmovdqu	%xmm10, xmm_regs+160(%rip)
-	vmovdqu	%xmm11, xmm_regs+176(%rip)
-	vmovdqu	%xmm12, xmm_regs+192(%rip)
-	vmovdqu	%xmm13, xmm_regs+208(%rip)
-	vmovdqu	%xmm14, xmm_regs+224(%rip)
-	vmovdqu	%xmm15, xmm_regs+240(%rip)
+	movdqu	%xmm0, xmm_regs+0(%rip)
+	movdqu	%xmm1, xmm_regs+16(%rip)
+	movdqu	%xmm2, xmm_regs+32(%rip)
+	movdqu	%xmm3, xmm_regs+48(%rip)
+	movdqu	%xmm4, xmm_regs+64(%rip)
+	movdqu	%xmm5, xmm_regs+80(%rip)
+	movdqu	%xmm6, xmm_regs+96(%rip)
+	movdqu	%xmm7, xmm_regs+112(%rip)
+	movdqu	%xmm8, xmm_regs+128(%rip)
+	movdqu	%xmm9, xmm_regs+144(%rip)
+	movdqu	%xmm10, xmm_regs+160(%rip)
+	movdqu	%xmm11, xmm_regs+176(%rip)
+	movdqu	%xmm12, xmm_regs+192(%rip)
+	movdqu	%xmm13, xmm_regs+208(%rip)
+	movdqu	%xmm14, xmm_regs+224(%rip)
+	movdqu	%xmm15, xmm_regs+240(%rip)
 	jmp	*callthis(%rip)
 .LFE3:
 	.size	snapshot, .-snapshot
@@ -50,8 +50,8 @@ snapshot_ret:
 	addq	$8, %rsp
 	movq	%rax, rax(%rip)
 	movq	%rdx, rdx(%rip)
-	vmovdqu	%xmm0, xmm_regs+0(%rip)
-	vmovdqu	%xmm1, xmm_regs+16(%rip)
+	movdqu	%xmm0, xmm_regs+0(%rip)
+	movdqu	%xmm1, xmm_regs+16(%rip)
 	fstpt	x87_regs(%rip)
 	fstpt	x87_regs+16(%rip)
 	fldt	x87_regs+16(%rip)