; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=sse4.1 | FileCheck %s --check-prefix=SSE
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx    | FileCheck %s --check-prefix=AVX

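; Regression test for PR25554. The autogenerated CHECK lines below pin down
; the current lowering: the constant <i64 1, i64 0> is materialized in a GPR
; (movl $1, %eax) and moved into an XMM register with a zero-extending
; movd (SSE) / vmovq (AVX), OR'd into %v0, and that same register is then
; shifted into the upper 64-bit lane with pslldq to serve as the
; <i64 0, i64 1> addend of the final paddq/vpaddq.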
define <2 x i64> @PR25554(<2 x i64> %v0, <2 x i64> %v1) {
; SSE-LABEL: PR25554:
; SSE:       # BB#0:
; SSE-NEXT:    movl $1, %eax
; SSE-NEXT:    movd %rax, %xmm1
; SSE-NEXT:    por %xmm1, %xmm0
; SSE-NEXT:    pslldq {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,xmm1[0,1,2,3,4,5,6,7]
; SSE-NEXT:    paddq %xmm1, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: PR25554:
; AVX:       # BB#0:
; AVX-NEXT:    movl $1, %eax
; AVX-NEXT:    vmovq %rax, %xmm1
; AVX-NEXT:    vpor %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vpslldq {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,xmm1[0,1,2,3,4,5,6,7]
; AVX-NEXT:    vpaddq %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq

  %c1 = or <2 x i64> %v0, <i64 1, i64 0>
  %c2 = add <2 x i64> %c1, <i64 0, i64 1>
  ret <2 x i64> %c2
}
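
; If the codegen changes, a typical way to refresh the assertions is to rerun
; the script named in the NOTE above, e.g.:
;   utils/update_llc_test_checks.py --llc-binary=path/to/llc vmovq.ll
; (if --llc-binary is omitted, the script falls back to plain "llc" on PATH).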