// xref: /aosp_15_r20/external/clang/test/CodeGenCXX/align-avx-complete-objects.cpp (revision 67e74705e28f6214e480b399dd47ea732279e315)
// RUN: %clang_cc1 -x c++ %s -O0 -triple=x86_64-apple-darwin -target-feature +avx2 -fmax-type-align=16 -emit-llvm -o - -Werror | FileCheck %s
// rdar://16254558

// 32-byte (8 x float) vector type with no explicit alignment attribute, so
// pointer-based accesses to it are subject to the -fmax-type-align=16 cap
// from the RUN line (the CHECK lines below expect "align 16" through *p).
typedef float AVX2Float __attribute__((__vector_size__(32)));


TestAlign(void)7*67e74705SXin Li volatile float TestAlign(void)
8*67e74705SXin Li {
9*67e74705SXin Li        volatile AVX2Float *p = new AVX2Float;
10*67e74705SXin Li         *p = *p;
11*67e74705SXin Li         AVX2Float r = *p;
12*67e74705SXin Li         return r[0];
13*67e74705SXin Li }

// CHECK: [[R:%.*]] = alloca <8 x float>, align 32
// CHECK-NEXT:  [[CALL:%.*]] = call i8* @_Znwm(i64 32)
// CHECK-NEXT:  [[ZERO:%.*]] = bitcast i8* [[CALL]] to <8 x float>*
// CHECK-NEXT:  store <8 x float>* [[ZERO]], <8 x float>** [[P:%.*]], align 8
// CHECK-NEXT:  [[ONE:%.*]] = load <8 x float>*, <8 x float>** [[P]], align 8
// CHECK-NEXT:  [[TWO:%.*]] = load volatile <8 x float>, <8 x float>* [[ONE]], align 16
// CHECK-NEXT:  [[THREE:%.*]] = load <8 x float>*, <8 x float>** [[P]], align 8
// CHECK-NEXT:  store volatile <8 x float> [[TWO]], <8 x float>* [[THREE]], align 16
// CHECK-NEXT:  [[FOUR:%.*]] = load <8 x float>*, <8 x float>** [[P]], align 8
// CHECK-NEXT:  [[FIVE:%.*]] = load volatile <8 x float>, <8 x float>* [[FOUR]], align 16
// CHECK-NEXT:  store <8 x float> [[FIVE]], <8 x float>* [[R]], align 32
// CHECK-NEXT:  [[SIX:%.*]] = load <8 x float>, <8 x float>* [[R]], align 32
// CHECK-NEXT:  [[VECEXT:%.*]] = extractelement <8 x float> [[SIX]], i32 0
// CHECK-NEXT:  ret float [[VECEXT]]

30*67e74705SXin Li typedef float AVX2Float_Explicitly_aligned __attribute__((__vector_size__(32))) __attribute__((aligned (32)));
31*67e74705SXin Li 
32*67e74705SXin Li typedef AVX2Float_Explicitly_aligned AVX2Float_indirect;
33*67e74705SXin Li 
34*67e74705SXin Li typedef AVX2Float_indirect AVX2Float_use_existing_align;
TestAlign2(void)36*67e74705SXin Li volatile float TestAlign2(void)
37*67e74705SXin Li {
38*67e74705SXin Li        volatile AVX2Float_use_existing_align *p = new AVX2Float_use_existing_align;
39*67e74705SXin Li         *p = *p;
40*67e74705SXin Li         AVX2Float_use_existing_align r = *p;
41*67e74705SXin Li         return r[0];
42*67e74705SXin Li }

// CHECK: [[R:%.*]] = alloca <8 x float>, align 32
// CHECK-NEXT:  [[CALL:%.*]] = call i8* @_Znwm(i64 32)
// CHECK-NEXT:  [[ZERO:%.*]] = bitcast i8* [[CALL]] to <8 x float>*
// CHECK-NEXT:  store <8 x float>* [[ZERO]], <8 x float>** [[P:%.*]], align 8
// CHECK-NEXT:  [[ONE:%.*]] = load <8 x float>*, <8 x float>** [[P]], align 8
// CHECK-NEXT:  [[TWO:%.*]] = load volatile <8 x float>, <8 x float>* [[ONE]], align 32
// CHECK-NEXT:  [[THREE:%.*]] = load <8 x float>*, <8 x float>** [[P]], align 8
// CHECK-NEXT:  store volatile <8 x float> [[TWO]], <8 x float>* [[THREE]], align 32
// CHECK-NEXT:  [[FOUR:%.*]] = load <8 x float>*, <8 x float>** [[P]], align 8
// CHECK-NEXT:  [[FIVE:%.*]] = load volatile <8 x float>, <8 x float>* [[FOUR]], align 32
// CHECK-NEXT:  store <8 x float> [[FIVE]], <8 x float>* [[R]], align 32
// CHECK-NEXT:  [[SIX:%.*]] = load <8 x float>, <8 x float>* [[R]], align 32
// CHECK-NEXT:  [[VECEXT:%.*]] = extractelement <8 x float> [[SIX]], i32 0
// CHECK-NEXT:  ret float [[VECEXT]]