22//@ no-prefer-dynamic
33//@ needs-offload
44
5- // This test is verifying that we generate __tgt_target_data_*_mapper before and after a call to the
6- // kernel_1. Better documentation to what each global or variable means is available in the gpu
7- // offload code, or the LLVM offload documentation.
5+ // This test is verifying that we generate __tgt_target_data_*_mapper before and after a call to
6+ // __tgt_target_kernel, and initialize all needed variables. It also verifies some related globals.
7+ // Better documentation to what each global or variable means is available in the gpu offload code,
8+ // or the LLVM offload documentation.
89
910#![ feature( rustc_attrs) ]
1011#![ feature( core_intrinsics) ]
@@ -17,10 +18,8 @@ fn main() {
1718 core:: hint:: black_box ( & x) ;
1819}
1920
20- #[ unsafe( no_mangle) ]
21- #[ inline( never) ]
2221pub fn kernel_1 ( x : & mut [ f32 ; 256 ] ) {
23- core:: intrinsics:: offload ( _kernel_1 , [ 256 , 1 , 1 ] , [ 32 , 1 , 1 ] , ( x, ) )
22+ core:: intrinsics:: offload ( kernel_1 , [ 256 , 1 , 1 ] , [ 32 , 1 , 1 ] , ( x, ) )
2423}
2524
2625#[ unsafe( no_mangle) ]
@@ -33,74 +32,74 @@ pub fn _kernel_1(x: &mut [f32; 256]) {
3332
3433// CHECK: %struct.ident_t = type { i32, i32, i32, i32, ptr }
3534// CHECK: %struct.__tgt_offload_entry = type { i64, i16, i16, i32, ptr, ptr, i64, i64, ptr }
36- // CHECK: %struct.__tgt_bin_desc = type { i32, ptr, ptr, ptr }
3735// CHECK: %struct.__tgt_kernel_arguments = type { i32, i32, ptr, ptr, ptr, ptr, ptr, ptr, i64, i64, [3 x i32], [3 x i32], i32 }
3836
39- // CHECK: @anon.{{.*}} .0 = private unnamed_addr constant [23 x i8] c";unknown;unknown;0;0;;\00", align 1
40- // CHECK: @anon.{{.*}}.1 = private unnamed_addr constant %struct.ident_t { i32 0, i32 2, i32 0, i32 22, ptr @anon.{{.*}} .0 }, align 8
37+ // CHECK: @anon.[[ID:.*]].0 = private unnamed_addr constant [23 x i8] c";unknown;unknown;0;0;;\00", align 1
38+ // CHECK: @anon.{{.*}}.1 = private unnamed_addr constant %struct.ident_t { i32 0, i32 2, i32 0, i32 22, ptr @anon.[[ID]].0 }, align 8
4139
42- // CHECK: @.offload_sizes._kernel_1 = private unnamed_addr constant [1 x i64] [i64 1024]
43- // CHECK: @.offload_maptypes._kernel_1 = private unnamed_addr constant [1 x i64] [i64 35]
44- // CHECK: @._kernel_1.region_id = internal constant i8 0
45- // CHECK: @.offloading.entry_name._kernel_1 = internal unnamed_addr constant [10 x i8] c"_kernel_1\00", section ".llvm.rodata.offloading", align 1
46- // CHECK: @.offloading.entry._kernel_1 = internal constant %struct.__tgt_offload_entry { i64 0, i16 1, i16 1, i32 0, ptr @._kernel_1.region_id, ptr @.offloading.entry_name._kernel_1, i64 0, i64 0, ptr null }, section "llvm_offload_entries", align 8
40+ // CHECK-DAG: @.omp_offloading.descriptor = internal constant { i32, ptr, ptr, ptr } zeroinitializer
41+ // CHECK-DAG: @llvm.global_ctors = appending constant [1 x { i32, ptr, ptr }] [{ i32, ptr, ptr } { i32 101, ptr @.omp_offloading.descriptor_reg, ptr null }]
42+ // CHECK-DAG: @.offload_sizes.[[K:[^ ]*kernel_1]] = private unnamed_addr constant [1 x i64] [i64 1024]
43+ // CHECK-DAG: @.offload_maptypes.[[K]] = private unnamed_addr constant [1 x i64] [i64 35]
44+ // CHECK-DAG: @.[[K]].region_id = internal constant i8 0
45+ // CHECK-DAG: @.offloading.entry_name.[[K]] = internal unnamed_addr constant [{{[0-9]+}} x i8] c"[[K]]{{\\00}}", section ".llvm.rodata.offloading", align 1
46+ // CHECK-DAG: @.offloading.entry.[[K]] = internal constant %struct.__tgt_offload_entry { i64 0, i16 1, i16 1, i32 0, ptr @.[[K]].region_id, ptr @.offloading.entry_name.[[K]], i64 0, i64 0, ptr null }, section "llvm_offload_entries", align 8
4747
4848// CHECK: declare i32 @__tgt_target_kernel(ptr, i64, i32, i32, ptr, ptr)
49- // CHECK: declare void @__tgt_register_lib(ptr) local_unnamed_addr
50- // CHECK: declare void @__tgt_unregister_lib(ptr) local_unnamed_addr
51-
52- // CHECK: define{{( dso_local)?}} void @main()
53- // CHECK-NEXT: start:
54- // CHECK-NEXT: %0 = alloca [8 x i8], align 8
55- // CHECK-NEXT: %x = alloca [1024 x i8], align 16
56- // CHECK: call void @kernel_1(ptr noalias noundef nonnull align 4 dereferenceable(1024) %x)
57- // CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr nonnull %0)
58- // CHECK-NEXT: store ptr %x, ptr %0, align 8
59- // CHECK-NEXT: call void asm sideeffect "", "r,~{memory}"(ptr nonnull %0)
60- // CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr nonnull %0)
61- // CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 1024, ptr nonnull %x)
62- // CHECK-NEXT: ret void
63- // CHECK-NEXT: }
6449
65- // CHECK: define{{( dso_local)?}} void @kernel_1(ptr noalias noundef align 4 dereferenceable(1024) %x )
50+ // CHECK-LABEL: define{{( dso_local)?}} void @main( )
6651// CHECK-NEXT: start:
67- // CHECK-NEXT: %EmptyDesc = alloca %struct.__tgt_bin_desc, align 8
52+ // CHECK-NEXT: %0 = alloca [8 x i8], align 8
53+ // CHECK-NEXT: %x = alloca [1024 x i8], align 16
6854// CHECK-NEXT: %.offload_baseptrs = alloca [1 x ptr], align 8
6955// CHECK-NEXT: %.offload_ptrs = alloca [1 x ptr], align 8
7056// CHECK-NEXT: %.offload_sizes = alloca [1 x i64], align 8
7157// CHECK-NEXT: %kernel_args = alloca %struct.__tgt_kernel_arguments, align 8
72- // CHECK-NEXT: %dummy = load volatile ptr, ptr @.offload_sizes._kernel_1, align 8
73- // CHECK-NEXT: %dummy1 = load volatile ptr, ptr @.offloading.entry._kernel_1, align 8
74- // CHECK-NEXT: call void @llvm.memset.p0.i64(ptr noundef nonnull align 8 dereferenceable(32) %EmptyDesc, i8 0, i64 32, i1 false)
75- // CHECK-NEXT: call void @__tgt_register_lib(ptr nonnull %EmptyDesc)
58+ // CHECK: %dummy = load volatile ptr, ptr @.offload_sizes.[[K]], align 8
59+ // CHECK-NEXT: %dummy1 = load volatile ptr, ptr @.offloading.entry.[[K]], align 8
7660// CHECK-NEXT: call void @__tgt_init_all_rtls()
7761// CHECK-NEXT: store ptr %x, ptr %.offload_baseptrs, align 8
7862// CHECK-NEXT: store ptr %x, ptr %.offload_ptrs, align 8
7963// CHECK-NEXT: store i64 1024, ptr %.offload_sizes, align 8
80- // CHECK-NEXT: call void @__tgt_target_data_begin_mapper(ptr nonnull @anon.{{.*}}.1, i64 -1, i32 1, ptr nonnull %.offload_baseptrs, ptr nonnull %.offload_ptrs, ptr nonnull %.offload_sizes, ptr nonnull @.offload_maptypes._kernel_1 , ptr null, ptr null)
64+ // CHECK-NEXT: call void @__tgt_target_data_begin_mapper(ptr nonnull @anon.{{.*}}.1, i64 -1, i32 1, ptr nonnull %.offload_baseptrs, ptr nonnull %.offload_ptrs, ptr nonnull %.offload_sizes, ptr nonnull @.offload_maptypes.[[K]], ptr null, ptr null)
8165// CHECK-NEXT: store i32 3, ptr %kernel_args, align 8
82- // CHECK-NEXT: %0 = getelementptr inbounds nuw i8, ptr %kernel_args, i64 4
83- // CHECK-NEXT: store i32 1, ptr %0, align 4
84- // CHECK-NEXT: %1 = getelementptr inbounds nuw i8, ptr %kernel_args, i64 8
85- // CHECK-NEXT: store ptr %.offload_baseptrs, ptr %1, align 8
86- // CHECK-NEXT: %2 = getelementptr inbounds nuw i8, ptr %kernel_args, i64 16
87- // CHECK-NEXT: store ptr %.offload_ptrs, ptr %2, align 8
88- // CHECK-NEXT: %3 = getelementptr inbounds nuw i8, ptr %kernel_args, i64 24
89- // CHECK-NEXT: store ptr %.offload_sizes, ptr %3, align 8
90- // CHECK-NEXT: %4 = getelementptr inbounds nuw i8, ptr %kernel_args, i64 32
91- // CHECK-NEXT: store ptr @.offload_maptypes._kernel_1, ptr %4, align 8
92- // CHECK-NEXT: %5 = getelementptr inbounds nuw i8, ptr %kernel_args, i64 40
93- // CHECK-NEXT: %6 = getelementptr inbounds nuw i8, ptr %kernel_args, i64 72
94- // CHECK-NEXT: call void @llvm.memset.p0.i64(ptr noundef nonnull align 8 dereferenceable(32) %5, i8 0, i64 32, i1 false)
95- // CHECK-NEXT: store <4 x i32> <i32 256, i32 1, i32 1, i32 32>, ptr %6, align 8
96- // CHECK-NEXT: %.fca.1.gep5 = getelementptr inbounds nuw i8, ptr %kernel_args, i64 88
97- // CHECK-NEXT: store i32 1, ptr %.fca.1.gep5, align 8
98- // CHECK-NEXT: %.fca.2.gep7 = getelementptr inbounds nuw i8, ptr %kernel_args, i64 92
99- // CHECK-NEXT: store i32 1, ptr %.fca.2.gep7, align 4
100- // CHECK-NEXT: %7 = getelementptr inbounds nuw i8, ptr %kernel_args, i64 96
101- // CHECK-NEXT: store i32 0, ptr %7, align 8
102- // CHECK-NEXT: %8 = call i32 @__tgt_target_kernel(ptr nonnull @anon.{{.*}}.1, i64 -1, i32 256, i32 32, ptr nonnull @._kernel_1.region_id, ptr nonnull %kernel_args)
103- // CHECK-NEXT: call void @__tgt_target_data_end_mapper(ptr nonnull @anon.{{.*}}.1, i64 -1, i32 1, ptr nonnull %.offload_baseptrs, ptr nonnull %.offload_ptrs, ptr nonnull %.offload_sizes, ptr nonnull @.offload_maptypes._kernel_1, ptr null, ptr null)
104- // CHECK-NEXT: call void @__tgt_unregister_lib(ptr nonnull %EmptyDesc)
66+ // CHECK-NEXT: [[P4:%[^ ]+]] = getelementptr inbounds nuw i8, ptr %kernel_args, i64 4
67+ // CHECK-NEXT: store i32 1, ptr [[P4]], align 4
68+ // CHECK-NEXT: [[P8:%[^ ]+]] = getelementptr inbounds nuw i8, ptr %kernel_args, i64 8
69+ // CHECK-NEXT: store ptr %.offload_baseptrs, ptr [[P8]], align 8
70+ // CHECK-NEXT: [[P16:%[^ ]+]] = getelementptr inbounds nuw i8, ptr %kernel_args, i64 16
71+ // CHECK-NEXT: store ptr %.offload_ptrs, ptr [[P16]], align 8
72+ // CHECK-NEXT: [[P24:%[^ ]+]] = getelementptr inbounds nuw i8, ptr %kernel_args, i64 24
73+ // CHECK-NEXT: store ptr %.offload_sizes, ptr [[P24]], align 8
74+ // CHECK-NEXT: [[P32:%[^ ]+]] = getelementptr inbounds nuw i8, ptr %kernel_args, i64 32
75+ // CHECK-NEXT: store ptr @.offload_maptypes.[[K]], ptr [[P32]], align 8
76+ // CHECK-NEXT: [[P40:%[^ ]+]] = getelementptr inbounds nuw i8, ptr %kernel_args, i64 40
77+ // CHECK-NEXT: [[P72:%[^ ]+]] = getelementptr inbounds nuw i8, ptr %kernel_args, i64 72
78+ // CHECK-NEXT: call void @llvm.memset.p0.i64(ptr noundef nonnull align 8 dereferenceable(32) [[P40]], i8 0, i64 32, i1 false)
79+ // CHECK-NEXT: store <4 x i32> <i32 256, i32 1, i32 1, i32 32>, ptr [[P72]], align 8
80+ // CHECK-NEXT: [[P88:%[^ ]+]] = getelementptr inbounds nuw i8, ptr %kernel_args, i64 88
81+ // CHECK-NEXT: store i32 1, ptr [[P88]], align 8
82+ // CHECK-NEXT: [[P92:%[^ ]+]] = getelementptr inbounds nuw i8, ptr %kernel_args, i64 92
83+ // CHECK-NEXT: store i32 1, ptr [[P92]], align 4
84+ // CHECK-NEXT: [[P96:%[^ ]+]] = getelementptr inbounds nuw i8, ptr %kernel_args, i64 96
85+ // CHECK-NEXT: store i32 0, ptr [[P96]], align 8
86+ // CHECK-NEXT: {{%[^ ]+}} = call i32 @__tgt_target_kernel(ptr nonnull @anon.{{.*}}.1, i64 -1, i32 256, i32 32, ptr nonnull @.[[K]].region_id, ptr nonnull %kernel_args)
87+ // CHECK-NEXT: call void @__tgt_target_data_end_mapper(ptr nonnull @anon.{{.*}}.1, i64 -1, i32 1, ptr nonnull %.offload_baseptrs, ptr nonnull %.offload_ptrs, ptr nonnull %.offload_sizes, ptr nonnull @.offload_maptypes.[[K]], ptr null, ptr null)
88+ // CHECK: ret void
89+ // CHECK-NEXT: }
90+
91+ // CHECK: declare void @__tgt_register_lib(ptr) local_unnamed_addr
92+ // CHECK: declare void @__tgt_unregister_lib(ptr) local_unnamed_addr
93+
94+ // CHECK-LABEL: define internal void @.omp_offloading.descriptor_reg() section ".text.startup" {
95+ // CHECK-NEXT: entry:
96+ // CHECK-NEXT: call void @__tgt_register_lib(ptr nonnull @.omp_offloading.descriptor)
97+ // CHECK-NEXT: %0 = {{tail }}call i32 @atexit(ptr nonnull @.omp_offloading.descriptor_unreg)
98+ // CHECK-NEXT: ret void
99+ // CHECK-NEXT: }
100+
101+ // CHECK-LABEL: define internal void @.omp_offloading.descriptor_unreg() section ".text.startup" {
102+ // CHECK-NEXT: entry:
103+ // CHECK-NEXT: call void @__tgt_unregister_lib(ptr nonnull @.omp_offloading.descriptor)
105104// CHECK-NEXT: ret void
106105// CHECK-NEXT: }
0 commit comments