
Julia 1.11 private tests are failing #433

Open · vchuravy opened this issue Nov 6, 2023 · 0 comments

vchuravy (Member) commented Nov 6, 2023

Test Failed at /home/runner/work/KernelAbstractions.jl/KernelAbstractions.jl/test/private.jl:98
  Expression: !(occursin("gcframe", IR))
   Evaluated: !(occursin("gcframe", "; Function Signature: cpu_reduce_private(KernelAbstractions.CompilerMetadata{KernelAbstractions.NDIteration.DynamicSize, KernelAbstractions.NDIteration.NoDynamicCheck, Base.IteratorsMD.CartesianIndex{1}, Base.IteratorsMD.CartesianIndices{1, Tuple{Base.OneTo{Int64}}}, KernelAbstractions.NDIteration.NDRange{1, KernelAbstractions.NDIteration.DynamicSize, KernelAbstractions.NDIteration.StaticSize{(8,)}, Base.IteratorsMD.CartesianIndices{1, Tuple{Base.OneTo{Int64}}}, Nothing}}, Array{Float64, 1}, Array{Float64, 2})\n;  @ none within `cpu_reduce_private`\ndefine void @julia_cpu_reduce_private_15381({ [1 x [1 x i64]], [1 x [1 x [1 x i64]]], { [1 x [1 x [1 x i64]]] } }* nocapture noundef nonnull readonly align 8 dereferenceable(24) %\"__ctx__::CompilerMetadata\", {}* noundef nonnull align 8 dereferenceable(24) %\"out::Array\", {}* noundef nonnull align 8 dereferenceable(32) %\"A::Array\") #0 {\ntop:\n  %gcframe663 = alloca [3 x {}*], align 16\n  %gcframe663.sub = getelementptr inbounds [3 x {}*], [3 x {}*]* %gcframe663, i64 0, i64 0\n  %0 = bitcast [3 x {}*]* %gcframe663 to i8*\n  call void @llvm.memset.p0i8.i64(i8* align 16 %0, i8 0, i64 24, i1 true)\n  %\"new::Tuple116\" = alloca [2 x i64], align 8\n  %\"new::Tuple241\" = alloca [1 x i64], align 8\n  %\"new::Tuple253\" = alloca [1 x i64], align 8\n  %thread_ptr = call i8* asm \"movq %fs:0, \$0\", \"=r\"() #16\n  %tls_ppgcstack = getelementptr i8, i8* %thread_ptr, i64 -8\n  %1 = bitcast i8* %tls_ppgcstack to {}****\n  %tls_pgcstack = load {}***, {}**** %1, align 8\n  %frame.nroots661 = bitcast [3 x {}*]* %gcframe663 to i64*\n  store i64 4, i64* %frame.nroots661, align 16\n  %frame.prev = getelementptr inbounds [3 x {}*], [3 x {}*]* %gcframe663, i64 0, i64 1\n  %2 = bitcast {}** %frame.prev to {}***\n  %task.gcstack = load {}**, {}*** %tls_pgcstack, align 8\n  store {}** %task.gcstack, {}*** %2, align 8\n  %3 = bitcast {}*** %tls_pgcstack to {}***\n  store {}** %gcframe663.sub, {}*** %3, align 8\n; ┌ @ /home/runner/work/KernelAbstractions.jl/KernelAbstractions.jl/src/cpu.jl:178 within `Scratchpad`\n   %lcnt = load volatile i64, i64* inttoptr (i64 129620768 to i64*), align 32\n   %4 = add i64 %lcnt, 1\n   store volatile i64 %4, i64* inttoptr (i64 129620768 to i64*), align 32\n; │ @ /home/runner/work/KernelAbstractions.jl/KernelAbstractions.jl/src/cpu.jl:179 within `Scratchpad`\n   %lcnt1 = load volatile i64, i64* inttoptr (i64 129620776 to i64*), align 8\n   %5 = add i64 %lcnt1, 1\n   store volatile i64 %5, i64* inttoptr (i64 129620776 to i64*), align 8\n; │┌ @ /home/runner/work/KernelAbstractions.jl/KernelAbstractions.jl/src/compiler.jl:25 within `__groupsize`\n    %lcnt2 = load volatile i64, i64* inttoptr (i64 57810808 to i64*), align 8\n    %6 = add i64 %lcnt2, 1\n    store volatile i64 %6, i64* inttoptr (i64 57810808 to i64*), align 8\n; ││┌ @ /home/runner/work/KernelAbstractions.jl/KernelAbstractions.jl/src/compiler.jl:23 within `__iterspace`\n     %lcnt3 = load volatile i64, i64* inttoptr (i64 57810792 to i64*), align 8\n     %7 = add i64 %lcnt3, 1\n     store volatile i64 %7, i64* inttoptr (i64 57810792 to i64*), align 8\n; ││└\n; ││┌ @ /home/runner/work/KernelAbstractions.jl/KernelAbstractions.jl/src/nditeration.jl:63 within `workitems`\n     %lcnt4 = load volatile i64, i64* inttoptr (i64 81001848 to i64*), align 8\n     %8 = add i64 %lcnt4, 1\n     store volatile i64 %8, i64* inttoptr (i64 81001848 to i64*), align 8\n; │└└\n; │┌ @ 
/home/runner/work/KernelAbstractions.jl/KernelAbstractions.jl/src/KernelAbstractions.jl:698 within `__size`\n    %lcnt5 = load volatile i64, i64* inttoptr (i64 41261072 to i64*), align 16\n    %9 = add i64 %lcnt5, 1\n    store volatile i64 %9, i64* inttoptr (i64 41261072 to i64*), align 16\n; │└\n; │┌ @ /home/runner/work/KernelAbstractions.jl/KernelAbstractions.jl/src/cpu.jl:175 within `ScratchArray`\n    %lcnt7 = load volatile i64, i64* inttoptr (i64 129620744 to i64*), align 8\n    %10 = add i64 %lcnt7, 1\n    store volatile i64 %10, i64* inttoptr (i64 129620744 to i64*), align 8\n; └└\n; ┌ @ /home/runner/work/KernelAbstractions.jl/KernelAbstractions.jl/src/compiler.jl:29 within `__workitems_iterspace`\n   %lcnt8 = load volatile i64, i64* inttoptr (i64 57810840 to i64*), align 8\n   %11 = add i64 %lcnt8, 1\n   store volatile i64 %11, i64* inttoptr (i64 57810840 to i64*), align 8\n; │┌ @ /home/runner/work/KernelAbstractions.jl/KernelAbstractions.jl/src/compiler.jl:23 within `__iterspace`\n    %lcnt9 = load volatile i64, i64* inttoptr (i64 57810792 to i64*), align 8\n    %12 = add i64 %lcnt9, 1\n    store volatile i64 %12, i64* inttoptr (i64 57810792 to i64*), align 8\n; │└\n; │┌ @ /home/runner/work/KernelAbstractions.jl/KernelAbstractions.jl/src/nditeration.jl:63 within `workitems`\n    %lcnt10 = load volatile i64, i64* inttoptr (i64 81001848 to i64*), align 8\n    %13 = add i64 %lcnt10, 1\n    store volatile i64 %13, i64* inttoptr (i64 81001848 to i64*), align 8\n; └└\n;  @ none within `cpu_reduce_private` @ /home/runner/work/KernelAbstractions.jl/KernelAbstractions.jl/src/macros.jl:284\n  %lcnt11 = load volatile i64, i64* inttoptr (i64 31813520 to i64*), align 16\n  %14 = add i64 %lcnt11, 1\n  store volatile i64 %14, i64* inttoptr (i64 31813520 to i64*), align 16\n; ┌ @ /home/runner/work/KernelAbstractions.jl/KernelAbstractions.jl/src/compiler.jl:29 within `__workitems_iterspace`\n   %lcnt12 = load volatile i64, i64* inttoptr (i64 57810840 to i64*), align 8\n   %15 = add i64 %lcnt12, 1\n   store volatile i64 %15, i64* inttoptr (i64 57810840 to i64*), align 8\n; │┌ @ /home/runner/work/KernelAbstractions.jl/KernelAbstractions.jl/src/compiler.jl:23 within `__iterspace`\n    %lcnt13 = load volatile i64, i64* inttoptr (i64 57810792 to i64*), align 8\n    %16 = add i64 %lcnt13, 1\n    store volatile i64 %16, i64* inttoptr (i64 57810792 to i64*), align 8\n; │└\n; │┌ @ /home/runner/work/KernelAbstractions.jl/KernelAbstractions.jl/src/nditeration.jl:63 within `workitems`\n    %lcnt14 = load volatile i64, i64* inttoptr (i64 81001848 to i64*), align 8\n    %17 = add i64 %lcnt14, 1\n    store volatile i64 %17, i64* inttoptr (i64 81001848 to i64*), align 8\n    %\"__ctx__::CompilerMetadata.groupindex_ptr.I_ptr[1]_ptr\" = getelementptr inbounds { [1 x [1 x i64]], [1 x [1 x [1 x i64]]], { [1 x [1 x [1 x i64]]] } }, { [1 x [1 x i64]], [1 x [1 x [1 x i64]]], { [1 x [1 x [1 x i64]]] } }* %\"__ctx__::CompilerMetadata\", i64 0, i32 0, i64 0, i64 0\n    %\"__ctx__::CompilerMetadata.groupindex_ptr.I_ptr[1]_ptr.unbox\" = load i64, i64* %\"__ctx__::CompilerMetadata.groupindex_ptr.I_ptr[1]_ptr\", align 8\n    %18 = shl i64 %\"__ctx__::CompilerMetadata.groupindex_ptr.I_ptr[1]_ptr.unbox\", 3\n    %19 = bitcast {}* %\"A::Array\" to i8*\n    %20 = getelementptr inbounds i8, i8* %19, i64 16\n    %.size118.sroa.0.0..sroa_idx = bitcast i8* %20 to i64*\n    %.size118.sroa.2.0..sroa_idx279 = getelementptr inbounds i8, i8* %19, i64 24\n    %21 = bitcast i8* %.size118.sroa.2.0..sroa_idx279 to i64*\n    %22 = bitcast {}* 
%\"A::Array\" to { i8*, {}* }*\n    %23 = getelementptr inbounds { i8*, {}* }, { i8*, {}* }* %22, i64 0, i32 1\n    %24 = bitcast {}** %23 to i64**\n    %25 = bitcast {}* %\"A::Array\" to i8**\n    %26 = bitcast {}* %\"out::Array\" to i8*\n    %27 = getelementptr inbounds i8, i8* %26, i64 16\n    %memcpy_refined_src = bitcast i8* %27 to i64*\n    %28 = bitcast {}* %\"out::Array\" to { i8*, {}* }*\n    %29 = bitcast {}* %\"out::Array\" to i8**\n    %30 = getelementptr inbounds { i8*, {}* }, { i8*, {}* }* %28, i64 0, i32 1\n    br label %L129\n\nL129:                                             ; preds = %load224, %top\n    %value_phi = phi i64 [ 1, %top ], [ %166, %load224 ]\n; └└\n;  @ none within `cpu_reduce_private` @ /home/runner/work/KernelAbstractions.jl/KernelAbstractions.jl/src/macros.jl:285\n  %lcnt21 = load volatile i64, i64* inttoptr (i64 31813528 to i64*), align 8\n  %31 = add i64 %lcnt21, 1\n  store volatile i64 %31, i64* inttoptr (i64 31813528 to i64*), align 8\n; ┌ @ /home/runner/work/KernelAbstractions.jl/KernelAbstractions.jl/src/cpu.jl:151 within `__validindex`\n   %lcnt22 = load volatile i64, i64* inttoptr (i64 62257400 to i64*), align 8\n   %32 = add i64 %lcnt22, 1\n   store volatile i64 %32, i64* inttoptr (i64 62257400 to i64*), align 8\n; │ @ /home/runner/work/KernelAbstractions.jl/KernelAbstractions.jl/src/cpu.jl:153 within `__validindex`\n   %lcnt23 = load volatile i64, i64* inttoptr (i64 62257416 to i64*), align 8\n   %33 = add i64 %lcnt23, 1\n   store volatile i64 %33, i64* inttoptr (i64 62257416 to i64*), align 8\n; │┌ @ /home/runner/work/KernelAbstractions.jl/KernelAbstractions.jl/src/compiler.jl:26 within `__dynamic_checkbounds`\n    %lcnt24 = load volatile i64, i64* inttoptr (i64 57810816 to i64*), align 128\n    %34 = add i64 %lcnt24, 1\n    store volatile i64 %34, i64* inttoptr (i64 57810816 to i64*), align 128\n; │└\n; │ @ /home/runner/work/KernelAbstractions.jl/KernelAbstractions.jl/src/cpu.jl:157 within `__validindex`\n   %lcnt25 = load volatile i64, i64* inttoptr (i64 62257448 to i64*), align 8\n   %35 = add i64 %lcnt25, 1\n   store volatile i64 %35, i64* inttoptr (i64 62257448 to i64*), align 8\n; └\n;  @ none within `cpu_reduce_private` @ /home/runner/work/KernelAbstractions.jl/KernelAbstractions.jl/src/macros.jl:286\n  %lcnt26 = load volatile i64, i64* inttoptr (i64 31813536 to i64*), align 32\n  %36 = add i64 %lcnt26, 1\n  store volatile i64 %36, i64* inttoptr (i64 31813536 to i64*), align 32\n; ┌ @ /home/runner/work/KernelAbstractions.jl/KernelAbstractions.jl/src/KernelAbstractions.jl:405 within `__index_Global_NTuple`\n   %lcnt27 = load volatile i64, i64* inttoptr (i64 33976264 to i64*), align 8\n   %37 = add i64 %lcnt27, 1\n   store volatile i64 %37, i64* inttoptr (i64 33976264 to i64*), align 8\n; │┌ @ /home/runner/work/KernelAbstractions.jl/KernelAbstractions.jl/src/cpu.jl:147 within `__index_Global_Cartesian`\n    %lcnt28 = load volatile i64, i64* inttoptr (i64 62257368 to i64*), align 8\n    %38 = add i64 %lcnt28, 1\n    store volatile i64 %38, i64* inttoptr (i64 62257368 to i64*), align 8\n; ││ @ /home/runner/work/KernelAbstractions.jl/KernelAbstractions.jl/src/cpu.jl:148 within `__index_Global_Cartesian`\n    %lcnt29 = load volatile i64, i64* inttoptr (i64 62257376 to i64*), align 32\n    %39 = add i64 %lcnt29, 1\n    store volatile i64 %39, i64* inttoptr (i64 62257376 to i64*), align 32\n; ││┌ @ /home/runner/work/KernelAbstractions.jl/KernelAbstractions.jl/src/compiler.jl:23 within `__iterspace`\n     %lcnt30 = load volatile i64, i64* 
inttoptr (i64 57810792 to i64*), align 8\n     %40 = add i64 %lcnt30, 1\n     store volatile i64 %40, i64* inttoptr (i64 57810792 to i64*), align 8\n; ││└\n; ││┌ @ /home/runner/work/KernelAbstractions.jl/KernelAbstractions.jl/src/compiler.jl:24 within `__groupindex`\n     %lcnt31 = load volatile i64, i64* inttoptr (i64 57810800 to i64*), align 16\n     %41 = add i64 %lcnt31, 1\n     store volatile i64 %41, i64* inttoptr (i64 57810800 to i64*), align 16\n; ││└\n; ││┌ @ /home/runner/work/KernelAbstractions.jl/KernelAbstractions.jl/src/nditeration.jl:73 within `expand`\n     %lcnt32 = load volatile i64, i64* inttoptr (i64 81001944 to i64*), align 8\n     %42 = add i64 %lcnt32, 1\n     store volatile i64 %42, i64* inttoptr (i64 81001944 to i64*), align 8\n; │││ @ /home/runner/work/KernelAbstractions.jl/KernelAbstractions.jl/src/nditeration.jl:74 within `expand`\n     %lcnt33 = load volatile i64, i64* inttoptr (i64 81001952 to i64*), align 32\n     %43 = add i64 %lcnt33, 1\n     store volatile i64 %43, i64* inttoptr (i64 81001952 to i64*), align 32\n; │││┌ @ ntuple.jl:48 within `ntuple`\n; ││││┌ @ /home/runner/work/KernelAbstractions.jl/KernelAbstractions.jl/src/nditeration.jl:75 within `#1`\n       %lcnt34 = load volatile i64, i64* inttoptr (i64 81001960 to i64*), align 8\n       %44 = add i64 %lcnt34, 1\n       store volatile i64 %44, i64* inttoptr (i64 81001960 to i64*), align 8\n; │││││ @ /home/runner/work/KernelAbstractions.jl/KernelAbstractions.jl/src/nditeration.jl:76 within `#1`\n       %lcnt35 = load volatile i64, i64* inttoptr (i64 81001968 to i64*), align 16\n       %45 = add i64 %lcnt35, 1\n       store volatile i64 %45, i64* inttoptr (i64 81001968 to i64*), align 16\n; │││││┌ @ /home/runner/work/KernelAbstractions.jl/KernelAbstractions.jl/src/nditeration.jl:63 within `workitems`\n        %lcnt36 = load volatile i64, i64* inttoptr (i64 81001848 to i64*), align 8\n        %46 = add i64 %lcnt36, 1\n        store volatile i64 %46, i64* inttoptr (i64 81001848 to i64*), align 8\n; │││││└\n; │││││ @ /home/runner/work/KernelAbstractions.jl/KernelAbstractions.jl/src/nditeration.jl:77 within `#1`\n       %lcnt37 = load volatile i64, i64* inttoptr (i64 81001976 to i64*), align 8\n       %47 = add i64 %lcnt37, 1\n       store volatile i64 %47, i64* inttoptr (i64 81001976 to i64*), align 8\n; │││││ @ /home/runner/work/KernelAbstractions.jl/KernelAbstractions.jl/src/nditeration.jl:78 within `#1`\n       %lcnt40 = load volatile i64, i64* inttoptr (i64 81001984 to i64*), align 512\n       %48 = add i64 %lcnt40, 1\n       store volatile i64 %48, i64* inttoptr (i64 81001984 to i64*), align 512\n; │││││┌ @ int.jl:88 within `*`\n        %49 = add nsw i64 %value_phi, -8\n; │││││└\n; │││││┌ @ int.jl:87 within `+`\n        %50 = add i64 %49, %18\n; │││└└└\n; │││ @ /home/runner/work/KernelAbstractions.jl/KernelAbstractions.jl/src/nditeration.jl:80 within `expand`\n     %lcnt41 = load volatile i64, i64* inttoptr (i64 81002000 to i64*), align 16\n     %51 = add i64 %lcnt41, 1\n     store volatile i64 %51, i64* inttoptr (i64 81002000 to i64*), align 16\n; └└└\n; ┌ @ /home/runner/work/KernelAbstractions.jl/KernelAbstractions.jl/src/cpu.jl:124 within `__index_Local_Linear`\n   %lcnt42 = load volatile i64, i64* inttoptr (i64 57810560 to i64*), align 128\n   %52 = add i64 %lcnt42, 1\n   store volatile i64 %52, i64* inttoptr (i64 57810560 to i64*), align 128\n; │ @ /home/runner/work/KernelAbstractions.jl/KernelAbstractions.jl/src/cpu.jl:125 within `__index_Local_Linear`\n   %lcnt43 = load volatile i64, i64* 
inttoptr (i64 57810568 to i64*), align 8\n   %53 = add i64 %lcnt43, 1\n   store volatile i64 %53, i64* inttoptr (i64 57810568 to i64*), align 8\n; │┌ @ /home/runner/work/KernelAbstractions.jl/KernelAbstractions.jl/src/compiler.jl:23 within `__iterspace`\n    %lcnt44 = load volatile i64, i64* inttoptr (i64 57810792 to i64*), align 8\n    %54 = add i64 %lcnt44, 1\n    store volatile i64 %54, i64* inttoptr (i64 57810792 to i64*), align 8\n; │└\n; │┌ @ /home/runner/work/KernelAbstractions.jl/KernelAbstractions.jl/src/nditeration.jl:63 within `workitems`\n    %lcnt45 = load volatile i64, i64* inttoptr (i64 81001848 to i64*), align 8\n    %55 = add i64 %lcnt45, 1\n    store volatile i64 %55, i64* inttoptr (i64 81001848 to i64*), align 8\n; │└\n; │ @ /home/runner/work/KernelAbstractions.jl/KernelAbstractions.jl/src/cpu.jl:126 within `__index_Local_Linear`\n   %lcnt46 = load volatile i64, i64* inttoptr (i64 57810576 to i64*), align 16\n   %56 = add i64 %lcnt46, 1\n   store volatile i64 %56, i64* inttoptr (i64 57810576 to i64*), align 16\n; └\n;  @ none within `cpu_reduce_private` @ /home/runner/work/KernelAbstractions.jl/KernelAbstractions.jl/src/macros.jl:287\n  %lcnt48 = load volatile i64, i64* inttoptr (i64 31813544 to i64*), align 8\n  %57 = add i64 %lcnt48, 1\n  store volatile i64 %57, i64* inttoptr (i64 31813544 to i64*), align 8\n; ┌ @ /home/runner/work/KernelAbstractions.jl/KernelAbstractions.jl/test/private.jl:53 within `macro expansion`\n   %lcnt49 = load volatile i64, i64* inttoptr (i64 61927352 to i64*), align 8\n   %58 = add i64 %lcnt49, 1\n   store volatile i64 %58, i64* inttoptr (i64 61927352 to i64*), align 8\n; │┌ @ /home/runner/work/KernelAbstractions.jl/KernelAbstractions.jl/src/cpu.jl:124 within `__index_Local_Linear`\n    %lcnt50 = load volatile i64, i64* inttoptr (i64 57810560 to i64*), align 128\n    %59 = add i64 %lcnt50, 1\n    store volatile i64 %59, i64* inttoptr (i64 57810560 to i64*), align 128\n; ││ @ /home/runner/work/KernelAbstractions.jl/KernelAbstractions.jl/src/cpu.jl:125 within `__index_Local_Linear`\n    %lcnt51 = load volatile i64, i64* inttoptr (i64 57810568 to i64*), align 8\n    %60 = add i64 %lcnt51, 1\n    store volatile i64 %60, i64* inttoptr (i64 57810568 to i64*), align 8\n; ││┌ @ /home/runner/work/KernelAbstractions.jl/KernelAbstractions.jl/src/compiler.jl:23 within `__iterspace`\n     %lcnt52 = load volatile i64, i64* inttoptr (i64 57810792 to i64*), align 8\n     %61 = add i64 %lcnt52, 1\n     store volatile i64 %61, i64* inttoptr (i64 57810792 to i64*), align 8\n; ││└\n; ││┌ @ /home/runner/work/KernelAbstractions.jl/KernelAbstractions.jl/src/nditeration.jl:63 within `workitems`\n     %lcnt53 = load volatile i64, i64* inttoptr (i64 81001848 to i64*), align 8\n     %62 = add i64 %lcnt53, 1\n     store volatile i64 %62, i64* inttoptr (i64 81001848 to i64*), align 8\n; ││└\n; ││ @ /home/runner/work/KernelAbstractions.jl/KernelAbstractions.jl/src/cpu.jl:126 within `__index_Local_Linear`\n    %lcnt54 = load volatile i64, i64* inttoptr (i64 57810576 to i64*), align 16\n    %63 = add i64 %lcnt54, 1\n    store volatile i64 %63, i64* inttoptr (i64 57810576 to i64*), align 16\n; │└\n; │┌ @ /home/runner/work/KernelAbstractions.jl/KernelAbstractions.jl/src/cpu.jl:189 within `getindex`\n    %lcnt60 = load volatile i64, i64* inttoptr (i64 129620856 to i64*), align 8\n    %64 = add i64 %lcnt60, 1\n    store volatile i64 %64, i64* inttoptr (i64 129620856 to i64*), align 8\n; ││ @ /home/runner/work/KernelAbstractions.jl/KernelAbstractions.jl/src/cpu.jl:190 within 
`getindex`\n    %lcnt61 = load volatile i64, i64* inttoptr (i64 129620864 to i64*), align 128\n    %65 = add i64 %lcnt61, 1\n    store volatile i64 %65, i64* inttoptr (i64 129620864 to i64*), align 128\n; ││┌ @ ntuple.jl:48 within `ntuple`\n; │││┌ @ /home/runner/work/KernelAbstractions.jl/KernelAbstractions.jl/src/cpu.jl:190 within `#22`\n      %lcnt62 = load volatile i64, i64* inttoptr (i64 129620864 to i64*), align 128\n      %66 = add i64 %lcnt62, 1\n      store volatile i64 %66, i64* inttoptr (i64 129620864 to i64*), align 128\n; ││└└\n; ││┌ @ /home/runner/work/KernelAbstractions.jl/KernelAbstractions.jl/src/cpu.jl:184 within `aview`\n     %lcnt63 = load volatile i64, i64* inttoptr (i64 129620816 to i64*), align 16\n     %67 = add i64 %lcnt63, 1\n     store volatile i64 %67, i64* inttoptr (i64 129620816 to i64*), align 16\n; │││ @ /home/runner/work/KernelAbstractions.jl/KernelAbstractions.jl/src/cpu.jl:185 within `aview`\n     %lcnt64 = load volatile i64, i64* inttoptr (i64 129620824 to i64*), align 8\n     %68 = add i64 %lcnt64, 1\n     store volatile i64 %68, i64* inttoptr (i64 129620824 to i64*), align 8\n; │││ @ /home/runner/work/KernelAbstractions.jl/KernelAbstractions.jl/src/cpu.jl:186 within `aview`\n     %lcnt65 = load volatile i64, i64* inttoptr (i64 129620832 to i64*), align 32\n     %69 = add i64 %lcnt65, 1\n     store volatile i64 %69, i64* inttoptr (i64 129620832 to i64*), align 32\n; │└└\n; │ @ /home/runner/work/KernelAbstractions.jl/KernelAbstractions.jl/test/private.jl:54 within `macro expansion`\n   %lcnt75 = load volatile i64, i64* inttoptr (i64 61927360 to i64*), align 64\n   %70 = add i64 %lcnt75, 1\n   store volatile i64 %70, i64* inttoptr (i64 61927360 to i64*), align 64\n; │┌ @ array.jl:191 within `size`\n    %.size.sroa.1.0.copyload = load i64, i64* %21, align 8\n; │└\n; │┌ @ range.jl:5 within `Colon`\n; ││┌ @ range.jl:404 within `UnitRange`\n; │││┌ @ range.jl:415 within `unitrange_last`\n      %71 = call i64 @llvm.smax.i64(i64 %.size.sroa.1.0.copyload, i64 0)\n; │└└└\n; │┌ @ range.jl:898 within `iterate`\n; ││┌ @ range.jl:675 within `isempty`\n; │││┌ @ operators.jl:378 within `>`\n; ││││┌ @ int.jl:83 within `<`\n       %72 = icmp sgt i64 %.size.sroa.1.0.copyload, 0\n; ││└└└\n    br i1 %72, label %L150.split, label %L129.L361_crit_edge\n\nL129.L361_crit_edge:                              ; preds = %L129\n; │└\n; │ @ /home/runner/work/KernelAbstractions.jl/KernelAbstractions.jl/test/private.jl:57 within `macro expansion`\n; │┌ @ array.jl:965 within `setindex!`\n; ││┌ @ int.jl:86 within `-`\n     %.pre548 = add i64 %50, -1\n; │└└\n; │ @ /home/runner/work/KernelAbstractions.jl/KernelAbstractions.jl/test/private.jl:54 within `macro expansion`\n; │┌ @ range.jl:898 within `iterate`\n    br label %L361\n\nL150.split:                                       ; preds = %L129\n    %lcnt263 = load volatile i64, i64* inttoptr (i64 61927360 to i64*), align 64\n    %73 = add i64 %lcnt263, 1\n    store volatile i64 %73, i64* inttoptr (i64 61927360 to i64*), align 64\n    %74 = add i64 %50, -1\n; │└\n   br label %L155\n\nL155:                                             ; preds = %L300, %L150.split\n; │ @ /home/runner/work/KernelAbstractions.jl/KernelAbstractions.jl/test/private.jl:55 within `macro expansion`\n; │┌ @ subarray.jl:321 within `getindex` @ /home/runner/.julia/packages/StaticArrays/cZ1ET/src/MArray.jl:25\n; ││┌ @ pointer.jl:152 within `unsafe_load`\n     %pointerref = phi double [ 0.000000e+00, %L150.split ], [ %115, %L300 ]\n     %value_phi83 = phi i64 [ 1, 
%L150.split ], [ %118, %L300 ]\n; │└└\n   %lcnt85 = load volatile i64, i64* inttoptr (i64 61927368 to i64*), align 8\n   %75 = add i64 %lcnt85, 1\n   store volatile i64 %75, i64* inttoptr (i64 61927368 to i64*), align 8\n; │┌ @ /home/runner/work/KernelAbstractions.jl/KernelAbstractions.jl/src/cpu.jl:124 within `__index_Local_Linear`\n    %lcnt86 = load volatile i64, i64* inttoptr (i64 57810560 to i64*), align 128\n    %76 = add i64 %lcnt86, 1\n    store volatile i64 %76, i64* inttoptr (i64 57810560 to i64*), align 128\n; ││ @ /home/runner/work/KernelAbstractions.jl/KernelAbstractions.jl/src/cpu.jl:125 within `__index_Local_Linear`\n    %lcnt87 = load volatile i64, i64* inttoptr (i64 57810568 to i64*), align 8\n    %77 = add i64 %lcnt87, 1\n    store volatile i64 %77, i64* inttoptr (i64 57810568 to i64*), align 8\n; ││┌ @ /home/runner/work/KernelAbstractions.jl/KernelAbstractions.jl/src/compiler.jl:23 within `__iterspace`\n     %lcnt88 = load volatile i64, i64* inttoptr (i64 57810792 to i64*), align 8\n     %78 = add i64 %lcnt88, 1\n     store volatile i64 %78, i64* inttoptr (i64 57810792 to i64*), align 8\n; ││└\n; ││┌ @ /home/runner/work/KernelAbstractions.jl/KernelAbstractions.jl/src/nditeration.jl:63 within `workitems`\n     %lcnt89 = load volatile i64, i64* inttoptr (i64 81001848 to i64*), align 8\n     %79 = add i64 %lcnt89, 1\n     store volatile i64 %79, i64* inttoptr (i64 81001848 to i64*), align 8\n; ││└\n; ││ @ /home/runner/work/KernelAbstractions.jl/KernelAbstractions.jl/src/cpu.jl:126 within `__index_Local_Linear`\n    %lcnt90 = load volatile i64, i64* inttoptr (i64 57810576 to i64*), align 16\n    %80 = add i64 %lcnt90, 1\n    store volatile i64 %80, i64* inttoptr (i64 57810576 to i64*), align 16\n; │└\n; │┌ @ /home/runner/work/KernelAbstractions.jl/KernelAbstractions.jl/src/cpu.jl:189 within `getindex`\n    %lcnt96 = load volatile i64, i64* inttoptr (i64 129620856 to i64*), align 8\n    %81 = add i64 %lcnt96, 1\n    store volatile i64 %81, i64* inttoptr (i64 129620856 to i64*), align 8\n; ││ @ /home/runner/work/KernelAbstractions.jl/KernelAbstractions.jl/src/cpu.jl:190 within `getindex`\n    %lcnt97 = load volatile i64, i64* inttoptr (i64 129620864 to i64*), align 128\n    %82 = add i64 %lcnt97, 1\n    store volatile i64 %82, i64* inttoptr (i64 129620864 to i64*), align 128\n; ││┌ @ ntuple.jl:48 within `ntuple`\n; │││┌ @ /home/runner/work/KernelAbstractions.jl/KernelAbstractions.jl/src/cpu.jl:190 within `#22`\n      %lcnt98 = load volatile i64, i64* inttoptr (i64 129620864 to i64*), align 128\n      %83 = add i64 %lcnt98, 1\n      store volatile i64 %83, i64* inttoptr (i64 129620864 to i64*), align 128\n; ││└└\n; ││┌ @ /home/runner/work/KernelAbstractions.jl/KernelAbstractions.jl/src/cpu.jl:184 within `aview`\n     %lcnt99 = load volatile i64, i64* inttoptr (i64 129620816 to i64*), align 16\n     %84 = add i64 %lcnt99, 1\n     store volatile i64 %84, i64* inttoptr (i64 129620816 to i64*), align 16\n; │││ @ /home/runner/work/KernelAbstractions.jl/KernelAbstractions.jl/src/cpu.jl:185 within `aview`\n     %lcnt100 = load volatile i64, i64* inttoptr (i64 129620824 to i64*), align 8\n     %85 = add i64 %lcnt100, 1\n     store volatile i64 %85, i64* inttoptr (i64 129620824 to i64*), align 8\n; │││ @ /home/runner/work/KernelAbstractions.jl/KernelAbstractions.jl/src/cpu.jl:186 within `aview`\n     %lcnt101 = load volatile i64, i64* inttoptr (i64 129620832 to i64*), align 32\n     %86 = add i64 %lcnt101, 1\n     store volatile i64 %86, i64* inttoptr (i64 129620832 to i64*), align 
32\n; ││└\n; ││ @ array.jl:909 within `getindex`\n; ││┌ @ abstractarray.jl:699 within `checkbounds` @ abstractarray.jl:678\n; │││┌ @ abstractarray.jl:98 within `axes`\n; ││││┌ @ array.jl:194 within `size`\n       %.size118.sroa.0.0.copyload = load i64, i64* %.size118.sroa.0.0..sroa_idx, align 8\n       %.size118.sroa.2.0.copyload = load i64, i64* %21, align 8\n; │││└└\n; │││┌ @ abstractarray.jl:725 within `checkbounds_indices`\n; ││││┌ @ abstractarray.jl:758 within `checkindex`\n; │││││┌ @ int.jl:513 within `<`\n        %87 = icmp uge i64 %74, %.size118.sroa.0.0.copyload\n; ││││└└\n; ││││ @ abstractarray.jl:725 within `checkbounds_indices` @ abstractarray.jl:725\n; ││││┌ @ abstractarray.jl:758 within `checkindex`\n; │││││┌ @ int.jl:86 within `-`\n        %88 = add nsw i64 %value_phi83, -1\n; │││││└\n; │││││┌ @ int.jl:513 within `<`\n        %89 = icmp uge i64 %88, %.size118.sroa.2.0.copyload\n; │││└└└\n; │││ @ abstractarray.jl:699 within `checkbounds`\n     %90 = or i1 %87, %89\n     br i1 %90, label %L239, label %L242\n\nL239:                                             ; preds = %L155\n     %91 = getelementptr inbounds [2 x i64], [2 x i64]* %\"new::Tuple116\", i64 0, i64 1\n     %92 = getelementptr inbounds [2 x i64], [2 x i64]* %\"new::Tuple116\", i64 0, i64 0\n; │││ @ abstractarray.jl:697 within `checkbounds`\n     store i64 %50, i64* %92, align 8\n     store i64 %value_phi83, i64* %91, align 8\n; │││ @ abstractarray.jl:699 within `checkbounds`\n     call void @j_throw_boundserror_15410({}* nonnull %\"A::Array\", [2 x i64]* nocapture readonly %\"new::Tuple116\") #4\n     unreachable\n\nL242:                                             ; preds = %L155\n; ││└\n; ││ @ array.jl:910 within `getindex`\n; ││┌ @ abstractarray.jl:1349 within `_to_linear_index`\n; │││┌ @ abstractarray.jl:2979 within `_sub2ind` @ abstractarray.jl:2995\n; ││││┌ @ abstractarray.jl:3011 within `_sub2ind_recurse` @ abstractarray.jl:3011\n; │││││┌ @ int.jl:88 within `*`\n        %93 = mul i64 %.size118.sroa.0.0.copyload, %88\n; │││││└\n; │││││┌ @ int.jl:87 within `+`\n        %94 = add i64 %93, %50\n; ││└└└└\n; ││ @ array.jl:910 within `getindex` @ essentials.jl:790\n    %95 = add i64 %94, -1\n; ││┌ @ essentials.jl:12 within `length`\n     %96 = load i64*, i64** %24, align 8\n; ││└\n    %bitcast149 = load i64, i64* %96, align 8\n    %.not = icmp ult i64 %95, %bitcast149\n    br i1 %.not, label %L266, label %L263\n\nL263:                                             ; preds = %L242\n    %97 = getelementptr inbounds [1 x i64], [1 x i64]* %\"new::Tuple253\", i64 0, i64 0\n    store i64 %94, i64* %97, align 8\n    call void @j_throw_boundserror_15409({}* nonnull %\"A::Array\", [1 x i64]* nocapture readonly %\"new::Tuple253\") #4\n    unreachable\n\nL266:                                             ; preds = %L242\n; ││ @ array.jl:910 within `getindex` @ essentials.jl:791\n    %98 = load i8*, i8** %25, align 8\n    %99 = shl nuw nsw i64 %bitcast149, 1\n    %100 = add i64 %bitcast149, %95\n    %.not289 = icmp ult i64 %100, %99\n    %101 = bitcast i8* %98 to double*\n    %102 = getelementptr inbounds double, double* %101, i64 %95\n    %.data_ptr = getelementptr inbounds i64, i64* %96, i64 1\n    %103 = bitcast i64* %.data_ptr to i8**\n    %104 = load i8*, i8** %103, align 8\n    %105 = ptrtoint i8* %104 to i64\n    %106 = ptrtoint double* %102 to i64\n    %107 = sub i64 %106, %105\n    %108 = shl nuw nsw i64 %bitcast149, 3\n    %109 = icmp ult i64 %107, %108\n    %110 = and i1 %.not289, %109\n    br i1 %110, label %L287, 
label %oob\n\nL287:                                             ; preds = %L266\n    %111 = bitcast i8* %98 to {}**\n    %112 = getelementptr inbounds {}*, {}** %111, i64 %95\n    %113 = bitcast {}** %112 to double*\n    %114 = load double, double* %113, align 8\n; │└\n; │┌ @ float.jl:460 within `+`\n    %115 = fadd double %pointerref, %114\n; │└\n; │ @ /home/runner/work/KernelAbstractions.jl/KernelAbstractions.jl/test/private.jl:56 within `macro expansion`\n   %lcnt166 = load volatile i64, i64* inttoptr (i64 61927376 to i64*), align 16\n   %116 = add i64 %lcnt166, 1\n   store volatile i64 %116, i64* inttoptr (i64 61927376 to i64*), align 16\n; │┌ @ range.jl:902 within `iterate`\n; ││┌ @ promotion.jl:620 within `==`\n     %.not293 = icmp eq i64 %value_phi83, %71\n; ││└\n    br i1 %.not293, label %L361, label %L300\n\nL300:                                             ; preds = %L287\n; ││ @ range.jl:903 within `iterate`\n; ││┌ @ int.jl:87 within `+`\n     %lcnt247 = load volatile i64, i64* inttoptr (i64 61927376 to i64*), align 16\n     %117 = add i64 %lcnt247, 1\n     store volatile i64 %117, i64* inttoptr (i64 61927376 to i64*), align 16\n     %118 = add nuw i64 %value_phi83, 1\n     br label %L155\n\nL361:                                             ; preds = %L287, %L129.L361_crit_edge\n; │└└\n; │ @ /home/runner/work/KernelAbstractions.jl/KernelAbstractions.jl/test/private.jl:57 within `macro expansion`\n; │┌ @ array.jl:965 within `setindex!`\n; ││┌ @ int.jl:86 within `-`\n     %.pre-phi = phi i64 [ %.pre548, %L129.L361_crit_edge ], [ %74, %L287 ]\n; │└└\n; │┌ @ subarray.jl:321 within `getindex` @ /home/runner/.julia/packages/StaticArrays/cZ1ET/src/MArray.jl:25\n; ││┌ @ pointer.jl:152 within `unsafe_load`\n     %pointerref203 = phi double [ 0.000000e+00, %L129.L361_crit_edge ], [ %115, %L287 ]\n; │└└\n   %lcnt171 = load volatile i64, i64* inttoptr (i64 61927384 to i64*), align 8\n   %119 = add i64 %lcnt171, 1\n   store volatile i64 %119, i64* inttoptr (i64 61927384 to i64*), align 8\n; │┌ @ /home/runner/work/KernelAbstractions.jl/KernelAbstractions.jl/src/cpu.jl:124 within `__index_Local_Linear`\n    %lcnt172 = load volatile i64, i64* inttoptr (i64 57810560 to i64*), align 128\n    %120 = add i64 %lcnt172, 1\n    store volatile i64 %120, i64* inttoptr (i64 57810560 to i64*), align 128\n; ││ @ /home/runner/work/KernelAbstractions.jl/KernelAbstractions.jl/src/cpu.jl:125 within `__index_Local_Linear`\n    %lcnt173 = load volatile i64, i64* inttoptr (i64 57810568 to i64*), align 8\n    %121 = add i64 %lcnt173, 1\n    store volatile i64 %121, i64* inttoptr (i64 57810568 to i64*), align 8\n; ││┌ @ /home/runner/work/KernelAbstractions.jl/KernelAbstractions.jl/src/compiler.jl:23 within `__iterspace`\n     %lcnt174 = load volatile i64, i64* inttoptr (i64 57810792 to i64*), align 8\n     %122 = add i64 %lcnt174, 1\n     store volatile i64 %122, i64* inttoptr (i64 57810792 to i64*), align 8\n; ││└\n; ││┌ @ /home/runner/work/KernelAbstractions.jl/KernelAbstractions.jl/src/nditeration.jl:63 within `workitems`\n     %lcnt175 = load volatile i64, i64* inttoptr (i64 81001848 to i64*), align 8\n     %123 = add i64 %lcnt175, 1\n     store volatile i64 %123, i64* inttoptr (i64 81001848 to i64*), align 8\n; ││└\n; ││ @ /home/runner/work/KernelAbstractions.jl/KernelAbstractions.jl/src/cpu.jl:126 within `__index_Local_Linear`\n    %lcnt176 = load volatile i64, i64* inttoptr (i64 57810576 to i64*), align 16\n    %124 = add i64 %lcnt176, 1\n    store volatile i64 %124, i64* inttoptr (i64 57810576 to i64*), 
align 16\n; │└\n; │┌ @ /home/runner/work/KernelAbstractions.jl/KernelAbstractions.jl/src/cpu.jl:189 within `getindex`\n    %lcnt182 = load volatile i64, i64* inttoptr (i64 129620856 to i64*), align 8\n    %125 = add i64 %lcnt182, 1\n    store volatile i64 %125, i64* inttoptr (i64 129620856 to i64*), align 8\n; ││ @ /home/runner/work/KernelAbstractions.jl/KernelAbstractions.jl/src/cpu.jl:190 within `getindex`\n    %lcnt183 = load volatile i64, i64* inttoptr (i64 129620864 to i64*), align 128\n    %126 = add i64 %lcnt183, 1\n    store volatile i64 %126, i64* inttoptr (i64 129620864 to i64*), align 128\n; ││┌ @ ntuple.jl:48 within `ntuple`\n; │││┌ @ /home/runner/work/KernelAbstractions.jl/KernelAbstractions.jl/src/cpu.jl:190 within `#22`\n      %lcnt184 = load volatile i64, i64* inttoptr (i64 129620864 to i64*), align 128\n      %127 = add i64 %lcnt184, 1\n      store volatile i64 %127, i64* inttoptr (i64 129620864 to i64*), align 128\n; ││└└\n; ││┌ @ /home/runner/work/KernelAbstractions.jl/KernelAbstractions.jl/src/cpu.jl:184 within `aview`\n     %lcnt185 = load volatile i64, i64* inttoptr (i64 129620816 to i64*), align 16\n     %128 = add i64 %lcnt185, 1\n     store volatile i64 %128, i64* inttoptr (i64 129620816 to i64*), align 16\n; │││ @ /home/runner/work/KernelAbstractions.jl/KernelAbstractions.jl/src/cpu.jl:185 within `aview`\n     %lcnt186 = load volatile i64, i64* inttoptr (i64 129620824 to i64*), align 8\n     %129 = add i64 %lcnt186, 1\n     store volatile i64 %129, i64* inttoptr (i64 129620824 to i64*), align 8\n; │││ @ /home/runner/work/KernelAbstractions.jl/KernelAbstractions.jl/src/cpu.jl:186 within `aview`\n     %lcnt187 = load volatile i64, i64* inttoptr (i64 129620832 to i64*), align 32\n     %130 = add i64 %lcnt187, 1\n     store volatile i64 %130, i64* inttoptr (i64 129620832 to i64*), align 32\n; │└└\n; │┌ @ array.jl:965 within `setindex!`\n; ││┌ @ essentials.jl:11 within `length`\n     %131 = load i64, i64* %memcpy_refined_src, align 8\n; ││└\n; ││┌ @ int.jl:513 within `<`\n     %.not294 = icmp ult i64 %.pre-phi, %131\n; ││└\n    br i1 %.not294, label %L382, label %L379\n\nL379:                                             ; preds = %L361\n    %lcnt238 = load volatile i64, i64* inttoptr (i64 31813544 to i64*), align 8\n    %132 = add i64 %lcnt238, 1\n    store volatile i64 %132, i64* inttoptr (i64 31813544 to i64*), align 8\n    %lcnt239 = load volatile i64, i64* inttoptr (i64 61927384 to i64*), align 8\n    %133 = add i64 %lcnt239, 1\n    store volatile i64 %133, i64* inttoptr (i64 61927384 to i64*), align 8\n    %134 = getelementptr inbounds [1 x i64], [1 x i64]* %\"new::Tuple241\", i64 0, i64 0\n    store i64 %50, i64* %134, align 8\n    call void @j_throw_boundserror_15406({}* nonnull %\"out::Array\", [1 x i64]* nocapture readonly %\"new::Tuple241\") #4\n    unreachable\n\nL382:                                             ; preds = %L361\n; ││ @ array.jl:966 within `setindex!`\n; ││┌ @ Base.jl:47 within `getproperty`\n     %135 = load i8*, i8** %29, align 8\n     %136 = load {}*, {}** %30, align 8\n; ││└\n    %137 = bitcast {}* %136 to i64*\n    %138 = load i64, i64* %137, align 8\n    %139 = shl nuw nsw i64 %138, 1\n    %140 = add i64 %138, %.pre-phi\n    %.not295 = icmp ult i64 %140, %139\n    %141 = bitcast i8* %135 to double*\n    %142 = getelementptr inbounds double, double* %141, i64 %.pre-phi\n    %143 = bitcast {}* %136 to { i64, {}** }*\n    %.data_ptr216 = getelementptr inbounds { i64, {}** }, { i64, {}** }* %143, i64 0, i32 1\n    %144 = bitcast {}*** 
%.data_ptr216 to i8**\n    %145 = load i8*, i8** %144, align 8\n    %146 = ptrtoint i8* %145 to i64\n    %147 = ptrtoint double* %142 to i64\n    %148 = sub i64 %147, %146\n    %149 = shl nuw nsw i64 %138, 3\n    %150 = icmp ult i64 %148, %149\n    %151 = and i1 %.not295, %150\n    br i1 %151, label %idxend222, label %oob217\n\nL405:                                             ; preds = %load224\n    %frame.prev662 = load {}*, {}** %frame.prev, align 8\n    %152 = bitcast {}*** %tls_pgcstack to {}**\n    store {}* %frame.prev662, {}** %152, align 8\n; └└\n;  @ none within `cpu_reduce_private` @ /home/runner/work/KernelAbstractions.jl/KernelAbstractions.jl/src/macros.jl:288\n  ret void\n\noob:                                              ; preds = %L266\n  %gc_slot_addr_0 = getelementptr inbounds [3 x {}*], [3 x {}*]* %gcframe663, i64 0, i64 2\n  %153 = bitcast {}** %gc_slot_addr_0 to i64**\n  store i64* %96, i64** %153, align 16\n;  @ none within `cpu_reduce_private` @ /home/runner/work/KernelAbstractions.jl/KernelAbstractions.jl/src/macros.jl:287\n; ┌ @ /home/runner/work/KernelAbstractions.jl/KernelAbstractions.jl/test/private.jl:55 within `macro expansion`\n; │┌ @ array.jl:910 within `getindex` @ essentials.jl:791\n    %ptls_field667 = getelementptr inbounds {}**, {}*** %tls_pgcstack, i64 2\n    %154 = bitcast {}*** %ptls_field667 to i8**\n    %ptls_load668669 = load i8*, i8** %154, align 8\n    %\"box::GenericMemoryRef\" = call noalias nonnull dereferenceable(32) {}* @ijl_gc_pool_alloc_instrumented(i8* %ptls_load668669, i32 800, i32 32, i64 139965152542256) #14\n    %155 = bitcast {}* %\"box::GenericMemoryRef\" to i64*\n    %\"box::GenericMemoryRef.tag_addr\" = getelementptr inbounds i64, i64* %155, i64 -1\n    store atomic i64 139965152542256, i64* %\"box::GenericMemoryRef.tag_addr\" unordered, align 8\n    %156 = bitcast {}* %\"box::GenericMemoryRef\" to { i8*, {}* }*\n    %.repack = bitcast {}* %\"box::GenericMemoryRef\" to i8**\n    store i8* %98, i8** %.repack, align 8\n    %.repack291 = getelementptr inbounds { i8*, {}* }, { i8*, {}* }* %156, i64 0, i32 1\n    %157 = bitcast {}** %.repack291 to i64**\n    store i64* %96, i64** %157, align 8\n    call void @ijl_bounds_error_int({}* %\"box::GenericMemoryRef\", i64 %94)\n    unreachable\n\noob217:                                           ; preds = %L382\n    %gc_slot_addr_0653 = getelementptr inbounds [3 x {}*], [3 x {}*]* %gcframe663, i64 0, i64 2\n    store {}* %136, {}** %gc_slot_addr_0653, align 16\n; │└\n; │ @ /home/runner/work/KernelAbstractions.jl/KernelAbstractions.jl/test/private.jl:57 within `macro expansion`\n; │┌ @ array.jl:966 within `setindex!`\n    %ptls_field658664 = getelementptr inbounds {}**, {}*** %tls_pgcstack, i64 2\n    %158 = bitcast {}*** %ptls_field658664 to i8**\n    %ptls_load659665666 = load i8*, i8** %158, align 8\n    %\"box::GenericMemoryRef221\" = call noalias nonnull dereferenceable(32) {}* @ijl_gc_pool_alloc_instrumented(i8* %ptls_load659665666, i32 800, i32 32, i64 139965152542256) #14\n    %159 = bitcast {}* %\"box::GenericMemoryRef221\" to i64*\n    %\"box::GenericMemoryRef221.tag_addr\" = getelementptr inbounds i64, i64* %159, i64 -1\n    store atomic i64 139965152542256, i64* %\"box::GenericMemoryRef221.tag_addr\" unordered, align 8\n    %160 = bitcast {}* %\"box::GenericMemoryRef221\" to { i8*, {}* }*\n    %.repack297 = bitcast {}* %\"box::GenericMemoryRef221\" to i8**\n    store i8* %135, i8** %.repack297, align 8\n    %.repack298 = getelementptr inbounds { i8*, {}* }, { i8*, {}* }* %160, 
i64 0, i32 1\n    store {}* %136, {}** %.repack298, align 8\n    call void @ijl_bounds_error_int({}* %\"box::GenericMemoryRef221\", i64 %50)\n    unreachable\n\nidxend222:                                        ; preds = %L382\n    %161 = icmp eq i64 %138, 0\n    br i1 %161, label %oob223, label %load224\n\noob223:                                           ; preds = %idxend222\n    call void @ijl_bounds_error_int({}* %136, i64 1)\n    unreachable\n\nload224:                                          ; preds = %idxend222\n    %162 = bitcast i8* %135 to {}**\n    %163 = getelementptr inbounds {}*, {}** %162, i64 %.pre-phi\n    %164 = bitcast {}** %163 to double*\n    store double %pointerref203, double* %164, align 8\n; └└\n;  @ none within `cpu_reduce_private` @ /home/runner/work/KernelAbstractions.jl/KernelAbstractions.jl/src/macros.jl:288\n  %lcnt225 = load volatile i64, i64* inttoptr (i64 31813568 to i64*), align 64\n  %165 = add i64 %lcnt225, 1\n  store volatile i64 %165, i64* inttoptr (i64 31813568 to i64*), align 64\n; ┌ @ multidimensional.jl:408 within `iterate`\n; │┌ @ multidimensional.jl:426 within `__inc`\n; ││┌ @ int.jl:87 within `+`\n     %166 = add nuw nsw i64 %value_phi, 1\n; ││└\n; ││ @ multidimensional.jl:427 within `__inc`\n; ││┌ @ operators.jl:276 within `!=`\n; │││┌ @ promotion.jl:620 within `==`\n      %.not300 = icmp eq i64 %value_phi, 8\n; └└└└\n  br i1 %.not300, label %L405, label %L129\n}\n"))
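The failing assertion is in test/private.jl, which renders the LLVM IR of the generated CPU kernel to a string and requires that no GC frame is emitted; on Julia 1.11 the predicate `!occursin("gcframe", IR)` now fails for `cpu_reduce_private` because the kernel allocates `%gcframe663`, as the dump above shows. For illustration only, here is a minimal sketch of that style of check, using `InteractiveUtils.code_llvm` and a hypothetical stand-in function (`reduce_private_stub!`) rather than the actual KernelAbstractions kernel and test harness:

```julia
using InteractiveUtils

# Hypothetical stand-in for the body of the generated cpu_reduce_private
# kernel: each "workitem" i accumulates a private sum over row i of A.
function reduce_private_stub!(out, A)
    @inbounds for i in axes(A, 1)
        acc = 0.0
        for j in axes(A, 2)
            acc += A[i, j]
        end
        out[i] = acc
    end
    return nothing
end

# Render the optimized LLVM IR to a string and apply the same predicate
# the failing test uses; the test expects occursin("gcframe", IR) == false.
IR = sprint(code_llvm, reduce_private_stub!, (Vector{Float64}, Matrix{Float64}))
@show occursin("gcframe", IR)
```

The real test performs this check on the KernelAbstractions-generated CPU kernel, so a GC frame appearing there points at the code generated through src/cpu.jl and src/macros.jl rather than at user code.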
vchuravy self-assigned this Nov 6, 2023