#version 450 core
#pragma use_vulkan_memory_model
#extension GL_KHR_shader_subgroup_basic : enable
#extension GL_KHR_shader_subgroup_shuffle : enable
#extension GL_KHR_shader_subgroup_ballot : enable
#extension GL_KHR_memory_scope_semantics : enable
#extension GL_ARB_gpu_shader_int64 : enable
#extension GL_EXT_buffer_reference : enable
// DIM/NUM_WORKGROUP_EACH_DIM overridden by spec constants
layout(constant_id = 0) const int DIM = 1;
layout(constant_id = 1) const int NUM_WORKGROUP_EACH_DIM = 1;
shared bool sharedSkip;
layout(local_size_x_id = 0, local_size_y_id = 0, local_size_z = 1) in;
layout(buffer_reference) buffer PayloadRef { uint x[]; };
layout(buffer_reference) buffer GuardRef { uint x[]; };
layout(set=0, binding=2) buffer Fail { uint x[]; } fail;
layout(push_constant, std430) uniform PC {
  layout(offset = 0) PayloadRef payloadref;
  layout(offset = 8) GuardRef guard;
};
void main()
{
  bool pass = true;
  bool skip = false;
  sharedSkip = false;
  nonprivate PayloadRef payload = payloadref;
  // Each invocation is paired with the invocation at the mirrored position of
  // the global grid, which lies in the mirrored ("partner") workgroup.
  ivec2 globalId = ivec2(gl_GlobalInvocationID.xy);
  ivec2 partnerGlobalId = ivec2(DIM*NUM_WORKGROUP_EACH_DIM-1) - ivec2(gl_GlobalInvocationID.xy);
  uint bufferCoord = globalId.y * DIM*NUM_WORKGROUP_EACH_DIM + globalId.x;
  uint partnerBufferCoord = partnerGlobalId.y * DIM*NUM_WORKGROUP_EACH_DIM + partnerGlobalId.x;
  ivec2 imageCoord = globalId;
  ivec2 partnerImageCoord = partnerGlobalId;
  // Coordinates of invocation (0,0) in this workgroup and in the partner workgroup.
  ivec2 globalId00 = ivec2(DIM) * ivec2(gl_WorkGroupID.xy);
  ivec2 partnerGlobalId00 = ivec2(DIM) * (ivec2(NUM_WORKGROUP_EACH_DIM-1) - ivec2(gl_WorkGroupID.xy));
  uint bufferCoord00 = globalId00.y * DIM*NUM_WORKGROUP_EACH_DIM + globalId00.x;
  uint partnerBufferCoord00 = partnerGlobalId00.y * DIM*NUM_WORKGROUP_EACH_DIM + partnerGlobalId00.x;
  ivec2 imageCoord00 = globalId00;
  ivec2 partnerImageCoord00 = partnerGlobalId00;
  // Write this invocation's payload value; the high-bit shift of the partner's
  // slot forces a read of that location.
  payload.x[bufferCoord] = bufferCoord + (payload.x[partnerBufferCoord]>>31);
  // Make the whole workgroup's payload writes available before the guard is set.
  controlBarrier(gl_ScopeWorkgroup, gl_ScopeWorkgroup, gl_StorageSemanticsBuffer | gl_StorageSemanticsShared, gl_SemanticsAcquireRelease | gl_SemanticsMakeAvailable);
  if (all(equal(gl_LocalInvocationID.xy, ivec2(0,0)))) {
    // Invocation (0,0) publishes this workgroup's guard with release semantics,
    // then checks (acquire) whether the partner workgroup has published its own.
    atomicStore(guard.x[bufferCoord], uint(1u), gl_ScopeDevice, gl_StorageSemanticsBuffer, gl_SemanticsRelease | gl_SemanticsMakeAvailable);
    skip = atomicLoad(guard.x[partnerBufferCoord00], gl_ScopeDevice, gl_StorageSemanticsBuffer, gl_SemanticsAcquire | gl_SemanticsMakeVisible) == 0;
    sharedSkip = skip;
  }
  // Share the skip flag and propagate visibility of the partner workgroup's
  // payload writes to every invocation in this workgroup.
  controlBarrier(gl_ScopeWorkgroup, gl_ScopeWorkgroup, gl_StorageSemanticsBuffer | gl_StorageSemanticsShared, gl_SemanticsAcquireRelease | gl_SemanticsMakeVisible);
  skip = sharedSkip;
  uint r = payload.x[partnerBufferCoord];
  // If the partner's guard was observed (skip == false), its payload write
  // must be visible and hold the expected value.
  if (!skip && r != uint(partnerBufferCoord)) { fail.x[bufferCoord] = 1; }
}
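The push-constant block above receives the payload and guard buffers as 64-bit buffer device addresses (GL_EXT_buffer_reference) at std430 offsets 0 and 8, rather than through descriptors. Below is a minimal host-side sketch of how such addresses could be supplied; the PushConstants struct, the getBufferAddress/recordPushConstants helpers, and the buffer and pipeline-layout handles are hypothetical illustrations, not part of the test source.

/* Hypothetical host-side setup (not from the test source): fill the shader's
 * push_constant block with the device addresses of the payload and guard
 * buffers. The layout mirrors the std430 offsets 0 and 8 in the GLSL above. */
#include <vulkan/vulkan.h>

typedef struct PushConstants {
    VkDeviceAddress payloadref;  /* offset 0: PayloadRef */
    VkDeviceAddress guard;       /* offset 8: GuardRef   */
} PushConstants;

static VkDeviceAddress getBufferAddress(VkDevice device, VkBuffer buffer)
{
    VkBufferDeviceAddressInfo info = {
        .sType  = VK_STRUCTURE_TYPE_BUFFER_DEVICE_ADDRESS_INFO,
        .buffer = buffer,
    };
    /* Requires the bufferDeviceAddress feature (core in Vulkan 1.2). */
    return vkGetBufferDeviceAddress(device, &info);
}

static void recordPushConstants(VkCommandBuffer cmd, VkPipelineLayout layout,
                                VkDevice device, VkBuffer payloadBuf, VkBuffer guardBuf)
{
    PushConstants pc = {
        .payloadref = getBufferAddress(device, payloadBuf),
        .guard      = getBufferAddress(device, guardBuf),
    };
    vkCmdPushConstants(cmd, layout, VK_SHADER_STAGE_COMPUTE_BIT, 0, sizeof(pc), &pc);
}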