// Compute-shader intrinsic coverage test.
// Exercises the groupshared-atomic intrinsic prototypes at scalar, 2-, 3-,
// and 4-component widths. Call results are deliberately discarded in most
// cases: this file only verifies that the prototypes resolve and compile.

// Scalar / vector groupshared operands for the atomic intrinsics below.
groupshared uint gs_ua;
groupshared uint gs_ub;
groupshared uint gs_uc;
groupshared uint2 gs_ua2;
groupshared uint2 gs_ub2;
groupshared uint2 gs_uc2;
groupshared uint3 gs_ua3;
groupshared uint3 gs_ub3;
groupshared uint3 gs_uc3;
groupshared uint4 gs_ua4;
groupshared uint4 gs_ub4;
groupshared uint4 gs_uc4;
// Float operands for the float-atomic overload of InterlockedAdd.
groupshared float gs_fa;
groupshared float gs_fb;

// Scalar (uint/float) variants of the atomic intrinsics.
float ComputeShaderFunctionS(float inF0, float inF1, float inF2, uint inU0, uint inU1)
{
    uint out_u1;

    // Don't repeat all the pixel/vertex fns - just one for sanity.
    all(inF0);

    // Test atomics
    InterlockedAdd(gs_ua, gs_ub);
    InterlockedAdd(gs_ua, gs_ub, out_u1);
    InterlockedAnd(gs_ua, gs_ub);
    InterlockedAnd(gs_ua, gs_ub, out_u1);
    InterlockedCompareExchange(gs_ua, gs_ub, gs_uc, out_u1);
    InterlockedExchange(gs_ua, gs_ub, out_u1);
    InterlockedMax(gs_ua, gs_ub);
    InterlockedMax(gs_ua, gs_ub, out_u1);
    InterlockedMin(gs_ua, gs_ub);
    InterlockedMin(gs_ua, gs_ub, out_u1);
    InterlockedOr(gs_ua, gs_ub);
    InterlockedOr(gs_ua, gs_ub, out_u1);
    InterlockedXor(gs_ua, gs_ub);
    InterlockedXor(gs_ua, gs_ub, out_u1);

    // NOTE(review): float operands — exercises a float-atomic-add overload;
    // intentional prototype coverage, not a type error. Confirm the target
    // profile/extension supports float atomics.
    InterlockedAdd(gs_fa, gs_fb);

    // CheckAccessFullyMapped(3); // TODO: ...

    return 0.0;
}

// float1 variant — placeholder until float1 prototypes exist.
float1 ComputeShaderFunction1(float1 inF0, float1 inF1, float1 inF2)
{
    // TODO: ... add when float1 prototypes are generated
    return 0.0;
}

// 2-component vector variants of the atomic intrinsics.
float2 ComputeShaderFunction2(float2 inF0, float2 inF1, float2 inF2, uint2 inU0, uint2 inU1)
{
    uint2 out_u2;

    // Don't repeat all the pixel/vertex fns - just one for sanity.
    all(inF0);

    // Test atomics
    InterlockedAdd(gs_ua2, gs_ub2);
    InterlockedAdd(gs_ua2, gs_ub2, out_u2);
    InterlockedAnd(gs_ua2, gs_ub2);
    InterlockedAnd(gs_ua2, gs_ub2, out_u2);
    InterlockedCompareExchange(gs_ua2, gs_ub2, gs_uc2, out_u2);
    InterlockedExchange(gs_ua2, gs_ub2, out_u2);
    InterlockedMax(gs_ua2, gs_ub2);
    InterlockedMax(gs_ua2, gs_ub2, out_u2);
    InterlockedMin(gs_ua2, gs_ub2);
    InterlockedMin(gs_ua2, gs_ub2, out_u2);
    InterlockedOr(gs_ua2, gs_ub2);
    InterlockedOr(gs_ua2, gs_ub2, out_u2);
    InterlockedXor(gs_ua2, gs_ub2);
    InterlockedXor(gs_ua2, gs_ub2, out_u2);

    // TODO: ... add when float1 prototypes are generated
    return float2(1,2);
}

// 3-component vector variants of the atomic intrinsics.
float3 ComputeShaderFunction3(float3 inF0, float3 inF1, float3 inF2, uint3 inU0, uint3 inU1)
{
    uint3 out_u3;

    // Don't repeat all the pixel/vertex fns - just one for sanity.
    all(inF0);

    // Test atomics
    InterlockedAdd(gs_ua3, gs_ub3);
    InterlockedAdd(gs_ua3, gs_ub3, out_u3);
    InterlockedAnd(gs_ua3, gs_ub3);
    InterlockedAnd(gs_ua3, gs_ub3, out_u3);
    InterlockedCompareExchange(gs_ua3, gs_ub3, gs_uc3, out_u3);
    InterlockedExchange(gs_ua3, gs_ub3, out_u3);
    InterlockedMax(gs_ua3, gs_ub3);
    InterlockedMax(gs_ua3, gs_ub3, out_u3);
    InterlockedMin(gs_ua3, gs_ub3);
    InterlockedMin(gs_ua3, gs_ub3, out_u3);
    InterlockedOr(gs_ua3, gs_ub3);
    InterlockedOr(gs_ua3, gs_ub3, out_u3);
    InterlockedXor(gs_ua3, gs_ub3);
    InterlockedXor(gs_ua3, gs_ub3, out_u3);

    // TODO: ... add when float1 prototypes are generated
    return float3(1,2,3);
}

// 4-component vector variants of the atomic intrinsics.
float4 ComputeShaderFunction(float4 inF0, float4 inF1, float4 inF2, uint4 inU0, uint4 inU1)
{
    uint4 out_u4;

    // Don't repeat all the pixel/vertex fns - just one for sanity.
    all(inF0);

    // Test atomics
    InterlockedAdd(gs_ua4, gs_ub4);
    InterlockedAdd(gs_ua4, gs_ub4, out_u4);
    InterlockedAnd(gs_ua4, gs_ub4);
    InterlockedAnd(gs_ua4, gs_ub4, out_u4);
    InterlockedCompareExchange(gs_ua4, gs_ub4, gs_uc4, out_u4);
    InterlockedExchange(gs_ua4, gs_ub4, out_u4);
    InterlockedMax(gs_ua4, gs_ub4);
    InterlockedMax(gs_ua4, gs_ub4, out_u4);
    InterlockedMin(gs_ua4, gs_ub4);
    InterlockedMin(gs_ua4, gs_ub4, out_u4);
    InterlockedOr(gs_ua4, gs_ub4);
    InterlockedOr(gs_ua4, gs_ub4, out_u4);
    InterlockedXor(gs_ua4, gs_ub4);
    InterlockedXor(gs_ua4, gs_ub4, out_u4);

    // TODO: ... add when float1 prototypes are generated
    return float4(1,2,3,4);
}