1 //
2 // Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
3 // Copyright (C) 2012-2016 LunarG, Inc.
4 // Copyright (C) 2017, 2022-2024 Arm Limited.
5 // Modifications Copyright (C) 2020 Advanced Micro Devices, Inc. All rights reserved.
6 //
7 // All rights reserved.
8 //
9 // Redistribution and use in source and binary forms, with or without
10 // modification, are permitted provided that the following conditions
11 // are met:
12 //
13 // Redistributions of source code must retain the above copyright
14 // notice, this list of conditions and the following disclaimer.
15 //
16 // Redistributions in binary form must reproduce the above
17 // copyright notice, this list of conditions and the following
18 // disclaimer in the documentation and/or other materials provided
19 // with the distribution.
20 //
21 // Neither the name of 3Dlabs Inc. Ltd. nor the names of its
22 // contributors may be used to endorse or promote products derived
23 // from this software without specific prior written permission.
24 //
25 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
26 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
27 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
28 // FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
29 // COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
30 // INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
31 // BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
32 // LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
33 // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
34 // LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
35 // ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36 // POSSIBILITY OF SUCH DAMAGE.
37 //
38
39 //
40 // Definition of the in-memory high-level intermediate representation
// of shaders. This is a tree that the parser creates.
42 //
43 // Nodes in the tree are defined as a hierarchy of classes derived from
44 // TIntermNode. Each is a node in a tree. There is no preset branching factor;
// each node can have its own type of list of children.
46 //
47
48 #ifndef __INTERMEDIATE_H
49 #define __INTERMEDIATE_H
50
51 #include "Common.h"
52 #include "Types.h"
53 #include "ConstantUnion.h"
54
55 namespace glslang {
56
57 class TIntermediate;
58
59 //
60 // Operators used by the high-level (parse tree) representation.
61 //
enum TOperator {
    // NOTE(review): enumerator order appears to be significant — the
    // *GuardStart/*GuardBegin .. *GuardEnd pairs look like they delimit
    // ranges tested with ordered comparisons elsewhere in the compiler,
    // and the constructor groups below carry an explicit order-dependency
    // comment. Insert new operators with care; prefer appending inside
    // the relevant group.

    EOpNull,            // if in a node, should only mean a node is still being built
    EOpSequence,        // denotes a list of statements, or parameters, etc.
    EOpScope,           // Used by debugging to denote a scoped list of statements
    EOpLinkerObjects,   // for aggregate node of objects the linker may need, if not referenced by the rest of the AST
    EOpFunctionCall,
    EOpFunction,        // For function definition
    EOpParameters,      // an aggregate listing the parameters to a function
    EOpSpirvInst,

    //
    // Unary operators
    //

    EOpNegative,
    EOpLogicalNot,
    EOpVectorLogicalNot,
    EOpBitwiseNot,

    EOpPostIncrement,
    EOpPostDecrement,
    EOpPreIncrement,
    EOpPreDecrement,

    EOpCopyObject,

    EOpDeclare,        // Used by debugging to force declaration of variable in correct scope

    // Operator used to represent all conversions between int, float, and bool.
    // The specific types are inferred from TBasicType.
    EOpConvNumeric,

    // uint64_t <-> pointer
    EOpConvUint64ToPtr,
    EOpConvPtrToUint64,

    // uvec2 <-> pointer
    EOpConvUvec2ToPtr,
    EOpConvPtrToUvec2,

    // uint64_t -> accelerationStructureEXT
    EOpConvUint64ToAccStruct,

    // uvec2 -> accelerationStructureEXT
    EOpConvUvec2ToAccStruct,

    //
    // binary operations
    //

    EOpAdd,
    EOpSub,
    EOpMul,
    EOpDiv,
    EOpMod,
    EOpRightShift,
    EOpLeftShift,
    EOpAnd,
    EOpInclusiveOr,
    EOpExclusiveOr,
    EOpEqual,
    EOpNotEqual,
    EOpVectorEqual,      // component-wise comparison variant of EOpEqual
    EOpVectorNotEqual,   // component-wise comparison variant of EOpNotEqual
    EOpLessThan,
    EOpGreaterThan,
    EOpLessThanEqual,
    EOpGreaterThanEqual,
    EOpComma,

    EOpVectorTimesScalar,
    EOpVectorTimesMatrix,
    EOpMatrixTimesVector,
    EOpMatrixTimesScalar,

    EOpLogicalOr,
    EOpLogicalXor,
    EOpLogicalAnd,

    EOpIndexDirect,        // index is a compile-time constant
    EOpIndexIndirect,      // index is a run-time expression
    EOpIndexDirectStruct,  // constant index selecting a struct member

    EOpVectorSwizzle,

    EOpMethod,
    EOpScoping,

    //
    // Built-in functions mapped to operators
    //

    EOpRadians,
    EOpDegrees,
    EOpSin,
    EOpCos,
    EOpTan,
    EOpAsin,
    EOpAcos,
    EOpAtan,
    EOpSinh,
    EOpCosh,
    EOpTanh,
    EOpAsinh,
    EOpAcosh,
    EOpAtanh,

    EOpPow,
    EOpExp,
    EOpLog,
    EOpExp2,
    EOpLog2,
    EOpSqrt,
    EOpInverseSqrt,

    EOpAbs,
    EOpSign,
    EOpFloor,
    EOpTrunc,
    EOpRound,
    EOpRoundEven,
    EOpCeil,
    EOpFract,
    EOpModf,
    EOpMin,
    EOpMax,
    EOpClamp,
    EOpMix,
    EOpStep,
    EOpSmoothStep,

    EOpIsNan,
    EOpIsInf,

    EOpFma,

    EOpFrexp,
    EOpLdexp,

    // Bit-preserving reinterpretation casts (GLSL *BitsTo* built-ins).
    EOpFloatBitsToInt,
    EOpFloatBitsToUint,
    EOpIntBitsToFloat,
    EOpUintBitsToFloat,
    EOpDoubleBitsToInt64,
    EOpDoubleBitsToUint64,
    EOpInt64BitsToDouble,
    EOpUint64BitsToDouble,
    EOpFloat16BitsToInt16,
    EOpFloat16BitsToUint16,
    EOpInt16BitsToFloat16,
    EOpUint16BitsToFloat16,
    // Packing/unpacking between scalars and small-component vectors.
    EOpPackSnorm2x16,
    EOpUnpackSnorm2x16,
    EOpPackUnorm2x16,
    EOpUnpackUnorm2x16,
    EOpPackSnorm4x8,
    EOpUnpackSnorm4x8,
    EOpPackUnorm4x8,
    EOpUnpackUnorm4x8,
    EOpPackHalf2x16,
    EOpUnpackHalf2x16,
    EOpPackDouble2x32,
    EOpUnpackDouble2x32,
    EOpPackInt2x32,
    EOpUnpackInt2x32,
    EOpPackUint2x32,
    EOpUnpackUint2x32,
    EOpPackFloat2x16,
    EOpUnpackFloat2x16,
    EOpPackInt2x16,
    EOpUnpackInt2x16,
    EOpPackUint2x16,
    EOpUnpackUint2x16,
    EOpPackInt4x16,
    EOpUnpackInt4x16,
    EOpPackUint4x16,
    EOpUnpackUint4x16,
    EOpPack16,
    EOpPack32,
    EOpPack64,
    EOpUnpack32,
    EOpUnpack16,
    EOpUnpack8,

    // Geometric built-ins.
    EOpLength,
    EOpDistance,
    EOpDot,
    EOpCross,
    EOpNormalize,
    EOpFaceForward,
    EOpReflect,
    EOpRefract,

    EOpMin3,
    EOpMax3,
    EOpMid3,

    // Derivative and footprint built-ins.
    EOpDPdx,            // Fragment only
    EOpDPdy,            // Fragment only
    EOpFwidth,          // Fragment only
    EOpDPdxFine,        // Fragment only
    EOpDPdyFine,        // Fragment only
    EOpFwidthFine,      // Fragment only
    EOpDPdxCoarse,      // Fragment only
    EOpDPdyCoarse,      // Fragment only
    EOpFwidthCoarse,    // Fragment only

    EOpInterpolateAtCentroid, // Fragment only
    EOpInterpolateAtSample,   // Fragment only
    EOpInterpolateAtOffset,   // Fragment only
    EOpInterpolateAtVertex,

    EOpMatrixTimesMatrix,
    EOpOuterProduct,
    EOpDeterminant,
    EOpMatrixInverse,
    EOpTranspose,

    EOpFtransform,

    EOpNoise,

    EOpEmitVertex,           // geometry only
    EOpEndPrimitive,         // geometry only
    EOpEmitStreamVertex,     // geometry only
    EOpEndStreamPrimitive,   // geometry only

    EOpBarrier,
    EOpMemoryBarrier,
    EOpMemoryBarrierAtomicCounter,
    EOpMemoryBarrierBuffer,
    EOpMemoryBarrierImage,
    EOpMemoryBarrierShared,  // compute only
    EOpGroupMemoryBarrier,   // compute only

    EOpBallot,
    EOpReadInvocation,
    EOpReadFirstInvocation,

    EOpAnyInvocation,
    EOpAllInvocations,
    EOpAllInvocationsEqual,

    // GL_KHR_shader_subgroup* built-ins, bracketed by the guard pair.
    EOpSubgroupGuardStart,
    EOpSubgroupBarrier,
    EOpSubgroupMemoryBarrier,
    EOpSubgroupMemoryBarrierBuffer,
    EOpSubgroupMemoryBarrierImage,
    EOpSubgroupMemoryBarrierShared, // compute only
    EOpSubgroupElect,
    EOpSubgroupAll,
    EOpSubgroupAny,
    EOpSubgroupAllEqual,
    EOpSubgroupBroadcast,
    EOpSubgroupBroadcastFirst,
    EOpSubgroupBallot,
    EOpSubgroupInverseBallot,
    EOpSubgroupBallotBitExtract,
    EOpSubgroupBallotBitCount,
    EOpSubgroupBallotInclusiveBitCount,
    EOpSubgroupBallotExclusiveBitCount,
    EOpSubgroupBallotFindLSB,
    EOpSubgroupBallotFindMSB,
    EOpSubgroupShuffle,
    EOpSubgroupShuffleXor,
    EOpSubgroupShuffleUp,
    EOpSubgroupShuffleDown,
    EOpSubgroupRotate,
    EOpSubgroupClusteredRotate,
    EOpSubgroupAdd,
    EOpSubgroupMul,
    EOpSubgroupMin,
    EOpSubgroupMax,
    EOpSubgroupAnd,
    EOpSubgroupOr,
    EOpSubgroupXor,
    EOpSubgroupInclusiveAdd,
    EOpSubgroupInclusiveMul,
    EOpSubgroupInclusiveMin,
    EOpSubgroupInclusiveMax,
    EOpSubgroupInclusiveAnd,
    EOpSubgroupInclusiveOr,
    EOpSubgroupInclusiveXor,
    EOpSubgroupExclusiveAdd,
    EOpSubgroupExclusiveMul,
    EOpSubgroupExclusiveMin,
    EOpSubgroupExclusiveMax,
    EOpSubgroupExclusiveAnd,
    EOpSubgroupExclusiveOr,
    EOpSubgroupExclusiveXor,
    EOpSubgroupClusteredAdd,
    EOpSubgroupClusteredMul,
    EOpSubgroupClusteredMin,
    EOpSubgroupClusteredMax,
    EOpSubgroupClusteredAnd,
    EOpSubgroupClusteredOr,
    EOpSubgroupClusteredXor,
    EOpSubgroupQuadBroadcast,
    EOpSubgroupQuadSwapHorizontal,
    EOpSubgroupQuadSwapVertical,
    EOpSubgroupQuadSwapDiagonal,
    EOpSubgroupQuadAll,
    EOpSubgroupQuadAny,

    EOpSubgroupPartition,
    EOpSubgroupPartitionedAdd,
    EOpSubgroupPartitionedMul,
    EOpSubgroupPartitionedMin,
    EOpSubgroupPartitionedMax,
    EOpSubgroupPartitionedAnd,
    EOpSubgroupPartitionedOr,
    EOpSubgroupPartitionedXor,
    EOpSubgroupPartitionedInclusiveAdd,
    EOpSubgroupPartitionedInclusiveMul,
    EOpSubgroupPartitionedInclusiveMin,
    EOpSubgroupPartitionedInclusiveMax,
    EOpSubgroupPartitionedInclusiveAnd,
    EOpSubgroupPartitionedInclusiveOr,
    EOpSubgroupPartitionedInclusiveXor,
    EOpSubgroupPartitionedExclusiveAdd,
    EOpSubgroupPartitionedExclusiveMul,
    EOpSubgroupPartitionedExclusiveMin,
    EOpSubgroupPartitionedExclusiveMax,
    EOpSubgroupPartitionedExclusiveAnd,
    EOpSubgroupPartitionedExclusiveOr,
    EOpSubgroupPartitionedExclusiveXor,

    EOpSubgroupGuardStop,

    // AMD shader-ballot style cross-invocation reductions/scans.
    EOpMinInvocations,
    EOpMaxInvocations,
    EOpAddInvocations,
    EOpMinInvocationsNonUniform,
    EOpMaxInvocationsNonUniform,
    EOpAddInvocationsNonUniform,
    EOpMinInvocationsInclusiveScan,
    EOpMaxInvocationsInclusiveScan,
    EOpAddInvocationsInclusiveScan,
    EOpMinInvocationsInclusiveScanNonUniform,
    EOpMaxInvocationsInclusiveScanNonUniform,
    EOpAddInvocationsInclusiveScanNonUniform,
    EOpMinInvocationsExclusiveScan,
    EOpMaxInvocationsExclusiveScan,
    EOpAddInvocationsExclusiveScan,
    EOpMinInvocationsExclusiveScanNonUniform,
    EOpMaxInvocationsExclusiveScanNonUniform,
    EOpAddInvocationsExclusiveScanNonUniform,
    EOpSwizzleInvocations,
    EOpSwizzleInvocationsMasked,
    EOpWriteInvocation,
    EOpMbcnt,

    EOpCubeFaceIndex,
    EOpCubeFaceCoord,
    EOpTime,

    // Memory-object atomics.
    EOpAtomicAdd,
    EOpAtomicSubtract,
    EOpAtomicMin,
    EOpAtomicMax,
    EOpAtomicAnd,
    EOpAtomicOr,
    EOpAtomicXor,
    EOpAtomicExchange,
    EOpAtomicCompSwap,
    EOpAtomicLoad,
    EOpAtomicStore,

    EOpAtomicCounterIncrement, // results in pre-increment value
    EOpAtomicCounterDecrement, // results in post-decrement value
    EOpAtomicCounter,
    EOpAtomicCounterAdd,
    EOpAtomicCounterSubtract,
    EOpAtomicCounterMin,
    EOpAtomicCounterMax,
    EOpAtomicCounterAnd,
    EOpAtomicCounterOr,
    EOpAtomicCounterXor,
    EOpAtomicCounterExchange,
    EOpAtomicCounterCompSwap,

    EOpAny,
    EOpAll,

    // Cooperative-matrix (KHR and NV) and NV tensor operations.
    EOpCooperativeMatrixLoad,
    EOpCooperativeMatrixStore,
    EOpCooperativeMatrixMulAdd,
    EOpCooperativeMatrixLoadNV,
    EOpCooperativeMatrixStoreNV,
    EOpCooperativeMatrixLoadTensorNV,
    EOpCooperativeMatrixStoreTensorNV,
    EOpCooperativeMatrixMulAddNV,
    EOpCooperativeMatrixReduceNV,
    EOpCooperativeMatrixPerElementOpNV,
    EOpCooperativeMatrixTransposeNV,

    EOpCreateTensorLayoutNV,
    EOpTensorLayoutSetBlockSizeNV,
    EOpTensorLayoutSetDimensionNV,
    EOpTensorLayoutSetStrideNV,
    EOpTensorLayoutSliceNV,
    EOpTensorLayoutSetClampValueNV,

    EOpCreateTensorViewNV,
    EOpTensorViewSetDimensionNV,
    EOpTensorViewSetStrideNV,
    EOpTensorViewSetClipNV,

    EOpBeginInvocationInterlock, // Fragment only
    EOpEndInvocationInterlock,   // Fragment only

    EOpIsHelperInvocation,

    EOpDebugPrintf,

    //
    // Branch
    //

    EOpKill,                  // Fragment only
    EOpTerminateInvocation,   // Fragment only
    EOpDemote,                // Fragment only
    EOpTerminateRayKHR,       // Any-hit only
    EOpIgnoreIntersectionKHR, // Any-hit only
    EOpReturn,
    EOpBreak,
    EOpContinue,
    EOpCase,
    EOpDefault,

    //
    // Constructors
    //

    EOpConstructGuardStart,
    EOpConstructInt,          // these first scalar forms also identify what implicit conversion is needed
    EOpConstructUint,
    EOpConstructInt8,
    EOpConstructUint8,
    EOpConstructInt16,
    EOpConstructUint16,
    EOpConstructInt64,
    EOpConstructUint64,
    EOpConstructBool,
    EOpConstructFloat,
    EOpConstructDouble,
    // Keep vector and matrix constructors in a consistent relative order for
    // TParseContext::constructBuiltIn, which converts between 8/16/32 bit
    // vector constructors
    EOpConstructVec2,
    EOpConstructVec3,
    EOpConstructVec4,
    EOpConstructMat2x2,
    EOpConstructMat2x3,
    EOpConstructMat2x4,
    EOpConstructMat3x2,
    EOpConstructMat3x3,
    EOpConstructMat3x4,
    EOpConstructMat4x2,
    EOpConstructMat4x3,
    EOpConstructMat4x4,
    EOpConstructDVec2,
    EOpConstructDVec3,
    EOpConstructDVec4,
    EOpConstructBVec2,
    EOpConstructBVec3,
    EOpConstructBVec4,
    EOpConstructI8Vec2,
    EOpConstructI8Vec3,
    EOpConstructI8Vec4,
    EOpConstructU8Vec2,
    EOpConstructU8Vec3,
    EOpConstructU8Vec4,
    EOpConstructI16Vec2,
    EOpConstructI16Vec3,
    EOpConstructI16Vec4,
    EOpConstructU16Vec2,
    EOpConstructU16Vec3,
    EOpConstructU16Vec4,
    EOpConstructIVec2,
    EOpConstructIVec3,
    EOpConstructIVec4,
    EOpConstructUVec2,
    EOpConstructUVec3,
    EOpConstructUVec4,
    EOpConstructI64Vec2,
    EOpConstructI64Vec3,
    EOpConstructI64Vec4,
    EOpConstructU64Vec2,
    EOpConstructU64Vec3,
    EOpConstructU64Vec4,
    EOpConstructDMat2x2,
    EOpConstructDMat2x3,
    EOpConstructDMat2x4,
    EOpConstructDMat3x2,
    EOpConstructDMat3x3,
    EOpConstructDMat3x4,
    EOpConstructDMat4x2,
    EOpConstructDMat4x3,
    EOpConstructDMat4x4,
    EOpConstructIMat2x2,
    EOpConstructIMat2x3,
    EOpConstructIMat2x4,
    EOpConstructIMat3x2,
    EOpConstructIMat3x3,
    EOpConstructIMat3x4,
    EOpConstructIMat4x2,
    EOpConstructIMat4x3,
    EOpConstructIMat4x4,
    EOpConstructUMat2x2,
    EOpConstructUMat2x3,
    EOpConstructUMat2x4,
    EOpConstructUMat3x2,
    EOpConstructUMat3x3,
    EOpConstructUMat3x4,
    EOpConstructUMat4x2,
    EOpConstructUMat4x3,
    EOpConstructUMat4x4,
    EOpConstructBMat2x2,
    EOpConstructBMat2x3,
    EOpConstructBMat2x4,
    EOpConstructBMat3x2,
    EOpConstructBMat3x3,
    EOpConstructBMat3x4,
    EOpConstructBMat4x2,
    EOpConstructBMat4x3,
    EOpConstructBMat4x4,
    EOpConstructFloat16,
    EOpConstructF16Vec2,
    EOpConstructF16Vec3,
    EOpConstructF16Vec4,
    EOpConstructF16Mat2x2,
    EOpConstructF16Mat2x3,
    EOpConstructF16Mat2x4,
    EOpConstructF16Mat3x2,
    EOpConstructF16Mat3x3,
    EOpConstructF16Mat3x4,
    EOpConstructF16Mat4x2,
    EOpConstructF16Mat4x3,
    EOpConstructF16Mat4x4,
    EOpConstructStruct,
    EOpConstructTextureSampler,
    EOpConstructNonuniform,     // expected to be transformed away, not present in final AST
    EOpConstructReference,
    EOpConstructCooperativeMatrixNV,
    EOpConstructCooperativeMatrixKHR,
    EOpConstructAccStruct,
    EOpConstructGuardEnd,

    //
    // moves
    //

    EOpAssign,
    EOpAddAssign,
    EOpSubAssign,
    EOpMulAssign,
    EOpVectorTimesMatrixAssign,
    EOpVectorTimesScalarAssign,
    EOpMatrixTimesScalarAssign,
    EOpMatrixTimesMatrixAssign,
    EOpDivAssign,
    EOpModAssign,
    EOpAndAssign,
    EOpInclusiveOrAssign,
    EOpExclusiveOrAssign,
    EOpLeftShiftAssign,
    EOpRightShiftAssign,

    //
    // Array operators
    //

    // Can apply to arrays, vectors, or matrices.
    // Can be decomposed to a constant at compile time, but this does not always happen,
    // due to link-time effects. So, consumer can expect either a link-time sized or
    // run-time sized array.
    EOpArrayLength,

    //
    // Image operations
    //

    EOpImageGuardBegin,

    EOpImageQuerySize,
    EOpImageQuerySamples,
    EOpImageLoad,
    EOpImageStore,
    EOpImageLoadLod,
    EOpImageStoreLod,
    EOpImageAtomicAdd,
    EOpImageAtomicMin,
    EOpImageAtomicMax,
    EOpImageAtomicAnd,
    EOpImageAtomicOr,
    EOpImageAtomicXor,
    EOpImageAtomicExchange,
    EOpImageAtomicCompSwap,
    EOpImageAtomicLoad,
    EOpImageAtomicStore,

    EOpSubpassLoad,
    EOpSubpassLoadMS,
    EOpSparseImageLoad,
    EOpSparseImageLoadLod,
    EOpColorAttachmentReadEXT, // Fragment only

    EOpImageGuardEnd,

    //
    // Texture operations
    //

    EOpTextureGuardBegin,

    EOpTextureQuerySize,
    EOpTextureQueryLod,
    EOpTextureQueryLevels,
    EOpTextureQuerySamples,

    EOpSamplingGuardBegin,

    EOpTexture,
    EOpTextureProj,
    EOpTextureLod,
    EOpTextureOffset,
    EOpTextureFetch,
    EOpTextureFetchOffset,
    EOpTextureProjOffset,
    EOpTextureLodOffset,
    EOpTextureProjLod,
    EOpTextureProjLodOffset,
    EOpTextureGrad,
    EOpTextureGradOffset,
    EOpTextureProjGrad,
    EOpTextureProjGradOffset,
    EOpTextureGather,
    EOpTextureGatherOffset,
    EOpTextureGatherOffsets,
    EOpTextureClamp,
    EOpTextureOffsetClamp,
    EOpTextureGradClamp,
    EOpTextureGradOffsetClamp,
    EOpTextureGatherLod,
    EOpTextureGatherLodOffset,
    EOpTextureGatherLodOffsets,
    EOpFragmentMaskFetch,
    EOpFragmentFetch,

    // Sparse (ARB_sparse_texture2-style) variants, bracketed by their own guards.
    EOpSparseTextureGuardBegin,

    EOpSparseTexture,
    EOpSparseTextureLod,
    EOpSparseTextureOffset,
    EOpSparseTextureFetch,
    EOpSparseTextureFetchOffset,
    EOpSparseTextureLodOffset,
    EOpSparseTextureGrad,
    EOpSparseTextureGradOffset,
    EOpSparseTextureGather,
    EOpSparseTextureGatherOffset,
    EOpSparseTextureGatherOffsets,
    EOpSparseTexelsResident,
    EOpSparseTextureClamp,
    EOpSparseTextureOffsetClamp,
    EOpSparseTextureGradClamp,
    EOpSparseTextureGradOffsetClamp,
    EOpSparseTextureGatherLod,
    EOpSparseTextureGatherLodOffset,
    EOpSparseTextureGatherLodOffsets,

    EOpSparseTextureGuardEnd,

    EOpImageFootprintGuardBegin,
    EOpImageSampleFootprintNV,
    EOpImageSampleFootprintClampNV,
    EOpImageSampleFootprintLodNV,
    EOpImageSampleFootprintGradNV,
    EOpImageSampleFootprintGradClampNV,
    EOpImageFootprintGuardEnd,
    EOpSamplingGuardEnd,
    EOpTextureGuardEnd,

    //
    // Integer operations
    //

    EOpAddCarry,
    EOpSubBorrow,
    EOpUMulExtended,
    EOpIMulExtended,
    EOpBitfieldExtract,
    EOpBitfieldInsert,
    EOpBitFieldReverse,
    EOpBitCount,
    EOpFindLSB,
    EOpFindMSB,

    EOpCountLeadingZeros,
    EOpCountTrailingZeros,
    EOpAbsDifference,
    EOpAddSaturate,
    EOpSubSaturate,
    EOpAverage,
    EOpAverageRounded,
    EOpMul32x16,

    // Ray tracing pipeline (NV/KHR) and mesh shading operations.
    EOpTraceNV,
    EOpTraceRayMotionNV,
    EOpTraceKHR,
    EOpReportIntersection,
    EOpIgnoreIntersectionNV,
    EOpTerminateRayNV,
    EOpExecuteCallableNV,
    EOpExecuteCallableKHR,
    EOpWritePackedPrimitiveIndices4x8NV,
    EOpEmitMeshTasksEXT,
    EOpSetMeshOutputsEXT,

    //
    // GL_EXT_ray_query operations
    //

    EOpRayQueryInitialize,
    EOpRayQueryTerminate,
    EOpRayQueryGenerateIntersection,
    EOpRayQueryConfirmIntersection,
    EOpRayQueryProceed,
    EOpRayQueryGetIntersectionType,
    EOpRayQueryGetRayTMin,
    EOpRayQueryGetRayFlags,
    EOpRayQueryGetIntersectionT,
    EOpRayQueryGetIntersectionInstanceCustomIndex,
    EOpRayQueryGetIntersectionInstanceId,
    EOpRayQueryGetIntersectionInstanceShaderBindingTableRecordOffset,
    EOpRayQueryGetIntersectionGeometryIndex,
    EOpRayQueryGetIntersectionPrimitiveIndex,
    EOpRayQueryGetIntersectionBarycentrics,
    EOpRayQueryGetIntersectionFrontFace,
    EOpRayQueryGetIntersectionCandidateAABBOpaque,
    EOpRayQueryGetIntersectionObjectRayDirection,
    EOpRayQueryGetIntersectionObjectRayOrigin,
    EOpRayQueryGetWorldRayDirection,
    EOpRayQueryGetWorldRayOrigin,
    EOpRayQueryGetIntersectionObjectToWorld,
    EOpRayQueryGetIntersectionWorldToObject,

    //
    // GL_NV_shader_invocation_reorder
    //

    EOpHitObjectTraceRayNV,
    EOpHitObjectTraceRayMotionNV,
    EOpHitObjectRecordHitNV,
    EOpHitObjectRecordHitMotionNV,
    EOpHitObjectRecordHitWithIndexNV,
    EOpHitObjectRecordHitWithIndexMotionNV,
    EOpHitObjectRecordMissNV,
    EOpHitObjectRecordMissMotionNV,
    EOpHitObjectRecordEmptyNV,
    EOpHitObjectExecuteShaderNV,
    EOpHitObjectIsEmptyNV,
    EOpHitObjectIsMissNV,
    EOpHitObjectIsHitNV,
    EOpHitObjectGetRayTMinNV,
    EOpHitObjectGetRayTMaxNV,
    EOpHitObjectGetObjectRayOriginNV,
    EOpHitObjectGetObjectRayDirectionNV,
    EOpHitObjectGetWorldRayOriginNV,
    EOpHitObjectGetWorldRayDirectionNV,
    EOpHitObjectGetWorldToObjectNV,
    EOpHitObjectGetObjectToWorldNV,
    EOpHitObjectGetInstanceCustomIndexNV,
    EOpHitObjectGetInstanceIdNV,
    EOpHitObjectGetGeometryIndexNV,
    EOpHitObjectGetPrimitiveIndexNV,
    EOpHitObjectGetHitKindNV,
    EOpHitObjectGetShaderBindingTableRecordIndexNV,
    EOpHitObjectGetShaderRecordBufferHandleNV,
    EOpHitObjectGetAttributesNV,
    EOpHitObjectGetCurrentTimeNV,
    EOpReorderThreadNV,
    EOpFetchMicroTriangleVertexPositionNV,
    EOpFetchMicroTriangleVertexBarycentricNV,

    //
    // HLSL operations
    //

    EOpClip,                // discard if input value < 0
    EOpIsFinite,
    EOpLog10,               // base 10 log
    EOpRcp,                 // 1/x
    EOpSaturate,            // clamp from 0 to 1
    EOpSinCos,              // sin and cos in out parameters
    EOpGenMul,              // mul(x,y) on any of mat/vec/scalars
    EOpDst,                 // x = 1, y=src0.y * src1.y, z=src0.z, w=src1.w
    EOpInterlockedAdd,      // atomic ops, but uses [optional] out arg instead of return
    EOpInterlockedAnd,      // ...
    EOpInterlockedCompareExchange, // ...
    EOpInterlockedCompareStore,    // ...
    EOpInterlockedExchange, // ...
    EOpInterlockedMax,      // ...
    EOpInterlockedMin,      // ...
    EOpInterlockedOr,       // ...
    EOpInterlockedXor,      // ...
    EOpAllMemoryBarrierWithGroupSync,      // memory barriers without non-hlsl AST equivalents
    EOpDeviceMemoryBarrier,                // ...
    EOpDeviceMemoryBarrierWithGroupSync,   // ...
    EOpWorkgroupMemoryBarrier,             // ...
    EOpWorkgroupMemoryBarrierWithGroupSync, // ...
    EOpEvaluateAttributeSnapped,           // InterpolateAtOffset with int position on 16x16 grid
    EOpF32tof16,                           // HLSL conversion: half of a PackHalf2x16
    EOpF16tof32,                           // HLSL conversion: half of an UnpackHalf2x16
    EOpLit,                                // HLSL lighting coefficient vector
    EOpTextureBias,                        // HLSL texture bias: will be lowered to EOpTexture
    EOpAsDouble,                           // slightly different from EOpUint64BitsToDouble
    EOpD3DCOLORtoUBYTE4,                   // convert and swizzle 4-component color to UBYTE4 range

    EOpMethodSample,                 // Texture object methods. These are translated to existing
    EOpMethodSampleBias,             // AST methods, and exist to represent HLSL semantics until that
    EOpMethodSampleCmp,              // translation is performed. See HlslParseContext::decomposeSampleMethods().
    EOpMethodSampleCmpLevelZero,     // ...
    EOpMethodSampleGrad,             // ...
    EOpMethodSampleLevel,            // ...
    EOpMethodLoad,                   // ...
    EOpMethodGetDimensions,          // ...
    EOpMethodGetSamplePosition,      // ...
    EOpMethodGather,                 // ...
    EOpMethodCalculateLevelOfDetail, // ...
    EOpMethodCalculateLevelOfDetailUnclamped, // ...

    // Load already defined above for textures
    EOpMethodLoad2,                  // Structure buffer object methods. These are translated to existing
    EOpMethodLoad3,                  // AST methods, and exist to represent HLSL semantics until that
    EOpMethodLoad4,                  // translation is performed. See HlslParseContext::decomposeSampleMethods().
    EOpMethodStore,                  // ...
    EOpMethodStore2,                 // ...
    EOpMethodStore3,                 // ...
    EOpMethodStore4,                 // ...
    EOpMethodIncrementCounter,       // ...
    EOpMethodDecrementCounter,       // ...
    // EOpMethodAppend is defined for geo shaders below
    EOpMethodConsume,

    // SM5 texture methods
    EOpMethodGatherRed,      // These are covered under the above EOpMethodSample comment about
    EOpMethodGatherGreen,    // translation to existing AST opcodes. They exist temporarily
    EOpMethodGatherBlue,     // because HLSL arguments are slightly different.
    EOpMethodGatherAlpha,    // ...
    EOpMethodGatherCmp,      // ...
    EOpMethodGatherCmpRed,   // ...
    EOpMethodGatherCmpGreen, // ...
    EOpMethodGatherCmpBlue,  // ...
    EOpMethodGatherCmpAlpha, // ...

    // geometry methods
    EOpMethodAppend,         // Geometry shader methods
    EOpMethodRestartStrip,   // ...

    // matrix
    EOpMatrixSwizzle,        // select multiple matrix components (non-column)

    // SM6 wave ops
    EOpWaveGetLaneCount,     // Will decompose to gl_SubgroupSize.
    EOpWaveGetLaneIndex,     // Will decompose to gl_SubgroupInvocationID.
    EOpWaveActiveCountBits,  // Will decompose to subgroupBallotBitCount(subgroupBallot()).
    EOpWavePrefixCountBits,  // Will decompose to subgroupBallotInclusiveBitCount(subgroupBallot()).

    // GL_EXT_expect_assume
    EOpAssumeEXT,
    EOpExpectEXT,

    // Shader Clock Ops
    EOpReadClockSubgroupKHR,
    EOpReadClockDeviceKHR,

    // GL_EXT_ray_tracing_position_fetch
    EOpRayQueryGetIntersectionTriangleVertexPositionsEXT,

    // Shader tile image ops
    EOpStencilAttachmentReadEXT, // Fragment only
    EOpDepthAttachmentReadEXT,   // Fragment only

    // Image processing
    EOpImageSampleWeightedQCOM,
    EOpImageBoxFilterQCOM,
    EOpImageBlockMatchSADQCOM,
    EOpImageBlockMatchSSDQCOM,

    // Image processing2
    EOpImageBlockMatchWindowSSDQCOM,
    EOpImageBlockMatchWindowSADQCOM,
    EOpImageBlockMatchGatherSSDQCOM,
    EOpImageBlockMatchGatherSADQCOM,
};
958
IsOpNumericConv(const TOperator op)959 inline bool IsOpNumericConv(const TOperator op) {
960 return op == EOpConvNumeric;
961 }
962
// Linkage classification for a symbol.
// NOTE(review): semantics inferred from the enumerator names — confirm
// against the declaration sites that consume this enum.
enum TLinkType {
    ELinkNone,   // no special linkage
    ELinkExport, // presumably marked for export across the link boundary
};
967
// Forward declarations of the traverser and the concrete AST node classes,
// needed so TIntermNode (below) can declare a safe-downcast accessor
// (getAs*Node) for each concrete kind.
class TIntermTraverser;
class TIntermOperator;
class TIntermAggregate;
class TIntermUnary;
class TIntermBinary;
class TIntermConstantUnion;
class TIntermSelection;
class TIntermSwitch;
class TIntermBranch;
class TIntermTyped;
class TIntermMethod;
class TIntermSymbol;
class TIntermLoop;
981
982 } // end namespace glslang
983
984 //
985 // Base class for the tree nodes
986 //
987 // (Put outside the glslang namespace, as it's used as part of the external interface.)
988 //
class TIntermNode {
public:
    // Nodes are allocated from the per-thread pool allocator; they are
    // released en masse with the pool rather than individually deleted.
    POOL_ALLOCATOR_NEW_DELETE(glslang::GetThreadPoolAllocator())

    TIntermNode() { loc.init(); }
    // Source location this node was parsed from (for diagnostics/debug info).
    virtual const glslang::TSourceLoc& getLoc() const { return loc; }
    virtual void setLoc(const glslang::TSourceLoc& l) { loc = l; }
    // Visitor entry point; each concrete subclass dispatches to the traverser.
    virtual void traverse(glslang::TIntermTraverser*) = 0;

    // Safe-downcast accessors: each returns 'this' in the matching subclass
    // and nullptr here, avoiding dynamic_cast/RTTI.
    virtual glslang::TIntermTyped* getAsTyped() { return nullptr; }
    virtual glslang::TIntermOperator* getAsOperator() { return nullptr; }
    virtual glslang::TIntermConstantUnion* getAsConstantUnion() { return nullptr; }
    virtual glslang::TIntermAggregate* getAsAggregate() { return nullptr; }
    virtual glslang::TIntermUnary* getAsUnaryNode() { return nullptr; }
    virtual glslang::TIntermBinary* getAsBinaryNode() { return nullptr; }
    virtual glslang::TIntermSelection* getAsSelectionNode() { return nullptr; }
    virtual glslang::TIntermSwitch* getAsSwitchNode() { return nullptr; }
    virtual glslang::TIntermMethod* getAsMethodNode() { return nullptr; }
    virtual glslang::TIntermSymbol* getAsSymbolNode() { return nullptr; }
    virtual glslang::TIntermBranch* getAsBranchNode() { return nullptr; }
    virtual glslang::TIntermLoop* getAsLoopNode() { return nullptr; }

    // Const overloads of the same safe-downcast accessors.
    virtual const glslang::TIntermTyped* getAsTyped() const { return nullptr; }
    virtual const glslang::TIntermOperator* getAsOperator() const { return nullptr; }
    virtual const glslang::TIntermConstantUnion* getAsConstantUnion() const { return nullptr; }
    virtual const glslang::TIntermAggregate* getAsAggregate() const { return nullptr; }
    virtual const glslang::TIntermUnary* getAsUnaryNode() const { return nullptr; }
    virtual const glslang::TIntermBinary* getAsBinaryNode() const { return nullptr; }
    virtual const glslang::TIntermSelection* getAsSelectionNode() const { return nullptr; }
    virtual const glslang::TIntermSwitch* getAsSwitchNode() const { return nullptr; }
    virtual const glslang::TIntermMethod* getAsMethodNode() const { return nullptr; }
    virtual const glslang::TIntermSymbol* getAsSymbolNode() const { return nullptr; }
    virtual const glslang::TIntermBranch* getAsBranchNode() const { return nullptr; }
    virtual const glslang::TIntermLoop* getAsLoopNode() const { return nullptr; }
    virtual ~TIntermNode() { }

protected:
    // Copying is restricted to subclasses (declared, not publicly usable).
    TIntermNode(const TIntermNode&);
    TIntermNode& operator=(const TIntermNode&);
    glslang::TSourceLoc loc; // where in the source this node originated
};
1029
1030 namespace glslang {
1031
1032 //
1033 // This is just to help yacc.
1034 //
//
// This is just to help yacc: a plain aggregate holding two nodes so the
// parser can pass them around as a single semantic value.
//
struct TIntermNodePair {
    TIntermNode* node1; // first node of the pair
    TIntermNode* node2; // second node of the pair
};
1039
1040 //
1041 // Intermediate class for nodes that have a type.
1042 //
//
// Intermediate class for nodes that have a type.
//
class TIntermTyped : public TIntermNode {
public:
    // Both constructors shallow-copy the type: sub-structures (e.g. array
    // sizes, struct contents) are shared with the source TType, not cloned.
    TIntermTyped(const TType& t) { type.shallowCopy(t); }
    TIntermTyped(TBasicType basicType) { TType bt(basicType); type.shallowCopy(bt); }
    // Safe downcast: a typed node identifies itself.
    virtual TIntermTyped* getAsTyped() { return this; }
    virtual const TIntermTyped* getAsTyped() const { return this; }
    virtual void setType(const TType& t) { type.shallowCopy(t); }
    virtual const TType& getType() const { return type; }
    virtual TType& getWritableType() { return type; }

    // Convenience forwarders that delegate to the node's TType.
    virtual TBasicType getBasicType() const { return type.getBasicType(); }
    virtual TQualifier& getQualifier() { return type.getQualifier(); }
    virtual const TQualifier& getQualifier() const { return type.getQualifier(); }
    virtual TArraySizes* getArraySizes() { return type.getArraySizes(); }
    virtual const TArraySizes* getArraySizes() const { return type.getArraySizes(); }
    // Pushes this node's precision down through the expression (defined elsewhere).
    virtual void propagatePrecision(TPrecisionQualifier);
    virtual int getVectorSize() const { return type.getVectorSize(); }
    virtual int getMatrixCols() const { return type.getMatrixCols(); }
    virtual int getMatrixRows() const { return type.getMatrixRows(); }
    virtual bool isMatrix() const { return type.isMatrix(); }
    virtual bool isArray() const { return type.isArray(); }
    virtual bool isVector() const { return type.isVector(); }
    virtual bool isScalar() const { return type.isScalar(); }
    virtual bool isStruct() const { return type.isStruct(); }
    virtual bool isFloatingDomain() const { return type.isFloatingDomain(); }
    virtual bool isIntegerDomain() const { return type.isIntegerDomain(); }
    bool isAtomic() const { return type.isAtomic(); }
    bool isReference() const { return type.isReference(); }
    // Human-readable description of the type (for messages/debugging).
    TString getCompleteString(bool enhanced = false) const { return type.getCompleteString(enhanced); }

protected:
    TIntermTyped& operator=(const TIntermTyped&); // assignment restricted to subclasses
    TType type; // the type of the value this node produces
};
1077
1078 //
1079 // Handle for, do-while, and while loops.
1080 //
class TIntermLoop : public TIntermNode {
public:
    TIntermLoop(TIntermNode* aBody, TIntermTyped* aTest, TIntermTyped* aTerminal, bool testFirst) :
        body(aBody),
        test(aTest),
        terminal(aTerminal),
        first(testFirst),
        unroll(false),
        dontUnroll(false),
        dependency(0),
        minIterations(0),
        maxIterations(iterationsInfinite),
        iterationMultiple(1),
        peelCount(0),
        partialCount(0)
    { }

    virtual TIntermLoop* getAsLoopNode() { return this; }
    virtual const TIntermLoop* getAsLoopNode() const { return this; }
    virtual void traverse(TIntermTraverser*);
    TIntermNode*  getBody() const { return body; }
    TIntermTyped* getTest() const { return test; }
    TIntermTyped* getTerminal() const { return terminal; }
    bool testFirst() const { return first; }

    // Request unrolling.
    void setUnroll()     { unroll = true; }
    // Request no unrolling; mutually exclusive with peel/partial counts,
    // so those hints are cleared here.
    void setDontUnroll() {
        dontUnroll = true;
        peelCount = 0;
        partialCount = 0;
    }
    bool getUnroll()     const { return unroll; }
    bool getDontUnroll() const { return dontUnroll; }

    static const unsigned int dependencyInfinite = 0xFFFFFFFF;
    static const unsigned int iterationsInfinite = 0xFFFFFFFF;
    // Loop dependency hint; stored in an unsigned member.
    void setLoopDependency(int d) { dependency = d; }
    int getLoopDependency() const { return dependency; }

    // SPIR-V loop-control hints (see the SPIR-V spec's Loop Control bits).
    void setMinIterations(unsigned int v) { minIterations = v; }
    unsigned int getMinIterations() const { return minIterations; }
    void setMaxIterations(unsigned int v) { maxIterations = v; }
    unsigned int getMaxIterations() const { return maxIterations; }
    void setIterationMultiple(unsigned int v) { iterationMultiple = v; }
    unsigned int getIterationMultiple() const { return iterationMultiple; }
    // Setting a peel count cancels a previous "don't unroll" request.
    void setPeelCount(unsigned int v) {
        peelCount = v;
        dontUnroll = false;
    }
    unsigned int getPeelCount() const { return peelCount; }
    // Setting a partial count cancels a previous "don't unroll" request.
    void setPartialCount(unsigned int v) {
        partialCount = v;
        dontUnroll = false;
    }
    unsigned int getPartialCount() const { return partialCount; }

protected:
    TIntermNode* body;       // code to loop over
    TIntermTyped* test;      // exit condition associated with loop, could be 0 for 'for' loops
    TIntermTyped* terminal;  // exists for for-loops
    bool first;              // true for while and for, not for do-while
    bool unroll;             // true if unroll requested
    bool dontUnroll;         // true if request to not unroll
    unsigned int dependency; // loop dependency hint; 0 means not set or unknown
    unsigned int minIterations;      // as per the SPIR-V specification
    unsigned int maxIterations;      // as per the SPIR-V specification
    unsigned int iterationMultiple;  // as per the SPIR-V specification
    unsigned int peelCount;          // as per the SPIR-V specification
    unsigned int partialCount;       // as per the SPIR-V specification
};
1151
1152 //
1153 // Handle case, break, continue, return, and kill.
1154 //
class TIntermBranch : public TIntermNode {
public:
    TIntermBranch(TOperator op, TIntermTyped* e) :
        flowOp(op),
        expression(e) { }
    virtual TIntermBranch* getAsBranchNode() { return this; }
    virtual const TIntermBranch* getAsBranchNode() const { return this; }
    virtual void traverse(TIntermTraverser*);
    // Which branch this is: case, default, break, continue, return, or kill.
    TOperator getFlowOp() const { return flowOp; }
    // The associated expression (e.g., a return value); may be null.
    TIntermTyped* getExpression() const { return expression; }
    void setExpression(TIntermTyped* pExpression) { expression = pExpression; }
    // Push the parent's precision onto the expression (defined elsewhere).
    void updatePrecision(TPrecisionQualifier parentPrecision);
protected:
    TOperator flowOp;         // the branching operation
    TIntermTyped* expression; // value carried by the branch, or null
};
1171
1172 //
1173 // Represent method names before seeing their calling signature
1174 // or resolving them to operations. Just an expression as the base object
1175 // and a textural name.
1176 //
1177 class TIntermMethod : public TIntermTyped {
1178 public:
TIntermMethod(TIntermTyped * o,const TType & t,const TString & m)1179 TIntermMethod(TIntermTyped* o, const TType& t, const TString& m) : TIntermTyped(t), object(o), method(m) { }
getAsMethodNode()1180 virtual TIntermMethod* getAsMethodNode() { return this; }
getAsMethodNode()1181 virtual const TIntermMethod* getAsMethodNode() const { return this; }
getMethodName()1182 virtual const TString& getMethodName() const { return method; }
getObject()1183 virtual TIntermTyped* getObject() const { return object; }
1184 virtual void traverse(TIntermTraverser*);
setExport()1185 void setExport() { linkType = ELinkExport; }
1186 protected:
1187 TIntermTyped* object;
1188 TString method;
1189 TLinkType linkType;
1190 };
1191
1192 //
1193 // Nodes that correspond to symbols or constants in the source code.
1194 //
class TIntermSymbol : public TIntermTyped {
public:
    // if symbol is initialized as symbol(sym), the memory comes from the pool allocator of sym. If sym comes from
    // per process threadPoolAllocator, then it causes increased memory usage per compile
    // it is essential to use "symbol = sym" to assign to symbol
    TIntermSymbol(long long i, const TString& n, EShLanguage s, const TType& t, const TString* mn = nullptr)
        : TIntermTyped(t), id(i), flattenSubset(-1), stage(s), constSubtree(nullptr) {
        name = n;
        // Use the supplied mangled name when present; otherwise fall back
        // to the plain name.
        if (mn) {
            mangledName = *mn;
        } else {
            mangledName = n;
        }
    }
    virtual long long getId() const { return id; }
    virtual void changeId(long long i) { id = i; }
    virtual const TString& getName() const { return name; }
    virtual const TString& getMangledName() const { return mangledName; }
    virtual void traverse(TIntermTraverser*);
    virtual TIntermSymbol* getAsSymbolNode() { return this; }
    virtual const TIntermSymbol* getAsSymbolNode() const { return this; }
    // Front-end compile-time constant value of the symbol, if it has one.
    void setConstArray(const TConstUnionArray& c) { constArray = c; }
    const TConstUnionArray& getConstArray() const { return constArray; }
    // Expression tree associated with a constant symbol; exact semantics
    // are set by the callers of setConstSubtree() — confirm there.
    void setConstSubtree(TIntermTyped* subtree) { constSubtree = subtree; }
    TIntermTyped* getConstSubtree() const { return constSubtree; }
    void setFlattenSubset(int subset) { flattenSubset = subset; }
    virtual const TString& getAccessName() const;

    int getFlattenSubset() const { return flattenSubset; } // -1 means full object

    // This is meant for cases where a node has already been constructed, and
    // later on, it becomes necessary to switch to a different symbol.
    virtual void switchId(long long newId) { id = newId; }
    EShLanguage getStage() const { return stage; }

protected:
    long long id;                // the unique id of the symbol this node represents
    int flattenSubset;           // how deeply the flattened object rooted at id has been dereferenced
    TString name;                // the name of the symbol this node represents
    EShLanguage stage;           // shader stage the symbol belongs to
    TString mangledName;         // mangled function name, or a copy of name if not a function
    TConstUnionArray constArray; // if the symbol is a front-end compile-time constant, this is its value
    TIntermTyped* constSubtree;  // constant expression tree, or nullptr
};
1239
class TIntermConstantUnion : public TIntermTyped {
public:
    TIntermConstantUnion(const TConstUnionArray& ua, const TType& t) : TIntermTyped(t), constArray(ua), literal(false) { }
    const TConstUnionArray& getConstArray() const { return constArray; }
    virtual TIntermConstantUnion* getAsConstantUnion() { return this; }
    virtual const TIntermConstantUnion* getAsConstantUnion() const { return this; }
    virtual void traverse(TIntermTraverser*);
    // Constant-folding entry points (defined elsewhere): apply an operation
    // to this constant, yielding a new node.
    virtual TIntermTyped* fold(TOperator, const TIntermTyped*) const;
    virtual TIntermTyped* fold(TOperator, const TType&) const;
    void setLiteral() { literal = true; }
    void setExpression() { literal = false; }
    bool isLiteral() const { return literal; }

protected:
    // Declared but not defined: copy-assignment is disallowed
    // (constArray is const anyway).
    TIntermConstantUnion& operator=(const TIntermConstantUnion&);

    const TConstUnionArray constArray; // the constant's value(s)
    bool literal;  // true if node represents a literal in the source code
};
1259
// Represent the independent aspects of a texturing TOperator.
// All flags carry in-class initializers so a default-constructed instance is
// fully initialized even if crackTexture() is never called on it (previously
// every member was indeterminate until crackTexture() assigned it).
struct TCrackedTextureOp {
    bool query = false;         // size/lod/levels/samples query ops
    bool proj = false;          // projective variants
    bool lod = false;           // explicit level of detail
    bool fetch = false;         // texel fetch
    bool offset = false;        // single texel offset
    bool offsets = false;       // per-component offsets (gather)
    bool gather = false;        // texture gather
    bool grad = false;          // explicit gradients
    bool subpass = false;       // subpass input access
    bool lodClamp = false;      // LOD clamp variants
    bool fragMask = false;      // fragment-mask fetch ops
    bool attachmentEXT = false; // EOpColorAttachmentReadEXT
};
1275
1276 //
1277 // Intermediate class for node types that hold operators.
1278 //
class TIntermOperator : public TIntermTyped {
public:
    virtual       TIntermOperator* getAsOperator()       { return this; }
    virtual const TIntermOperator* getAsOperator() const { return this; }
    TOperator getOp() const { return op; }
    void setOp(TOperator newOp) { op = newOp; }
    // Defined elsewhere.
    bool modifiesState() const;
    bool isConstructor() const;
    // Classification helpers: each tests 'op' against the guard values that
    // bracket the corresponding range in the TOperator enumeration.
    bool isTexture()  const { return op > EOpTextureGuardBegin  && op < EOpTextureGuardEnd; }
    bool isSampling() const { return op > EOpSamplingGuardBegin && op < EOpSamplingGuardEnd; }
    bool isImage()    const { return op > EOpImageGuardBegin    && op < EOpImageGuardEnd; }
    bool isSparseTexture() const { return op > EOpSparseTextureGuardBegin && op < EOpSparseTextureGuardEnd; }
    bool isImageFootprint() const { return op > EOpImageFootprintGuardBegin && op < EOpImageFootprintGuardEnd; }
    bool isSparseImage()   const { return op == EOpSparseImageLoad; }
    bool isSubgroup() const { return op > EOpSubgroupGuardStart && op < EOpSubgroupGuardStop; }

    void setOperationPrecision(TPrecisionQualifier p) { operationPrecision = p; }
    // The precision the operation itself runs at: 'operationPrecision' when
    // set, otherwise the result type's precision.
    TPrecisionQualifier getOperationPrecision() const { return operationPrecision != EpqNone ?
                                                               operationPrecision :
                                                               type.getQualifier().precision; }
    // Describe the result type, annotated with the operation precision when
    // it differs from the result precision; for diagnostics.
    TString getCompleteString() const
    {
        TString cs = type.getCompleteString();
        if (getOperationPrecision() != type.getQualifier().precision) {
            cs += ", operation at ";
            cs += GetPrecisionQualifierString(getOperationPrecision());
        }

        return cs;
    }

    // Crack the op into the individual dimensions of texturing operation.
    void crackTexture(TSampler sampler, TCrackedTextureOp& cracked) const
    {
        // Start with every aspect off, then decode 'op'.
        cracked.query = false;
        cracked.proj = false;
        cracked.lod = false;
        cracked.fetch = false;
        cracked.offset = false;
        cracked.offsets = false;
        cracked.gather = false;
        cracked.grad = false;
        cracked.subpass = false;
        cracked.attachmentEXT = false;
        cracked.lodClamp = false;
        cracked.fragMask = false;

        switch (op) {
        case EOpImageQuerySize:
        case EOpImageQuerySamples:
        case EOpTextureQuerySize:
        case EOpTextureQueryLod:
        case EOpTextureQueryLevels:
        case EOpTextureQuerySamples:
        case EOpSparseTexelsResident:
            cracked.query = true;
            break;
        case EOpTexture:
        case EOpSparseTexture:
            break;
        case EOpTextureProj:
            cracked.proj = true;
            break;
        case EOpTextureLod:
        case EOpSparseTextureLod:
            cracked.lod = true;
            break;
        case EOpTextureOffset:
        case EOpSparseTextureOffset:
            cracked.offset = true;
            break;
        case EOpTextureFetch:
        case EOpSparseTextureFetch:
            cracked.fetch = true;
            // For these sampler dimensions, fetch also carries an LOD operand.
            if (sampler.is1D() || (sampler.dim == Esd2D && ! sampler.isMultiSample()) || sampler.dim == Esd3D)
                cracked.lod = true;
            break;
        case EOpTextureFetchOffset:
        case EOpSparseTextureFetchOffset:
            cracked.fetch = true;
            cracked.offset = true;
            // Same LOD rule as plain fetch above.
            if (sampler.is1D() || (sampler.dim == Esd2D && ! sampler.isMultiSample()) || sampler.dim == Esd3D)
                cracked.lod = true;
            break;
        case EOpTextureProjOffset:
            cracked.offset = true;
            cracked.proj = true;
            break;
        case EOpTextureLodOffset:
        case EOpSparseTextureLodOffset:
            cracked.offset = true;
            cracked.lod = true;
            break;
        case EOpTextureProjLod:
            cracked.lod = true;
            cracked.proj = true;
            break;
        case EOpTextureProjLodOffset:
            cracked.offset = true;
            cracked.lod = true;
            cracked.proj = true;
            break;
        case EOpTextureGrad:
        case EOpSparseTextureGrad:
            cracked.grad = true;
            break;
        case EOpTextureGradOffset:
        case EOpSparseTextureGradOffset:
            cracked.grad = true;
            cracked.offset = true;
            break;
        case EOpTextureProjGrad:
            cracked.grad = true;
            cracked.proj = true;
            break;
        case EOpTextureProjGradOffset:
            cracked.grad = true;
            cracked.offset = true;
            cracked.proj = true;
            break;
        case EOpTextureClamp:
        case EOpSparseTextureClamp:
            cracked.lodClamp = true;
            break;
        case EOpTextureOffsetClamp:
        case EOpSparseTextureOffsetClamp:
            cracked.offset = true;
            cracked.lodClamp = true;
            break;
        case EOpTextureGradClamp:
        case EOpSparseTextureGradClamp:
            cracked.grad = true;
            cracked.lodClamp = true;
            break;
        case EOpTextureGradOffsetClamp:
        case EOpSparseTextureGradOffsetClamp:
            cracked.grad = true;
            cracked.offset = true;
            cracked.lodClamp = true;
            break;
        case EOpTextureGather:
        case EOpSparseTextureGather:
            cracked.gather = true;
            break;
        case EOpTextureGatherOffset:
        case EOpSparseTextureGatherOffset:
            cracked.gather = true;
            cracked.offset = true;
            break;
        case EOpTextureGatherOffsets:
        case EOpSparseTextureGatherOffsets:
            cracked.gather = true;
            cracked.offsets = true;
            break;
        case EOpTextureGatherLod:
        case EOpSparseTextureGatherLod:
            cracked.gather = true;
            cracked.lod = true;
            break;
        case EOpTextureGatherLodOffset:
        case EOpSparseTextureGatherLodOffset:
            cracked.gather = true;
            cracked.offset = true;
            cracked.lod = true;
            break;
        case EOpTextureGatherLodOffsets:
        case EOpSparseTextureGatherLodOffsets:
            cracked.gather = true;
            cracked.offsets = true;
            cracked.lod = true;
            break;
        case EOpImageLoadLod:
        case EOpImageStoreLod:
        case EOpSparseImageLoadLod:
            cracked.lod = true;
            break;
        case EOpFragmentMaskFetch:
            // Subpass only when the sampler is a subpass input.
            cracked.subpass = sampler.dim == EsdSubpass;
            cracked.fragMask = true;
            break;
        case EOpFragmentFetch:
            cracked.subpass = sampler.dim == EsdSubpass;
            cracked.fragMask = true;
            break;
        case EOpImageSampleFootprintNV:
            break;
        case EOpImageSampleFootprintClampNV:
            cracked.lodClamp = true;
            break;
        case EOpImageSampleFootprintLodNV:
            cracked.lod = true;
            break;
        case EOpImageSampleFootprintGradNV:
            cracked.grad = true;
            break;
        case EOpImageSampleFootprintGradClampNV:
            cracked.lodClamp = true;
            cracked.grad = true;
            break;
        case EOpSubpassLoad:
        case EOpSubpassLoadMS:
            cracked.subpass = true;
            break;
        case EOpColorAttachmentReadEXT:
            cracked.attachmentEXT = true;
            break;
        default:
            break;
        }
    }

protected:
    TIntermOperator(TOperator o) : TIntermTyped(EbtFloat), op(o), operationPrecision(EpqNone) {}
    TIntermOperator(TOperator o, TType& t) : TIntermTyped(t), op(o), operationPrecision(EpqNone) {}
    TOperator op;
    // The result precision is in the inherited TType, and is usually meant to be both
    // the operation precision and the result precision. However, some more complex things,
    // like built-in function calls, distinguish between the two, in which case non-EqpNone
    // 'operationPrecision' overrides the result precision as far as operation precision
    // is concerned.
    TPrecisionQualifier operationPrecision;
};
1501
1502 //
1503 // Nodes for all the basic binary math operators.
1504 //
1505 class TIntermBinary : public TIntermOperator {
1506 public:
TIntermBinary(TOperator o)1507 TIntermBinary(TOperator o) : TIntermOperator(o) {}
1508 virtual void traverse(TIntermTraverser*);
setLeft(TIntermTyped * n)1509 virtual void setLeft(TIntermTyped* n) { left = n; }
setRight(TIntermTyped * n)1510 virtual void setRight(TIntermTyped* n) { right = n; }
getLeft()1511 virtual TIntermTyped* getLeft() const { return left; }
getRight()1512 virtual TIntermTyped* getRight() const { return right; }
getAsBinaryNode()1513 virtual TIntermBinary* getAsBinaryNode() { return this; }
getAsBinaryNode()1514 virtual const TIntermBinary* getAsBinaryNode() const { return this; }
1515 virtual void updatePrecision();
1516 protected:
1517 TIntermTyped* left;
1518 TIntermTyped* right;
1519 };
1520
1521 //
1522 // Nodes for unary math operators.
1523 //
class TIntermUnary : public TIntermOperator {
public:
    TIntermUnary(TOperator o, TType& t) : TIntermOperator(o, t), operand(nullptr) {}
    TIntermUnary(TOperator o) : TIntermOperator(o), operand(nullptr) {}
    virtual void traverse(TIntermTraverser*);
    virtual void setOperand(TIntermTyped* o) { operand = o; }
    virtual       TIntermTyped* getOperand()       { return operand; }
    virtual const TIntermTyped* getOperand() const { return operand; }
    virtual TIntermUnary* getAsUnaryNode() { return this; }
    virtual const TIntermUnary* getAsUnaryNode() const { return this; }
    // Propagate precision to/from the operand (defined elsewhere).
    virtual void updatePrecision();
    // Optional SPIR-V instruction backing this op — presumably for
    // SPIR-V intrinsic support; confirm at the callers of the setter.
    void setSpirvInstruction(const TSpirvInstruction& inst) { spirvInst = inst; }
    const TSpirvInstruction& getSpirvInstruction() const { return spirvInst; }
protected:
    TIntermTyped* operand;       // the single operand; null until setOperand()
    TSpirvInstruction spirvInst; // see setSpirvInstruction()
};
1541
// Ordered list of child nodes (used by aggregate nodes).
typedef TVector<TIntermNode*> TIntermSequence;
// List of storage qualifiers.
typedef TVector<TStorageQualifier> TQualifierList;
1544 //
1545 // Nodes that operate on an arbitrary sized set of children.
1546 //
1547 class TIntermAggregate : public TIntermOperator {
1548 public:
TIntermAggregate()1549 TIntermAggregate() : TIntermOperator(EOpNull), userDefined(false), pragmaTable(nullptr) {
1550 endLoc.init();
1551 }
TIntermAggregate(TOperator o)1552 TIntermAggregate(TOperator o) : TIntermOperator(o), pragmaTable(nullptr) {
1553 endLoc.init();
1554 }
~TIntermAggregate()1555 ~TIntermAggregate() { delete pragmaTable; }
getAsAggregate()1556 virtual TIntermAggregate* getAsAggregate() { return this; }
getAsAggregate()1557 virtual const TIntermAggregate* getAsAggregate() const { return this; }
1558 virtual void updatePrecision();
setOperator(TOperator o)1559 virtual void setOperator(TOperator o) { op = o; }
getSequence()1560 virtual TIntermSequence& getSequence() { return sequence; }
getSequence()1561 virtual const TIntermSequence& getSequence() const { return sequence; }
setName(const TString & n)1562 virtual void setName(const TString& n) { name = n; }
getName()1563 virtual const TString& getName() const { return name; }
1564 virtual void traverse(TIntermTraverser*);
setUserDefined()1565 virtual void setUserDefined() { userDefined = true; }
isUserDefined()1566 virtual bool isUserDefined() { return userDefined; }
getQualifierList()1567 virtual TQualifierList& getQualifierList() { return qualifier; }
getQualifierList()1568 virtual const TQualifierList& getQualifierList() const { return qualifier; }
setOptimize(bool o)1569 void setOptimize(bool o) { optimize = o; }
setDebug(bool d)1570 void setDebug(bool d) { debug = d; }
getOptimize()1571 bool getOptimize() const { return optimize; }
getDebug()1572 bool getDebug() const { return debug; }
1573 void setPragmaTable(const TPragmaTable& pTable);
getPragmaTable()1574 const TPragmaTable& getPragmaTable() const { return *pragmaTable; }
setSpirvInstruction(const TSpirvInstruction & inst)1575 void setSpirvInstruction(const TSpirvInstruction& inst) { spirvInst = inst; }
getSpirvInstruction()1576 const TSpirvInstruction& getSpirvInstruction() const { return spirvInst; }
1577
setEndLoc(TSourceLoc loc)1578 void setEndLoc(TSourceLoc loc) { endLoc = loc; }
getEndLoc()1579 TSourceLoc getEndLoc() const { return endLoc; }
1580
setLinkType(TLinkType l)1581 void setLinkType(TLinkType l) { linkType = l; }
getLinkType()1582 TLinkType getLinkType() const { return linkType; }
1583 protected:
1584 TIntermAggregate(const TIntermAggregate&); // disallow copy constructor
1585 TIntermAggregate& operator=(const TIntermAggregate&); // disallow assignment operator
1586 TIntermSequence sequence;
1587 TQualifierList qualifier;
1588 TString name;
1589 bool userDefined; // used for user defined function names
1590 bool optimize;
1591 bool debug;
1592 TPragmaTable* pragmaTable;
1593 TSpirvInstruction spirvInst;
1594 TLinkType linkType = ELinkNone;
1595
1596 // Marking the end source location of the aggregate.
1597 // This is currently only set for a compound statement or a function body, pointing to '}'.
1598 TSourceLoc endLoc;
1599 };
1600
1601 //
1602 // For if tests.
1603 //
class TIntermSelection : public TIntermTyped {
public:
    // Statement form: the node's type is void.
    TIntermSelection(TIntermTyped* cond, TIntermNode* trueB, TIntermNode* falseB) :
        TIntermTyped(EbtVoid), condition(cond), trueBlock(trueB), falseBlock(falseB),
        shortCircuit(true),
        flatten(false), dontFlatten(false) {}
    // Expression form: the selection produces a value of 'type'.
    TIntermSelection(TIntermTyped* cond, TIntermNode* trueB, TIntermNode* falseB, const TType& type) :
        TIntermTyped(type), condition(cond), trueBlock(trueB), falseBlock(falseB),
        shortCircuit(true),
        flatten(false), dontFlatten(false) {}
    virtual void traverse(TIntermTraverser*);
    virtual TIntermTyped* getCondition() const { return condition; }
    virtual void setCondition(TIntermTyped* c) { condition = c; }
    virtual TIntermNode* getTrueBlock() const { return trueBlock; }
    // NOTE(review): the block setters take TIntermTyped* while the members and
    // getters use TIntermNode*; kept as-is since these are virtual interface.
    virtual void setTrueBlock(TIntermTyped* tb) { trueBlock = tb; }
    virtual TIntermNode* getFalseBlock() const { return falseBlock; }
    virtual void setFalseBlock(TIntermTyped* fb) { falseBlock = fb; }
    virtual TIntermSelection* getAsSelectionNode() { return this; }
    virtual const TIntermSelection* getAsSelectionNode() const { return this; }

    void setNoShortCircuit() { shortCircuit = false; }
    bool getShortCircuit() const { return shortCircuit; }

    void setFlatten()     { flatten = true; }
    void setDontFlatten() { dontFlatten = true; }
    bool getFlatten()     const { return flatten; }
    bool getDontFlatten() const { return dontFlatten; }

protected:
    TIntermTyped* condition; // the test; expression form requires a value
    TIntermNode* trueBlock;  // taken when condition is true
    TIntermNode* falseBlock; // taken when condition is false; may be null
    bool shortCircuit; // normally all if-then-else and all GLSL ?: short-circuit, but HLSL ?: does not
    bool flatten;      // true if flatten requested
    bool dontFlatten;  // true if requested to not flatten
};
1640
1641 //
1642 // For switch statements. Designed use is that a switch will have sequence of nodes
1643 // that are either case/default nodes or a *single* node that represents all the code
1644 // in between (if any) consecutive case/defaults. So, a traversal need only deal with
1645 // 0 or 1 nodes per case/default statement.
1646 //
class TIntermSwitch : public TIntermNode {
public:
    TIntermSwitch(TIntermTyped* cond, TIntermAggregate* b) : condition(cond), body(b),
        flatten(false), dontFlatten(false) {}
    virtual void traverse(TIntermTraverser*);
    // The value being switched on.
    virtual TIntermNode* getCondition() const { return condition; }
    // Sequence of case/default nodes interleaved with at most one code node
    // between consecutive case/defaults (see the comment above the class).
    virtual TIntermAggregate* getBody() const { return body; }
    virtual TIntermSwitch* getAsSwitchNode() { return this; }
    virtual const TIntermSwitch* getAsSwitchNode() const { return this; }

    void setFlatten()     { flatten = true; }
    void setDontFlatten() { dontFlatten = true; }
    bool getFlatten()     const { return flatten; }
    bool getDontFlatten() const { return dontFlatten; }

protected:
    TIntermTyped* condition;
    TIntermAggregate* body;
    bool flatten;     // true if flatten requested
    bool dontFlatten; // true if requested to not flatten
};
1668
// When, relative to a node's children, a visit callback is being made.
enum TVisit
{
    EvPreVisit,   // before any children
    EvInVisit,    // between children (in-order)
    EvPostVisit   // after all children
};
1675
1676 //
1677 // For traversing the tree. User should derive from this,
1678 // put their traversal specific data in it, and then pass
1679 // it to a Traverse method.
1680 //
1681 // When using this, just fill in the methods for nodes you want visited.
1682 // Return false from a pre-visit to skip visiting that node's subtree.
1683 //
1684 // Explicitly set postVisit to true if you want post visiting, otherwise,
1685 // filled in methods will only be called at pre-visit time (before processing
1686 // the subtree). Similarly for inVisit for in-order visiting of nodes with
1687 // multiple children.
1688 //
1689 // If you only want post-visits, explicitly turn off preVisit (and inVisit)
1690 // and turn on postVisit.
1691 //
1692 // In general, for the visit*() methods, return true from interior nodes
1693 // to have the traversal continue on to children.
1694 //
1695 // If you process children yourself, or don't want them processed, return false.
1696 //
1697 class TIntermTraverser {
1698 public:
POOL_ALLOCATOR_NEW_DELETE(glslang::GetThreadPoolAllocator ())1699 POOL_ALLOCATOR_NEW_DELETE(glslang::GetThreadPoolAllocator())
1700 TIntermTraverser(bool preVisit = true, bool inVisit = false, bool postVisit = false, bool rightToLeft = false) :
1701 preVisit(preVisit),
1702 inVisit(inVisit),
1703 postVisit(postVisit),
1704 rightToLeft(rightToLeft),
1705 depth(0),
1706 maxDepth(0) { }
~TIntermTraverser()1707 virtual ~TIntermTraverser() { }
1708
visitSymbol(TIntermSymbol *)1709 virtual void visitSymbol(TIntermSymbol*) { }
visitConstantUnion(TIntermConstantUnion *)1710 virtual void visitConstantUnion(TIntermConstantUnion*) { }
visitBinary(TVisit,TIntermBinary *)1711 virtual bool visitBinary(TVisit, TIntermBinary*) { return true; }
visitUnary(TVisit,TIntermUnary *)1712 virtual bool visitUnary(TVisit, TIntermUnary*) { return true; }
visitSelection(TVisit,TIntermSelection *)1713 virtual bool visitSelection(TVisit, TIntermSelection*) { return true; }
visitAggregate(TVisit,TIntermAggregate *)1714 virtual bool visitAggregate(TVisit, TIntermAggregate*) { return true; }
visitLoop(TVisit,TIntermLoop *)1715 virtual bool visitLoop(TVisit, TIntermLoop*) { return true; }
visitBranch(TVisit,TIntermBranch *)1716 virtual bool visitBranch(TVisit, TIntermBranch*) { return true; }
visitSwitch(TVisit,TIntermSwitch *)1717 virtual bool visitSwitch(TVisit, TIntermSwitch*) { return true; }
1718
getMaxDepth()1719 int getMaxDepth() const { return maxDepth; }
1720
incrementDepth(TIntermNode * current)1721 void incrementDepth(TIntermNode *current)
1722 {
1723 depth++;
1724 maxDepth = (std::max)(maxDepth, depth);
1725 path.push_back(current);
1726 }
1727
decrementDepth()1728 void decrementDepth()
1729 {
1730 depth--;
1731 path.pop_back();
1732 }
1733
getParentNode()1734 TIntermNode *getParentNode()
1735 {
1736 return path.size() == 0 ? nullptr : path.back();
1737 }
1738
1739 const bool preVisit;
1740 const bool inVisit;
1741 const bool postVisit;
1742 const bool rightToLeft;
1743
1744 protected:
1745 TIntermTraverser& operator=(TIntermTraverser&);
1746
1747 int depth;
1748 int maxDepth;
1749
1750 // All the nodes from root to the current node's parent during traversing.
1751 TVector<TIntermNode *> path;
1752 };
1753
1754 // KHR_vulkan_glsl says "Two arrays sized with specialization constants are the same type only if
1755 // sized with the same symbol, involving no operations"
SameSpecializationConstants(TIntermTyped * node1,TIntermTyped * node2)1756 inline bool SameSpecializationConstants(TIntermTyped* node1, TIntermTyped* node2)
1757 {
1758 return node1->getAsSymbolNode() && node2->getAsSymbolNode() &&
1759 node1->getAsSymbolNode()->getId() == node2->getAsSymbolNode()->getId();
1760 }
1761
1762 } // end namespace glslang
1763
1764 #endif // __INTERMEDIATE_H
1765