/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef GrOp_DEFINED
#define GrOp_DEFINED

#include "include/core/SkMatrix.h"
#include "include/core/SkRect.h"
#include "include/core/SkString.h"
#include "include/core/SkTypes.h"
#include "include/private/SkColorData.h"
#include "include/private/base/SkDebug.h"
#include "include/private/base/SkNoncopyable.h"
#include "include/private/base/SkTo.h"
#include "include/private/gpu/ganesh/GrTypesPriv.h"
#include "src/core/SkTraceEvent.h"
#include "src/gpu/ganesh/GrSurfaceProxy.h"

#include <atomic>
#include <cstddef>
#include <cstdint>
#include <memory>
#include <new>
#include <utility>

class GrAppliedClip;
class GrCaps;
class GrDstProxyView;
class GrOpFlushState;
class GrPaint;
class GrRecordingContext;
class GrSurfaceProxyView;
class SkArenaAlloc;
enum class GrXferBarrierFlags;
/**
 * GrOp is the base class for all Ganesh deferred GPU operations. To facilitate reordering and to
 * minimize draw calls, Ganesh does not generate geometry inline with draw calls. Instead, it
 * captures the arguments to the draw and then generates the geometry when flushing. This gives GrOp
 * subclasses complete freedom to decide how/when to combine in order to produce fewer draw calls
 * and minimize state changes.
 *
 * Ops of the same subclass may be merged or chained using combineIfPossible. When two ops merge,
 * one takes on the union of the data and the other is left empty. The merged op becomes responsible
 * for drawing the data from both of the original ops. When ops are chained, each op maintains its
 * own data but they are linked in a list and the head op becomes responsible for executing the
 * work for the whole chain.
 *
 * It is required that chainability is transitive. Moreover, if op A is able to merge with B then
 * it must be the case that any op that can chain with A will either merge or chain with any op
 * that can chain to B.
 *
 * The bounds of the op must contain all the vertices in device space *irrespective* of the clip.
 * The bounds are used in determining which clip elements must be applied and thus the bounds cannot
 * in turn depend upon the clip.
 */
#define GR_OP_SPEW 0
#if GR_OP_SPEW
    #define GrOP_SPEW(code) code
    #define GrOP_INFO(...) SkDebugf(__VA_ARGS__)
#else
    #define GrOP_SPEW(code)
    #define GrOP_INFO(...)
#endif

// Print out op information at flush time
#define GR_FLUSH_TIME_OP_SPEW 0

// A helper macro to generate a class static id
#define DEFINE_OP_CLASS_ID \
    static uint32_t ClassID() { \
        static uint32_t kClassID = GenOpClassID(); \
        return kClassID; \
    }

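// A minimal sketch of how a subclass ties these pieces together, for
// orientation only. `MyDrawOp` is hypothetical, not part of Ganesh; real ops
// normally derive from an intermediate base such as GrDrawOp and override the
// onXxx() virtuals declared below.
//
//     class MyDrawOp final : public GrOp {
//     public:
//         DEFINE_OP_CLASS_ID
//
//         static GrOp::Owner Make(GrRecordingContext* context, const SkRect& deviceRect) {
//             return GrOp::Make<MyDrawOp>(context, deviceRect);
//         }
//
//     private:
//         friend class GrOp;  // GrOp::Make() needs access to the private constructor.
//
//         MyDrawOp(const SkRect& deviceRect) : GrOp(ClassID()) {
//             this->setBounds(deviceRect, HasAABloat::kNo, IsHairline::kNo);
//         }
//     };
//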
class GrOp : private SkNoncopyable {
public:
    using Owner = std::unique_ptr<GrOp>;

    template<typename Op, typename... Args>
    static Owner Make(GrRecordingContext* context, Args&&... args) {
        return Owner{new Op(std::forward<Args>(args)...)};
    }

    template<typename Op, typename... Args>
    static Owner MakeWithProcessorSet(
            GrRecordingContext* context, const SkPMColor4f& color,
            GrPaint&& paint, Args&&... args);

    template<typename Op, typename... Args>
    static Owner MakeWithExtraMemory(
            GrRecordingContext* context, size_t extraSize, Args&&... args) {
        void* bytes = ::operator new(sizeof(Op) + extraSize);
        return Owner{new (bytes) Op(std::forward<Args>(args)...)};
    }

    virtual ~GrOp() = default;

    virtual const char* name() const = 0;

    virtual void visitProxies(const GrVisitProxyFunc&) const {
        // This default implementation assumes the op has no proxies
    }

    enum class CombineResult {
        /**
         * The op that combineIfPossible was called on now represents its own work plus that of
         * the passed op. The passed op should be destroyed without being flushed. Currently it
         * is not legal to merge an op passed to combineIfPossible() if the passed op is already
         * in a chain (though the op on which combineIfPossible() was called may be).
         */
        kMerged,
        /**
         * The caller *may* (but is not required to) chain these ops together. If they are
         * chained then prepare() and execute() will be called on the head op but not the other
         * ops in the chain. The head op will prepare and execute on behalf of all the ops in the
         * chain.
         */
        kMayChain,
        /**
         * The ops cannot be combined.
         */
        kCannotCombine
    };

    // The arenas are the same as what was available when the op was created.
    CombineResult combineIfPossible(GrOp* that, SkArenaAlloc* alloc, const GrCaps& caps);

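    // A hedged sketch of how a subclass's onCombineIfPossible() override might
    // choose between the results above (illustrative only; `MyDrawOp`, fColor,
    // and fRects are hypothetical):
    //
    //     GrOp::CombineResult MyDrawOp::onCombineIfPossible(GrOp* t, SkArenaAlloc*,
    //                                                       const GrCaps&) {
    //         MyDrawOp* that = t->cast<MyDrawOp>();
    //         if (fColor != that->fColor) {
    //             return CombineResult::kCannotCombine;
    //         }
    //         // Absorb the other op's geometry; the caller joins the bounds on merge.
    //         fRects.push_back_n(that->fRects.size(), that->fRects.begin());
    //         return CombineResult::kMerged;
    //     }
    //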
    const SkRect& bounds() const {
        SkASSERT(kUninitialized_BoundsFlag != fBoundsFlags);
        return fBounds;
    }

    void setClippedBounds(const SkRect& clippedBounds) {
        fBounds = clippedBounds;
        // The clipped bounds already incorporate any effect of the bounds flags.
        fBoundsFlags = 0;
    }

    bool hasAABloat() const {
        SkASSERT(fBoundsFlags != kUninitialized_BoundsFlag);
        return SkToBool(fBoundsFlags & kAABloat_BoundsFlag);
    }

    bool hasZeroArea() const {
        SkASSERT(fBoundsFlags != kUninitialized_BoundsFlag);
        return SkToBool(fBoundsFlags & kZeroArea_BoundsFlag);
    }

    void operator delete(void* p) { ::operator delete(p); }

    /**
     * Helper for safely down-casting to a GrOp subclass
     */
    template <typename T> const T& cast() const {
        SkASSERT(T::ClassID() == this->classID());
        return *static_cast<const T*>(this);
    }

    template <typename T> T* cast() {
        SkASSERT(T::ClassID() == this->classID());
        return static_cast<T*>(this);
    }

    uint32_t classID() const { SkASSERT(kIllegalOpID != fClassID); return fClassID; }

    // We lazily initialize the uniqueID because currently the only user is GrAuditTrail
    uint32_t uniqueID() const {
        if (kIllegalOpID == fUniqueID) {
            fUniqueID = GenOpID();
        }
        return fUniqueID;
    }

    /**
     * This can optionally be called before 'prepare' (but after sorting). Each op that overrides
     * onPrePrepare must be prepared to handle both cases (when onPrePrepare has been called
     * ahead of time and when it has not been called).
     */
    void prePrepare(GrRecordingContext* context, const GrSurfaceProxyView& dstView,
                    GrAppliedClip* clip, const GrDstProxyView& dstProxyView,
                    GrXferBarrierFlags renderPassXferBarriers, GrLoadOp colorLoadOp) {
        TRACE_EVENT0_ALWAYS("skia.gpu", TRACE_STR_STATIC(name()));
        this->onPrePrepare(context, dstView, clip, dstProxyView, renderPassXferBarriers,
                           colorLoadOp);
    }

    /**
     * Called prior to executing. The op should perform any resource creation or data transfers
     * necessary before execute() is called.
     */
    void prepare(GrOpFlushState* state) {
        TRACE_EVENT0_ALWAYS("skia.gpu", TRACE_STR_STATIC(name()));
        this->onPrepare(state);
    }

    /** Issues the op's commands to GrGpu. */
    void execute(GrOpFlushState* state, const SkRect& chainBounds) {
        TRACE_EVENT0_ALWAYS("skia.gpu", TRACE_STR_STATIC(name()));
        this->onExecute(state, chainBounds);
    }

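    // Taken together, the expected flush-time sequence for an op (or the head
    // of a chain) is: optional prePrepare() after sorting, then prepare() once
    // resources may be created, then execute() to record the actual GPU
    // commands. This ordering is implied by the comments above rather than
    // enforced here.
    //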
    /** Used for spewing information about ops when debugging. */
#if defined(GPU_TEST_UTILS)
    virtual SkString dumpInfo() const final {
        return SkStringPrintf("%s\nOpBounds: [L: %.2f, T: %.2f, R: %.2f, B: %.2f]",
                              this->onDumpInfo().c_str(), fBounds.fLeft, fBounds.fTop,
                              fBounds.fRight, fBounds.fBottom);
    }
#endif

    /**
     * A helper for iterating over an op chain in a range for loop that also downcasts to a GrOp
     * subclass. Note that the iterator yields const references. E.g.:
     *     for (const MyOpSubClass& op : ChainRange<MyOpSubClass>(this)) {
     *         // ...
     *     }
     */
    template <typename OpSubclass = GrOp> class ChainRange {
    private:
        class Iter {
        public:
            explicit Iter(const OpSubclass* head) : fCurr(head) {}
            inline Iter& operator++() {
                return *this = Iter(static_cast<const OpSubclass*>(fCurr->nextInChain()));
            }
            const OpSubclass& operator*() const { return *fCurr; }
            bool operator!=(const Iter& that) const { return fCurr != that.fCurr; }

        private:
            const OpSubclass* fCurr;
        };
        const OpSubclass* fHead;

    public:
        explicit ChainRange(const OpSubclass* head) : fHead(head) {}
        Iter begin() { return Iter(fHead); }
        Iter end() { return Iter(nullptr); }
    };

    /**
     * Concatenates two op chains. This op must be a tail and the passed op must be a head. The ops
     * must be of the same subclass.
     */
    void chainConcat(GrOp::Owner);
    /** Returns true if this is the head of a chain (including a length 1 chain). */
    bool isChainHead() const { return !fPrevInChain; }
    /** Returns true if this is the tail of a chain (including a length 1 chain). */
    bool isChainTail() const { return !fNextInChain; }
    /** The next op in the chain. */
    GrOp* nextInChain() const { return fNextInChain.get(); }
    /** The previous op in the chain. */
    GrOp* prevInChain() const { return fPrevInChain; }
    /**
     * Cuts the chain after this op. The returned op is the op that was previously next in the
     * chain or null if this was already a tail.
     */
    GrOp::Owner cutChain();
    SkDEBUGCODE(void validateChain(GrOp* expectedTail = nullptr) const;)

#ifdef SK_DEBUG
    virtual void validate() const {}
#endif

protected:
    GrOp(uint32_t classID);

    /**
     * Indicates that the op will produce geometry that extends beyond its bounds for the
     * purpose of ensuring that the fragment shader runs on partially covered pixels for
     * non-MSAA antialiasing.
     */
    enum class HasAABloat : bool {
        kNo = false,
        kYes = true
    };
    /**
     * Indicates that the geometry being drawn is a hairline stroke. A point that is drawn in
     * device space is also considered a hairline.
     */
    enum class IsHairline : bool {
        kNo = false,
        kYes = true
    };

    void setBounds(const SkRect& newBounds, HasAABloat aabloat, IsHairline zeroArea) {
        fBounds = newBounds;
        this->setBoundsFlags(aabloat, zeroArea);
    }
    void setTransformedBounds(const SkRect& srcBounds, const SkMatrix& m,
                              HasAABloat aabloat, IsHairline zeroArea) {
        m.mapRect(&fBounds, srcBounds);
        this->setBoundsFlags(aabloat, zeroArea);
    }
    void makeFullScreen(GrSurfaceProxy* proxy) {
        this->setBounds(proxy->getBoundsRect(), HasAABloat::kNo, IsHairline::kNo);
    }

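    // Since the op's bounds must be in device space irrespective of the clip
    // (see the class comment), a subclass drawing local-space geometry would
    // typically map its bounds through the view matrix, e.g. (fLocalRect and
    // fViewMatrix are hypothetical fields):
    //
    //     this->setTransformedBounds(fLocalRect, fViewMatrix,
    //                                HasAABloat::kYes, IsHairline::kNo);
    //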
    static uint32_t GenOpClassID() { return GenID(&gCurrOpClassID); }

private:
    void joinBounds(const GrOp& that) {
        if (that.hasAABloat()) {
            fBoundsFlags |= kAABloat_BoundsFlag;
        }
        if (that.hasZeroArea()) {
            fBoundsFlags |= kZeroArea_BoundsFlag;
        }
        fBounds.joinPossiblyEmptyRect(that.fBounds);
    }

    virtual CombineResult onCombineIfPossible(GrOp*, SkArenaAlloc*, const GrCaps&) {
        return CombineResult::kCannotCombine;
    }

    // TODO: the parameters to onPrePrepare mirror GrOpFlushState::OpArgs - fuse the two?
    virtual void onPrePrepare(GrRecordingContext*,
                              const GrSurfaceProxyView& writeView,
                              GrAppliedClip*,
                              const GrDstProxyView&,
                              GrXferBarrierFlags renderPassXferBarriers,
                              GrLoadOp colorLoadOp) = 0;
    virtual void onPrepare(GrOpFlushState*) = 0;
    // If this op is chained then chainBounds is the union of the bounds of all ops in the chain.
    // Otherwise, this op's bounds.
    virtual void onExecute(GrOpFlushState*, const SkRect& chainBounds) = 0;
#if defined(GPU_TEST_UTILS)
    virtual SkString onDumpInfo() const { return SkString(); }
#endif

    static uint32_t GenID(std::atomic<uint32_t>* idCounter) {
        uint32_t id = idCounter->fetch_add(1, std::memory_order_relaxed);
        if (id == 0) {
            SK_ABORT("This should never wrap as it should only be called once for each GrOp "
                     "subclass.");
        }
        return id;
    }

    void setBoundsFlags(HasAABloat aabloat, IsHairline zeroArea) {
        fBoundsFlags = 0;
        fBoundsFlags |= (HasAABloat::kYes == aabloat) ? kAABloat_BoundsFlag : 0;
        fBoundsFlags |= (IsHairline::kYes == zeroArea) ? kZeroArea_BoundsFlag : 0;
    }

    static constexpr uint16_t kIllegalOpID = 0;

    enum BoundsFlags {
        kAABloat_BoundsFlag                     = 0x1,
        kZeroArea_BoundsFlag                    = 0x2,
        SkDEBUGCODE(kUninitialized_BoundsFlag   = 0x4)
    };

    Owner                               fNextInChain{nullptr};
    GrOp*                               fPrevInChain = nullptr;
    const uint16_t                      fClassID;
    uint16_t                            fBoundsFlags;

    static uint32_t GenOpID() { return GenID(&gCurrOpUniqueID); }
    mutable uint32_t                    fUniqueID = SK_InvalidUniqueID;
    SkRect                              fBounds;

    static std::atomic<uint32_t> gCurrOpUniqueID;
    static std::atomic<uint32_t> gCurrOpClassID;
};

#endif