/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef GrDrawingManager_DEFINED
#define GrDrawingManager_DEFINED

#include "include/core/SkRefCnt.h"
#include "include/core/SkSpan.h"
#include "include/private/base/SkDebug.h"
#include "include/private/base/SkTArray.h"
#include "src/gpu/AtlasTypes.h"
#include "src/gpu/ganesh/GrBufferAllocPool.h"
#include "src/gpu/ganesh/GrCaps.h"
#include "src/gpu/ganesh/GrHashMapWithCache.h"
#include "src/gpu/ganesh/GrSamplerState.h"
#include "src/gpu/ganesh/GrSurfaceProxy.h"
#include "src/gpu/ganesh/PathRenderer.h"
#include "src/gpu/ganesh/PathRendererChain.h"

#include <cstddef>
#include <cstdint>
#include <memory>
#include <vector>

// Enabling this will print out which path renderers are being chosen
#define GR_PATH_RENDERER_SPEW 0

class GrArenas;
class GrDeferredDisplayList;
class GrDirectContext;
class GrGpuBuffer;
class GrOnFlushCallbackObject;
class GrOpFlushState;
class GrRecordingContext;
class GrRenderTargetProxy;
class GrRenderTask;
class GrResourceAllocator;
class GrSemaphore;
class GrSurfaceProxyView;
class GrTextureResolveRenderTask;
class SkData;
enum GrSurfaceOrigin : int;
enum class GrColorType;
enum class GrSemaphoresSubmitted : bool;
struct GrFlushInfo;
struct GrMipLevel;
struct SkIRect;

namespace SkSurfaces {
enum class BackendSurfaceAccess;
}
namespace skgpu {
class MutableTextureState;
namespace ganesh {
class AtlasPathRenderer;
class OpsTask;
class SoftwarePathRenderer;
}  // namespace ganesh
}  // namespace skgpu

class GrDrawingManager {
public:
    ~GrDrawingManager();

    void freeGpuResources();

    // OpsTasks created at flush time are stored and handled differently from the others.
    sk_sp<skgpu::ganesh::OpsTask> newOpsTask(GrSurfaceProxyView, sk_sp<GrArenas> arenas);

    // Adds 'atlasTask' to the DAG and leaves it open.
    //
    // If 'previousAtlasTask' is provided, closes it and configures dependencies to guarantee
    // previousAtlasTask and all its users are completely out of service before atlasTask executes.
    void addAtlasTask(sk_sp<GrRenderTask> atlasTask, GrRenderTask* previousAtlasTask);
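
    // A minimal chaining sketch (illustrative only; 'fDrawingMgr' and 'fPreviousAtlasTask'
    // are hypothetical caller-side names, not part of this header):
    //
    //     fDrawingMgr->addAtlasTask(newAtlasTask, fPreviousAtlasTask.get());
    //     fPreviousAtlasTask = newAtlasTask;  // the next generation will retire this one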

    // Create a render task that can resolve MSAA and/or regenerate mipmap levels on proxies. This
    // method only adds the new render task to the list, inserting it before the last task in the
    // list. It is up to the caller to call addProxy() on the returned object.
    GrTextureResolveRenderTask* newTextureResolveRenderTaskBefore(const GrCaps&);
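
    // A usage sketch (illustrative only; 'mgr', 'proxy', and 'caps' are assumed caller-side
    // values, and the exact addProxy() signature shown here is an assumption):
    //
    //     GrTextureResolveRenderTask* resolveTask =
    //             mgr->newTextureResolveRenderTaskBefore(caps);
    //     resolveTask->addProxy(mgr, proxy, GrSurfaceProxy::ResolveFlags::kMSAA, caps);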

    // Creates a render task that can resolve MSAA and/or regenerate mipmap levels on the passed in
    // proxy. The task is appended to the end of the current list of tasks.
    void newTextureResolveRenderTask(sk_sp<GrSurfaceProxy> proxy,
                                     GrSurfaceProxy::ResolveFlags,
                                     const GrCaps&);

    // Create a new render task that will cause the gpu to wait on semaphores before executing any
    // more RenderTasks that target proxy. It is possible for this wait to also block additional
    // work (even to other proxies) that has already been recorded or will be recorded later. The
    // only guarantee is that future work to the passed in proxy will wait on the semaphores to be
    // signaled.
    void newWaitRenderTask(const sk_sp<GrSurfaceProxy>& proxy,
                           std::unique_ptr<std::unique_ptr<GrSemaphore>[]>,
                           int numSemaphores);
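
    // A minimal sketch of building the semaphore array (illustrative only; 'mgr', 'proxy',
    // and the semaphore construction are assumptions, not part of this header):
    //
    //     int n = 2;
    //     std::unique_ptr<std::unique_ptr<GrSemaphore>[]> semaphores(
    //             new std::unique_ptr<GrSemaphore>[n]);
    //     semaphores[0] = /* backend-specific GrSemaphore */;
    //     semaphores[1] = /* backend-specific GrSemaphore */;
    //     mgr->newWaitRenderTask(proxy, std::move(semaphores), n);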

    // Create a new render task which copies the pixels from the srcProxy into the dstBuffer. This
    // is used to support the asynchronous readback API. The srcRect is the region of the srcProxy
    // to be copied. The surfaceColorType says how we should interpret the data when reading back
    // from the source. The dstColorType describes how the data should be stored in the dstBuffer,
    // and dstOffset is the offset into the dstBuffer where we will start writing data.
    void newTransferFromRenderTask(const sk_sp<GrSurfaceProxy>& srcProxy, const SkIRect& srcRect,
                                   GrColorType surfaceColorType, GrColorType dstColorType,
                                   sk_sp<GrGpuBuffer> dstBuffer, size_t dstOffset);
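
    // A readback sketch (illustrative only; 'mgr', 'srcProxy', and 'xferBuffer' are
    // assumptions; the buffer is expected to be a GPU-to-CPU transfer buffer):
    //
    //     mgr->newTransferFromRenderTask(srcProxy, SkIRect::MakeWH(32, 32),
    //                                    GrColorType::kRGBA_8888,  // read source as
    //                                    GrColorType::kRGBA_8888,  // store in buffer as
    //                                    xferBuffer, /*dstOffset=*/0);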

    // Creates a new render task which copies a pixel rectangle from src into dst. The src pixels
    // copied are specified by srcRect. They are copied to the dstRect in dst. Some backends and
    // formats may require dstRect to have the same size as srcRect. Regardless, srcRect must be
    // contained by src's dimensions and dstRect must be contained by dst's dimensions. Any
    // clipping, aspect-ratio adjustment, etc. must be handled prior to this call.
    //
    // This method is not guaranteed to succeed, depending on the surface types, formats, and
    // backend-specific limitations. On success the task is returned so that the caller may mark
    // it skippable if the copy is later deemed unnecessary.
    sk_sp<GrRenderTask> newCopyRenderTask(sk_sp<GrSurfaceProxy> dst,
                                          SkIRect dstRect,
                                          const sk_sp<GrSurfaceProxy>& src,
                                          SkIRect srcRect,
                                          GrSamplerState::Filter filter,
                                          GrSurfaceOrigin);
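
    // A copy sketch (illustrative only; 'mgr', 'dstProxy', and 'srcProxy' are assumptions;
    // makeSkippable() is assumed to be the GrRenderTask hook for skipping):
    //
    //     sk_sp<GrRenderTask> copyTask = mgr->newCopyRenderTask(
    //             dstProxy, SkIRect::MakeWH(16, 16), srcProxy, SkIRect::MakeWH(16, 16),
    //             GrSamplerState::Filter::kNearest, kTopLeft_GrSurfaceOrigin);
    //     if (copyTask && /* the copy later proves unnecessary */) {
    //         copyTask->makeSkippable();
    //     }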

    // Adds a render task that copies the range [srcOffset, srcOffset + size] from src to
    // [dstOffset, dstOffset + size] in dst. The src buffer must have type kXferCpuToGpu and the
    // dst must NOT have type kXferCpuToGpu. Neither buffer may be mapped when this executes.
    // Because this is used to insert transfers to vertex/index buffers between draws and we don't
    // track dependencies with buffers, this task is a hard boundary for task reordering.
    void newBufferTransferTask(sk_sp<GrGpuBuffer> src,
                               size_t srcOffset,
                               sk_sp<GrGpuBuffer> dst,
                               size_t dstOffset,
                               size_t size);
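
    // A transfer sketch (illustrative only; 'mgr', 'staging', and 'vertexBuffer' are
    // assumptions; 'staging' must be a kXferCpuToGpu buffer and 'vertexBuffer' must not be):
    //
    //     mgr->newBufferTransferTask(staging, /*srcOffset=*/0,
    //                                vertexBuffer, /*dstOffset=*/256, /*size=*/1024);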

    // Adds a render task that copies the src SkData to [dstOffset, dstOffset + src->size()] in dst.
    // The dst must not have type kXferCpuToGpu and must not be mapped. Because this is used to
    // insert updates to vertex/index buffers between draws and we don't track dependencies with
    // buffers, this task is a hard boundary for task reordering.
    void newBufferUpdateTask(sk_sp<SkData> src, sk_sp<GrGpuBuffer> dst, size_t dstOffset);
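
    // An update sketch (illustrative only; 'mgr', 'verts', and 'vertexBuffer' are
    // assumptions, not part of this header):
    //
    //     sk_sp<SkData> data = SkData::MakeWithCopy(verts, sizeof(verts));
    //     mgr->newBufferUpdateTask(std::move(data), vertexBuffer, /*dstOffset=*/0);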

    // Adds a task that writes the data from the passed GrMipLevels to dst. The lifetime of the
    // pixel data in the levels should be tied to the passed SkData or the caller must flush the
    // context before the data may become invalid. srcColorType is the color type of the
    // GrMipLevels. dstColorType is the color type being used with dst and must be compatible with
    // dst's format according to GrCaps::areColorTypeAndFormatCompatible().
    bool newWritePixelsTask(sk_sp<GrSurfaceProxy> dst,
                            SkIRect rect,
                            GrColorType srcColorType,
                            GrColorType dstColorType,
                            const GrMipLevel[],
                            int levelCount);
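
    // A single-level write sketch (illustrative only; 'mgr', 'dstProxy', 'pixels', and
    // 'rowBytes' are assumptions, not part of this header):
    //
    //     GrMipLevel level = {pixels, rowBytes, /*fOptionalStorage=*/nullptr};
    //     bool ok = mgr->newWritePixelsTask(dstProxy, SkIRect::MakeWH(64, 64),
    //                                       GrColorType::kRGBA_8888,  // src data
    //                                       GrColorType::kRGBA_8888,  // dst usage
    //                                       &level, /*levelCount=*/1);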

    GrRecordingContext* getContext() { return fContext; }

    using PathRenderer = skgpu::ganesh::PathRenderer;
    using PathRendererChain = skgpu::ganesh::PathRendererChain;

    PathRenderer* getPathRenderer(const PathRenderer::CanDrawPathArgs&,
                                  bool allowSW,
                                  PathRendererChain::DrawType,
                                  PathRenderer::StencilSupport* = nullptr);
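
    // A selection sketch (illustrative only; 'mgr' and the populated 'args' are
    // assumptions, not part of this header):
    //
    //     PathRenderer::StencilSupport support;
    //     PathRenderer* pr = mgr->getPathRenderer(args, /*allowSW=*/true,
    //                                             PathRendererChain::DrawType::kColor,
    //                                             &support);
    //     if (!pr) {
    //         // No renderer (including software) can handle this path; reject the draw.
    //     }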

    PathRenderer* getSoftwarePathRenderer();

    // Returns a direct pointer to the atlas path renderer, or null if it is not supported or
    // not enabled.
    skgpu::ganesh::AtlasPathRenderer* getAtlasPathRenderer();

    // Returns a direct pointer to the tessellation path renderer, or null if it is not supported
    // or not enabled.
    PathRenderer* getTessellationPathRenderer();

    void flushIfNecessary();

    static bool ProgramUnitTest(GrDirectContext*, int maxStages, int maxLevels);

    GrSemaphoresSubmitted flushSurfaces(SkSpan<GrSurfaceProxy*>,
                                        SkSurfaces::BackendSurfaceAccess,
                                        const GrFlushInfo&,
                                        const skgpu::MutableTextureState* newState);
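
    // A flush sketch (illustrative only; 'mgr' and 'proxy' are assumptions; a
    // default-constructed GrFlushInfo requests no semaphores and no finished callback):
    //
    //     GrSurfaceProxy* proxies[] = {proxy.get()};
    //     GrFlushInfo info;
    //     mgr->flushSurfaces(SkSpan(proxies),
    //                        SkSurfaces::BackendSurfaceAccess::kNoAccess,
    //                        info, /*newState=*/nullptr);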

    void addOnFlushCallbackObject(GrOnFlushCallbackObject*);

#if defined(GPU_TEST_UTILS)
    void testingOnly_removeOnFlushCallbackObject(GrOnFlushCallbackObject*);
    PathRendererChain::Options testingOnly_getOptionsForPathRendererChain() {
        return fOptionsForPathRendererChain;
    }
#endif

    GrRenderTask* getLastRenderTask(const GrSurfaceProxy*) const;
    skgpu::ganesh::OpsTask* getLastOpsTask(const GrSurfaceProxy*) const;
    void setLastRenderTask(const GrSurfaceProxy*, GrRenderTask*);

    void moveRenderTasksToDDL(GrDeferredDisplayList* ddl);
    void createDDLTask(sk_sp<const GrDeferredDisplayList>,
                       sk_sp<GrRenderTargetProxy> newDest);
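
    // A DDL round-trip sketch (illustrative only; 'recordingMgr', 'replayMgr', 'ddl', and
    // 'targetProxy' are assumptions, not part of this header):
    //
    //     recordingMgr->moveRenderTasksToDDL(ddl);          // capture recorded tasks
    //     replayMgr->createDDLTask(sk_ref_sp<const GrDeferredDisplayList>(ddl),
    //                              std::move(targetProxy)); // schedule them for replay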

    // This is public so it can be called by an SkImage factory (in SkImages namespace).
    // It is not meant to be directly called in other situations.
    bool flush(SkSpan<GrSurfaceProxy*> proxies,
               SkSurfaces::BackendSurfaceAccess access,
               const GrFlushInfo&,
               const skgpu::MutableTextureState* newState);

private:
    GrDrawingManager(GrRecordingContext*,
                     const PathRendererChain::Options&,
                     bool reduceOpsTaskSplitting);

    bool wasAbandoned() const;

    void closeActiveOpsTask();

    // return true if any GrRenderTasks were actually executed; false otherwise
    bool executeRenderTasks(GrOpFlushState*);

    void removeRenderTasks();

    void sortTasks();

    // Attempt to reorder tasks to reduce render passes, and check the memory budget of the
    // resulting intervals. Returns whether the reordering was successful & the memory budget
    // acceptable. If it returns true, fDAG has been updated to reflect the reordered tasks.
    bool reorderTasks(GrResourceAllocator*);

    void closeAllTasks();

    GrRenderTask* appendTask(sk_sp<GrRenderTask>);
    GrRenderTask* insertTaskBeforeLast(sk_sp<GrRenderTask>);

    bool submitToGpu();

    SkDEBUGCODE(void validate() const;)

    friend class GrDirectContext; // access to: flush & cleanup
    friend class GrOnFlushResourceProvider; // this is just a shallow wrapper around this class
    friend class GrRecordingContext;  // access to: ctor

    static const int kNumPixelGeometries = 5; // The different pixel geometries
    static const int kNumDFTOptions = 2;      // DFT or no DFT

    GrRecordingContext*                        fContext;

    // This cache is used by both the vertex and index pools. It reuses memory across multiple
    // flushes.
    sk_sp<GrBufferAllocPool::CpuBufferCache>   fCpuBufferCache;

    skia_private::TArray<sk_sp<GrRenderTask>>  fDAG;
    std::vector<int>                           fReorderBlockerTaskIndices;
    skgpu::ganesh::OpsTask*                    fActiveOpsTask = nullptr;

    PathRendererChain::Options                 fOptionsForPathRendererChain;
    std::unique_ptr<PathRendererChain>         fPathRendererChain;
    sk_sp<skgpu::ganesh::SoftwarePathRenderer> fSoftwarePathRenderer;

    skgpu::TokenTracker                        fTokenTracker;
    bool                                       fFlushing = false;
    const bool                                 fReduceOpsTaskSplitting;

    skia_private::TArray<GrOnFlushCallbackObject*> fOnFlushCBObjects;

    struct SurfaceIDKeyTraits {
        static uint32_t GetInvalidKey() {
            return GrSurfaceProxy::UniqueID::InvalidID().asUInt();
        }
    };

    GrHashMapWithCache<uint32_t, GrRenderTask*, SurfaceIDKeyTraits, GrCheapHash> fLastRenderTasks;
};

#endif