/*
 * Copyright 2024 Google LLC
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef skgpu_graphite_ScratchResourceManager_DEFINED
#define skgpu_graphite_ScratchResourceManager_DEFINED

#include "include/core/SkRefCnt.h"
#include "include/core/SkSize.h"
#include "include/private/base/SkTArray.h"
#include "src/core/SkTHash.h"

#include <string_view>

namespace skgpu::graphite {

class Resource;
class ResourceProvider;
class Texture;
class TextureInfo;
class TextureProxy;

// NOTE: This is temporary while atlas management requires flushing an entire Recorder. That
// can break a scratch Device into multiple DrawTasks and the proxy read count needs to count
// all reads regardless of which DrawTask is referenced. Once scratch devices only produce a
// single DrawTask, DrawTask can hold the pending read count directly.
class ProxyReadCountMap {
public:
    ProxyReadCountMap() = default;

    void increment(const TextureProxy* proxy) {
        int* count = fCounts.find(proxy);
        if (!count) {
            count = fCounts.set(proxy, 0);
        }
        (*count)++;
    }

    bool decrement(const TextureProxy* proxy) {
        int* count = fCounts.find(proxy);
        SkASSERT(count && *count > 0);
        (*count)--;
        return *count == 0;
    }

    int get(const TextureProxy* proxy) const {
        const int* count = fCounts.find(proxy);
        return count ? *count : 0;
    }

private:
    skia_private::THashMap<const TextureProxy*, int> fCounts;
};
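
// A minimal usage sketch of ProxyReadCountMap (hypothetical caller code, not part of this
// header): reads are counted while the task graph is built, and the decrement that drops the
// count to zero identifies the proxy's last pending read.
//
//   ProxyReadCountMap counts;
//   counts.increment(proxy);                 // pending read #1
//   counts.increment(proxy);                 // pending read #2
//   SkASSERT(counts.get(proxy) == 2);
//   bool done = counts.decrement(proxy);     // false, one read remains
//   done = counts.decrement(proxy);          // true, no more pending reads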

/**
 * ScratchResourceManager helps coordinate the reuse of resources *within* a Recording that would
 * not otherwise be returned from the ResourceProvider/Cache because the Recorder holds usage
 * refs on the resources and they are typically not Shareable.
 *
 * A ScratchResourceManager maintains a pool of resources that have been handed out for some use
 * case and then explicitly returned by the original holder. It is up to the callers to
 * return resources in an optimal manner (for best reuse) and not use them after they've been
 * returned for a later task's use. To help callers manage when they can return resources,
 * the manager maintains a stack that corresponds with the depth-first traversal of the tasks
 * during prepareResources() and provides hooks to register listeners that are invoked when tasks
 * read or sample resources.
 *
 * Once all uninstantiated resources are assigned and prepareResources() succeeds, the
 * ScratchResourceManager can be discarded. The reuse within a Recording's task graph is fixed at
 * that point and remains valid even if the recording is replayed.
 */
class ScratchResourceManager {
public:
    ScratchResourceManager(ResourceProvider* resourceProvider,
                           std::unique_ptr<ProxyReadCountMap>);
    ~ScratchResourceManager();

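    // A minimal sketch of the expected lifetime (the snap()/prepareResources() plumbing shown
    // here is an assumption for illustration; only the constructor and destructor are declared
    // above):
    //
    //   ScratchResourceManager scratchManager(resourceProvider, std::move(proxyReadCounts));
    //   // Depth-first traversal of the Recording's task graph; tasks obtain and return scratch
    //   // textures through `scratchManager` while their GPU work is prepared.
    //   rootTaskList->prepareResources(resourceProvider, &scratchManager, ...);
    //   // Once prepareResources() succeeds, the manager can be discarded; the reuse decisions
    //   // are baked into the task graph and remain valid when the Recording is replayed.
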
    // Get a scratch texture with the given size and texture info. The returned texture will
    // not be reusable until the caller invokes `returnTexture()`. At that point, subsequent
    // compatible calls to getScratchTexture() may return the texture. If there is no compatible
    // available texture to be reused, the ResourceProvider will be used to find or create one.
    //
    // It is the caller's responsibility to determine when it's acceptable to return a resource.
    // That said, it is not mandatory that scratch resources be returned; they simply will not be
    // available for reuse by later tasks in the Recording.
    sk_sp<Texture> getScratchTexture(SkISize, const TextureInfo&, std::string_view label);

    // TODO: Eventually update ScratchBuffer and DrawBufferManager to leverage the
    // ScratchResourceManager. There are a few open issues to address first:
    //  - ScratchBuffer uses RAII to return the resource; ScratchResourceManager could adopt this
    //    for buffers, but that may only make sense if textures could also operate that way.
    //    Alternatively, ScratchBuffer remains an RAII abstraction on top of ScratchResourceManager.
    //  - ScratchResourceManager is currently only available in snap(), but DrawBufferManager needs
    //    to be available at all times because a DrawPass could be created at any point. b/335644795
    //    considers moving all DrawPass creation into snap(), which would avoid this issue.
    //    Alternatively, ScratchResourceManager could have the same lifetime as the buffer manager.

    // Mark the texture as available for reuse. It must have been obtained from a prior call to
    // getScratchTexture() on this manager. If the caller does not ensure that all of its uses of
    // the resource are prepared before tasks that are processed after this call, undefined
    // results can occur.
    void returnTexture(sk_sp<Texture>);
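
    // A minimal usage sketch (hypothetical names: `scratchManager`, `dimensions`, `textureInfo`,
    // and the label are placeholders for illustration):
    //
    //   sk_sp<Texture> scratch = scratchManager->getScratchTexture(
    //           dimensions, textureInfo, "ScratchLayerTexture");
    //   // ... prepare the tasks that write to and then sample `scratch` ...
    //   scratchManager->returnTexture(std::move(scratch));
    //   // Subsequent compatible getScratchTexture() calls may now hand `scratch` back out.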

    // Graphite accumulates tasks into a graph (implicit dependencies defined by the order they are
    // added to the root task list, or explicitly when appending child tasks). The depth-first
    // traversal of this graph helps impose constraints on the read/write windows of resources. To
    // help Tasks with this tracking, ScratchResourceManager maintains a stack of lists of "pending
    // uses".
    //
    // Each recursion in the depth-first traversal of the task graph pushes the stack. Going up
    // pops the stack. A "pending use" allows a task that modifies a resource to register a
    // listener that is triggered when either its scope is popped off or a consuming task that
    // reads that resource notifies the ScratchResourceManager (e.g. a RenderPassTask or CopyTask
    // that samples a scratch texture). Internally, the listeners can decrement a pending read
    // count or otherwise determine when to call returnTexture() without having to be coupled
    // directly to the consuming tasks.
    //
    // When a task calls notifyResourcesConsumed(), all "pending use" listeners in the current
    // scope are invoked and removed from the list. This means that tasks must be externally
    // organized such that only the tasks that prepare the scratch resources for that consuming
    // task are at the same depth. Intermingling writes to multiple scratch textures before they
    // are sampled by separate renderpasses would mean that all the scratch textures could be
    // returned for reuse at the first renderpass. Instead, a TaskList can be used to group the
    // scratch writes with the renderpass that samples them, introducing a scope in the stack.
    // Alternatively, if the caller constructs a single list directly to avoid this issue, the
    // extra stack manipulation can be avoided.
    class PendingUseListener {
    public:
        virtual ~PendingUseListener() {}

        virtual void onUseCompleted(ScratchResourceManager*) = 0;
    };

    // Push a new scope onto the stack, preventing previously added pending listeners from being
    // invoked when a task consumes resources.
    void pushScope();

    // Pop the current scope off the stack. This does not invoke any pending listeners that were
    // not consumed by a task within the ending scope. This can happen if an offscreen layer is
    // flushed in a Recording snap() before it's actually been drawn to its target. That final
    // draw can then even happen in a subsequent Recording. Because the pending listener is not
    // invoked, the scratch resource is not returned, correctly keeping it in use across multiple
    // Recordings.
    // TODO: Eventually, the above scenario should not happen, but that requires atlasing to not
    // force a flush of every Device. Once that is the case, popScope() can ideally assert that
    // there are no more pending listeners to invoke (otherwise it means the tasks were linked
    // incorrectly).
    void popScope();

    // Invoked by tasks that sample or read from resources. All pending listeners that were
    // marked in the current scope will be invoked.
    void notifyResourcesConsumed();

    // Register a listener that will be invoked on the next call to notifyResourcesConsumed() or
    // popScope() within the current scope. Registering the same listener multiple times will
    // invoke it multiple times.
    //
    // The ScratchResourceManager does not take ownership of these listeners; they are assumed to
    // live for as long as the prepareResources() phase of snapping a Recording.
    void markResourceInUse(PendingUseListener* listener);
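
    // A minimal sketch of the scope protocol for one offscreen layer (the listener object and
    // the surrounding task plumbing are assumptions for illustration):
    //
    //   scratchManager->pushScope();                        // entering the layer's child tasks
    //   scratchManager->markResourceInUse(&layerListener);  // registered by the task that
    //                                                       // renders into the scratch texture
    //   // ... prepare the remaining tasks in this scope ...
    //   scratchManager->notifyResourcesConsumed();          // called by the consuming task that
    //                                                       // samples the layer; invokes
    //                                                       // layerListener.onUseCompleted()
    //   scratchManager->popScope();                         // leaving the layer's scope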

    // Temporary access to the proxy read counts stored in the ScratchResourceManager
    int pendingReadCount(const TextureProxy* proxy) const {
        return fProxyReadCounts->get(proxy);
    }

    // Returns true if the read count reached zero; must only be called if it was > 0 previously.
    bool removePendingRead(const TextureProxy* proxy) {
        return fProxyReadCounts->decrement(proxy);
    }

private:
    struct ScratchTexture {
        sk_sp<Texture> fTexture;
        bool fAvailable;
    };

    // If there are no available resources for reuse, new or cached resources will be fetched from
    // this ResourceProvider.
    ResourceProvider* fResourceProvider;

    // ScratchResourceManager will maintain separate pools based on the type of Resource since the
    // callers always need a specific sub-Resource and it limits the size of each search pool. It
    // also allows for type-specific search heuristics when selecting an available resource.
    skia_private::TArray<ScratchTexture> fScratchTextures;

    // This single list is organized into a stack of sublists by using null pointers to mark the
    // start of a new scope.
    skia_private::TArray<PendingUseListener*> fListenerStack;

    std::unique_ptr<ProxyReadCountMap> fProxyReadCounts;
};

} // namespace skgpu::graphite

#endif // skgpu_graphite_ScratchResourceManager_DEFINED