xref: /aosp_15_r20/external/skia/src/gpu/ganesh/GrGpu.h (revision c8dee2aa9b3f27cf6c858bd81872bdeb2c07ed17)
1 /*
2  * Copyright 2011 Google Inc.
3  *
4  * Use of this source code is governed by a BSD-style license that can be
5  * found in the LICENSE file.
6  */
7 
8 #ifndef GrGpu_DEFINED
9 #define GrGpu_DEFINED
10 
11 #include "include/core/SkData.h"
12 #include "include/core/SkRect.h"
13 #include "include/core/SkRefCnt.h"
14 #include "include/core/SkSpan.h"
15 #include "include/core/SkTypes.h"
16 #include "include/gpu/GpuTypes.h"
17 #include "include/gpu/ganesh/GrBackendSurface.h"
18 #include "include/gpu/ganesh/GrTypes.h"
19 #include "include/private/base/SkTArray.h"
20 #include "include/private/gpu/ganesh/GrTypesPriv.h"
21 #include "src/gpu/ganesh/GrCaps.h"
22 #include "src/gpu/ganesh/GrGpuBuffer.h"  // IWYU pragma: keep
23 #include "src/gpu/ganesh/GrOpsRenderPass.h"
24 #include "src/gpu/ganesh/GrSamplerState.h"
25 #include "src/gpu/ganesh/GrXferProcessor.h"
26 
27 #include <array>
28 #include <cstddef>
29 #include <cstdint>
30 #include <memory>
31 #include <optional>
32 #include <string_view>
33 
34 class GrAttachment;
35 class GrBackendSemaphore;
36 class GrDirectContext;
37 class GrGLContext;
38 class GrProgramDesc;
39 class GrProgramInfo;
40 class GrRenderTarget;
41 class GrRingBuffer;
42 class GrSemaphore;
43 class GrStagingBufferManager;
44 class GrSurface;
45 class GrSurfaceProxy;
46 class GrTexture;
47 class GrThreadSafePipelineBuilder;
48 class SkJSONWriter;
49 class SkString;
50 enum class SkTextureCompressionType;
51 struct GrVkDrawableInfo;
52 struct SkISize;
53 struct SkImageInfo;
54 
55 namespace SkSurfaces {
56 enum class BackendSurfaceAccess;
57 }
58 namespace skgpu {
59 class AutoCallback;
60 class MutableTextureState;
61 class RefCntedCallback;
62 }  // namespace skgpu
63 
// This is sufficient for the GL implementation (which is all we have now). It can become a
// "Backend" SkAnySubclass type to cover other backends in the future.
struct GrTimerQuery {
    uint32_t query;  // backend-specific (currently GL) query object handle
};
69 
70 class GrGpu {
71 public:
72     GrGpu(GrDirectContext* direct);
73     virtual ~GrGpu();
74 
getContext()75     GrDirectContext* getContext() { return fContext; }
getContext()76     const GrDirectContext* getContext() const { return fContext; }
77 
78     /**
79      * Gets the capabilities of the draw target.
80      */
caps()81     const GrCaps* caps() const { return fCaps.get(); }
refCaps()82     sk_sp<const GrCaps> refCaps() const { return fCaps; }
83 
stagingBufferManager()84     virtual GrStagingBufferManager* stagingBufferManager() { return nullptr; }
85 
uniformsRingBuffer()86     virtual GrRingBuffer* uniformsRingBuffer() { return nullptr; }
87 
88     enum class DisconnectType {
89         // No cleanup should be attempted, immediately cease making backend API calls
90         kAbandon,
91         // Free allocated resources (not known by GrResourceCache) before returning and
92         // ensure no backend backend 3D API calls will be made after disconnect() returns.
93         kCleanup,
94     };
95 
96     // Called by context when the underlying backend context is already or will be destroyed
97     // before GrDirectContext.
98     virtual void disconnect(DisconnectType);
99 
100     virtual GrThreadSafePipelineBuilder* pipelineBuilder() = 0;
101     virtual sk_sp<GrThreadSafePipelineBuilder> refPipelineBuilder() = 0;
102 
103     // Called by GrDirectContext::isContextLost. Returns true if the backend Gpu object has gotten
104     // into an unrecoverable, lost state.
isDeviceLost()105     virtual bool isDeviceLost() const { return false; }
106 
107     /**
108      * The GrGpu object normally assumes that no outsider is setting state
109      * within the underlying 3D API's context/device/whatever. This call informs
110      * the GrGpu that the state was modified and it shouldn't make assumptions
111      * about the state.
112      */
113     void markContextDirty(uint32_t state = kAll_GrBackendState) { fResetBits |= state; }
114 
115     /**
116      * Creates a texture object. If renderable is kYes then the returned texture can
117      * be used as a render target by calling GrTexture::asRenderTarget(). Not all
118      * pixel configs can be used as render targets. Support for configs as textures
119      * or render targets can be checked using GrCaps.
120      *
121      * @param dimensions     dimensions of the texture to be created.
122      * @param format         the format for the texture (not currently used).
123      * @param renderable     should the resulting texture be renderable
124      * @param renderTargetSampleCnt The number of samples to use for rendering if renderable is
125      *                       kYes. If renderable is kNo then this must be 1.
126      * @param budgeted       does this texture count against the resource cache budget?
127      * @param isProtected    should the texture be created as protected.
128      * @param texels         array of mipmap levels containing texel data to load.
     *                       If level i has pixels then it is assumed that its dimensions are
     *                       max(1, floor(dimensions.fWidth / 2^i)) by
     *                       max(1, floor(dimensions.fHeight / 2^i)).
     *                       If texels[i].fPixels == nullptr for all i < mipLevelCount or
     *                       mipLevelCount is 0 then the texture's contents are uninitialized.
134      *                       If a level has non-null pixels, its row bytes must be a multiple of the
135      *                       config's bytes-per-pixel. The row bytes must be tight to the
136      *                       level width if !caps->writePixelsRowBytesSupport().
137      *                       If mipLevelCount > 1 and texels[i].fPixels != nullptr for any i > 0
138      *                       then all levels must have non-null pixels. All levels must have
139      *                       non-null pixels if GrCaps::createTextureMustSpecifyAllLevels() is true.
140      * @param textureColorType The color type interpretation of the texture for the purpose of
141      *                       of uploading texel data.
142      * @param srcColorType   The color type of data in texels[].
143      * @param texelLevelCount the number of levels in 'texels'. May be 0, 1, or
144      *                       floor(max((log2(dimensions.fWidth), log2(dimensions.fHeight)))). It
145      *                       must be the latter if GrCaps::createTextureMustSpecifyAllLevels() is
146      *                       true.
147      * @return  The texture object if successful, otherwise nullptr.
148      */
149     sk_sp<GrTexture> createTexture(SkISize dimensions,
150                                    const GrBackendFormat& format,
151                                    GrTextureType textureType,
152                                    GrRenderable renderable,
153                                    int renderTargetSampleCnt,
154                                    skgpu::Budgeted budgeted,
155                                    GrProtected isProtected,
156                                    GrColorType textureColorType,
157                                    GrColorType srcColorType,
158                                    const GrMipLevel texels[],
159                                    int texelLevelCount,
160                                    std::string_view label);
161 
162     /**
163      * Simplified createTexture() interface for when there is no initial texel data to upload.
164      */
165     sk_sp<GrTexture> createTexture(SkISize dimensions,
166                                    const GrBackendFormat& format,
167                                    GrTextureType textureType,
168                                    GrRenderable renderable,
169                                    int renderTargetSampleCnt,
170                                    skgpu::Mipmapped mipmapped,
171                                    skgpu::Budgeted budgeted,
172                                    GrProtected isProtected,
173                                    std::string_view label);
174 
175     sk_sp<GrTexture> createCompressedTexture(SkISize dimensions,
176                                              const GrBackendFormat& format,
177                                              skgpu::Budgeted budgeted,
178                                              skgpu::Mipmapped mipmapped,
179                                              GrProtected isProtected,
180                                              const void* data,
181                                              size_t dataSize);
182 
183     /**
184      * Implements GrResourceProvider::wrapBackendTexture
185      */
186     sk_sp<GrTexture> wrapBackendTexture(const GrBackendTexture&,
187                                         GrWrapOwnership,
188                                         GrWrapCacheable,
189                                         GrIOType);
190 
191     sk_sp<GrTexture> wrapCompressedBackendTexture(const GrBackendTexture&,
192                                                   GrWrapOwnership,
193                                                   GrWrapCacheable);
194 
195     /**
196      * Implements GrResourceProvider::wrapRenderableBackendTexture
197      */
198     sk_sp<GrTexture> wrapRenderableBackendTexture(const GrBackendTexture&,
199                                                   int sampleCnt,
200                                                   GrWrapOwnership,
201                                                   GrWrapCacheable);
202 
203     /**
204      * Implements GrResourceProvider::wrapBackendRenderTarget
205      */
206     sk_sp<GrRenderTarget> wrapBackendRenderTarget(const GrBackendRenderTarget&);
207 
208     /**
209      * Implements GrResourceProvider::wrapVulkanSecondaryCBAsRenderTarget
210      */
211     sk_sp<GrRenderTarget> wrapVulkanSecondaryCBAsRenderTarget(const SkImageInfo&,
212                                                               const GrVkDrawableInfo&);
213 
214     /**
215      * Creates a buffer in GPU memory. For a client-side buffer use GrBuffer::CreateCPUBacked.
216      *
217      * @param size            size of buffer to create.
218      * @param intendedType    hint to the graphics subsystem about what the buffer will be used for.
219      * @param accessPattern   hint to the graphics subsystem about how the data will be accessed.
220      *
221      * @return the buffer if successful, otherwise nullptr.
222      */
223     sk_sp<GrGpuBuffer> createBuffer(size_t size,
224                                     GrGpuBufferType intendedType,
225                                     GrAccessPattern accessPattern);
226 
227     /**
228      * Resolves MSAA. The resolveRect must already be in the native destination space.
229      */
230     void resolveRenderTarget(GrRenderTarget*, const SkIRect& resolveRect);
231 
232     /**
233      * Uses the base of the texture to recompute the contents of the other levels.
234      */
235     bool regenerateMipMapLevels(GrTexture*);
236 
237     /**
238      * If the backend API has stateful texture bindings, this resets them back to defaults.
239      */
240     void resetTextureBindings();
241 
242     /**
243      * Reads a rectangle of pixels from a render target. No sRGB/linear conversions are performed.
244      *
245      * @param surface           the surface to read from
246      * @param rect              the rectangle of pixels to read
247      * @param surfaceColorType  the color type for this use of the surface.
248      * @param dstColorType      the color type of the destination buffer.
249      * @param buffer            memory to read the rectangle into.
250      * @param rowBytes          the number of bytes between consecutive rows. Must be a multiple of
251      *                          dstColorType's bytes-per-pixel. Must be tight to width if
252      *                          !caps->readPixelsRowBytesSupport().
253      *
     * @return true if the read succeeded, false if not. The read can fail
     *              because the surface doesn't support reading, the color type
     *              is not allowed for the format of the surface, or if the rectangle
     *              read is not contained in the surface.
258      */
259     bool readPixels(GrSurface* surface,
260                     SkIRect rect,
261                     GrColorType surfaceColorType,
262                     GrColorType dstColorType,
263                     void* buffer,
264                     size_t rowBytes);
265 
266     /**
267      * Updates the pixels in a rectangle of a surface.  No sRGB/linear conversions are performed.
268      *
269      * @param surface            the surface to write to.
270      * @param rect               the rectangle of pixels to overwrite
271      * @param surfaceColorType   the color type for this use of the surface.
272      * @param srcColorType       the color type of the source buffer.
273      * @param texels             array of mipmap levels containing texture data. Row bytes must be a
274      *                           multiple of srcColorType's bytes-per-pixel. Must be tight to level
275      *                           width if !caps->writePixelsRowBytesSupport().
276      * @param mipLevelCount      number of levels in 'texels'
277      * @param prepForTexSampling After doing write pixels should the surface be prepared for texture
278      *                           sampling. This is currently only used by Vulkan for inline uploads
279      *                           to set that layout back to sampled after doing the upload. Inline
280      *                           uploads currently can happen between draws in a single op so it is
281      *                           not trivial to break up the OpsTask into two tasks when we see
282      *                           an inline upload. However, once we are able to support doing that
283      *                           we can remove this parameter.
284      *
     * @return true if the write succeeded, false if not. The write can fail
     *              because the surface doesn't support writing (e.g. read only),
     *              the color type is not allowed for the format of the surface, or
     *              if the rectangle written is not contained in the surface.
289      */
290     bool writePixels(GrSurface* surface,
291                      SkIRect rect,
292                      GrColorType surfaceColorType,
293                      GrColorType srcColorType,
294                      const GrMipLevel texels[],
295                      int mipLevelCount,
296                      bool prepForTexSampling = false);
297 
298     /**
299      * Helper for the case of a single level.
300      */
301     bool writePixels(GrSurface* surface,
302                      SkIRect rect,
303                      GrColorType surfaceColorType,
304                      GrColorType srcColorType,
305                      const void* buffer,
306                      size_t rowBytes,
307                      bool prepForTexSampling = false) {
308         GrMipLevel mipLevel = {buffer, rowBytes, nullptr};
309         return this->writePixels(surface,
310                                  rect,
311                                  surfaceColorType,
312                                  srcColorType,
313                                  &mipLevel,
314                                  1,
315                                  prepForTexSampling);
316     }
317 
318     /**
319      * Transfer bytes from one GPU buffer to another. The src buffer must have type kXferCpuToGpu
320      * and the dst buffer must not. Neither buffer may currently be mapped. The offsets and size
321      * must be aligned to GrCaps::transferFromBufferToBufferAlignment.
322      *
323      * @param src        the buffer to read from
324      * @param srcOffset  the aligned offset at the src at which the transfer begins.
325      * @param dst        the buffer to write to
326      * @param dstOffset  the aligned offset in the dst at which the transfer begins
     * @param size       the aligned number of bytes to transfer.
328      */
329     bool transferFromBufferToBuffer(sk_sp<GrGpuBuffer> src,
330                                     size_t srcOffset,
331                                     sk_sp<GrGpuBuffer> dst,
332                                     size_t dstOffset,
333                                     size_t size);
334 
335     /**
336      * Updates the pixels in a rectangle of a texture using a buffer. If the texture is MIP mapped,
337      * the base level is written to.
338      *
339      * @param texture          the texture to write to.
340      * @param rect             the rectangle of pixels in the texture to overwrite
341      * @param textureColorType the color type for this use of the surface.
342      * @param bufferColorType  the color type of the transfer buffer's pixel data
343      * @param transferBuffer   GrBuffer to read pixels from (type must be "kXferCpuToGpu")
344      * @param offset           offset from the start of the buffer
345      * @param rowBytes         number of bytes between consecutive rows in the buffer. Must be a
346      *                         multiple of bufferColorType's bytes-per-pixel. Must be tight to
347      *                         rect.width() if !caps->writePixelsRowBytesSupport().
348      */
349     bool transferPixelsTo(GrTexture* texture,
350                           SkIRect rect,
351                           GrColorType textureColorType,
352                           GrColorType bufferColorType,
353                           sk_sp<GrGpuBuffer> transferBuffer,
354                           size_t offset,
355                           size_t rowBytes);
356 
357     /**
358      * Reads the pixels from a rectangle of a surface into a buffer. Use
359      * GrCaps::SupportedRead::fOffsetAlignmentForTransferBuffer to determine the requirements for
360      * the buffer offset alignment. If the surface is a MIP mapped texture, the base level is read.
361      *
362      * If successful the row bytes in the buffer is always:
363      *   GrColorTypeBytesPerPixel(bufferColorType) * rect.width()
364      *
365      * Asserts that the caller has passed a properly aligned offset and that the buffer is
366      * large enough to hold the result
367      *
368      * @param surface          the surface to read from.
369      * @param rect             the rectangle of pixels to read
370      * @param surfaceColorType the color type for this use of the surface.
371      * @param bufferColorType  the color type of the transfer buffer's pixel data
372      * @param transferBuffer   GrBuffer to write pixels to (type must be "kXferGpuToCpu")
373      * @param offset           offset from the start of the buffer
374      */
375     bool transferPixelsFrom(GrSurface* surface,
376                             SkIRect rect,
377                             GrColorType surfaceColorType,
378                             GrColorType bufferColorType,
379                             sk_sp<GrGpuBuffer> transferBuffer,
380                             size_t offset);
381 
    // Called to perform a surface to surface copy. Fallbacks to issuing a draw from the src to dst
    // take place at higher levels and this function implements faster copy paths. The src and dst
384     // rects are pre-clipped. The src rect and dst rect are guaranteed to be within the
385     // src/dst bounds and non-empty. They must also be in their exact device space coords, including
386     // already being transformed for origin if need be. If canDiscardOutsideDstRect is set to true
387     // then we don't need to preserve any data on the dst surface outside of the copy.
388     //
389     // Backends may or may not support src and dst rects with differing dimensions. This can assume
390     // that GrCaps.canCopySurface() returned true for these surfaces and rects.
391     bool copySurface(GrSurface* dst, const SkIRect& dstRect,
392                      GrSurface* src, const SkIRect& srcRect,
393                      GrSamplerState::Filter filter);
394 
395     // Returns a GrOpsRenderPass which OpsTasks send draw commands to instead of directly
396     // to the Gpu object. The 'bounds' rect is the content rect of the renderTarget.
397     // If a 'stencil' is provided it will be the one bound to 'renderTarget'. If one is not
398     // provided but 'renderTarget' has a stencil buffer then that is a signal that the
399     // render target's stencil buffer should be ignored.
400     GrOpsRenderPass* getOpsRenderPass(
401             GrRenderTarget* renderTarget,
402             bool useMSAASurface,
403             GrAttachment* stencil,
404             GrSurfaceOrigin,
405             const SkIRect& bounds,
406             const GrOpsRenderPass::LoadAndStoreInfo&,
407             const GrOpsRenderPass::StencilLoadAndStoreInfo&,
408             const skia_private::TArray<GrSurfaceProxy*, true>& sampledProxies,
409             GrXferBarrierFlags renderPassXferBarriers);
410 
411     // Called by GrDrawingManager when flushing.
412     // Provides a hook for post-flush actions (e.g. Vulkan command buffer submits). This will also
413     // insert any numSemaphore semaphores on the gpu and set the backendSemaphores to match the
414     // inserted semaphores.
415     void executeFlushInfo(SkSpan<GrSurfaceProxy*>,
416                           SkSurfaces::BackendSurfaceAccess access,
417                           const GrFlushInfo&,
418                           std::optional<GrTimerQuery> timerQuery,
419                           const skgpu::MutableTextureState* newState);
420 
421     // Called before render tasks are executed during a flush.
willExecute()422     virtual void willExecute() {}
423 
submitToGpu()424     bool submitToGpu() {
425         return this->submitToGpu(GrSubmitInfo());
426     }
427     bool submitToGpu(const GrSubmitInfo& info);
428 
429     virtual void submit(GrOpsRenderPass*) = 0;
430 
431     [[nodiscard]] virtual std::unique_ptr<GrSemaphore> makeSemaphore(bool isOwned = true) = 0;
432     virtual std::unique_ptr<GrSemaphore> wrapBackendSemaphore(const GrBackendSemaphore&,
433                                                               GrSemaphoreWrapType,
434                                                               GrWrapOwnership) = 0;
435     virtual void insertSemaphore(GrSemaphore* semaphore) = 0;
436     virtual void waitSemaphore(GrSemaphore* semaphore) = 0;
437 
startTimerQuery()438     virtual std::optional<GrTimerQuery> startTimerQuery() { return {}; }
439 
440     virtual void addFinishedCallback(skgpu::AutoCallback, std::optional<GrTimerQuery> = {}) = 0;
441     virtual void checkFinishedCallbacks() = 0;
442     virtual void finishOutstandingGpuWork() = 0;
443 
444     // NOLINTNEXTLINE(performance-unnecessary-value-param)
takeOwnershipOfBuffer(sk_sp<GrGpuBuffer>)445     virtual void takeOwnershipOfBuffer(sk_sp<GrGpuBuffer>) {}
446 
447     /**
448      * Checks if we detected an OOM from the underlying 3D API and if so returns true and resets
449      * the internal OOM state to false. Otherwise, returns false.
450      */
451     bool checkAndResetOOMed();
452 
453     /**
454      *  Put this texture in a safe and known state for use across multiple contexts. Depending on
455      *  the backend, this may return a GrSemaphore. If so, other contexts should wait on that
456      *  semaphore before using this texture.
457      */
458     virtual std::unique_ptr<GrSemaphore> prepareTextureForCrossContextUsage(GrTexture*) = 0;
459 
460     /**
461      * Frees any backend specific objects that are not currently in use by the GPU. This is called
462      * when the client is trying to free up as much GPU memory as possible. We will not release
463      * resources connected to programs/pipelines since the cost to recreate those is significantly
464      * higher that other resources.
465      */
releaseUnlockedBackendObjects()466     virtual void releaseUnlockedBackendObjects() {}
467 
468     ///////////////////////////////////////////////////////////////////////////
469     // Debugging and Stats
470 
471     class Stats {
472     public:
473 #if GR_GPU_STATS
474         Stats() = default;
475 
reset()476         void reset() { *this = {}; }
477 
textureCreates()478         int textureCreates() const { return fTextureCreates; }
incTextureCreates()479         void incTextureCreates() { fTextureCreates++; }
480 
textureUploads()481         int textureUploads() const { return fTextureUploads; }
incTextureUploads()482         void incTextureUploads() { fTextureUploads++; }
483 
transfersToTexture()484         int transfersToTexture() const { return fTransfersToTexture; }
incTransfersToTexture()485         void incTransfersToTexture() { fTransfersToTexture++; }
486 
transfersFromSurface()487         int transfersFromSurface() const { return fTransfersFromSurface; }
incTransfersFromSurface()488         void incTransfersFromSurface() { fTransfersFromSurface++; }
489 
incBufferTransfers()490         void incBufferTransfers() { fBufferTransfers++; }
bufferTransfers()491         int bufferTransfers() const { return fBufferTransfers; }
492 
stencilAttachmentCreates()493         int stencilAttachmentCreates() const { return fStencilAttachmentCreates; }
incStencilAttachmentCreates()494         void incStencilAttachmentCreates() { fStencilAttachmentCreates++; }
495 
msaaAttachmentCreates()496         int msaaAttachmentCreates() const { return fMSAAAttachmentCreates; }
incMSAAAttachmentCreates()497         void incMSAAAttachmentCreates() { fMSAAAttachmentCreates++; }
498 
numDraws()499         int numDraws() const { return fNumDraws; }
incNumDraws()500         void incNumDraws() { fNumDraws++; }
501 
numFailedDraws()502         int numFailedDraws() const { return fNumFailedDraws; }
incNumFailedDraws()503         void incNumFailedDraws() { ++fNumFailedDraws; }
504 
numSubmitToGpus()505         int numSubmitToGpus() const { return fNumSubmitToGpus; }
incNumSubmitToGpus()506         void incNumSubmitToGpus() { ++fNumSubmitToGpus; }
507 
numScratchTexturesReused()508         int numScratchTexturesReused() const { return fNumScratchTexturesReused; }
incNumScratchTexturesReused()509         void incNumScratchTexturesReused() { ++fNumScratchTexturesReused; }
510 
numScratchMSAAAttachmentsReused()511         int numScratchMSAAAttachmentsReused() const { return fNumScratchMSAAAttachmentsReused; }
incNumScratchMSAAAttachmentsReused()512         void incNumScratchMSAAAttachmentsReused() { ++fNumScratchMSAAAttachmentsReused; }
513 
renderPasses()514         int renderPasses() const { return fRenderPasses; }
incRenderPasses()515         void incRenderPasses() { fRenderPasses++; }
516 
numReorderedDAGsOverBudget()517         int numReorderedDAGsOverBudget() const { return fNumReorderedDAGsOverBudget; }
incNumReorderedDAGsOverBudget()518         void incNumReorderedDAGsOverBudget() { fNumReorderedDAGsOverBudget++; }
519 
520 #if defined(GPU_TEST_UTILS)
521         void dump(SkString*);
522         void dumpKeyValuePairs(
523                 skia_private::TArray<SkString>* keys, skia_private::TArray<double>* values);
524 #endif
525     private:
526         int fTextureCreates = 0;
527         int fTextureUploads = 0;
528         int fTransfersToTexture = 0;
529         int fTransfersFromSurface = 0;
530         int fBufferTransfers = 0;
531         int fStencilAttachmentCreates = 0;
532         int fMSAAAttachmentCreates = 0;
533         int fNumDraws = 0;
534         int fNumFailedDraws = 0;
535         int fNumSubmitToGpus = 0;
536         int fNumScratchTexturesReused = 0;
537         int fNumScratchMSAAAttachmentsReused = 0;
538         int fRenderPasses = 0;
539         int fNumReorderedDAGsOverBudget = 0;
540 
541 #else  // !GR_GPU_STATS
542 
543 #if defined(GPU_TEST_UTILS)
544         void dump(SkString*) {}
545         void dumpKeyValuePairs(skia_private::TArray<SkString>*, skia_private::TArray<double>*) {}
546 #endif
547         void incTextureCreates() {}
548         void incTextureUploads() {}
549         void incTransfersToTexture() {}
550         void incBufferTransfers() {}
551         void incTransfersFromSurface() {}
552         void incStencilAttachmentCreates() {}
553         void incMSAAAttachmentCreates() {}
554         void incNumDraws() {}
555         void incNumFailedDraws() {}
556         void incNumSubmitToGpus() {}
557         void incNumScratchTexturesReused() {}
558         void incNumScratchMSAAAttachmentsReused() {}
559         void incRenderPasses() {}
560         void incNumReorderedDAGsOverBudget() {}
561 #endif
562     };
563 
stats()564     Stats* stats() { return &fStats; }
565     void dumpJSON(SkJSONWriter*) const;
566 
567 
568     /**
569      * Creates a texture directly in the backend API without wrapping it in a GrTexture.
570      * Must be matched with a call to deleteBackendTexture().
571      *
572      * If data is null the texture is uninitialized.
573      *
574      * If data represents a color then all texture levels are cleared to that color.
575      *
576      * If data represents pixmaps then it must have a either one pixmap or, if mipmapping
577      * is specified, a complete MIP hierarchy of pixmaps. Additionally, if provided, the mip
578      * levels must be sized correctly according to the MIP sizes implied by dimensions. They
579      * must all have the same color type and that color type must be compatible with the
580      * texture format.
581      */
582     GrBackendTexture createBackendTexture(SkISize dimensions,
583                                           const GrBackendFormat&,
584                                           GrRenderable,
585                                           skgpu::Mipmapped,
586                                           GrProtected,
587                                           std::string_view label);
588 
589     bool clearBackendTexture(const GrBackendTexture&,
590                              sk_sp<skgpu::RefCntedCallback> finishedCallback,
591                              std::array<float, 4> color);
592 
593     /**
594      * Same as the createBackendTexture case except compressed backend textures can
595      * never be renderable.
596      */
597     GrBackendTexture createCompressedBackendTexture(SkISize dimensions,
598                                                     const GrBackendFormat&,
599                                                     skgpu::Mipmapped,
600                                                     GrProtected);
601 
602     bool updateCompressedBackendTexture(const GrBackendTexture&,
603                                         sk_sp<skgpu::RefCntedCallback> finishedCallback,
604                                         const void* data,
605                                         size_t length);
606 
    // Asks the backend to update the mutable state it tracks for the texture (e.g. via
    // skgpu::MutableTextureState), optionally reporting the prior state through
    // `previousState`. The base implementation returns false, i.e. "not supported";
    // backends that track such state override this.
    virtual bool setBackendTextureState(const GrBackendTexture&,
                                        const skgpu::MutableTextureState&,
                                        skgpu::MutableTextureState* previousState,
                                        // NOLINTNEXTLINE(performance-unnecessary-value-param)
                                        sk_sp<skgpu::RefCntedCallback> finishedCallback) {
        return false;
    }

    // Render-target counterpart of setBackendTextureState(); likewise unsupported in the
    // base class.
    virtual bool setBackendRenderTargetState(const GrBackendRenderTarget&,
                                             const skgpu::MutableTextureState&,
                                             skgpu::MutableTextureState* previousState,
                                             // NOLINTNEXTLINE(performance-unnecessary-value-param)
                                             sk_sp<skgpu::RefCntedCallback> finishedCallback) {
        return false;
    }
622 
    /**
     * Frees a texture created by createBackendTexture(). If ownership of the backend
     * texture has been transferred to a context using adopt semantics this should not be called.
     */
    virtual void deleteBackendTexture(const GrBackendTexture&) = 0;

    /**
     * In this case we have a program descriptor and a program info but no render target.
     */
    virtual bool compile(const GrProgramDesc&, const GrProgramInfo&) = 0;

    // Gives the backend a chance to seed its shader/program cache from a previously
    // serialized key/data pair. The base implementation does not support this and
    // returns false.
    virtual bool precompileShader(const SkData& key, const SkData& data) { return false; }
635 
#if defined(GPU_TEST_UTILS)
    /** Check a handle represents an actual texture in the backend API that has not been freed. */
    virtual bool isTestingOnlyBackendTexture(const GrBackendTexture&) const = 0;

    /**
     * Creates a GrBackendRenderTarget that can be wrapped using
     * SkSurfaces::WrapBackendRenderTarget. Ideally this is a non-textureable allocation to
     * differentiate from testing with SkSurfaces::WrapBackendTexture. When sampleCnt > 1 this
     * is used to test client wrapped allocations with MSAA where Skia does not allocate a separate
     * buffer for resolving. If the color is non-null the backing store should be cleared to the
     * passed in color.
     *
     * NOTE(review): the paragraph above mentions a color parameter, but this signature has
     * none — the comment appears stale relative to the declaration; confirm against overrides.
     */
    virtual GrBackendRenderTarget createTestingOnlyBackendRenderTarget(
            SkISize dimensions,
            GrColorType,
            int sampleCount = 1,
            GrProtected = GrProtected::kNo) = 0;

    /**
     * Deletes a GrBackendRenderTarget allocated with the above. Synchronization to make this safe
     * is up to the caller.
     */
    virtual void deleteTestingOnlyBackendRenderTarget(const GrBackendRenderTarget&) = 0;

    // This is only to be used in GL-specific tests. Non-GL backends return nullptr.
    virtual const GrGLContext* glContextForTesting() const { return nullptr; }

    // This is only to be used by testing code. No-op in the base class.
    virtual void resetShaderCacheForTesting() const {}

    /**
     * Inserted as a pair around a block of code to do a GPU frame capture.
     * Currently only works with the Metal backend.
     */
    virtual void testingOnly_startCapture() {}
    virtual void testingOnly_stopCapture() {}
#endif
673 
    // width and height may be larger than rt (if underlying API allows it).
    // Returns nullptr if compatible sb could not be created, otherwise the caller owns the ref on
    // the GrAttachment.
    virtual sk_sp<GrAttachment> makeStencilAttachment(const GrBackendFormat& colorFormat,
                                                      SkISize dimensions,
                                                      int numStencilSamples) = 0;

    // Returns the backend's preferred stencil format to pair with the given color format.
    virtual GrBackendFormat getPreferredStencilFormat(const GrBackendFormat&) = 0;

    // Creates an MSAA surface to be used as an MSAA attachment on a framebuffer.
    virtual sk_sp<GrAttachment> makeMSAAAttachment(SkISize dimensions,
                                                   const GrBackendFormat& format,
                                                   int numSamples,
                                                   GrProtected isProtected,
                                                   GrMemoryless isMemoryless) = 0;
689 
handleDirtyContext()690     void handleDirtyContext() {
691         if (fResetBits) {
692             this->resetContext();
693         }
694     }
695 
    // Hook for persisting the Vulkan pipeline cache; no-op in the base class.
    virtual void storeVkPipelineCacheData() {}

    // Called before certain draws in order to guarantee coherent results from dst reads.
    virtual void xferBarrier(GrRenderTarget*, GrXferBarrierType) = 0;
700 
protected:
    // Returns true if `data`/`length` constitute a correctly sized payload for a compressed
    // texture of the given dimensions, compression type, and mipmap status.
    static bool CompressedDataIsCorrect(SkISize dimensions,
                                        SkTextureCompressionType,
                                        skgpu::Mipmapped,
                                        const void* data,
                                        size_t length);

    // If the surface is a texture this marks its mipmaps as dirty.
    void didWriteToSurface(GrSurface* surface,
                           GrSurfaceOrigin origin,
                           const SkIRect* bounds,
                           uint32_t mipLevels = 1) const;

    // Records that an out-of-memory condition was encountered (sets fOOMed).
    void setOOMed() { fOOMed = true; }

    Stats                            fStats;

    // Subclass must call this to initialize caps in its constructor.
    void initCaps(sk_sp<const GrCaps> caps);
720 
private:
    // Ends a GPU timer query. Backends without timer-query support must never reach this;
    // the default implementation aborts.
    virtual void endTimerQuery(const GrTimerQuery&) { SK_ABORT("timer query not supported."); }

    // Backend implementation behind createBackendTexture().
    virtual GrBackendTexture onCreateBackendTexture(SkISize dimensions,
                                                    const GrBackendFormat&,
                                                    GrRenderable,
                                                    skgpu::Mipmapped,
                                                    GrProtected,
                                                    std::string_view label) = 0;

    // Backend implementation behind createCompressedBackendTexture().
    virtual GrBackendTexture onCreateCompressedBackendTexture(SkISize dimensions,
                                                              const GrBackendFormat&,
                                                              skgpu::Mipmapped,
                                                              GrProtected) = 0;

    // Backend implementation behind clearBackendTexture().
    virtual bool onClearBackendTexture(const GrBackendTexture&,
                                       sk_sp<skgpu::RefCntedCallback> finishedCallback,
                                       std::array<float, 4> color) = 0;

    // Backend implementation behind updateCompressedBackendTexture().
    virtual bool onUpdateCompressedBackendTexture(const GrBackendTexture&,
                                                  sk_sp<skgpu::RefCntedCallback> finishedCallback,
                                                  const void* data,
                                                  size_t length) = 0;

    // called when the 3D context state is unknown. Subclass should emit any
    // assumed 3D context state and dirty any state cache.
    virtual void onResetContext(uint32_t resetBits) {}

    // Implementation of resetTextureBindings.
    virtual void onResetTextureBindings() {}
751 
752     // overridden by backend-specific derived class to create objects.
753     // Texture size, renderablility, format support, sample count will have already been validated
754     // in base class before onCreateTexture is called.
755     // If the ith bit is set in levelClearMask then the ith MIP level should be cleared.
756     virtual sk_sp<GrTexture> onCreateTexture(SkISize dimensions,
757                                              const GrBackendFormat&,
758                                              GrRenderable,
759                                              int renderTargetSampleCnt,
760                                              skgpu::Budgeted,
761                                              GrProtected,
762                                              int mipLevelCoont,
763                                              uint32_t levelClearMask,
764                                              std::string_view label) = 0;
    // Backend implementation for creating a compressed texture from pre-compressed data.
    virtual sk_sp<GrTexture> onCreateCompressedTexture(SkISize dimensions,
                                                       const GrBackendFormat&,
                                                       skgpu::Budgeted,
                                                       skgpu::Mipmapped,
                                                       GrProtected,
                                                       const void* data,
                                                       size_t dataSize) = 0;

    // Backend implementations for wrapping client-owned backend objects in Ganesh resources.
    virtual sk_sp<GrTexture> onWrapBackendTexture(const GrBackendTexture&,
                                                  GrWrapOwnership,
                                                  GrWrapCacheable,
                                                  GrIOType) = 0;

    virtual sk_sp<GrTexture> onWrapCompressedBackendTexture(const GrBackendTexture&,
                                                            GrWrapOwnership,
                                                            GrWrapCacheable) = 0;

    virtual sk_sp<GrTexture> onWrapRenderableBackendTexture(const GrBackendTexture&,
                                                            int sampleCnt,
                                                            GrWrapOwnership,
                                                            GrWrapCacheable) = 0;
    virtual sk_sp<GrRenderTarget> onWrapBackendRenderTarget(const GrBackendRenderTarget&) = 0;
    // Vulkan-only: wraps a secondary command buffer as a render target. Has a default
    // (non-pure) implementation defined out of line.
    virtual sk_sp<GrRenderTarget> onWrapVulkanSecondaryCBAsRenderTarget(const SkImageInfo&,
                                                                        const GrVkDrawableInfo&);
788 
    // Backend implementation for GPU buffer creation.
    virtual sk_sp<GrGpuBuffer> onCreateBuffer(size_t size,
                                              GrGpuBufferType intendedType,
                                              GrAccessPattern) = 0;

    // overridden by backend-specific derived class to perform the surface read
    virtual bool onReadPixels(GrSurface*,
                              SkIRect,
                              GrColorType surfaceColorType,
                              GrColorType dstColorType,
                              void*,
                              size_t rowBytes) = 0;

    // overridden by backend-specific derived class to perform the surface write
    virtual bool onWritePixels(GrSurface*,
                               SkIRect,
                               GrColorType surfaceColorType,
                               GrColorType srcColorType,
                               const GrMipLevel[],
                               int mipLevelCount,
                               bool prepForTexSampling) = 0;

    // overridden by backend-specific derived class to perform the buffer transfer
    virtual bool onTransferFromBufferToBuffer(sk_sp<GrGpuBuffer> src,
                                              size_t srcOffset,
                                              sk_sp<GrGpuBuffer> dst,
                                              size_t dstOffset,
                                              size_t size) = 0;

    // overridden by backend-specific derived class to perform the texture transfer
    virtual bool onTransferPixelsTo(GrTexture*,
                                    SkIRect,
                                    GrColorType textureColorType,
                                    GrColorType bufferColorType,
                                    sk_sp<GrGpuBuffer> transferBuffer,
                                    size_t offset,
                                    size_t rowBytes) = 0;

    // overridden by backend-specific derived class to perform the surface transfer
    virtual bool onTransferPixelsFrom(GrSurface*,
                                      SkIRect,
                                      GrColorType surfaceColorType,
                                      GrColorType bufferColorType,
                                      sk_sp<GrGpuBuffer> transferBuffer,
                                      size_t offset) = 0;
833 
    // overridden by backend-specific derived class to perform the resolve
    virtual void onResolveRenderTarget(GrRenderTarget* target, const SkIRect& resolveRect) = 0;

    // overridden by backend specific derived class to perform mip map level regeneration.
    virtual bool onRegenerateMipMapLevels(GrTexture*) = 0;

    // overridden by backend specific derived class to perform the copy surface
    virtual bool onCopySurface(GrSurface* dst, const SkIRect& dstRect,
                               GrSurface* src, const SkIRect& srcRect,
                               GrSamplerState::Filter) = 0;

    // Backend implementation behind getOpsRenderPass(): returns the render pass object used
    // to record draws targeting `renderTarget`.
    virtual GrOpsRenderPass* onGetOpsRenderPass(
            GrRenderTarget* renderTarget,
            bool useMSAASurface,
            GrAttachment* stencil,
            GrSurfaceOrigin,
            const SkIRect& bounds,
            const GrOpsRenderPass::LoadAndStoreInfo&,
            const GrOpsRenderPass::StencilLoadAndStoreInfo&,
            const skia_private::TArray<GrSurfaceProxy*, true>& sampledProxies,
            GrXferBarrierFlags renderPassXferBarriers) = 0;
855 
    // Gives the backend a chance to transition the given proxies' surfaces to the requested
    // access/mutable state before they are handed back to the client. No-op in the base class.
    virtual void prepareSurfacesForBackendAccessAndStateUpdates(
            SkSpan<GrSurfaceProxy*> proxies,
            SkSurfaces::BackendSurfaceAccess access,
            const skgpu::MutableTextureState* newState) {}

    // Backend implementation of command submission; returns false on failure.
    virtual bool onSubmitToGpu(const GrSubmitInfo& info) = 0;

    // Emits submit-related histograms; onReportSubmitHistograms() is the backend hook.
    void reportSubmitHistograms();
    virtual void onReportSubmitHistograms() {}

#ifdef SK_ENABLE_DUMP_GPU
    // Backend hook to append backend-specific data to a JSON dump. No-op by default.
    virtual void onDumpJSON(SkJSONWriter*) const {}
#endif
869 
    // Shared helper behind the public texture-creation entry points (presumably dispatches
    // to onCreateTexture() — defined out of line).
    sk_sp<GrTexture> createTextureCommon(SkISize,
                                         const GrBackendFormat&,
                                         GrTextureType textureType,
                                         GrRenderable,
                                         int renderTargetSampleCnt,
                                         skgpu::Budgeted,
                                         GrProtected,
                                         int mipLevelCnt,
                                         uint32_t levelClearMask,
                                         std::string_view label);

    // Forwards the accumulated reset bits to the backend, then clears them. Note the order:
    // onResetContext() sees the bits before they are zeroed.
    void resetContext() {
        this->onResetContext(fResetBits);
        fResetBits = 0;
    }
885 
    // Invokes (and presumably drains) fSubmittedProcs with the submit result.
    void callSubmittedProcs(bool success);

    sk_sp<const GrCaps>             fCaps;

    // Dirty-state bits consumed by resetContext()/handleDirtyContext().
    uint32_t fResetBits;
    // The context owns us, not vice-versa, so this ptr is not ref'ed by Gpu.
    GrDirectContext* fContext;

    // A client-supplied proc/context pair to be notified about a submit.
    struct SubmittedProc {
        SubmittedProc(GrGpuSubmittedProc proc, GrGpuSubmittedContext context)
                : fProc(proc), fContext(context) {}

        GrGpuSubmittedProc fProc;
        GrGpuSubmittedContext fContext;
    };
    skia_private::STArray<4, SubmittedProc> fSubmittedProcs;

    // Set by setOOMed() when an out-of-memory condition is recorded.
    bool fOOMed = false;

#if SK_HISTOGRAMS_ENABLED
    int fCurrentSubmitRenderPassCount = 0;
#endif

    using INHERITED = SkRefCnt;
910 };
911 
912 #endif
913