/*
 * Copyright 2020 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef GrDirectContext_DEFINED
#define GrDirectContext_DEFINED

#include "include/core/SkColor.h"
#include "include/core/SkRefCnt.h"
#include "include/core/SkTypes.h"
#include "include/gpu/GpuTypes.h"
#include "include/gpu/ganesh/GrContextOptions.h"
#include "include/gpu/ganesh/GrRecordingContext.h"
#include "include/gpu/ganesh/GrTypes.h"

#include <chrono>
#include <cstddef>
#include <cstdint>
#include <memory>
#include <string_view>

class GrAtlasManager;
class GrBackendSemaphore;
class GrBackendFormat;
class GrBackendTexture;
class GrBackendRenderTarget;
class GrClientMappedBufferManager;
class GrContextThreadSafeProxy;
class GrDirectContextPriv;
class GrGpu;
class GrResourceCache;
class GrResourceProvider;
class SkData;
class SkImage;
class SkPixmap;
class SkSurface;
class SkTaskGroup;
class SkTraceMemoryDump;
enum SkColorType : int;
enum class SkTextureCompressionType;
struct GrMockOptions;
struct GrD3DBackendContext; // IWYU pragma: keep

namespace skgpu {
    class MutableTextureState;
#if !defined(SK_ENABLE_OPTIMIZE_SIZE)
    namespace ganesh { class SmallPathAtlasMgr; }
#endif
}
namespace sktext { namespace gpu { class StrikeCache; } }
namespace wgpu { class Device; } // IWYU pragma: keep

namespace SkSurfaces {
enum class BackendSurfaceAccess;
}

class SK_API GrDirectContext : public GrRecordingContext {
public:
#ifdef SK_DIRECT3D
    /**
     * Makes a GrDirectContext which uses Direct3D as the backend. The Direct3D context
     * must be kept alive until the returned GrDirectContext is first destroyed or abandoned.
     */
    static sk_sp<GrDirectContext> MakeDirect3D(const GrD3DBackendContext&, const GrContextOptions&);
    static sk_sp<GrDirectContext> MakeDirect3D(const GrD3DBackendContext&);
#endif

    static sk_sp<GrDirectContext> MakeMock(const GrMockOptions*, const GrContextOptions&);
    static sk_sp<GrDirectContext> MakeMock(const GrMockOptions*);

    ~GrDirectContext() override;

    /**
     * The context normally assumes that no outsider is setting state
     * within the underlying 3D API's context/device/whatever. This call informs
     * the context that the state was modified and it should resend it. For good
     * performance this call shouldn't be made frequently.
     * The flag bits, state, are dependent on which backend is used by the
     * context, either GL or D3D (possible in future).
     */
    void resetContext(uint32_t state = kAll_GrBackendState);

    /**
     * If the backend is GrBackendApi::kOpenGL, then all texture unit/target combinations for which
     * the context has modified the bound texture will have texture id 0 bound. This does not
     * flush the context. Calling resetContext() does not change the set that will be bound
     * to texture id 0 on the next call to resetGLTextureBindings(). After this is called
     * all unit/target combinations are considered to have unmodified bindings until the context
     * subsequently modifies them (meaning if this is called twice in a row with no intervening
     * context usage then the second call is a no-op.)
     */
    void resetGLTextureBindings();

    /**
     * Abandons all GPU resources and assumes the underlying backend 3D API context is no longer
     * usable. Call this if you have lost the associated GPU context, and thus internal texture,
     * buffer, etc. references/IDs are now invalid. Calling this ensures that the destructors of the
     * context and any of its created resource objects will not make backend 3D API calls. Content
     * rendered but not previously flushed may be lost. After this function is called all subsequent
     * calls on the context will fail or be no-ops.
     *
     * The typical use case for this function is that the underlying 3D context was lost and further
     * API calls may crash.
     *
     * This call is not valid to be made inside ReleaseProcs passed into SkSurface or SkImages. The
     * call will simply fail (and assert in debug) if it is called while inside a ReleaseProc.
     *
     * For Vulkan, even if the device becomes lost, the VkQueue, VkDevice, or VkInstance used to
     * create the context must be kept alive even after abandoning the context. Those objects must
     * live for the lifetime of the context object itself. The reason for this is so that
     * we can continue to delete any outstanding GrBackendTextures/RenderTargets which must be
     * cleaned up even in a device lost state.
     */
    void abandonContext() override;

    /**
     * Returns true if the context was abandoned or if the backend specific context has gotten into
     * an unrecoverable, lost state (e.g. in Vulkan backend if we've gotten a
     * VK_ERROR_DEVICE_LOST). If the backend context is lost, this call will also abandon this
     * context.
     */
    bool abandoned() override;

    /**
     * Returns true if the backend specific context has gotten into an unrecoverable, lost state
     * (e.g. in Vulkan backend if we've gotten a VK_ERROR_DEVICE_LOST). If the backend context is
     * lost, this call will also abandon this context.
     */
    bool isDeviceLost();

    // TODO: Remove this from public after migrating Chrome.
    sk_sp<GrContextThreadSafeProxy> threadSafeProxy();

    /**
     * Checks if the underlying 3D API reported an out-of-memory error. If this returns true it is
     * reset and will return false until another out-of-memory error is reported by the 3D API. If
     * the context is abandoned then this will report false.
     *
     * Currently this is implemented for:
     *
     * OpenGL [ES] - Note that client calls to glGetError() may swallow GL_OUT_OF_MEMORY errors and
     * therefore hide the error from Skia. Also, it is not advised to use this in combination with
     * enabling GrContextOptions::fSkipGLErrorChecks. That option may prevent the context from ever
     * checking the GL context for OOM.
     *
     * Vulkan - Reports true if VK_ERROR_OUT_OF_HOST_MEMORY or VK_ERROR_OUT_OF_DEVICE_MEMORY has
     * occurred.
     */
    bool oomed();
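
    // Illustrative usage sketch (not part of the API above): polling for an out-of-memory
    // report after submitting work. `directContext` is assumed to be a valid, non-abandoned
    // GrDirectContext obtained elsewhere.
    //
    //     directContext->flushAndSubmit();
    //     if (directContext->oomed()) {
    //         // The 3D API reported OOM since the last check; shrink the budget and purge.
    //         directContext->setResourceCacheLimit(directContext->getResourceCacheLimit() / 2);
    //         directContext->purgeUnlockedResources(GrPurgeResourceOptions::kAllResources);
    //     }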

    /**
     * This is similar to abandonContext() however the underlying 3D context is not yet lost and
     * the context will clean up all allocated resources before returning. After returning it will
     * assume that the underlying context may no longer be valid.
     *
     * The typical use case for this function is that the client is going to destroy the 3D context
     * but can't guarantee that context will be destroyed first (perhaps because it may be ref'ed
     * elsewhere by either the client or Skia objects).
     *
     * For Vulkan, even if the device becomes lost, the VkQueue, VkDevice, or VkInstance used to
     * create the context must be alive before calling releaseResourcesAndAbandonContext.
     */
    void releaseResourcesAndAbandonContext();
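
    // Illustrative teardown sketch (assumption: `directContext` is being destroyed while the
    // underlying 3D API context is still alive and may outlive Skia).
    //
    //     // Underlying context still valid: release GPU objects cleanly before detaching.
    //     directContext->releaseResourcesAndAbandonContext();
    //
    //     // Underlying context already lost (e.g. device removed): just detach.
    //     // directContext->abandonContext();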

    ///////////////////////////////////////////////////////////////////////////
    // Resource Cache

    /** DEPRECATED
     *  Return the current GPU resource cache limits.
     *
     *  @param maxResources If non-null, will be set to -1.
     *  @param maxResourceBytes If non-null, returns maximum number of bytes of
     *                          video memory that can be held in the cache.
     */
    void getResourceCacheLimits(int* maxResources, size_t* maxResourceBytes) const;

    /**
     *  Return the current GPU resource cache limit in bytes.
     */
    size_t getResourceCacheLimit() const;

    /**
     *  Gets the current GPU resource cache usage.
     *
     *  @param resourceCount If non-null, returns the number of resources that are held in the
     *                       cache.
     *  @param resourceBytes If non-null, returns the total number of bytes of video memory held
     *                       in the cache.
     */
    void getResourceCacheUsage(int* resourceCount, size_t* resourceBytes) const;

    /**
     *  Gets the number of bytes in the cache consumed by purgeable (e.g. unlocked) resources.
     */
    size_t getResourceCachePurgeableBytes() const;

    /** DEPRECATED
     *  Specify the GPU resource cache limits. If the current cache exceeds the maxResourceBytes
     *  limit, it will be purged (LRU) to keep the cache within the limit.
     *
     *  @param maxResources Unused.
     *  @param maxResourceBytes The maximum number of bytes of video memory
     *                          that can be held in the cache.
     */
    void setResourceCacheLimits(int maxResources, size_t maxResourceBytes);

    /**
     *  Specify the GPU resource cache limit. If the cache currently exceeds this limit,
     *  it will be purged (LRU) to keep the cache within the limit.
     *
     *  @param maxResourceBytes The maximum number of bytes of video memory
     *                          that can be held in the cache.
     */
    void setResourceCacheLimit(size_t maxResourceBytes);
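
    // Illustrative sketch: inspecting cache usage and capping the budget. The 96 MB figure is
    // an arbitrary example value, not a recommendation.
    //
    //     int resourceCount = 0;
    //     size_t resourceBytes = 0;
    //     directContext->getResourceCacheUsage(&resourceCount, &resourceBytes);
    //     if (resourceBytes > directContext->getResourceCacheLimit()) {
    //         // Usage can exceed the limit while non-purgeable (in-use) resources are alive.
    //     }
    //     directContext->setResourceCacheLimit(96 * 1024 * 1024);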

    /**
     * Frees GPU resources created by the context. Can be called to reduce GPU memory
     * pressure.
     */
    void freeGpuResources();

    /**
     * Purge GPU resources that haven't been used in the past 'msNotUsed' milliseconds or are
     * otherwise marked for deletion, regardless of whether the context is under budget.
     *
     * @param msNotUsed   Only unlocked resources not used in these last milliseconds will be
     *                    cleaned up.
     * @param opts        Specify which resources should be cleaned up. If kScratchResourcesOnly
     *                    then all unlocked scratch resources older than 'msNotUsed' will be purged
     *                    but the unlocked resources with persistent data will remain. If
     *                    kAllResources then all unlocked resources older than 'msNotUsed' will be
     *                    purged.
     */
    void performDeferredCleanup(
            std::chrono::milliseconds msNotUsed,
            GrPurgeResourceOptions opts = GrPurgeResourceOptions::kAllResources);

    // Temporary compatibility API for Android.
    void purgeResourcesNotUsedInMs(std::chrono::milliseconds msNotUsed) {
        this->performDeferredCleanup(msNotUsed);
    }
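
    // Illustrative sketch: periodic housekeeping, e.g. run once per frame or on a timer.
    // Purges only scratch resources that have gone unused for at least five seconds.
    //
    //     directContext->performDeferredCleanup(std::chrono::seconds(5),
    //                                           GrPurgeResourceOptions::kScratchResourcesOnly);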

    /**
     * Purge unlocked resources from the cache until the provided byte count has been reached
     * or we have purged all unlocked resources. The default policy is to purge in LRU order, but
     * can be overridden to prefer purging scratch resources (in LRU order) prior to purging other
     * resource types.
     *
     * @param bytesToPurge the desired number of bytes to be purged.
     * @param preferScratchResources If true scratch resources will be purged prior to other
     *                               resource types.
     */
    void purgeUnlockedResources(size_t bytesToPurge, bool preferScratchResources);

    /**
     * This entry point is intended for instances where an app has been backgrounded or
     * suspended.
     * If 'scratchResourcesOnly' is true all unlocked scratch resources will be purged but the
     * unlocked resources with persistent data will remain. If 'scratchResourcesOnly' is false
     * then all unlocked resources will be purged.
     * In either case, after the unlocked resources are purged a separate pass will be made to
     * ensure that resource usage is under budget (i.e., even if 'scratchResourcesOnly' is true
     * some resources with persistent data may be purged to be under budget).
     *
     * @param opts If kScratchResourcesOnly only unlocked scratch resources will be purged prior
     *             to enforcing the budget requirements.
     */
    void purgeUnlockedResources(GrPurgeResourceOptions opts);
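
    // Illustrative sketch: responding to an app-lifecycle "entered background" notification
    // (the notification mechanism itself is platform-specific and assumed here).
    //
    //     void onAppBackgrounded(GrDirectContext* directContext) {
    //         directContext->purgeUnlockedResources(GrPurgeResourceOptions::kScratchResourcesOnly);
    //     }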

    /*
     * Gets the types of GPU stats supported by this Context.
     */
    skgpu::GpuStatsFlags supportedGpuStats() const;

    /**
     * Gets the maximum supported texture size.
     */
    using GrRecordingContext::maxTextureSize;

    /**
     * Gets the maximum supported render target size.
     */
    using GrRecordingContext::maxRenderTargetSize;

    /**
     * Can a SkImage be created with the given color type.
     */
    using GrRecordingContext::colorTypeSupportedAsImage;

    /**
     * Does this context support protected content?
     */
    using GrRecordingContext::supportsProtectedContent;

    /**
     * Can a SkSurface be created with the given color type. To check whether MSAA is supported
     * use maxSurfaceSampleCountForColorType().
     */
    using GrRecordingContext::colorTypeSupportedAsSurface;

    /**
     * Gets the maximum supported sample count for a color type. 1 is returned if only non-MSAA
     * rendering is supported for the color type. 0 is returned if rendering to this color type
     * is not supported at all.
     */
    using GrRecordingContext::maxSurfaceSampleCountForColorType;

    ///////////////////////////////////////////////////////////////////////////
    // Misc.

    /**
     * Inserts a list of GPU semaphores that the current GPU-backed API must wait on before
     * executing any more commands on the GPU. We only guarantee blocking transfer and fragment
     * shader work, but may block earlier stages as well depending on the backend. If this call
     * returns false, then the GPU back-end will not wait on any passed in semaphores, and the
     * client will still own the semaphores, regardless of the value of deleteSemaphoresAfterWait.
     *
     * If deleteSemaphoresAfterWait is false then Skia will not delete the semaphores. In this case
     * it is the client's responsibility to not destroy or attempt to reuse the semaphores until it
     * knows that Skia has finished waiting on them. This can be done by using finishedProcs on
     * flush calls.
     *
     * This is not supported on the GL backend.
     */
    bool wait(int numSemaphores, const GrBackendSemaphore* waitSemaphores,
              bool deleteSemaphoresAfterWait = true);
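
    // Illustrative sketch: making Skia's upcoming GPU work wait on client-signaled semaphores.
    // How the GrBackendSemaphore objects are created is backend-specific and assumed here;
    // note this is not supported on the GL backend.
    //
    //     GrBackendSemaphore semaphores[2] = { /* backend-specific */ };
    //     bool willWait = directContext->wait(2, semaphores,
    //                                         /*deleteSemaphoresAfterWait=*/false);
    //     if (!willWait) {
    //         // Skia will not wait; the client still owns the semaphores and must clean them up.
    //     }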

    /**
     * Call to ensure all drawing to the context has been flushed and submitted to the underlying 3D
     * API. This is equivalent to calling GrContext::flush with a default GrFlushInfo followed by
     * GrContext::submit(sync).
     */
    void flushAndSubmit(GrSyncCpu sync = GrSyncCpu::kNo) {
        this->flush(GrFlushInfo());
        this->submit(sync);
    }

    /**
     * Call to ensure all drawing to the context has been flushed to underlying 3D API specific
     * objects. A call to `submit` is always required to ensure work is actually sent to
     * the gpu. Some specific API details:
     *     GL: Commands are actually sent to the driver, but glFlush is never called. Thus some
     *         sync objects from the flush will not be valid until a submission occurs.
     *
     *     Vulkan/Metal/D3D/Dawn: Commands are recorded to the backend APIs corresponding command
     *         buffer or encoder objects. However, these objects are not sent to the gpu until a
     *         submission occurs.
     *
     * If the return is GrSemaphoresSubmitted::kYes, only initialized GrBackendSemaphores will be
     * submitted to the gpu during the next submit call (it is possible Skia failed to create a
     * subset of the semaphores). The client should not wait on these semaphores until after submit
     * has been called, and must keep them alive until then. If this call returns
     * GrSemaphoresSubmitted::kNo, the GPU backend will not submit any semaphores to be signaled on
     * the GPU. Thus the client should not have the GPU wait on any of the semaphores passed in with
     * the GrFlushInfo. Regardless of whether semaphores were submitted to the GPU or not, the
     * client is still responsible for deleting any initialized semaphores.
     * Regardless of semaphore submission the context will still be flushed. It should be
     * emphasized that a return value of GrSemaphoresSubmitted::kNo does not mean the flush did not
     * happen. It simply means there were no semaphores submitted to the GPU. A caller should only
     * take this as a failure if they passed in semaphores to be submitted.
     */
    GrSemaphoresSubmitted flush(const GrFlushInfo& info);

    void flush() { this->flush(GrFlushInfo()); }

    /** Flushes any pending uses of texture-backed images in the GPU backend. If the image is not
     *  texture-backed (including promise texture images) or if the GrDirectContext does not
     *  have the same context ID as the context backing the image then this is a no-op.
     *  If the image was not used in any non-culled draws in the current queue of work for the
     *  passed GrDirectContext then this is a no-op unless the GrFlushInfo contains semaphores or
     *  a finish proc. Those are respected even when the image has not been used.
     *  @param image    the non-null image to flush.
     *  @param info     flush options
     */
    GrSemaphoresSubmitted flush(const sk_sp<const SkImage>& image, const GrFlushInfo& info);
    void flush(const sk_sp<const SkImage>& image);

    /** Version of flush() that uses a default GrFlushInfo. Also submits the flushed work to the
     *   GPU.
     */
    void flushAndSubmit(const sk_sp<const SkImage>& image);

    /** Issues pending SkSurface commands to the GPU-backed API objects and resolves any SkSurface
     *  MSAA. A call to GrDirectContext::submit is always required to ensure work is actually sent
     *  to the gpu. Some specific API details:
     *      GL: Commands are actually sent to the driver, but glFlush is never called. Thus some
     *          sync objects from the flush will not be valid until a submission occurs.
     *
     *      Vulkan/Metal/D3D/Dawn: Commands are recorded to the backend APIs corresponding command
     *          buffer or encoder objects. However, these objects are not sent to the gpu until a
     *          submission occurs.
     *
     *  The work that is submitted to the GPU will be dependent on the BackendSurfaceAccess that is
     *  passed in.
     *
     *  If BackendSurfaceAccess::kNoAccess is passed in all commands will be issued to the GPU.
     *
     *  If BackendSurfaceAccess::kPresent is passed in and the backend API is not Vulkan, it is
     *  treated the same as kNoAccess. If the backend API is Vulkan, the VkImage that backs the
     *  SkSurface will be transferred back to its original queue. If the SkSurface was created by
     *  wrapping a VkImage, the queue will be set to the queue which was originally passed in on
     *  the GrVkImageInfo. Additionally, if the original queue was not external or foreign the
     *  layout of the VkImage will be set to VK_IMAGE_LAYOUT_PRESENT_SRC_KHR.
     *
     *  The GrFlushInfo describes additional options to flush. Please see documentation at
     *  GrFlushInfo for more info.
     *
     *  If the return is GrSemaphoresSubmitted::kYes, only initialized GrBackendSemaphores will be
     *  submitted to the gpu during the next submit call (it is possible Skia failed to create a
     *  subset of the semaphores). The client should not wait on these semaphores until after submit
     *  has been called, but must keep them alive until then. If a submit flag was passed in with
     *  the flush these valid semaphores can be waited on immediately. If this call returns
     *  GrSemaphoresSubmitted::kNo, the GPU backend will not submit any semaphores to be signaled on
     *  the GPU. Thus the client should not have the GPU wait on any of the semaphores passed in
     *  with the GrFlushInfo. Regardless of whether semaphores were submitted to the GPU or not, the
     *  client is still responsible for deleting any initialized semaphores.
     *  Regardless of semaphore submission the context will still be flushed. It should be
     *  emphasized that a return value of GrSemaphoresSubmitted::kNo does not mean the flush did not
     *  happen. It simply means there were no semaphores submitted to the GPU. A caller should only
     *  take this as a failure if they passed in semaphores to be submitted.
     *
     *  Pending surface commands are flushed regardless of the return result.
     *
     *  @param surface  The GPU backed surface to be flushed. Has no effect on a CPU-backed surface.
     *  @param access  type of access the call will do on the backend object after flush
     *  @param info    flush options
     */
    GrSemaphoresSubmitted flush(SkSurface* surface,
                                SkSurfaces::BackendSurfaceAccess access,
                                const GrFlushInfo& info);
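
    // Illustrative present-path sketch (assumption: `surface` is a GPU-backed SkSurface that was
    // created against this context; its creation is outside the scope of this header).
    //
    //     directContext->flush(surface, SkSurfaces::BackendSurfaceAccess::kPresent, GrFlushInfo());
    //     directContext->submit();
    //     // ... hand the backing object to the windowing system / swapchain ...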

    /**
     *  Same as above except:
     *
     *  If a skgpu::MutableTextureState is passed in, at the end of the flush we will transition
     *  the surface to be in the state requested by the skgpu::MutableTextureState. If the surface
     *  (or SkImage or GrBackendSurface wrapping the same backend object) is used again after this
     *  flush the state may be changed and no longer match what is requested here. This is often
     *  used if the surface will be used for presenting or external use and the client wants the
     *  backend object to be prepped for that use. A finishedProc or semaphore on the GrFlushInfo
     *  will also include the work for any requested state change.
     *
     *  If the backend API is Vulkan, the caller can set the skgpu::MutableTextureState's
     *  VkImageLayout to VK_IMAGE_LAYOUT_UNDEFINED or queueFamilyIndex to VK_QUEUE_FAMILY_IGNORED to
     *  tell Skia to not change those respective states.
     *
     *  @param surface  The GPU backed surface to be flushed. Has no effect on a CPU-backed surface.
     *  @param info     flush options
     *  @param newState optional state change request after flush
     */
    GrSemaphoresSubmitted flush(SkSurface* surface,
                                const GrFlushInfo& info,
                                const skgpu::MutableTextureState* newState = nullptr);

    /** Call to ensure all reads/writes of the surface have been issued to the underlying 3D API.
     *  Skia will correctly order its own draws and pixel operations. This must be used to ensure
     *  correct ordering when the surface backing store is accessed outside Skia (e.g. direct use of
     *  the 3D API or a windowing system). This is equivalent to
     *  calling ::flush with a default GrFlushInfo followed by ::submit(syncCpu).
     *
     *  Has no effect on a CPU-backed surface.
     */
    void flushAndSubmit(SkSurface* surface, GrSyncCpu sync = GrSyncCpu::kNo);

    /**
     * Flushes the given surface with the default GrFlushInfo.
     *
     * Has no effect on a CPU-backed surface.
     */
    void flush(SkSurface* surface);

    /**
     * Submit outstanding work to the gpu from all previously un-submitted flushes. The return
     * value of the submit will indicate whether or not the submission to the GPU was successful.
     *
     * If the call returns true, all previously passed in semaphores in flush calls will have been
     * submitted to the GPU and they can safely be waited on. The caller should wait on those
     * semaphores or perform some other global synchronization before deleting the semaphores.
     *
     * If it returns false, then those same semaphores will not have been submitted and we will not
     * try to submit them again. The caller is free to delete the semaphores at any time.
     *
     * If GrSubmitInfo::fSync flag is GrSyncCpu::kYes, this function will return once the gpu has
     * finished with all submitted work.
     *
     * If GrSubmitInfo::fMarkBoundary flag is GrMarkFrameBoundary::kYes and the GPU supports a way
     * to be notified about frame boundaries, then we will notify the GPU during/after the
     * submission of work to the GPU. GrSubmitInfo::fFrameID is a frame ID that is passed to the
     * GPU when marking a boundary. Ideally this value should be unique for each frame. Currently
     * marking frame boundaries is only supported with the Vulkan backend and only if the
     * VK_EXT_frame_boundary extension is available.
     */
    bool submit(const GrSubmitInfo&);

    bool submit(GrSyncCpu sync = GrSyncCpu::kNo) {
        GrSubmitInfo info;
        info.fSync = sync;

        return this->submit(info);
    }
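
    // Illustrative sketch: a flush that requests a finished-callback, followed by the submit
    // that actually hands the recorded work to the GPU. `onGpuWorkFinished` and
    // `myFinishedContext` are hypothetical client code, not part of Skia.
    //
    //     GrFlushInfo flushInfo;
    //     flushInfo.fFinishedProc = onGpuWorkFinished;     // assumed client callback
    //     flushInfo.fFinishedContext = myFinishedContext;  // assumed client data
    //     directContext->flush(flushInfo);
    //     if (!directContext->submit()) {
    //         // Submission failed; any semaphores in flushInfo were not submitted.
    //     }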

    /**
     * Checks whether any asynchronous work is complete and if so calls related callbacks.
     */
    void checkAsyncWorkCompletion();

    /** Enumerates all cached GPU resources and dumps their memory to traceMemoryDump. */
    // Chrome is using this!
    void dumpMemoryStatistics(SkTraceMemoryDump* traceMemoryDump) const;

    bool supportsDistanceFieldText() const;

    void storeVkPipelineCacheData();

    /**
     * Retrieve the default GrBackendFormat for a given SkColorType and renderability.
     * It is guaranteed that this backend format will be the one used by the following
     * SkColorType and GrSurfaceCharacterization-based createBackendTexture methods.
     *
     * The caller should check that the returned format is valid.
     */
    using GrRecordingContext::defaultBackendFormat;

    /**
     * The explicitly allocated backend texture API allows clients to use Skia to create backend
     * objects outside of Skia proper (i.e., Skia's caching system will not know about them.)
     *
     * It is the client's responsibility to delete all these objects (using deleteBackendTexture)
     * before deleting the context used to create them. If the backend is Vulkan, the textures must
     * be deleted before abandoning the context as well. Additionally, clients should only delete
     * these objects on the thread for which that context is active.
     *
     * The client is responsible for ensuring synchronization between different uses
     * of the backend object (i.e., wrapping it in a surface, rendering to it, deleting the
     * surface, rewrapping it in an image and drawing the image will require explicit
     * synchronization on the client's part).
     */

     /**
      * If possible, create an uninitialized backend texture. The client should ensure that the
      * returned backend texture is valid.
      * For the Vulkan backend the layout of the created VkImage will be:
      *      VK_IMAGE_LAYOUT_UNDEFINED.
      */
    GrBackendTexture createBackendTexture(int width,
                                          int height,
                                          const GrBackendFormat&,
                                          skgpu::Mipmapped,
                                          GrRenderable,
                                          GrProtected = GrProtected::kNo,
                                          std::string_view label = {});

    /**
     * If possible, create an uninitialized backend texture. The client should ensure that the
     * returned backend texture is valid.
     * If successful, the created backend texture will be compatible with the provided
     * SkColorType.
     * For the Vulkan backend the layout of the created VkImage will be:
     *      VK_IMAGE_LAYOUT_UNDEFINED.
     */
    GrBackendTexture createBackendTexture(int width,
                                          int height,
                                          SkColorType,
                                          skgpu::Mipmapped,
                                          GrRenderable,
                                          GrProtected = GrProtected::kNo,
                                          std::string_view label = {});

    /**
     * If possible, create a backend texture initialized to a particular color. The client should
     * ensure that the returned backend texture is valid. The client can pass in a finishedProc
     * to be notified when the data has been uploaded by the gpu and the texture can be deleted. The
     * client is required to call `submit` to send the upload work to the gpu. The
     * finishedProc will always get called even if we failed to create the GrBackendTexture.
     * For the Vulkan backend the layout of the created VkImage will be:
     *      VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL
     */
    GrBackendTexture createBackendTexture(int width,
                                          int height,
                                          const GrBackendFormat&,
                                          const SkColor4f& color,
                                          skgpu::Mipmapped,
                                          GrRenderable,
                                          GrProtected = GrProtected::kNo,
                                          GrGpuFinishedProc finishedProc = nullptr,
                                          GrGpuFinishedContext finishedContext = nullptr,
                                          std::string_view label = {});

    /**
     * If possible, create a backend texture initialized to a particular color. The client should
     * ensure that the returned backend texture is valid. The client can pass in a finishedProc
     * to be notified when the data has been uploaded by the gpu and the texture can be deleted. The
     * client is required to call `submit` to send the upload work to the gpu. The
     * finishedProc will always get called even if we failed to create the GrBackendTexture.
     * If successful, the created backend texture will be compatible with the provided
     * SkColorType.
     * For the Vulkan backend the layout of the created VkImage will be:
     *      VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL
     */
    GrBackendTexture createBackendTexture(int width,
                                          int height,
                                          SkColorType,
                                          const SkColor4f& color,
                                          skgpu::Mipmapped,
                                          GrRenderable,
                                          GrProtected = GrProtected::kNo,
                                          GrGpuFinishedProc finishedProc = nullptr,
                                          GrGpuFinishedContext finishedContext = nullptr,
                                          std::string_view label = {});

    /**
     * If possible, create a backend texture initialized with the provided pixmap data. The client
     * should ensure that the returned backend texture is valid. The client can pass in a
     * finishedProc to be notified when the data has been uploaded by the gpu and the texture can be
     * deleted. The client is required to call `submit` to send the upload work to the gpu.
     * The finishedProc will always get called even if we failed to create the GrBackendTexture.
     * If successful, the created backend texture will be compatible with the provided
     * pixmap(s). Compatible, in this case, means that the backend format will be the result
     * of calling defaultBackendFormat on the base pixmap's colortype. The src data can be deleted
     * when this call returns.
     * If numLevels is 1 a non-mipmapped texture will result. If a mipmapped texture is desired
     * the data for all the mipmap levels must be provided. In the mipmapped case all the
     * colortypes of the provided pixmaps must be the same. Additionally, all the miplevels
     * must be sized correctly (please see SkMipmap::ComputeLevelSize and ComputeLevelCount). The
     * GrSurfaceOrigin controls whether the pixmap data is vertically flipped in the texture.
     * Note: the pixmap's alphatypes and colorspaces are ignored.
     * For the Vulkan backend the layout of the created VkImage will be:
     *      VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL
     */
    GrBackendTexture createBackendTexture(const SkPixmap srcData[],
                                          int numLevels,
                                          GrSurfaceOrigin,
                                          GrRenderable,
                                          GrProtected,
                                          GrGpuFinishedProc finishedProc = nullptr,
                                          GrGpuFinishedContext finishedContext = nullptr,
                                          std::string_view label = {});

    /**
     * Convenience version of createBackendTexture() that takes just a base level pixmap.
     */
     GrBackendTexture createBackendTexture(const SkPixmap& srcData,
                                           GrSurfaceOrigin textureOrigin,
                                           GrRenderable renderable,
                                           GrProtected isProtected,
                                           GrGpuFinishedProc finishedProc = nullptr,
                                           GrGpuFinishedContext finishedContext = nullptr,
                                           std::string_view label = {});
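
    // Illustrative sketch: creating a backend texture from CPU pixel data and cleaning it up.
    // The pixmap's pixel storage is assumed to exist elsewhere; the texture must be deleted with
    // deleteBackendTexture() before this context is destroyed.
    //
    //     SkPixmap pixmap = /* assumed: valid dimensions and pixels */;
    //     GrBackendTexture beTex = directContext->createBackendTexture(
    //             pixmap, kTopLeft_GrSurfaceOrigin, GrRenderable::kNo, GrProtected::kNo);
    //     if (beTex.isValid()) {
    //         // ... wrap it in an SkImage/SkSurface, draw, flush and submit ...
    //         directContext->deleteBackendTexture(beTex);
    //     }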

    // Deprecated versions that do not take origin and assume top-left.
    GrBackendTexture createBackendTexture(const SkPixmap srcData[],
                                          int numLevels,
                                          GrRenderable renderable,
                                          GrProtected isProtected,
                                          GrGpuFinishedProc finishedProc = nullptr,
                                          GrGpuFinishedContext finishedContext = nullptr,
                                          std::string_view label = {});

    GrBackendTexture createBackendTexture(const SkPixmap& srcData,
                                          GrRenderable renderable,
                                          GrProtected isProtected,
                                          GrGpuFinishedProc finishedProc = nullptr,
                                          GrGpuFinishedContext finishedContext = nullptr,
                                          std::string_view label = {});

    /**
     * If possible, updates a backend texture to be filled to a particular color. The client should
     * check the return value to see if the update was successful. The client can pass in a
     * finishedProc to be notified when the data has been uploaded by the gpu and the texture can be
     * deleted. The client is required to call `submit` to send the upload work to the gpu.
     * The finishedProc will always get called even if we failed to update the GrBackendTexture.
     * For the Vulkan backend after a successful update the layout of the created VkImage will be:
     *      VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL
     */
    bool updateBackendTexture(const GrBackendTexture&,
                              const SkColor4f& color,
                              GrGpuFinishedProc finishedProc,
                              GrGpuFinishedContext finishedContext);

    /**
     * If possible, updates a backend texture to be filled to a particular color. The data in
     * GrBackendTexture and passed in color is interpreted with respect to the passed in
     * SkColorType. The client should check the return value to see if the update was successful.
     * The client can pass in a finishedProc to be notified when the data has been uploaded by the
     * gpu and the texture can be deleted. The client is required to call `submit` to send
     * the upload work to the gpu. The finishedProc will always get called even if we failed to
     * update the GrBackendTexture.
     * For the Vulkan backend after a successful update the layout of the created VkImage will be:
     *      VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL
     */
    bool updateBackendTexture(const GrBackendTexture&,
                              SkColorType skColorType,
                              const SkColor4f& color,
                              GrGpuFinishedProc finishedProc,
                              GrGpuFinishedContext finishedContext);

    /**
     * If possible, updates a backend texture filled with the provided pixmap data. The client
     * should check the return value to see if the update was successful. The client can pass in a
     * finishedProc to be notified when the data has been uploaded by the gpu and the texture can be
     * deleted. The client is required to call `submit` to send the upload work to the gpu.
     * The finishedProc will always get called even if we failed to update the GrBackendTexture.
     * The backend texture must be compatible with the provided pixmap(s). Compatible, in this case,
     * means that the backend format is compatible with the base pixmap's colortype. The src data
     * can be deleted when this call returns.
     * If the backend texture is mip mapped, the data for all the mipmap levels must be provided.
     * In the mipmapped case all the colortypes of the provided pixmaps must be the same.
     * Additionally, all the miplevels must be sized correctly (please see
     * SkMipmap::ComputeLevelSize and ComputeLevelCount). The GrSurfaceOrigin controls whether the
     * pixmap data is vertically flipped in the texture.
     * Note: the pixmap's alphatypes and colorspaces are ignored.
     * For the Vulkan backend after a successful update the layout of the created VkImage will be:
     *      VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL
     */
    bool updateBackendTexture(const GrBackendTexture&,
                              const SkPixmap srcData[],
                              int numLevels,
                              GrSurfaceOrigin = kTopLeft_GrSurfaceOrigin,
                              GrGpuFinishedProc finishedProc = nullptr,
                              GrGpuFinishedContext finishedContext = nullptr);

    /**
     * Convenience version of updateBackendTexture that takes just a base level pixmap.
     */
    bool updateBackendTexture(const GrBackendTexture& texture,
                              const SkPixmap& srcData,
                              GrSurfaceOrigin textureOrigin = kTopLeft_GrSurfaceOrigin,
                              GrGpuFinishedProc finishedProc = nullptr,
                              GrGpuFinishedContext finishedContext = nullptr) {
        return this->updateBackendTexture(texture,
                                          &srcData,
                                          1,
                                          textureOrigin,
                                          finishedProc,
                                          finishedContext);
    }

    // Deprecated version that does not take origin and assumes top-left.
    bool updateBackendTexture(const GrBackendTexture& texture,
                              const SkPixmap srcData[],
                              int numLevels,
                              GrGpuFinishedProc finishedProc,
                              GrGpuFinishedContext finishedContext);

    /**
     * Retrieve the GrBackendFormat for a given SkTextureCompressionType. This is
     * guaranteed to match the backend format used by the following
     * createCompressedBackendTexture methods that take a CompressionType.
     *
     * The caller should check that the returned format is valid.
     */
    using GrRecordingContext::compressedBackendFormat;

    /**
     * If possible, create a compressed backend texture initialized to a particular color. The
     * client should ensure that the returned backend texture is valid. The client can pass in a
     * finishedProc to be notified when the data has been uploaded by the gpu and the texture can be
     * deleted. The client is required to call `submit` to send the upload work to the gpu.
     * The finishedProc will always get called even if we failed to create the GrBackendTexture.
     * For the Vulkan backend the layout of the created VkImage will be:
     *      VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL
     */
    GrBackendTexture createCompressedBackendTexture(int width,
                                                    int height,
                                                    const GrBackendFormat&,
                                                    const SkColor4f& color,
                                                    skgpu::Mipmapped,
                                                    GrProtected = GrProtected::kNo,
                                                    GrGpuFinishedProc finishedProc = nullptr,
                                                    GrGpuFinishedContext finishedContext = nullptr);

    GrBackendTexture createCompressedBackendTexture(int width,
                                                    int height,
                                                    SkTextureCompressionType,
                                                    const SkColor4f& color,
                                                    skgpu::Mipmapped,
                                                    GrProtected = GrProtected::kNo,
                                                    GrGpuFinishedProc finishedProc = nullptr,
                                                    GrGpuFinishedContext finishedContext = nullptr);

    /**
     * If possible, create a backend texture initialized with the provided raw data. The client
     * should ensure that the returned backend texture is valid. The client can pass in a
     * finishedProc to be notified when the data has been uploaded by the gpu and the texture can be
     * deleted. The client is required to call `submit` to send the upload work to the gpu.
     * The finishedProc will always get called even if we failed to create the GrBackendTexture.
     * If numLevels is 1 a non-mipmapped texture will result. If a mipmapped texture is desired
     * the data for all the mipmap levels must be provided. Additionally, all the miplevels
     * must be sized correctly (please see SkMipmap::ComputeLevelSize and ComputeLevelCount).
     * For the Vulkan backend the layout of the created VkImage will be:
     *      VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL
     */
    GrBackendTexture createCompressedBackendTexture(int width,
                                                    int height,
                                                    const GrBackendFormat&,
                                                    const void* data,
                                                    size_t dataSize,
                                                    skgpu::Mipmapped,
                                                    GrProtected = GrProtected::kNo,
                                                    GrGpuFinishedProc finishedProc = nullptr,
                                                    GrGpuFinishedContext finishedContext = nullptr);

    GrBackendTexture createCompressedBackendTexture(int width,
                                                    int height,
                                                    SkTextureCompressionType,
                                                    const void* data,
                                                    size_t dataSize,
                                                    skgpu::Mipmapped,
                                                    GrProtected = GrProtected::kNo,
                                                    GrGpuFinishedProc finishedProc = nullptr,
                                                    GrGpuFinishedContext finishedContext = nullptr);

    /**
     * If possible, updates a backend texture filled with the provided color. If the texture is
     * mipmapped, all levels of the mip chain will be updated to have the supplied color. The client
     * should check the return value to see if the update was successful. The client can pass in a
     * finishedProc to be notified when the data has been uploaded by the gpu and the texture can be
     * deleted. The client is required to call `submit` to send the upload work to the gpu.
     * The finishedProc will always get called even if we failed to update the GrBackendTexture.
     * For the Vulkan backend after a successful update the layout of the created VkImage will be:
     *      VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL
     */
    bool updateCompressedBackendTexture(const GrBackendTexture&,
                                        const SkColor4f& color,
                                        GrGpuFinishedProc finishedProc,
                                        GrGpuFinishedContext finishedContext);

    /**
     * If possible, updates a backend texture filled with the provided raw data. The client
     * should check the return value to see if the update was successful. The client can pass in a
     * finishedProc to be notified when the data has been uploaded by the gpu and the texture can be
     * deleted. The client is required to call `submit` to send the upload work to the gpu.
     * The finishedProc will always get called even if we failed to update the GrBackendTexture.
     * If a mipmapped texture is passed in, the data for all the mipmap levels must be provided.
     * Additionally, all the miplevels must be sized correctly (please see
     * SkMipmap::ComputeLevelSize and ComputeLevelCount).
     * For the Vulkan backend after a successful update the layout of the created VkImage will be:
     *      VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL
     */
    bool updateCompressedBackendTexture(const GrBackendTexture&,
                                        const void* data,
                                        size_t dataSize,
                                        GrGpuFinishedProc finishedProc,
                                        GrGpuFinishedContext finishedContext);

    /**
     * Updates the state of the GrBackendTexture/RenderTarget to have the passed in
     * skgpu::MutableTextureState. All objects that wrap the backend surface (i.e. SkSurfaces and
     * SkImages) will also be aware of this state change. This call does not submit the state change
     * to the gpu, but requires the client to call `submit` to send it to the GPU. The work
     * for this call is ordered linearly with all other calls that require GrContext::submit to be
     * called (e.g. updateBackendTexture and flush). If finishedProc is not null then it will be
     * called with finishedContext after the state transition is known to have occurred on the GPU.
     *
     * See skgpu::MutableTextureState to see what state can be set via this call.
     *
     * If the backend API is Vulkan, the caller can set the skgpu::MutableTextureState's
     * VkImageLayout to VK_IMAGE_LAYOUT_UNDEFINED or queueFamilyIndex to VK_QUEUE_FAMILY_IGNORED to
     * tell Skia to not change those respective states.
     *
     * If previousState is not null and this returns true, then Skia will have filled in
     * previousState to have the values of the state before this call.
     */
    bool setBackendTextureState(const GrBackendTexture&,
                                const skgpu::MutableTextureState&,
                                skgpu::MutableTextureState* previousState = nullptr,
                                GrGpuFinishedProc finishedProc = nullptr,
                                GrGpuFinishedContext finishedContext = nullptr);
    bool setBackendRenderTargetState(const GrBackendRenderTarget&,
                                     const skgpu::MutableTextureState&,
                                     skgpu::MutableTextureState* previousState = nullptr,
                                     GrGpuFinishedProc finishedProc = nullptr,
                                     GrGpuFinishedContext finishedContext = nullptr);

    void deleteBackendTexture(const GrBackendTexture&);

    // This interface allows clients to pre-compile shaders and populate the runtime program cache.
    // The key and data blobs should be the ones passed to the PersistentCache, in SkSL format.
    //
    // Steps to use this API:
    //
    // 1) Create a GrDirectContext as normal, but set fPersistentCache on GrContextOptions to
    //    something that will save the cached shader blobs. Set fShaderCacheStrategy to kSkSL. This
    //    will ensure that the blobs are SkSL, and are suitable for pre-compilation.
    // 2) Run your application, and save all of the key/data pairs that are fed to the cache.
    //
    // 3) Switch over to shipping your application. Include the key/data pairs from above.
    // 4) At startup (or any convenient time), call precompileShader for each key/data pair.
    //    This will compile the SkSL to create a GL program, and populate the runtime cache.
    //
    // This is only guaranteed to work if the context/device used in step #2 are created in the
    // same way as the one used in step #4, and the same GrContextOptions are specified.
    // Using cached shader blobs on a different device or driver is undefined.
    bool precompileShader(const SkData& key, const SkData& data);
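
    // Illustrative sketch for step 4 above: replaying previously captured SkSL key/data pairs
    // at startup. `loadCachedShaderBlobs()` is a hypothetical client helper returning pairs of
    // sk_sp<SkData>; it is not part of Skia.
    //
    //     for (const auto& [key, data] : loadCachedShaderBlobs()) {
    //         if (!directContext->precompileShader(*key, *data)) {
    //             // Blob came from an incompatible device/driver or options; skip it.
    //         }
    //     }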

#ifdef SK_ENABLE_DUMP_GPU
    /** Returns a string with detailed information about the context & GPU, in JSON format. */
    SkString dump() const;
#endif

    class DirectContextID {
    public:
        static GrDirectContext::DirectContextID Next();

        DirectContextID() : fID(SK_InvalidUniqueID) {}

        bool operator==(const DirectContextID& that) const { return fID == that.fID; }
        bool operator!=(const DirectContextID& that) const { return !(*this == that); }

        void makeInvalid() { fID = SK_InvalidUniqueID; }
        bool isValid() const { return fID != SK_InvalidUniqueID; }

    private:
        constexpr DirectContextID(uint32_t id) : fID(id) {}
        uint32_t fID;
    };

    DirectContextID directContextID() const { return fDirectContextID; }

    // Provides access to functions that aren't part of the public API.
    GrDirectContextPriv priv();
    const GrDirectContextPriv priv() const;  // NOLINT(readability-const-return-type)

protected:
    GrDirectContext(GrBackendApi backend,
                    const GrContextOptions& options,
                    sk_sp<GrContextThreadSafeProxy> proxy);

    bool init() override;

    GrAtlasManager* onGetAtlasManager() { return fAtlasManager.get(); }
#if !defined(SK_ENABLE_OPTIMIZE_SIZE)
    skgpu::ganesh::SmallPathAtlasMgr* onGetSmallPathAtlasMgr();
#endif

    GrDirectContext* asDirectContext() override { return this; }

private:
    // This call will make sure our work on the GPU is finished and will execute any outstanding
    // asynchronous work (e.g. calling finished procs, freeing resources, etc.) related to the
    // outstanding work on the gpu. The main use currently for this function is when tearing down or
    // abandoning the context.
    //
    // When we finish up work on the GPU it could trigger callbacks to the client. In the case we
    // are abandoning the context we don't want the client to be able to use the GrDirectContext to
    // issue more commands during the callback. Thus before calling this function we set the
    // GrDirectContext's state to be abandoned. However, we need to be able to get by the abandoned
    // check in the call to know that it is safe to execute this. The shouldExecuteWhileAbandoned
    // bool is used for this signal.
    void syncAllOutstandingGpuWork(bool shouldExecuteWhileAbandoned);

    // This delete callback needs to be the first thing on the GrDirectContext so that it is the
    // last thing destroyed. The callback may signal the client to clean up things that may need
    // to survive the lifetime of some of the other objects on the GrDirectContext. So make sure
    // we don't call it until all else has been destroyed.
    class DeleteCallbackHelper {
    public:
        DeleteCallbackHelper(GrDirectContextDestroyedContext context,
                             GrDirectContextDestroyedProc proc)
                : fContext(context), fProc(proc) {}

        ~DeleteCallbackHelper() {
            if (fProc) {
                fProc(fContext);
            }
        }

    private:
        GrDirectContextDestroyedContext fContext;
        GrDirectContextDestroyedProc fProc;
    };
    std::unique_ptr<DeleteCallbackHelper> fDeleteCallbackHelper;

    const DirectContextID                   fDirectContextID;
    // fTaskGroup must appear before anything that uses it (e.g. fGpu), so that it is destroyed
    // after all of its users. Clients of fTaskGroup will generally want to ensure that they call
    // wait() on it as they are being destroyed, to avoid the possibility of pending tasks being
    // invoked after objects they depend upon have already been destroyed.
    std::unique_ptr<SkTaskGroup>              fTaskGroup;
    std::unique_ptr<sktext::gpu::StrikeCache> fStrikeCache;
    std::unique_ptr<GrGpu>                    fGpu;
    std::unique_ptr<GrResourceCache>          fResourceCache;
    std::unique_ptr<GrResourceProvider>       fResourceProvider;

    // This is incremented before we start calling ReleaseProcs from GrSurfaces and decremented
    // after. A ReleaseProc may trigger code causing another resource to get freed, so we need to
    // track the count to know if we are in a ReleaseProc at any level. When this is set to a value
    // greater than zero we will not allow abandonContext calls to be made on the context.
    int                                     fInsideReleaseProcCnt = 0;

    bool                                    fDidTestPMConversions;
    // true if the PM/UPM conversion succeeded; false otherwise
    bool                                    fPMUPMConversionsRoundTrip;

    GrContextOptions::PersistentCache*      fPersistentCache;

    std::unique_ptr<GrClientMappedBufferManager> fMappedBufferManager;
    std::unique_ptr<GrAtlasManager> fAtlasManager;

#if !defined(SK_ENABLE_OPTIMIZE_SIZE)
    std::unique_ptr<skgpu::ganesh::SmallPathAtlasMgr> fSmallPathAtlasMgr;
#endif

    friend class GrDirectContextPriv;
};


#endif