/*
 * Copyright 2016 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */
7
#ifndef GrDirectContextPriv_DEFINED
#define GrDirectContextPriv_DEFINED

#include "include/core/SkRefCnt.h"
#include "include/core/SkSpan.h"
#include "include/core/SkSurface.h"
#include "include/gpu/ganesh/GrContextOptions.h"
#include "include/gpu/ganesh/GrContextThreadSafeProxy.h"
#include "include/gpu/ganesh/GrDirectContext.h"
#include "include/private/base/SkAssert.h"
#include "include/private/base/SkTArray.h"
#include "src/gpu/ganesh/GrGpu.h"
#include "src/gpu/ganesh/GrRecordingContextPriv.h"

#include <cstddef>
#include <memory>
#include <utility>
25
class GrAtlasManager;
class GrClientMappedBufferManager;
class GrDeferredDisplayList;
class GrFragmentProcessor;
class GrOnFlushCallbackObject;
class GrProgramDesc;
class GrProgramInfo;
class GrRenderTargetProxy;
class GrResourceCache;
class GrResourceProvider;
class GrSurfaceProxy;
class SkImage;
class SkString;
class SkTaskGroup;
enum class GrBackendApi : unsigned int;
enum class GrSemaphoresSubmitted : bool;
struct GrFlushInfo;

namespace skgpu {
class MutableTextureState;
enum class MaskFormat : int;
namespace ganesh {
class SmallPathAtlasMgr;
}
}  // namespace skgpu
namespace sktext {
namespace gpu {
class StrikeCache;
}
}  // namespace sktext
56
57 /** Class that adds methods to GrDirectContext that are only intended for use internal to Skia.
58 This class is purely a privileged window into GrDirectContext. It should never have additional
59 data members or virtual methods. */
60 class GrDirectContextPriv : public GrRecordingContextPriv {
61 public:
Make(GrBackendApi backend,const GrContextOptions & options,sk_sp<GrContextThreadSafeProxy> proxy)62 static sk_sp<GrDirectContext> Make(GrBackendApi backend,
63 const GrContextOptions& options,
64 sk_sp<GrContextThreadSafeProxy> proxy) {
65 return sk_sp<GrDirectContext>(new GrDirectContext(backend, options, std::move(proxy)));
66 }
67
Init(const sk_sp<GrDirectContext> & ctx)68 static bool Init(const sk_sp<GrDirectContext>& ctx) {
69 SkASSERT(ctx);
70 return ctx->init();
71 }
72
SetGpu(const sk_sp<GrDirectContext> & ctx,std::unique_ptr<GrGpu> gpu)73 static void SetGpu(const sk_sp<GrDirectContext>& ctx, std::unique_ptr<GrGpu> gpu) {
74 SkASSERT(ctx);
75 ctx->fGpu = std::move(gpu);
76 }
77
context()78 GrDirectContext* context() { return static_cast<GrDirectContext*>(fContext); }
context()79 const GrDirectContext* context() const { return static_cast<const GrDirectContext*>(fContext); }
80
getStrikeCache()81 sktext::gpu::StrikeCache* getStrikeCache() { return this->context()->fStrikeCache.get(); }
82
83 /**
84 * Finalizes all pending reads and writes to the surfaces and also performs an MSAA resolves
85 * if necessary. The GrSurfaceProxy array is treated as a hint. If it is supplied the context
86 * will guarantee that the draws required for those proxies are flushed but it could do more.
87 * If no array is provided then all current work will be flushed.
88 *
89 * It is not necessary to call this before reading the render target via Skia/GrContext.
90 * GrContext will detect when it must perform a resolve before reading pixels back from the
91 * surface or using it as a texture.
92 */
93 GrSemaphoresSubmitted flushSurfaces(
94 SkSpan<GrSurfaceProxy*>,
95 SkSurfaces::BackendSurfaceAccess = SkSurfaces::BackendSurfaceAccess::kNoAccess,
96 const GrFlushInfo& = {},
97 const skgpu::MutableTextureState* newState = nullptr);
98
99 /** Version of above that flushes for a single proxy. Null is allowed. */
100 GrSemaphoresSubmitted flushSurface(
101 GrSurfaceProxy* proxy,
102 SkSurfaces::BackendSurfaceAccess access = SkSurfaces::BackendSurfaceAccess::kNoAccess,
103 const GrFlushInfo& info = {},
104 const skgpu::MutableTextureState* newState = nullptr) {
105 size_t size = proxy ? 1 : 0;
106 return this->flushSurfaces({&proxy, size}, access, info, newState);
107 }
108
109 /**
110 * Returns true if createPMToUPMEffect and createUPMToPMEffect will succeed. In other words,
111 * did we find a pair of round-trip preserving conversion effects?
112 */
113 bool validPMUPMConversionExists();
114
115 /**
116 * These functions create premul <-> unpremul effects, using specialized round-trip effects.
117 */
118 std::unique_ptr<GrFragmentProcessor> createPMToUPMEffect(std::unique_ptr<GrFragmentProcessor>);
119 std::unique_ptr<GrFragmentProcessor> createUPMToPMEffect(std::unique_ptr<GrFragmentProcessor>);
120
getTaskGroup()121 SkTaskGroup* getTaskGroup() { return this->context()->fTaskGroup.get(); }
122
resourceProvider()123 GrResourceProvider* resourceProvider() { return this->context()->fResourceProvider.get(); }
resourceProvider()124 const GrResourceProvider* resourceProvider() const {
125 return this->context()->fResourceProvider.get();
126 }
127
getResourceCache()128 GrResourceCache* getResourceCache() { return this->context()->fResourceCache.get(); }
129
getGpu()130 GrGpu* getGpu() { return this->context()->fGpu.get(); }
getGpu()131 const GrGpu* getGpu() const { return this->context()->fGpu.get(); }
132
133 // This accessor should only ever be called by the GrOpFlushState.
getAtlasManager()134 GrAtlasManager* getAtlasManager() {
135 return this->context()->onGetAtlasManager();
136 }
137
138 // This accessor should only ever be called by the GrOpFlushState.
139 #if !defined(SK_ENABLE_OPTIMIZE_SIZE)
getSmallPathAtlasMgr()140 skgpu::ganesh::SmallPathAtlasMgr* getSmallPathAtlasMgr() {
141 return this->context()->onGetSmallPathAtlasMgr();
142 }
143 #endif
144
145 void createDDLTask(sk_sp<const GrDeferredDisplayList>,
146 sk_sp<GrRenderTargetProxy> newDest);
147
148 bool compile(const GrProgramDesc&, const GrProgramInfo&);
149
getPersistentCache()150 GrContextOptions::PersistentCache* getPersistentCache() {
151 return this->context()->fPersistentCache;
152 }
153
clientMappedBufferManager()154 GrClientMappedBufferManager* clientMappedBufferManager() {
155 return this->context()->fMappedBufferManager.get();
156 }
157
setInsideReleaseProc(bool inside)158 void setInsideReleaseProc(bool inside) {
159 if (inside) {
160 this->context()->fInsideReleaseProcCnt++;
161 } else {
162 SkASSERT(this->context()->fInsideReleaseProcCnt > 0);
163 this->context()->fInsideReleaseProcCnt--;
164 }
165 }
166
167 #if defined(GPU_TEST_UTILS)
168 /** Reset GPU stats */
169 void resetGpuStats() const;
170
171 /** Prints cache stats to the string if GR_CACHE_STATS == 1. */
172 void dumpCacheStats(SkString*) const;
173 void dumpCacheStatsKeyValuePairs(
174 skia_private::TArray<SkString>* keys, skia_private::TArray<double>* values) const;
175 void printCacheStats() const;
176
177 /** Prints GPU stats to the string if GR_GPU_STATS == 1. */
178 void dumpGpuStats(SkString*) const;
179 void dumpGpuStatsKeyValuePairs(
180 skia_private::TArray<SkString>* keys, skia_private::TArray<double>* values) const;
181 void printGpuStats() const;
182
183 /** These are only active if GR_GPU_STATS == 1. */
184 void resetContextStats();
185 void dumpContextStats(SkString*) const;
186 void dumpContextStatsKeyValuePairs(
187 skia_private::TArray<SkString>* keys, skia_private::TArray<double>* values) const;
188 void printContextStats() const;
189
190 /** Get pointer to atlas texture for given mask format. Note that this wraps an
191 actively mutating texture in an SkImage. This could yield unexpected results
192 if it gets cached or used more generally. */
193 sk_sp<SkImage> testingOnly_getFontAtlasImage(skgpu::MaskFormat format, unsigned int index = 0);
194
195 void testingOnly_flushAndRemoveOnFlushCallbackObject(GrOnFlushCallbackObject*);
196 #endif
197
198 private:
GrDirectContextPriv(GrDirectContext * dContext)199 explicit GrDirectContextPriv(GrDirectContext* dContext) : GrRecordingContextPriv(dContext) {}
200 GrDirectContextPriv& operator=(const GrDirectContextPriv&) = delete;
201
202 // No taking addresses of this type.
203 const GrDirectContextPriv* operator&() const;
204 GrDirectContextPriv* operator&();
205
206 friend class GrDirectContext; // to construct/copy this type.
207
208 using INHERITED = GrRecordingContextPriv;
209 };
210
priv()211 inline GrDirectContextPriv GrDirectContext::priv() { return GrDirectContextPriv(this); }
212
213 // NOLINTNEXTLINE(readability-const-return-type)
priv()214 inline const GrDirectContextPriv GrDirectContext::priv() const {
215 return GrDirectContextPriv(const_cast<GrDirectContext*>(this));
216 }
217
#endif