/*
 * Copyright 2017 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/ganesh/mtl/GrMtlGpu.h"

#include "include/core/SkColorSpace.h"
#include "include/core/SkTextureCompressionType.h"
#include "include/gpu/GpuTypes.h"
#include "include/gpu/ganesh/mtl/GrMtlBackendSemaphore.h"
#include "include/gpu/ganesh/mtl/GrMtlBackendSurface.h"
#include "include/private/gpu/ganesh/GrTypesPriv.h"
#include "src/base/SkMathPriv.h"
#include "src/base/SkRectMemcpy.h"
#include "src/core/SkCompressedDataUtils.h"
#include "src/core/SkMipmap.h"
#include "src/gpu/DataUtils.h"
#include "src/gpu/ganesh/GrBackendUtils.h"
#include "src/gpu/ganesh/GrDataUtils.h"
#include "src/gpu/ganesh/GrDirectContextPriv.h"
#include "src/gpu/ganesh/GrImageInfo.h"
#include "src/gpu/ganesh/GrPixmap.h"
#include "src/gpu/ganesh/GrRenderTarget.h"
#include "src/gpu/ganesh/GrResourceProvider.h"
#include "src/gpu/ganesh/GrTexture.h"
#include "src/gpu/ganesh/GrThreadSafePipelineBuilder.h"
#include "src/gpu/ganesh/mtl/GrMtlBuffer.h"
#include "src/gpu/ganesh/mtl/GrMtlCommandBuffer.h"
#include "src/gpu/ganesh/mtl/GrMtlOpsRenderPass.h"
#include "src/gpu/ganesh/mtl/GrMtlPipelineStateBuilder.h"
#include "src/gpu/ganesh/mtl/GrMtlRenderCommandEncoder.h"
#include "src/gpu/ganesh/mtl/GrMtlSemaphore.h"
#include "src/gpu/ganesh/mtl/GrMtlTexture.h"
#include "src/gpu/ganesh/mtl/GrMtlTextureRenderTarget.h"
#include "src/gpu/ganesh/mtl/GrMtlUtil.h"
#include "src/gpu/mtl/MtlUtilsPriv.h"

#import <simd/simd.h>

using namespace skia_private;
#if !__has_feature(objc_arc)
#error This file must be compiled with ARC. Use the -fobjc-arc flag.
#endif

GR_NORETAIN_BEGIN

#if defined(GPU_TEST_UTILS)
// Set to 1 if you want to do GPU capture of each commandBuffer
#define GR_METAL_CAPTURE_COMMANDBUFFER 0
#endif

std::unique_ptr<GrGpu> GrMtlGpu::Make(const GrMtlBackendContext& context,
                                      const GrContextOptions& options,
                                      GrDirectContext* direct) {
    if (!context.fDevice || !context.fQueue) {
        return nullptr;
    }
    if (@available(macOS 10.14, iOS 10.0, tvOS 10.0, *)) {
        // no warning needed
    } else {
        SkDebugf("*** Error ***: Skia's Metal backend no longer supports this OS version.\n");
#ifdef SK_BUILD_FOR_IOS
        SkDebugf("Minimum supported version is iOS 10.0.\n");
#else
        SkDebugf("Minimum supported version is macOS 10.14.\n");
#endif
        return nullptr;
    }

    id<MTLDevice> GR_NORETAIN device = (__bridge id<MTLDevice>)(context.fDevice.get());
    id<MTLCommandQueue> GR_NORETAIN queue = (__bridge id<MTLCommandQueue>)(context.fQueue.get());

    return std::unique_ptr<GrGpu>(new GrMtlGpu(direct,
                                               options,
                                               device,
                                               queue));
}
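
// For reference, clients normally reach this entry point through the public
// GrDirectContexts::MakeMetal() factory rather than calling GrMtlGpu::Make() directly.
// A minimal sketch, assuming an existing MTLDevice/MTLCommandQueue pair (the variable
// names device/queue here are hypothetical):
//
//     GrMtlBackendContext backendContext;
//     backendContext.fDevice.retain((__bridge GrMTLHandle)device);
//     backendContext.fQueue.retain((__bridge GrMTLHandle)queue);
//     sk_sp<GrDirectContext> ctx =
//             GrDirectContexts::MakeMetal(backendContext, GrContextOptions());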

// This constant determines how many OutstandingCommandBuffers are allocated together as a block in
// the deque. As such it needs to balance allocating too much memory vs. incurring
// allocation/deallocation thrashing. It should roughly correspond to the max number of outstanding
// command buffers we expect to see.
static const int kDefaultOutstandingAllocCnt = 8;

GrMtlGpu::GrMtlGpu(GrDirectContext* direct, const GrContextOptions& options,
                   id<MTLDevice> device, id<MTLCommandQueue> queue)
        : INHERITED(direct)
        , fDevice(device)
        , fQueue(queue)
        , fOutstandingCommandBuffers(sizeof(OutstandingCommandBuffer), kDefaultOutstandingAllocCnt)
        , fResourceProvider(this)
        , fStagingBufferManager(this)
        , fUniformsRingBuffer(this, 128 * 1024, 256, GrGpuBufferType::kUniform)
        , fDisconnected(false) {
    fMtlCaps.reset(new GrMtlCaps(options, fDevice));
    this->initCaps(fMtlCaps);
#if GR_METAL_CAPTURE_COMMANDBUFFER
    this->testingOnly_startCapture();
#endif
    fCurrentCmdBuffer = GrMtlCommandBuffer::Make(fQueue);
}

GrMtlGpu::~GrMtlGpu() {
    if (!fDisconnected) {
        this->destroyResources();
    }
}

void GrMtlGpu::disconnect(DisconnectType type) {
    INHERITED::disconnect(type);

    if (!fDisconnected) {
        this->destroyResources();
        fDisconnected = true;
    }
}

GrThreadSafePipelineBuilder* GrMtlGpu::pipelineBuilder() {
    return nullptr;
}

sk_sp<GrThreadSafePipelineBuilder> GrMtlGpu::refPipelineBuilder() {
    return nullptr;
}

void GrMtlGpu::destroyResources() {
    this->submitCommandBuffer(SyncQueue::kForce_SyncQueue);
    // If there's no work we won't release the command buffer, so we do it here.
    fCurrentCmdBuffer = nil;

    // We used a placement new for each object in fOutstandingCommandBuffers, so we're responsible
    // for calling the destructor on each of them as well.
    while (!fOutstandingCommandBuffers.empty()) {
        OutstandingCommandBuffer* buffer =
                (OutstandingCommandBuffer*)fOutstandingCommandBuffers.front();
        // Make sure we remove before deleting, as deletion might try to kick off another submit.
        fOutstandingCommandBuffers.pop_front();
        buffer->~OutstandingCommandBuffer();
    }

    fStagingBufferManager.reset();

    fResourceProvider.destroyResources();

    fQueue = nil;
    fDevice = nil;
}
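
// A minimal standalone sketch of the construct/destroy pattern used with
// fOutstandingCommandBuffers above: SkDeque hands out raw storage, so entries are
// created with placement new and must have their destructors run by hand (T, args,
// and blockCnt are hypothetical):
//
//     SkDeque deque(sizeof(T), blockCnt);
//     new (deque.push_back()) T(args);   // construct in the raw slot
//     T* front = (T*)deque.front();
//     deque.pop_front();                 // unlink the entry first...
//     front->~T();                       // ...then destroy it manually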

GrOpsRenderPass* GrMtlGpu::onGetOpsRenderPass(
            GrRenderTarget* renderTarget, bool useMSAASurface, GrAttachment* stencil,
            GrSurfaceOrigin origin, const SkIRect& bounds,
            const GrOpsRenderPass::LoadAndStoreInfo& colorInfo,
            const GrOpsRenderPass::StencilLoadAndStoreInfo& stencilInfo,
            const TArray<GrSurfaceProxy*, true>& sampledProxies,
            GrXferBarrierFlags renderPassXferBarriers) {
    // For the given render target and requested render pass features we need to find a compatible
    // framebuffer to use.
    GrMtlRenderTarget* mtlRT = static_cast<GrMtlRenderTarget*>(renderTarget);

    // TODO: support DMSAA
    SkASSERT(!useMSAASurface ||
             (renderTarget->numSamples() > 1));

    bool withResolve = false;

    // Figure out if we can use a Resolve store action for this render pass. When we set up
    // the render pass we'll update the color load/store ops since we don't want to ever load
    // or store the msaa color attachment, but may need to for the resolve attachment.
    if (useMSAASurface && this->mtlCaps().renderTargetSupportsDiscardableMSAA(mtlRT)) {
        withResolve = true;
    }

    sk_sp<GrMtlFramebuffer> framebuffer =
            sk_ref_sp(mtlRT->getFramebuffer(withResolve, SkToBool(stencil)));
    if (!framebuffer) {
        return nullptr;
    }

    return new GrMtlOpsRenderPass(this, renderTarget, std::move(framebuffer), origin, colorInfo,
                                  stencilInfo);
}

GrMtlCommandBuffer* GrMtlGpu::commandBuffer() {
    if (!fCurrentCmdBuffer) {
#if GR_METAL_CAPTURE_COMMANDBUFFER
        this->testingOnly_startCapture();
#endif
        // Create a new command buffer for the next submit
        fCurrentCmdBuffer = GrMtlCommandBuffer::Make(fQueue);
    }

    SkASSERT(fCurrentCmdBuffer);
    return fCurrentCmdBuffer.get();
}

void GrMtlGpu::takeOwnershipOfBuffer(sk_sp<GrGpuBuffer> buffer) {
    SkASSERT(buffer);
    this->commandBuffer()->addGrBuffer(std::move(buffer));
}

void GrMtlGpu::submit(GrOpsRenderPass* renderPass) {
    // GrMtlOpsRenderPass is the only GrOpsRenderPass subclass in this backend, so a
    // static_cast is the appropriate downcast here.
    GrMtlOpsRenderPass* mtlRenderPass = static_cast<GrMtlOpsRenderPass*>(renderPass);
    mtlRenderPass->submit();
    delete renderPass;
}

bool GrMtlGpu::submitCommandBuffer(SyncQueue sync) {
    if (!fCurrentCmdBuffer || !fCurrentCmdBuffer->hasWork()) {
        if (sync == SyncQueue::kForce_SyncQueue) {
            this->finishOutstandingGpuWork();
            this->checkForFinishedCommandBuffers();
        }
        // We need to manually call the finishedCallbacks since we don't add this
        // to the OutstandingCommandBuffer list
        if (fCurrentCmdBuffer) {
            fCurrentCmdBuffer->callFinishedCallbacks();
        }
        return true;
    }

    SkASSERT(fCurrentCmdBuffer);
    bool didCommit = fCurrentCmdBuffer->commit(sync == SyncQueue::kForce_SyncQueue);
    if (didCommit) {
        new (fOutstandingCommandBuffers.push_back()) OutstandingCommandBuffer(fCurrentCmdBuffer);
    }

    // We don't create a new command buffer here because we may end up using it
    // in the next frame, and that confuses the GPU debugger. Instead we create
    // one when we next need it.
    fCurrentCmdBuffer.reset();

    // If the freeing of any resources held by a finished command buffer causes us to send
    // a new command to the gpu we'll create the new command buffer in commandBuffer(), above.
    this->checkForFinishedCommandBuffers();

#if GR_METAL_CAPTURE_COMMANDBUFFER
    this->testingOnly_stopCapture();
#endif
    return didCommit;
}

void GrMtlGpu::checkForFinishedCommandBuffers() {
    // Iterate over all the outstanding command buffers to see if any have finished. The command
    // buffers are in order from oldest to newest, so we start at the front to check if their fence
    // has signaled. If so we pop it off and move onto the next.
    // Repeat until we find a command buffer that has not finished yet (and all others afterwards
    // are also guaranteed to not have finished).
    OutstandingCommandBuffer* front = (OutstandingCommandBuffer*)fOutstandingCommandBuffers.front();
    while (front && (*front)->isCompleted()) {
        // Make sure we remove before deleting as deletion might try to kick off another submit
        fOutstandingCommandBuffers.pop_front();
        // Since we used placement new we are responsible for calling the destructor manually.
        front->~OutstandingCommandBuffer();
        front = (OutstandingCommandBuffer*)fOutstandingCommandBuffers.front();
    }
}

void GrMtlGpu::finishOutstandingGpuWork() {
    // Wait for the last command buffer we've submitted to finish.
    OutstandingCommandBuffer* back =
            (OutstandingCommandBuffer*)fOutstandingCommandBuffers.back();
    if (back) {
        (*back)->waitUntilCompleted();
    }
}

void GrMtlGpu::addFinishedCallback(sk_sp<skgpu::RefCntedCallback> finishedCallback) {
    SkASSERT(finishedCallback);
    // Besides the current command buffer, we also add the finishedCallback to the newest
    // outstanding command buffer. Our contract for calling the proc is that all previously
    // submitted command buffers have finished when we call it. However, if our current command
    // buffer has no work when it is flushed it will drop its ref to the callback immediately. But
    // the previous work may not have finished. It is safe to only add the proc to the newest
    // outstanding command buffer because that must finish after all previously submitted command
    // buffers.
    OutstandingCommandBuffer* back = (OutstandingCommandBuffer*)fOutstandingCommandBuffers.back();
    if (back) {
        (*back)->addFinishedCallback(finishedCallback);
    }
    commandBuffer()->addFinishedCallback(std::move(finishedCallback));
}
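
// For reference, these callbacks typically originate from a client flush. A minimal
// sketch using the public GrFlushInfo API (myContext and the lambda body are
// hypothetical):
//
//     GrFlushInfo flushInfo;
//     flushInfo.fFinishedProc = [](GrGpuFinishedContext context) {
//         // All work submitted before this flush has now finished on the GPU.
//     };
//     flushInfo.fFinishedContext = myContext;
//     ctx->flush(flushInfo);
//     ctx->submit();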

bool GrMtlGpu::onSubmitToGpu(const GrSubmitInfo& info) {
    if (info.fSync == GrSyncCpu::kYes) {
        return this->submitCommandBuffer(SyncQueue::kForce_SyncQueue);
    } else {
        return this->submitCommandBuffer(SyncQueue::kSkip_SyncQueue);
    }
}

std::unique_ptr<GrSemaphore> GrMtlGpu::prepareTextureForCrossContextUsage(GrTexture*) {
    this->submitToGpu();
    return nullptr;
}

sk_sp<GrGpuBuffer> GrMtlGpu::onCreateBuffer(size_t size,
                                            GrGpuBufferType type,
                                            GrAccessPattern accessPattern) {
    return GrMtlBuffer::Make(this, size, type, accessPattern);
}

static bool check_max_blit_width(int widthInPixels) {
    if (widthInPixels > 32767) {
        SkASSERT(false); // surfaces should not be this wide anyway
        return false;
    }
    return true;
}

bool GrMtlGpu::uploadToTexture(GrMtlTexture* tex,
                               SkIRect rect,
                               GrColorType dataColorType,
                               const GrMipLevel texels[],
                               int mipLevelCount) {
    SkASSERT(this->mtlCaps().isFormatTexturable(tex->mtlTexture().pixelFormat));
    // The assumption is either that we have no mipmaps, or that our rect is the entire texture
    SkASSERT(mipLevelCount == 1 || rect == SkIRect::MakeSize(tex->dimensions()));

    // We assume that if the texture has mip levels, we either upload to all the levels or just the
    // first.
    SkASSERT(mipLevelCount == 1 || mipLevelCount == (tex->maxMipmapLevel() + 1));

    if (!check_max_blit_width(rect.width())) {
        return false;
    }
    if (rect.isEmpty()) {
        return false;
    }

    SkASSERT(this->mtlCaps().surfaceSupportsWritePixels(tex));
    SkASSERT(this->mtlCaps().areColorTypeAndFormatCompatible(dataColorType, tex->backendFormat()));

    id<MTLTexture> GR_NORETAIN mtlTexture = tex->mtlTexture();
    SkASSERT(mtlTexture);
    // Either upload only the first miplevel or all miplevels
    SkASSERT(1 == mipLevelCount || mipLevelCount == (int)mtlTexture.mipmapLevelCount);

    if (mipLevelCount == 1 && !texels[0].fPixels) {
        return true;   // no data to upload
    }

    for (int i = 0; i < mipLevelCount; ++i) {
        // We do not allow any gaps in the mip data
        if (!texels[i].fPixels) {
            return false;
        }
    }

    size_t bpp = GrColorTypeBytesPerPixel(dataColorType);

    TArray<size_t> individualMipOffsets(mipLevelCount);
    size_t combinedBufferSize = GrComputeTightCombinedBufferSize(bpp,
                                                                 rect.size(),
                                                                 &individualMipOffsets,
                                                                 mipLevelCount);
    SkASSERT(combinedBufferSize);

    // The offset value must be a multiple of the destination texture's pixel size in bytes.
    size_t alignment = std::max(bpp, this->mtlCaps().getMinBufferAlignment());
    GrStagingBufferManager::Slice slice = fStagingBufferManager.allocateStagingBufferSlice(
            combinedBufferSize, alignment);
    if (!slice.fBuffer) {
        return false;
    }
    char* bufferData = (char*)slice.fOffsetMapPtr;
    GrMtlBuffer* mtlBuffer = static_cast<GrMtlBuffer*>(slice.fBuffer);

    int currentWidth = rect.width();
    int currentHeight = rect.height();
    SkDEBUGCODE(int layerHeight = tex->height());
    MTLOrigin origin = MTLOriginMake(rect.left(), rect.top(), 0);

    auto cmdBuffer = this->commandBuffer();
    id<MTLBlitCommandEncoder> GR_NORETAIN blitCmdEncoder = cmdBuffer->getBlitCommandEncoder();
    if (!blitCmdEncoder) {
        return false;
    }
#ifdef SK_ENABLE_MTL_DEBUG_INFO
    [blitCmdEncoder pushDebugGroup:@"uploadToTexture"];
#endif
    for (int currentMipLevel = 0; currentMipLevel < mipLevelCount; currentMipLevel++) {
        if (texels[currentMipLevel].fPixels) {
            SkASSERT(1 == mipLevelCount || currentHeight == layerHeight);
            const size_t trimRowBytes = currentWidth * bpp;
            const size_t rowBytes = texels[currentMipLevel].fRowBytes;

            // Copy data into the buffer, skipping any trailing bytes
            char* dst = bufferData + individualMipOffsets[currentMipLevel];
            const char* src = (const char*)texels[currentMipLevel].fPixels;
            SkRectMemcpy(dst, trimRowBytes, src, rowBytes, trimRowBytes, currentHeight);

            [blitCmdEncoder copyFromBuffer: mtlBuffer->mtlBuffer()
                              sourceOffset: slice.fOffset + individualMipOffsets[currentMipLevel]
                         sourceBytesPerRow: trimRowBytes
                       sourceBytesPerImage: trimRowBytes*currentHeight
                                sourceSize: MTLSizeMake(currentWidth, currentHeight, 1)
                                 toTexture: mtlTexture
                          destinationSlice: 0
                          destinationLevel: currentMipLevel
                         destinationOrigin: origin];
        }
        currentWidth = std::max(1, currentWidth/2);
        currentHeight = std::max(1, currentHeight/2);
        SkDEBUGCODE(layerHeight = currentHeight);
    }
#ifdef SK_BUILD_FOR_MAC
    if (this->mtlCaps().isMac()) {
        [mtlBuffer->mtlBuffer() didModifyRange: NSMakeRange(slice.fOffset, combinedBufferSize)];
    }
#endif
#ifdef SK_ENABLE_MTL_DEBUG_INFO
    [blitCmdEncoder popDebugGroup];
#endif

    if (mipLevelCount < (int) tex->mtlTexture().mipmapLevelCount) {
        tex->markMipmapsDirty();
    }

    return true;
}

bool GrMtlGpu::clearTexture(GrMtlTexture* tex, size_t bpp, uint32_t levelMask) {
    SkASSERT(this->mtlCaps().isFormatTexturable(tex->mtlTexture().pixelFormat));

    if (!levelMask) {
        return true;
    }

    id<MTLTexture> GR_NORETAIN mtlTexture = tex->mtlTexture();
    SkASSERT(mtlTexture);
    // levelMask may select any subset of the texture's miplevels.
    int mipLevelCount = (int)mtlTexture.mipmapLevelCount;

    TArray<size_t> individualMipOffsets(mipLevelCount);
    size_t combinedBufferSize = 0;
    int currentWidth = tex->width();
    int currentHeight = tex->height();

    // The alignment must be at least 4 bytes and a multiple of the bytes per pixel of the image
    // config. This works under the assumption that the bytes per pixel of the config are always a
    // power of 2.
    // TODO: can we just copy from a single buffer the size of the largest cleared level w/o a perf
    // penalty?
    SkASSERT((bpp & (bpp - 1)) == 0);
    const size_t alignmentMask = 0x3 | (bpp - 1);
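    // For example, with bpp = 4 the mask is 0x3 | 0x3 = 0x3; a running size of 10 gives
    // alignmentDiff = 2 below, so we pad by 0x3 - 2 + 1 = 2 bytes to land on 12, which is
    // both 4-byte aligned and a multiple of bpp.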
    for (int currentMipLevel = 0; currentMipLevel < mipLevelCount; currentMipLevel++) {
        if (levelMask & (1 << currentMipLevel)) {
            const size_t trimmedSize = currentWidth * bpp * currentHeight;
            const size_t alignmentDiff = combinedBufferSize & alignmentMask;
            if (alignmentDiff != 0) {
                combinedBufferSize += alignmentMask - alignmentDiff + 1;
            }
            individualMipOffsets.push_back(combinedBufferSize);
            combinedBufferSize += trimmedSize;
        }
        currentWidth = std::max(1, currentWidth/2);
        currentHeight = std::max(1, currentHeight/2);
    }
    SkASSERT(combinedBufferSize > 0 && !individualMipOffsets.empty());

    size_t alignment = std::max(bpp, this->mtlCaps().getMinBufferAlignment());
    GrStagingBufferManager::Slice slice = fStagingBufferManager.allocateStagingBufferSlice(
            combinedBufferSize, alignment);
    if (!slice.fBuffer) {
        return false;
    }
    GrMtlBuffer* mtlBuffer = static_cast<GrMtlBuffer*>(slice.fBuffer);
    id<MTLBuffer> transferBuffer = mtlBuffer->mtlBuffer();

    auto cmdBuffer = this->commandBuffer();
    id<MTLBlitCommandEncoder> GR_NORETAIN blitCmdEncoder = cmdBuffer->getBlitCommandEncoder();
    if (!blitCmdEncoder) {
        return false;
    }
#ifdef SK_ENABLE_MTL_DEBUG_INFO
    [blitCmdEncoder pushDebugGroup:@"clearTexture"];
#endif
    // Clear the buffer to transparent black.
    NSRange clearRange;
    clearRange.location = 0;
    clearRange.length = combinedBufferSize;
    [blitCmdEncoder fillBuffer: transferBuffer
                         range: clearRange
                         value: 0];

    // Now copy the buffer to the texture. individualMipOffsets was filled compactly
    // (one entry per masked level), so track a separate index rather than reusing
    // currentMipLevel, which would read the wrong slot when levelMask is sparse.
    currentWidth = tex->width();
    currentHeight = tex->height();
    MTLOrigin origin = MTLOriginMake(0, 0, 0);
    int levelOffsetIdx = 0;
    for (int currentMipLevel = 0; currentMipLevel < mipLevelCount; currentMipLevel++) {
        if (levelMask & (1 << currentMipLevel)) {
            const size_t rowBytes = currentWidth * bpp;

            [blitCmdEncoder copyFromBuffer: transferBuffer
                              sourceOffset: individualMipOffsets[levelOffsetIdx++]
                         sourceBytesPerRow: rowBytes
                       sourceBytesPerImage: rowBytes * currentHeight
                                sourceSize: MTLSizeMake(currentWidth, currentHeight, 1)
                                 toTexture: mtlTexture
                          destinationSlice: 0
                          destinationLevel: currentMipLevel
                         destinationOrigin: origin];
        }
        currentWidth = std::max(1, currentWidth/2);
        currentHeight = std::max(1, currentHeight/2);
    }
    // We don't need didModifyRange: here because fillBuffer: happens on the GPU.
#ifdef SK_ENABLE_MTL_DEBUG_INFO
    [blitCmdEncoder popDebugGroup];
#endif

    if (mipLevelCount < (int) tex->mtlTexture().mipmapLevelCount) {
        tex->markMipmapsDirty();
    }

    return true;
}

sk_sp<GrAttachment> GrMtlGpu::makeStencilAttachment(const GrBackendFormat& /*colorFormat*/,
                                                    SkISize dimensions, int numStencilSamples) {
    MTLPixelFormat sFmt = this->mtlCaps().preferredStencilFormat();

    fStats.incStencilAttachmentCreates();
    return GrMtlAttachment::MakeStencil(this, dimensions, numStencilSamples, sFmt);
}

sk_sp<GrAttachment> GrMtlGpu::makeMSAAAttachment(SkISize dimensions,
                                                 const GrBackendFormat& format,
                                                 int numSamples,
                                                 GrProtected isProtected,
                                                 GrMemoryless isMemoryless) {
    // Metal doesn't support protected textures
    SkASSERT(isProtected == GrProtected::kNo);
    // TODO: add memoryless support
    SkASSERT(isMemoryless == GrMemoryless::kNo);

    MTLPixelFormat pixelFormat = (MTLPixelFormat)GrBackendFormats::AsMtlFormat(format);
    SkASSERT(pixelFormat != MTLPixelFormatInvalid);
    SkASSERT(!skgpu::MtlFormatIsCompressed(pixelFormat));
    SkASSERT(this->mtlCaps().isFormatRenderable(pixelFormat, numSamples));

    fStats.incMSAAAttachmentCreates();
    return GrMtlAttachment::MakeMSAA(this, dimensions, numSamples, pixelFormat);
}

sk_sp<GrTexture> GrMtlGpu::onCreateTexture(SkISize dimensions,
                                           const GrBackendFormat& format,
                                           GrRenderable renderable,
                                           int renderTargetSampleCnt,
                                           skgpu::Budgeted budgeted,
                                           GrProtected isProtected,
                                           int mipLevelCount,
                                           uint32_t levelClearMask,
                                           std::string_view label) {
    // We don't support protected textures in Metal.
    if (isProtected == GrProtected::kYes) {
        return nullptr;
    }
    SkASSERT(mipLevelCount > 0);

    MTLPixelFormat mtlPixelFormat = GrBackendFormatAsMTLPixelFormat(format);
    SkASSERT(mtlPixelFormat != MTLPixelFormatInvalid);
    SkASSERT(!this->caps()->isFormatCompressed(format));

    sk_sp<GrMtlTexture> tex;
    GrMipmapStatus mipmapStatus =
            mipLevelCount > 1 ? GrMipmapStatus::kDirty : GrMipmapStatus::kNotAllocated;
    if (renderable == GrRenderable::kYes) {
        tex = GrMtlTextureRenderTarget::MakeNewTextureRenderTarget(
                this, budgeted, dimensions, renderTargetSampleCnt, mtlPixelFormat, mipLevelCount,
                mipmapStatus, label);
    } else {
        tex = GrMtlTexture::MakeNewTexture(this, budgeted, dimensions, mtlPixelFormat,
                                           mipLevelCount, mipmapStatus, label);
    }

    if (!tex) {
        return nullptr;
    }

    if (levelClearMask) {
        this->clearTexture(tex.get(),
                           skgpu::MtlFormatBytesPerBlock(mtlPixelFormat),
                           levelClearMask);
    }

    return std::move(tex);
}

sk_sp<GrTexture> GrMtlGpu::onCreateCompressedTexture(SkISize dimensions,
                                                     const GrBackendFormat& format,
                                                     skgpu::Budgeted budgeted,
                                                     skgpu::Mipmapped mipmapped,
                                                     GrProtected isProtected,
                                                     const void* data,
                                                     size_t dataSize) {
    // We don't support protected textures in Metal.
    if (isProtected == GrProtected::kYes) {
        return nullptr;
    }

    SkASSERT(this->caps()->isFormatTexturable(format, GrTextureType::k2D));
    SkASSERT(data);

    if (!check_max_blit_width(dimensions.width())) {
        return nullptr;
    }

    MTLPixelFormat mtlPixelFormat = GrBackendFormatAsMTLPixelFormat(format);
    SkASSERT(this->caps()->isFormatCompressed(format));

    int numMipLevels = 1;
    if (mipmapped == skgpu::Mipmapped::kYes) {
        numMipLevels = SkMipmap::ComputeLevelCount(dimensions.width(), dimensions.height()) + 1;
    }

    GrMipmapStatus mipmapStatus = (mipmapped == skgpu::Mipmapped::kYes)
                                          ? GrMipmapStatus::kValid
                                          : GrMipmapStatus::kNotAllocated;

    auto tex = GrMtlTexture::MakeNewTexture(this, budgeted, dimensions, mtlPixelFormat,
                                            numMipLevels, mipmapStatus,
                                            /*label=*/"MtlGpu_CreateCompressedTexture");
    if (!tex) {
        return nullptr;
    }

    // Upload to texture
    id<MTLTexture> GR_NORETAIN mtlTexture = tex->mtlTexture();
    SkASSERT(mtlTexture);

    auto compressionType = GrBackendFormatToCompressionType(format);
    SkASSERT(compressionType != SkTextureCompressionType::kNone);

    TArray<size_t> individualMipOffsets(numMipLevels);
    SkDEBUGCODE(size_t combinedBufferSize =)
            SkCompressedDataSize(compressionType,
                                 dimensions,
                                 &individualMipOffsets,
                                 mipmapped == skgpu::Mipmapped::kYes);
    SkASSERT(individualMipOffsets.size() == numMipLevels);
    SkASSERT(dataSize == combinedBufferSize);

    // The offset value must be a multiple of the destination texture's pixel size in bytes;
    // for compressed textures, this is the block size.
    size_t alignment = SkCompressedBlockSize(compressionType);
    GrStagingBufferManager::Slice slice = fStagingBufferManager.allocateStagingBufferSlice(
            dataSize, alignment);
    if (!slice.fBuffer) {
        return nullptr;
    }
    char* bufferData = (char*)slice.fOffsetMapPtr;
    GrMtlBuffer* mtlBuffer = static_cast<GrMtlBuffer*>(slice.fBuffer);

    MTLOrigin origin = MTLOriginMake(0, 0, 0);

    auto cmdBuffer = this->commandBuffer();
    id<MTLBlitCommandEncoder> GR_NORETAIN blitCmdEncoder = cmdBuffer->getBlitCommandEncoder();
    if (!blitCmdEncoder) {
        return nullptr;
    }
#ifdef SK_ENABLE_MTL_DEBUG_INFO
    [blitCmdEncoder pushDebugGroup:@"onCreateCompressedTexture"];
#endif

    // Copy the data into the staging buffer; compressed data is already tightly packed.
    memcpy(bufferData, data, dataSize);

    SkISize levelDimensions = dimensions;
    for (int currentMipLevel = 0; currentMipLevel < numMipLevels; currentMipLevel++) {
        const size_t levelRowBytes = skgpu::CompressedRowBytes(compressionType,
                                                               levelDimensions.width());
        size_t levelSize = SkCompressedDataSize(compressionType, levelDimensions, nullptr, false);

        // TODO: can this all be done in one go?
        [blitCmdEncoder copyFromBuffer: mtlBuffer->mtlBuffer()
                          sourceOffset: slice.fOffset + individualMipOffsets[currentMipLevel]
                     sourceBytesPerRow: levelRowBytes
                   sourceBytesPerImage: levelSize
                            sourceSize: MTLSizeMake(levelDimensions.width(),
                                                    levelDimensions.height(), 1)
                             toTexture: mtlTexture
                      destinationSlice: 0
                      destinationLevel: currentMipLevel
                     destinationOrigin: origin];

        levelDimensions = {std::max(1, levelDimensions.width() /2),
                           std::max(1, levelDimensions.height()/2)};
    }
#ifdef SK_BUILD_FOR_MAC
    if (this->mtlCaps().isMac()) {
        [mtlBuffer->mtlBuffer() didModifyRange: NSMakeRange(slice.fOffset, dataSize)];
    }
#endif
#ifdef SK_ENABLE_MTL_DEBUG_INFO
    [blitCmdEncoder popDebugGroup];
#endif

    return std::move(tex);
}

// TODO: Extra retain/release can't be avoided here because of GetMtlTextureInfo copying the
// sk_cfp. It would be useful to have a (possibly-internal-only?) API to get the raw pointer.
static id<MTLTexture> get_texture_from_backend(const GrBackendTexture& backendTex) {
    GrMtlTextureInfo textureInfo;
    if (!GrBackendTextures::GetMtlTextureInfo(backendTex, &textureInfo)) {
        return nil;
    }
    return GrGetMTLTexture(textureInfo.fTexture.get());
}

static id<MTLTexture> get_texture_from_backend(const GrBackendRenderTarget& backendRT) {
    GrMtlTextureInfo textureInfo;
    if (!GrBackendRenderTargets::GetMtlTextureInfo(backendRT, &textureInfo)) {
        return nil;
    }
    return GrGetMTLTexture(textureInfo.fTexture.get());
}

sk_sp<GrTexture> GrMtlGpu::onWrapBackendTexture(const GrBackendTexture& backendTex,
                                                GrWrapOwnership,
                                                GrWrapCacheable cacheable,
                                                GrIOType ioType) {
    id<MTLTexture> mtlTexture = get_texture_from_backend(backendTex);
    if (!mtlTexture) {
        return nullptr;
    }
    // We don't currently support sampling from an MSAA texture in shaders.
    if (mtlTexture.sampleCount != 1) {
        return nullptr;
    }

    return GrMtlTexture::MakeWrappedTexture(this, backendTex.dimensions(), mtlTexture, cacheable,
                                            ioType);
}

sk_sp<GrTexture> GrMtlGpu::onWrapCompressedBackendTexture(const GrBackendTexture& backendTex,
                                                          GrWrapOwnership,
                                                          GrWrapCacheable cacheable) {
    id<MTLTexture> mtlTexture = get_texture_from_backend(backendTex);
    if (!mtlTexture) {
        return nullptr;
    }
    // We don't currently support sampling from an MSAA texture in shaders.
    if (mtlTexture.sampleCount != 1) {
        return nullptr;
    }

    return GrMtlTexture::MakeWrappedTexture(this, backendTex.dimensions(), mtlTexture, cacheable,
                                            kRead_GrIOType);
}

sk_sp<GrTexture> GrMtlGpu::onWrapRenderableBackendTexture(const GrBackendTexture& backendTex,
                                                          int sampleCnt,
                                                          GrWrapOwnership,
                                                          GrWrapCacheable cacheable) {
    id<MTLTexture> mtlTexture = get_texture_from_backend(backendTex);
    if (!mtlTexture) {
        return nullptr;
    }
    // We don't currently support sampling from an MSAA texture in shaders.
    if (mtlTexture.sampleCount != 1) {
        return nullptr;
    }

    const GrMtlCaps& caps = this->mtlCaps();

    MTLPixelFormat format = mtlTexture.pixelFormat;
    if (!caps.isFormatRenderable(format, sampleCnt)) {
        return nullptr;
    }

    if (@available(macOS 10.11, iOS 9.0, tvOS 9.0, *)) {
        SkASSERT(MTLTextureUsageRenderTarget & mtlTexture.usage);
    }

    sampleCnt = caps.getRenderTargetSampleCount(sampleCnt, format);
    SkASSERT(sampleCnt);

    return GrMtlTextureRenderTarget::MakeWrappedTextureRenderTarget(
            this, backendTex.dimensions(), sampleCnt, mtlTexture, cacheable);
}

sk_sp<GrRenderTarget> GrMtlGpu::onWrapBackendRenderTarget(const GrBackendRenderTarget& backendRT) {
    if (!this->caps()->isFormatRenderable(backendRT.getBackendFormat(), backendRT.sampleCnt())) {
        return nullptr;
    }

    id<MTLTexture> mtlTexture = get_texture_from_backend(backendRT);
    if (!mtlTexture) {
        return nullptr;
    }

    if (@available(macOS 10.11, iOS 9.0, tvOS 9.0, *)) {
        SkASSERT(MTLTextureUsageRenderTarget & mtlTexture.usage);
    }

    return GrMtlRenderTarget::MakeWrappedRenderTarget(this, backendRT.dimensions(),
                                                      backendRT.sampleCnt(), mtlTexture);
}

bool GrMtlGpu::onRegenerateMipMapLevels(GrTexture* texture) {
    GrMtlTexture* grMtlTexture = static_cast<GrMtlTexture*>(texture);
    id<MTLTexture> GR_NORETAIN mtlTexture = grMtlTexture->mtlTexture();

    // Automatic mipmap generation is only supported by color-renderable formats
    if (!fMtlCaps->isFormatRenderable(mtlTexture.pixelFormat, 1) &&
        // We have pixel configs marked as texturable-only that use RGBA8 as the internal format
        MTLPixelFormatRGBA8Unorm != mtlTexture.pixelFormat) {
        return false;
    }

    auto cmdBuffer = this->commandBuffer();
    id<MTLBlitCommandEncoder> GR_NORETAIN blitCmdEncoder = cmdBuffer->getBlitCommandEncoder();
    if (!blitCmdEncoder) {
        return false;
    }
    [blitCmdEncoder generateMipmapsForTexture: mtlTexture];
    this->commandBuffer()->addGrSurface(sk_ref_sp<const GrSurface>(grMtlTexture->attachment()));

    return true;
}

// Used to "clear" a backend texture to a constant color by transferring.
static GrColorType mtl_format_to_backend_tex_clear_colortype(MTLPixelFormat format) {
    switch (format) {
        case MTLPixelFormatA8Unorm:         return GrColorType::kAlpha_8;
        case MTLPixelFormatR8Unorm:         return GrColorType::kR_8;
        case MTLPixelFormatB5G6R5Unorm:     return GrColorType::kBGR_565;
        case MTLPixelFormatABGR4Unorm:      return GrColorType::kABGR_4444;
        case MTLPixelFormatRGBA8Unorm:      return GrColorType::kRGBA_8888;
        case MTLPixelFormatRGBA8Unorm_sRGB: return GrColorType::kRGBA_8888_SRGB;

        case MTLPixelFormatRG8Unorm:        return GrColorType::kRG_88;
        case MTLPixelFormatBGRA8Unorm:      return GrColorType::kBGRA_8888;
        case MTLPixelFormatRGB10A2Unorm:    return GrColorType::kRGBA_1010102;
        case MTLPixelFormatBGR10A2Unorm:    return GrColorType::kBGRA_1010102;
        case MTLPixelFormatR16Float:        return GrColorType::kR_F16;
        case MTLPixelFormatRGBA16Float:     return GrColorType::kRGBA_F16;
        case MTLPixelFormatR16Unorm:        return GrColorType::kR_16;
        case MTLPixelFormatRG16Unorm:       return GrColorType::kRG_1616;
        case MTLPixelFormatRGBA16Unorm:     return GrColorType::kRGBA_16161616;
        case MTLPixelFormatRG16Float:       return GrColorType::kRG_F16;
        default:                            return GrColorType::kUnknown;
    }

    SkUNREACHABLE;
}

static void copy_src_data(char* dst,
                          size_t bytesPerPixel,
                          const TArray<size_t>& individualMipOffsets,
                          const GrPixmap srcData[],
                          int numMipLevels,
                          size_t bufferSize) {
    SkASSERT(srcData && numMipLevels);
    SkASSERT(individualMipOffsets.size() == numMipLevels);

    for (int level = 0; level < numMipLevels; ++level) {
        const size_t trimRB = srcData[level].width() * bytesPerPixel;
        SkASSERT(individualMipOffsets[level] + trimRB * srcData[level].height() <= bufferSize);
        SkRectMemcpy(dst + individualMipOffsets[level], trimRB,
                     srcData[level].addr(), srcData[level].rowBytes(),
                     trimRB, srcData[level].height());
    }
}

bool GrMtlGpu::createMtlTextureForBackendSurface(MTLPixelFormat mtlFormat,
                                                 SkISize dimensions,
                                                 int sampleCnt,
                                                 GrTexturable texturable,
                                                 GrRenderable renderable,
                                                 skgpu::Mipmapped mipmapped,
                                                 GrMtlTextureInfo* info) {
    SkASSERT(texturable == GrTexturable::kYes || renderable == GrRenderable::kYes);

    if (texturable == GrTexturable::kYes && !fMtlCaps->isFormatTexturable(mtlFormat)) {
        return false;
    }
    if (renderable == GrRenderable::kYes && !fMtlCaps->isFormatRenderable(mtlFormat, 1)) {
        return false;
    }

    if (!check_max_blit_width(dimensions.width())) {
        return false;
    }

    auto desc = [[MTLTextureDescriptor alloc] init];
    desc.pixelFormat = mtlFormat;
    desc.width = dimensions.width();
    desc.height = dimensions.height();
    if (mipmapped == skgpu::Mipmapped::kYes) {
        desc.mipmapLevelCount = 1 + SkPrevLog2(std::max(dimensions.width(), dimensions.height()));
    }
    if (@available(macOS 10.11, iOS 9.0, tvOS 9.0, *)) {
        desc.storageMode = MTLStorageModePrivate;
        MTLTextureUsage usage = texturable == GrTexturable::kYes ? MTLTextureUsageShaderRead : 0;
        usage |= renderable == GrRenderable::kYes ? MTLTextureUsageRenderTarget : 0;
        desc.usage = usage;
    }
    if (sampleCnt != 1) {
        desc.sampleCount = sampleCnt;
        desc.textureType = MTLTextureType2DMultisample;
    }
    id<MTLTexture> testTexture = [fDevice newTextureWithDescriptor: desc];
#ifdef SK_ENABLE_MTL_DEBUG_INFO
    testTexture.label = @"testTexture";
#endif
    info->fTexture.reset(GrRetainPtrFromId(testTexture));
    return true;
}

GrBackendTexture GrMtlGpu::onCreateBackendTexture(SkISize dimensions,
                                                  const GrBackendFormat& format,
                                                  GrRenderable renderable,
                                                  skgpu::Mipmapped mipmapped,
                                                  GrProtected isProtected,
                                                  std::string_view label) {
    const MTLPixelFormat mtlFormat = GrBackendFormatAsMTLPixelFormat(format);

    GrMtlTextureInfo info;
    if (!this->createMtlTextureForBackendSurface(mtlFormat, dimensions, 1, GrTexturable::kYes,
                                                 renderable, mipmapped, &info)) {
        return {};
    }

    return GrBackendTextures::MakeMtl(dimensions.width(), dimensions.height(), mipmapped, info);
}

bool GrMtlGpu::onClearBackendTexture(const GrBackendTexture& backendTexture,
                                     sk_sp<skgpu::RefCntedCallback> finishedCallback,
                                     std::array<float, 4> color) {
    GrMtlTextureInfo info;
    SkAssertResult(GrBackendTextures::GetMtlTextureInfo(backendTexture, &info));

    id<MTLTexture> GR_NORETAIN mtlTexture = GrGetMTLTexture(info.fTexture.get());

    const MTLPixelFormat mtlFormat = mtlTexture.pixelFormat;

    // Create a transfer buffer and fill it with data. We reuse the same buffer for all
    // levels, which is fine since we made the row bytes tight.
    size_t bytesPerPixel = skgpu::MtlFormatBytesPerBlock(mtlFormat);
    size_t combinedBufferSize = bytesPerPixel*backendTexture.width()*backendTexture.height();

    size_t alignment = std::max(bytesPerPixel, this->mtlCaps().getMinBufferAlignment());
    GrStagingBufferManager::Slice slice = fStagingBufferManager.allocateStagingBufferSlice(
            combinedBufferSize, alignment);
    if (!slice.fBuffer) {
        return false;
    }
    char* buffer = (char*)slice.fOffsetMapPtr;

    auto colorType = mtl_format_to_backend_tex_clear_colortype(mtlFormat);
    if (colorType == GrColorType::kUnknown) {
        return false;
    }
    GrImageInfo ii(colorType, kUnpremul_SkAlphaType, nullptr, backendTexture.dimensions());
    auto rb = ii.minRowBytes();
    SkASSERT(rb == bytesPerPixel*backendTexture.width());
    if (!GrClearImage(ii, buffer, rb, color)) {
        return false;
    }

    // Transfer buffer contents to texture
    MTLOrigin origin = MTLOriginMake(0, 0, 0);

    GrMtlCommandBuffer* cmdBuffer = this->commandBuffer();
    id<MTLBlitCommandEncoder> GR_NORETAIN blitCmdEncoder = cmdBuffer->getBlitCommandEncoder();
    if (!blitCmdEncoder) {
        return false;
    }
#ifdef SK_ENABLE_MTL_DEBUG_INFO
    [blitCmdEncoder pushDebugGroup:@"onClearBackendTexture"];
#endif
    GrMtlBuffer* mtlBuffer = static_cast<GrMtlBuffer*>(slice.fBuffer);

    SkISize levelDimensions(backendTexture.dimensions());
    int numMipLevels = (int)mtlTexture.mipmapLevelCount;
    for (int currentMipLevel = 0; currentMipLevel < numMipLevels; currentMipLevel++) {
        size_t levelRowBytes = levelDimensions.width() * bytesPerPixel;
        size_t levelSize = levelRowBytes * levelDimensions.height();

        // TODO: can this all be done in one go?
        [blitCmdEncoder copyFromBuffer: mtlBuffer->mtlBuffer()
                          sourceOffset: slice.fOffset
                     sourceBytesPerRow: levelRowBytes
                   sourceBytesPerImage: levelSize
                            sourceSize: MTLSizeMake(levelDimensions.width(),
                                                    levelDimensions.height(),
                                                    1)
                             toTexture: mtlTexture
                      destinationSlice: 0
                      destinationLevel: currentMipLevel
                     destinationOrigin: origin];

        levelDimensions = {std::max(1, levelDimensions.width() / 2),
                           std::max(1, levelDimensions.height() / 2)};
    }
#ifdef SK_BUILD_FOR_MAC
    if (this->mtlCaps().isMac()) {
        [mtlBuffer->mtlBuffer() didModifyRange: NSMakeRange(slice.fOffset, combinedBufferSize)];
    }
#endif
#ifdef SK_ENABLE_MTL_DEBUG_INFO
    [blitCmdEncoder popDebugGroup];
#endif

    if (finishedCallback) {
        this->addFinishedCallback(std::move(finishedCallback));
    }

    return true;
}

GrBackendTexture GrMtlGpu::onCreateCompressedBackendTexture(SkISize dimensions,
                                                            const GrBackendFormat& format,
                                                            skgpu::Mipmapped mipmapped,
                                                            GrProtected isProtected) {
    const MTLPixelFormat mtlFormat = GrBackendFormatAsMTLPixelFormat(format);

    GrMtlTextureInfo info;
    if (!this->createMtlTextureForBackendSurface(mtlFormat, dimensions, 1, GrTexturable::kYes,
                                                 GrRenderable::kNo, mipmapped, &info)) {
        return {};
    }

    return GrBackendTextures::MakeMtl(dimensions.width(), dimensions.height(), mipmapped, info);
}

bool GrMtlGpu::onUpdateCompressedBackendTexture(const GrBackendTexture& backendTexture,
                                                sk_sp<skgpu::RefCntedCallback> finishedCallback,
                                                const void* data,
                                                size_t size) {
    GrMtlTextureInfo info;
    SkAssertResult(GrBackendTextures::GetMtlTextureInfo(backendTexture, &info));

    id<MTLTexture> mtlTexture = GrGetMTLTexture(info.fTexture.get());

    int numMipLevels = (int)mtlTexture.mipmapLevelCount;
    skgpu::Mipmapped mipmapped = numMipLevels > 1 ? skgpu::Mipmapped::kYes : skgpu::Mipmapped::kNo;

    SkTextureCompressionType compression =
            GrBackendFormatToCompressionType(backendTexture.getBackendFormat());
    SkASSERT(compression != SkTextureCompressionType::kNone);

    // Create a transfer buffer and fill it with data.
    STArray<16, size_t> individualMipOffsets;
    size_t combinedBufferSize = SkCompressedDataSize(compression,
                                                     backendTexture.dimensions(),
                                                     &individualMipOffsets,
                                                     mipmapped == skgpu::Mipmapped::kYes);
    SkASSERT(individualMipOffsets.size() == numMipLevels);

    size_t alignment = std::max(SkCompressedBlockSize(compression),
                                this->mtlCaps().getMinBufferAlignment());
    GrStagingBufferManager::Slice slice =
            fStagingBufferManager.allocateStagingBufferSlice(combinedBufferSize, alignment);
    if (!slice.fBuffer) {
        return false;
    }
    char* buffer = (char*)slice.fOffsetMapPtr;

    memcpy(buffer, data, size);

    // Transfer buffer contents to texture
    MTLOrigin origin = MTLOriginMake(0, 0, 0);

    GrMtlCommandBuffer* cmdBuffer = this->commandBuffer();
    id<MTLBlitCommandEncoder> blitCmdEncoder = cmdBuffer->getBlitCommandEncoder();
    if (!blitCmdEncoder) {
        return false;
    }
#ifdef SK_ENABLE_MTL_DEBUG_INFO
    [blitCmdEncoder pushDebugGroup:@"onUpdateCompressedBackendTexture"];
#endif
    GrMtlBuffer* mtlBuffer = static_cast<GrMtlBuffer*>(slice.fBuffer);

    SkISize levelDimensions(backendTexture.dimensions());
    for (int currentMipLevel = 0; currentMipLevel < numMipLevels; currentMipLevel++) {
        size_t levelRowBytes = skgpu::CompressedRowBytes(compression, levelDimensions.width());
        size_t levelSize = SkCompressedDataSize(compression, levelDimensions, nullptr, false);

        // TODO: can this all be done in one go?
        [blitCmdEncoder copyFromBuffer: mtlBuffer->mtlBuffer()
                          sourceOffset: slice.fOffset + individualMipOffsets[currentMipLevel]
                     sourceBytesPerRow: levelRowBytes
                   sourceBytesPerImage: levelSize
                            sourceSize: MTLSizeMake(levelDimensions.width(),
                                                    levelDimensions.height(),
                                                    1)
                             toTexture: mtlTexture
                      destinationSlice: 0
                      destinationLevel: currentMipLevel
                     destinationOrigin: origin];

        levelDimensions = {std::max(1, levelDimensions.width() / 2),
                           std::max(1, levelDimensions.height() / 2)};
    }
#ifdef SK_BUILD_FOR_MAC
    if (this->mtlCaps().isMac()) {
        [mtlBuffer->mtlBuffer() didModifyRange:NSMakeRange(slice.fOffset, combinedBufferSize)];
    }
#endif
#ifdef SK_ENABLE_MTL_DEBUG_INFO
    [blitCmdEncoder popDebugGroup];
#endif

    if (finishedCallback) {
        this->addFinishedCallback(std::move(finishedCallback));
    }

    return true;
}

void GrMtlGpu::deleteBackendTexture(const GrBackendTexture& tex) {
    SkASSERT(GrBackendApi::kMetal == tex.backend());
    // Nothing to do here, will get cleaned up when the GrBackendTexture object goes away
}

bool GrMtlGpu::compile(const GrProgramDesc& desc, const GrProgramInfo& programInfo) {
    GrThreadSafePipelineBuilder::Stats::ProgramCacheResult stat;

    auto pipelineState = this->resourceProvider().findOrCreateCompatiblePipelineState(
                                 desc, programInfo, &stat);
    if (!pipelineState) {
        return false;
    }

    return stat != GrThreadSafePipelineBuilder::Stats::ProgramCacheResult::kHit;
}

bool GrMtlGpu::precompileShader(const SkData& key, const SkData& data) {
    return this->resourceProvider().precompileShader(key, data);
}

#if defined(GPU_TEST_UTILS)
bool GrMtlGpu::isTestingOnlyBackendTexture(const GrBackendTexture& tex) const {
    SkASSERT(GrBackendApi::kMetal == tex.backend());

    GrMtlTextureInfo info;
    if (!GrBackendTextures::GetMtlTextureInfo(tex, &info)) {
        return false;
    }
    id<MTLTexture> mtlTexture = GrGetMTLTexture(info.fTexture.get());
    if (!mtlTexture) {
        return false;
    }
    if (@available(macOS 10.11, iOS 9.0, tvOS 9.0, *)) {
        return mtlTexture.usage & MTLTextureUsageShaderRead;
    } else {
        return true; // best we can do
    }
}

GrBackendRenderTarget GrMtlGpu::createTestingOnlyBackendRenderTarget(SkISize dimensions,
                                                                     GrColorType ct,
                                                                     int sampleCnt,
                                                                     GrProtected isProtected) {
    if (dimensions.width()  > this->caps()->maxRenderTargetSize() ||
        dimensions.height() > this->caps()->maxRenderTargetSize()) {
        return {};
    }
    if (isProtected == GrProtected::kYes) {
        return {};
    }

    MTLPixelFormat format = this->mtlCaps().getFormatFromColorType(ct);
    sampleCnt = this->mtlCaps().getRenderTargetSampleCount(sampleCnt, format);
    if (sampleCnt == 0) {
        return {};
    }

    GrMtlTextureInfo info;
    if (!this->createMtlTextureForBackendSurface(format,
                                                 dimensions,
                                                 sampleCnt,
                                                 GrTexturable::kNo,
                                                 GrRenderable::kYes,
                                                 skgpu::Mipmapped::kNo,
                                                 &info)) {
        return {};
    }

    return GrBackendRenderTargets::MakeMtl(dimensions.width(), dimensions.height(), info);
}

void GrMtlGpu::deleteTestingOnlyBackendRenderTarget(const GrBackendRenderTarget& rt) {
    SkASSERT(GrBackendApi::kMetal == rt.backend());

    GrMtlTextureInfo info;
    if (GrBackendRenderTargets::GetMtlTextureInfo(rt, &info)) {
        GrSubmitInfo submitInfo;
        submitInfo.fSync = GrSyncCpu::kYes;
        this->submitToGpu(submitInfo);
        // Nothing else to do here, will get cleaned up when the GrBackendRenderTarget
        // is deleted.
    }
}
#endif // defined(GPU_TEST_UTILS)

void GrMtlGpu::copySurfaceAsResolve(GrSurface* dst, GrSurface* src) {
    // TODO: Add support for subrectangles
    GrMtlRenderTarget* srcRT = static_cast<GrMtlRenderTarget*>(src->asRenderTarget());
    GrRenderTarget* dstRT = dst->asRenderTarget();
    GrMtlAttachment* dstAttachment;
    if (dstRT) {
        GrMtlRenderTarget* mtlRT = static_cast<GrMtlRenderTarget*>(dstRT);
        dstAttachment = mtlRT->colorAttachment();
    } else {
        SkASSERT(dst->asTexture());
        dstAttachment = static_cast<GrMtlTexture*>(dst->asTexture())->attachment();
    }

    this->resolve(dstAttachment, srcRT->colorAttachment());
}

void GrMtlGpu::copySurfaceAsBlit(GrSurface* dst, GrSurface* src,
                                 GrMtlAttachment* dstAttachment, GrMtlAttachment* srcAttachment,
                                 const SkIRect& srcRect, const SkIPoint& dstPoint) {
#ifdef SK_DEBUG
    SkASSERT(this->mtlCaps().canCopyAsBlit(dstAttachment->mtlFormat(), dstAttachment->numSamples(),
                                           srcAttachment->mtlFormat(), srcAttachment->numSamples(),
                                           srcRect, dstPoint, dst == src));
#endif
    id<MTLTexture> GR_NORETAIN dstTex = dstAttachment->mtlTexture();
    id<MTLTexture> GR_NORETAIN srcTex = srcAttachment->mtlTexture();

    auto cmdBuffer = this->commandBuffer();
    id<MTLBlitCommandEncoder> GR_NORETAIN blitCmdEncoder = cmdBuffer->getBlitCommandEncoder();
    if (!blitCmdEncoder) {
        return;
    }
#ifdef SK_ENABLE_MTL_DEBUG_INFO
    [blitCmdEncoder pushDebugGroup:@"copySurfaceAsBlit"];
#endif
    [blitCmdEncoder copyFromTexture: srcTex
                        sourceSlice: 0
                        sourceLevel: 0
                       sourceOrigin: MTLOriginMake(srcRect.x(), srcRect.y(), 0)
                         sourceSize: MTLSizeMake(srcRect.width(), srcRect.height(), 1)
                          toTexture: dstTex
                   destinationSlice: 0
                   destinationLevel: 0
                  destinationOrigin: MTLOriginMake(dstPoint.fX, dstPoint.fY, 0)];
#ifdef SK_ENABLE_MTL_DEBUG_INFO
    [blitCmdEncoder popDebugGroup];
#endif
    cmdBuffer->addGrSurface(sk_ref_sp<const GrSurface>(dst));
    cmdBuffer->addGrSurface(sk_ref_sp<const GrSurface>(src));
}
1260
1261bool GrMtlGpu::onCopySurface(GrSurface* dst, const SkIRect& dstRect,
1262                             GrSurface* src, const SkIRect& srcRect,
1263                             GrSamplerState::Filter) {
1264    SkASSERT(!src->isProtected() && !dst->isProtected());
1265
1266    if (srcRect.size() != dstRect.size()) {
1267        return false;
1268    }
1269
1270    GrMtlAttachment* dstAttachment;
1271    GrMtlAttachment* srcAttachment;
1272    GrRenderTarget* dstRT = dst->asRenderTarget();
1273    if (dstRT) {
1274        GrMtlRenderTarget* mtlRT = static_cast<GrMtlRenderTarget*>(dstRT);
1275        // renderTargetSupportsDiscardableMSAA() technically returns true for single-sample RTs
1276        // that used DMSAA, in which case we don't need to pick the resolve attachment. But in
1277        // that case the resolve and color attachments are the same anyway.
1278        if (this->mtlCaps().renderTargetSupportsDiscardableMSAA(mtlRT)) {
1279            dstAttachment = mtlRT->resolveAttachment();
1280        } else {
1281            dstAttachment = mtlRT->colorAttachment();
1282        }
1283    } else if (dst->asTexture()) {
1284        dstAttachment = static_cast<GrMtlTexture*>(dst->asTexture())->attachment();
1285    } else {
1286        // The surface is already a GrAttachment
1287        dstAttachment = static_cast<GrMtlAttachment*>(dst);
1288    }
1289    GrRenderTarget* srcRT = src->asRenderTarget();
1290    if (srcRT) {
1291        GrMtlRenderTarget* mtlRT = static_cast<GrMtlRenderTarget*>(srcRT);
1292        // renderTargetSupportsDiscardableMSAA() technically returns true for single-sample RTs
1293        // that used DMSAA, in which case we don't need to pick the resolve attachment. But in
1294        // that case the resolve and color attachments are the same anyway.
1295        if (this->mtlCaps().renderTargetSupportsDiscardableMSAA(mtlRT)) {
1296            srcAttachment = mtlRT->resolveAttachment();
1297        } else {
1298            srcAttachment = mtlRT->colorAttachment();
1299        }
1300    } else if (src->asTexture()) {
1302        srcAttachment = static_cast<GrMtlTexture*>(src->asTexture())->attachment();
1303    } else {
1304        // The surface is already a GrAttachment
1305        srcAttachment = static_cast<GrMtlAttachment*>(src);
1306    }
1307
1308    MTLPixelFormat dstFormat = dstAttachment->mtlFormat();
1309    MTLPixelFormat srcFormat = srcAttachment->mtlFormat();
1310
1311    int dstSampleCnt = dstAttachment->sampleCount();
1312    int srcSampleCnt = srcAttachment->sampleCount();
1313
1314    const SkIPoint dstPoint = dstRect.topLeft();
1315    if (this->mtlCaps().canCopyAsResolve(dstFormat, dstSampleCnt,
1316                                         srcFormat, srcSampleCnt,
1317                                         SkToBool(srcRT), src->dimensions(),
1318                                         srcRect, dstPoint,
1319                                         dstAttachment == srcAttachment)) {
1320        this->copySurfaceAsResolve(dst, src);
1321        return true;
1322    }
1323
1324    if (srcAttachment->framebufferOnly() || dstAttachment->framebufferOnly()) {
1325        return false;
1326    }
1327
1328    if (this->mtlCaps().canCopyAsBlit(dstFormat, dstSampleCnt, srcFormat, srcSampleCnt,
1329                                      srcRect, dstPoint, dstAttachment == srcAttachment)) {
1330        this->copySurfaceAsBlit(dst, src, dstAttachment, srcAttachment, srcRect, dstPoint);
1331        return true;
1332    }
1333
1334    return false;
1335}
1336
1337bool GrMtlGpu::onWritePixels(GrSurface* surface,
1338                             SkIRect rect,
1339                             GrColorType surfaceColorType,
1340                             GrColorType srcColorType,
1341                             const GrMipLevel texels[],
1342                             int mipLevelCount,
1343                             bool prepForTexSampling) {
1344    GrMtlTexture* mtlTexture = static_cast<GrMtlTexture*>(surface->asTexture());
1345    // TODO: In principle we should be able to support pure rendertargets as well, but
1346    // until we find a use case we'll only support texture rendertargets.
1347    if (!mtlTexture) {
1348        return false;
1349    }
1350    if (!mipLevelCount) {
1351        return false;
1352    }
1353#ifdef SK_DEBUG
1354    for (int i = 0; i < mipLevelCount; i++) {
1355        SkASSERT(texels[i].fPixels);
1356    }
1357#endif
1358    return this->uploadToTexture(mtlTexture, rect, srcColorType, texels, mipLevelCount);
1359}
1360
1361bool GrMtlGpu::onReadPixels(GrSurface* surface,
1362                            SkIRect rect,
1363                            GrColorType surfaceColorType,
1364                            GrColorType dstColorType,
1365                            void* buffer,
1366                            size_t rowBytes) {
1367    SkASSERT(surface);
1368
1369    if (surfaceColorType != dstColorType) {
1370        return false;
1371    }
1372
1373    int bpp = GrColorTypeBytesPerPixel(dstColorType);
1374    size_t transBufferRowBytes = bpp*rect.width();
1375    size_t transBufferImageBytes = transBufferRowBytes*rect.height();
1376
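    // Read back through a GPU-to-CPU transfer buffer: encode a copy into the buffer, force a
    // synchronous submit, then memcpy the mapped contents out.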
1377    GrResourceProvider* resourceProvider = this->getContext()->priv().resourceProvider();
1378    sk_sp<GrGpuBuffer> transferBuffer = resourceProvider->createBuffer(
1379            transBufferImageBytes,
1380            GrGpuBufferType::kXferGpuToCpu,
1381            kDynamic_GrAccessPattern,
1382            GrResourceProvider::ZeroInit::kNo);
1383
1384    if (!transferBuffer) {
1385        return false;
1386    }
1387
1388    GrMtlBuffer* grMtlBuffer = static_cast<GrMtlBuffer*>(transferBuffer.get());
1389    if (!this->readOrTransferPixels(surface,
1390                                    rect,
1391                                    dstColorType,
1392                                    grMtlBuffer->mtlBuffer(),
1393                                    0,
1394                                    transBufferImageBytes,
1395                                    transBufferRowBytes)) {
1396        return false;
1397    }
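    // Block until the copy has executed so the buffer contents are valid to read on the CPU.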
1398    this->submitCommandBuffer(kForce_SyncQueue);
1399
1400    const void* mappedMemory = grMtlBuffer->mtlBuffer().contents;
1401
1402    SkRectMemcpy(buffer,
1403                 rowBytes,
1404                 mappedMemory,
1405                 transBufferRowBytes,
1406                 transBufferRowBytes,
1407                 rect.height());
1408
1409    return true;
1410}
1411
1412bool GrMtlGpu::onTransferFromBufferToBuffer(sk_sp<GrGpuBuffer> src,
1413                                            size_t srcOffset,
1414                                            sk_sp<GrGpuBuffer> dst,
1415                                            size_t dstOffset,
1416                                            size_t size) {
1417    id<MTLBuffer> GR_NORETAIN mtlSrc = static_cast<GrMtlBuffer*>(src.get())->mtlBuffer();
1418    id<MTLBuffer> GR_NORETAIN mtlDst = static_cast<GrMtlBuffer*>(dst.get())->mtlBuffer();
1419    SkASSERT(mtlSrc);
1420    SkASSERT(mtlDst);
1421
1422    auto cmdBuffer = this->commandBuffer();
1423    id<MTLBlitCommandEncoder> GR_NORETAIN blitCmdEncoder = cmdBuffer->getBlitCommandEncoder();
1424    if (!blitCmdEncoder) {
1425        return false;
1426    }
1427
1428#ifdef SK_ENABLE_MTL_DEBUG_INFO
1429    [blitCmdEncoder pushDebugGroup:@"onTransferFromBufferToBuffer"];
1430#endif
1431    [blitCmdEncoder copyFromBuffer: mtlSrc
1432                      sourceOffset: srcOffset
1433                          toBuffer: mtlDst
1434                 destinationOffset: dstOffset
1435                              size: size];
1436#ifdef SK_ENABLE_MTL_DEBUG_INFO
1437    [blitCmdEncoder popDebugGroup];
1438#endif
1439
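    // Hand our refs to the command buffer so both buffers stay alive until the copy completes.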
1440    cmdBuffer->addGrBuffer(std::move(src));
1441    cmdBuffer->addGrBuffer(std::move(dst));
1442
1443    return true;
1444}
1445
1446bool GrMtlGpu::onTransferPixelsTo(GrTexture* texture,
1447                                  SkIRect rect,
1448                                  GrColorType textureColorType,
1449                                  GrColorType bufferColorType,
1450                                  sk_sp<GrGpuBuffer> transferBuffer,
1451                                  size_t offset,
1452                                  size_t rowBytes) {
1453    SkASSERT(texture);
1454    SkASSERT(transferBuffer);
1455    if (textureColorType != bufferColorType) {
1456        return false;
1457    }
1458
1459    GrMtlTexture* grMtlTexture = static_cast<GrMtlTexture*>(texture);
1460    id<MTLTexture> GR_NORETAIN mtlTexture = grMtlTexture->mtlTexture();
1461    SkASSERT(mtlTexture);
1462
1463    GrMtlBuffer* grMtlBuffer = static_cast<GrMtlBuffer*>(transferBuffer.get());
1464    id<MTLBuffer> GR_NORETAIN mtlBuffer = grMtlBuffer->mtlBuffer();
1465    SkASSERT(mtlBuffer);
1466
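    // As in onTransferPixelsFrom, the buffer offset must be pixel-aligned, and we only handle
    // formats whose backend bytes-per-pixel matches the buffer color type.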
1467    size_t bpp = GrColorTypeBytesPerPixel(bufferColorType);
1468    if (offset % bpp) {
1469        return false;
1470    }
1471    if (GrBackendFormatBytesPerPixel(texture->backendFormat()) != bpp) {
1472        return false;
1473    }
1474
1475    MTLOrigin origin = MTLOriginMake(rect.left(), rect.top(), 0);
1476
1477    auto cmdBuffer = this->commandBuffer();
1478    id<MTLBlitCommandEncoder> GR_NORETAIN blitCmdEncoder = cmdBuffer->getBlitCommandEncoder();
1479    if (!blitCmdEncoder) {
1480        return false;
1481    }
1482#ifdef SK_ENABLE_MTL_DEBUG_INFO
1483    [blitCmdEncoder pushDebugGroup:@"onTransferPixelsTo"];
1484#endif
1485    [blitCmdEncoder copyFromBuffer: mtlBuffer
1486                      sourceOffset: offset
1487                 sourceBytesPerRow: rowBytes
1488               sourceBytesPerImage: rowBytes*rect.height()
1489                        sourceSize: MTLSizeMake(rect.width(), rect.height(), 1)
1490                         toTexture: mtlTexture
1491                  destinationSlice: 0
1492                  destinationLevel: 0
1493                 destinationOrigin: origin];
1494#ifdef SK_ENABLE_MTL_DEBUG_INFO
1495    [blitCmdEncoder popDebugGroup];
1496#endif
1497
1498    return true;
1499}
1500
1501bool GrMtlGpu::onTransferPixelsFrom(GrSurface* surface,
1502                                    SkIRect rect,
1503                                    GrColorType surfaceColorType,
1504                                    GrColorType bufferColorType,
1505                                    sk_sp<GrGpuBuffer> transferBuffer,
1506                                    size_t offset) {
1507    SkASSERT(surface);
1508    SkASSERT(transferBuffer);
1509
1510    if (surfaceColorType != bufferColorType) {
1511        return false;
1512    }
1513
1514    // Metal only supports offsets that are aligned to a pixel.
1515    size_t bpp = GrColorTypeBytesPerPixel(bufferColorType);
1516    if (offset % bpp) {
1517        return false;
1518    }
1519    if (GrBackendFormatBytesPerPixel(surface->backendFormat()) != bpp) {
1520        return false;
1521    }
1522
1523    GrMtlBuffer* grMtlBuffer = static_cast<GrMtlBuffer*>(transferBuffer.get());
1524
1525    size_t transBufferRowBytes = bpp*rect.width();
1526    size_t transBufferImageBytes = transBufferRowBytes*rect.height();
1527
1528    return this->readOrTransferPixels(surface,
1529                                      rect,
1530                                      bufferColorType,
1531                                      grMtlBuffer->mtlBuffer(),
1532                                      offset,
1533                                      transBufferImageBytes,
1534                                      transBufferRowBytes);
1535}
1536
1537bool GrMtlGpu::readOrTransferPixels(GrSurface* surface,
1538                                    SkIRect rect,
1539                                    GrColorType dstColorType,
1540                                    id<MTLBuffer> transferBuffer,
1541                                    size_t offset,
1542                                    size_t imageBytes,
1543                                    size_t rowBytes) {
1544    if (!check_max_blit_width(rect.width())) {
1545        return false;
1546    }
1547
1548    id<MTLTexture> mtlTexture;
1549    if (GrMtlRenderTarget* rt = static_cast<GrMtlRenderTarget*>(surface->asRenderTarget())) {
1550        if (rt->numSamples() > 1) {
1551            SkASSERT(rt->requiresManualMSAAResolve());  // msaa-render-to-texture not yet supported.
1552            mtlTexture = rt->resolveMTLTexture();
1553        } else {
1554            SkASSERT(!rt->requiresManualMSAAResolve());
1555            mtlTexture = rt->colorMTLTexture();
1556        }
1557    } else if (GrMtlTexture* texture = static_cast<GrMtlTexture*>(surface->asTexture())) {
1558        mtlTexture = texture->mtlTexture();
1559    }
1560    if (!mtlTexture) {
1561        return false;
1562    }
1563
1564    auto cmdBuffer = this->commandBuffer();
1565    id<MTLBlitCommandEncoder> GR_NORETAIN blitCmdEncoder = cmdBuffer->getBlitCommandEncoder();
1566    if (!blitCmdEncoder) {
1567        return false;
1568    }
1569#ifdef SK_ENABLE_MTL_DEBUG_INFO
1570    [blitCmdEncoder pushDebugGroup:@"readOrTransferPixels"];
1571#endif
1572    [blitCmdEncoder copyFromTexture: mtlTexture
1573                        sourceSlice: 0
1574                        sourceLevel: 0
1575                       sourceOrigin: MTLOriginMake(rect.left(), rect.top(), 0)
1576                         sourceSize: MTLSizeMake(rect.width(), rect.height(), 1)
1577                           toBuffer: transferBuffer
1578                  destinationOffset: offset
1579             destinationBytesPerRow: rowBytes
1580           destinationBytesPerImage: imageBytes];
1581#ifdef SK_BUILD_FOR_MAC
1582    if (this->mtlCaps().isMac()) {
1583        // Managed-storage resources need an explicit synchronize for the CPU to see GPU writes
1584        [blitCmdEncoder synchronizeResource: transferBuffer];
1585    }
1586#endif
1587#ifdef SK_ENABLE_MTL_DEBUG_INFO
1588    [blitCmdEncoder popDebugGroup];
1589#endif
1590
1591    return true;
1592}
1593
1594[[nodiscard]] std::unique_ptr<GrSemaphore> GrMtlGpu::makeSemaphore(bool /*isOwned*/) {
1595    SkASSERT(this->caps()->semaphoreSupport());
1596    return GrMtlSemaphore::Make(this);
1597}
1598
1599std::unique_ptr<GrSemaphore> GrMtlGpu::wrapBackendSemaphore(const GrBackendSemaphore& semaphore,
1600                                                            GrSemaphoreWrapType /* wrapType */,
1601                                                            GrWrapOwnership /*ownership*/) {
1602    SkASSERT(this->caps()->backendSemaphoreSupport());
1603    return GrMtlSemaphore::MakeWrapped(GrBackendSemaphores::GetMtlHandle(semaphore),
1604                                       GrBackendSemaphores::GetMtlValue(semaphore));
1605}
1606
1607void GrMtlGpu::insertSemaphore(GrSemaphore* semaphore) {
1608    if (@available(macOS 10.14, iOS 12.0, tvOS 12.0, *)) {
1609        SkASSERT(semaphore);
1610        GrMtlSemaphore* mtlSem = static_cast<GrMtlSemaphore*>(semaphore);
1611
1612        this->commandBuffer()->encodeSignalEvent(mtlSem->event(), mtlSem->value());
1613    }
1614}
1615
1616void GrMtlGpu::waitSemaphore(GrSemaphore* semaphore) {
1617    if (@available(macOS 10.14, iOS 12.0, tvOS 12.0, *)) {
1618        SkASSERT(semaphore);
1619        GrMtlSemaphore* mtlSem = static_cast<GrMtlSemaphore*>(semaphore);
1620
1621        this->commandBuffer()->encodeWaitForEvent(mtlSem->event(), mtlSem->value());
1622    }
1623}
1624
1625void GrMtlGpu::onResolveRenderTarget(GrRenderTarget* target, const SkIRect&) {
1626    SkASSERT(target->numSamples() > 1);
1627    GrMtlRenderTarget* rt = static_cast<GrMtlRenderTarget*>(target);
1628
1629    if (rt->resolveAttachment() && this->mtlCaps().renderTargetSupportsDiscardableMSAA(rt)) {
1630        // We would have resolved the RT during the render pass.
1631        return;
1632    }
1633
1634    this->resolve(rt->resolveAttachment(), rt->colorAttachment());
1636}
1637
1638void GrMtlGpu::resolve(GrMtlAttachment* resolveAttachment,
1639                       GrMtlAttachment* msaaAttachment) {
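    // Encode a render pass with no draws; MTLStoreActionMultisampleResolve alone resolves the
    // MSAA attachment into the resolve texture.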
1640    auto renderPassDesc = [[MTLRenderPassDescriptor alloc] init];
1641    auto colorAttachment = renderPassDesc.colorAttachments[0];
1642    colorAttachment.texture = msaaAttachment->mtlTexture();
1643    colorAttachment.resolveTexture = resolveAttachment->mtlTexture();
1644    colorAttachment.loadAction = MTLLoadActionLoad;
1645    colorAttachment.storeAction = MTLStoreActionMultisampleResolve;
1646
1647    GrMtlRenderCommandEncoder* cmdEncoder =
1648            this->commandBuffer()->getRenderCommandEncoder(renderPassDesc, nullptr, nullptr);
1649    if (cmdEncoder) {
1650        cmdEncoder->setLabel(@"resolveTexture");
1651        this->commandBuffer()->addGrSurface(sk_ref_sp<const GrSurface>(resolveAttachment));
1652        this->commandBuffer()->addGrSurface(sk_ref_sp<const GrSurface>(msaaAttachment));
1653    }
1654}
1655
1656GrMtlRenderCommandEncoder* GrMtlGpu::loadMSAAFromResolve(
1657        GrAttachment* dst, GrMtlAttachment* src, const SkIRect& srcRect,
1658        MTLRenderPassStencilAttachmentDescriptor* stencil) {
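    // Repopulates a discardable MSAA attachment from its single-sample resolve texture by drawing
    // a full-coverage rect with a dedicated pipeline, returning the encoder for further use.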
1659    if (!dst) {
1660        return nil;
1661    }
1662    if (!src || src->framebufferOnly()) {
1663        return nil;
1664    }
1665
1666    GrMtlAttachment* mtlDst = static_cast<GrMtlAttachment*>(dst);
1667
1668    MTLPixelFormat stencilFormat = stencil.texture.pixelFormat;
1669    auto renderPipeline = this->resourceProvider().findOrCreateMSAALoadPipeline(mtlDst->mtlFormat(),
1670                                                                                dst->numSamples(),
1671                                                                                stencilFormat);
1672
1673    // Set up the render command encoder
1674    auto renderPassDesc = [MTLRenderPassDescriptor new];
1675    auto colorAttachment = renderPassDesc.colorAttachments[0];
1676    colorAttachment.texture = mtlDst->mtlTexture();
1677    colorAttachment.loadAction = MTLLoadActionDontCare;
1678    colorAttachment.storeAction = MTLStoreActionMultisampleResolve;
1679    colorAttachment.resolveTexture = src->mtlTexture();
1680
1681    renderPassDesc.stencilAttachment = stencil;
1682
1683    // We know the preceding renderCommandEncoder will not be compatible: either it targets a
1684    // different render target, or we are reading from the resolve and must let the previous
1685    // resolve finish first. So we create a new encoder without checking.
1686    auto renderCmdEncoder =
1687                this->commandBuffer()->getRenderCommandEncoder(renderPassDesc, nullptr);
1688    if (!renderCmdEncoder) {
1689        return nullptr;
1690    }
1691
1692    // Bind pipeline
1693    renderCmdEncoder->setRenderPipelineState(renderPipeline->mtlPipelineState());
1694    this->commandBuffer()->addResource(sk_ref_sp(renderPipeline));
1695
1696    // Bind src as input texture
1697    renderCmdEncoder->setFragmentTexture(src->mtlTexture(), 0);
1698    // No sampler needed
1699    this->commandBuffer()->addGrSurface(sk_ref_sp<GrSurface>(src));
1700
1701    // Scissor and viewport should default to the size of the color attachment
1702
1703    // Update and bind uniform data
1704    int w = srcRect.width();
1705    int h = srcRect.height();
1706
1707    // dst rect edges in NDC (-1 to 1)
1708    int dw = dst->width();
1709    int dh = dst->height();
1710    float dx0 = 2.f * srcRect.fLeft / dw - 1.f;
1711    float dx1 = 2.f * (srcRect.fLeft + w) / dw - 1.f;
1712    float dy0 = 2.f * srcRect.fTop / dh - 1.f;
1713    float dy1 = 2.f * (srcRect.fTop + h) / dh - 1.f;
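    // Worked example: srcRect = {0, 0, 16, 16} in a 32x32 dst gives
    // dx0 = -1, dx1 = 0, dy0 = -1, dy1 = 0, i.e. one quadrant of NDC space.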
1714
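    // Uniform layout assumed by the MSAA-load vertex function: posXform packs
    // (scale.x, scale.y, translate.x, translate.y), presumably applied as
    // pos.xy * posXform.xy + posXform.zw; with textureSize and padding this fills
    // the 32 bytes passed as uniformSize below.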
1715    struct {
1716        float posXform[4];
1717        int textureSize[2];
1718        int pad[2];
1719    } uniData = {{dx1 - dx0, dy1 - dy0, dx0, dy0}, {dw, dh}, {0, 0}};
1720
1721    constexpr size_t uniformSize = 32;
1722    if (@available(macOS 10.11, iOS 8.3, tvOS 9.0, *)) {
1723        SkASSERT(uniformSize <= this->caps()->maxPushConstantsSize());
1724        renderCmdEncoder->setVertexBytes(&uniData, uniformSize, 0);
1725    } else {
1726        // setVertexBytes is unavailable: stage the uniform data through the ring buffer
1727        GrRingBuffer::Slice slice = this->uniformsRingBuffer()->suballocate(uniformSize);
1728        GrMtlBuffer* buffer = static_cast<GrMtlBuffer*>(slice.fBuffer);
1729        char* destPtr = static_cast<char*>(slice.fBuffer->map()) + slice.fOffset;
1730        memcpy(destPtr, &uniData, uniformSize);
1731
1732        renderCmdEncoder->setVertexBuffer(buffer->mtlBuffer(), slice.fOffset, 0);
1733    }
1734
1735    renderCmdEncoder->drawPrimitives(MTLPrimitiveTypeTriangleStrip, (NSUInteger)0, (NSUInteger)4);
1736
1737    return renderCmdEncoder;
1738}
1739
1740#if defined(GPU_TEST_UTILS)
1741void GrMtlGpu::testingOnly_startCapture() {
1742    if (@available(macOS 10.13, iOS 11.0, tvOS 11.0, *)) {
1743        // TODO: add Metal 3 interface as well
1744        MTLCaptureManager* captureManager = [MTLCaptureManager sharedCaptureManager];
1745        if (captureManager.isCapturing) {
1746            return;
1747        }
1748        if (@available(macOS 10.15, iOS 13.0, tvOS 13.0, *)) {
1749            MTLCaptureDescriptor* captureDescriptor = [[MTLCaptureDescriptor alloc] init];
1750            captureDescriptor.captureObject = fQueue;
1751
1752            NSError* error;
1753            if (![captureManager startCaptureWithDescriptor: captureDescriptor
1754                                                       error: &error]) {
1755                NSLog(@"Failed to start capture, error %@", error);
1756            }
1757        } else {
1758            [captureManager startCaptureWithCommandQueue: fQueue];
1759        }
1760    }
1761}
1762
1763void GrMtlGpu::testingOnly_stopCapture() {
1764    if (@available(macOS 10.13, iOS 11.0, tvOS 11.0, *)) {
1765        MTLCaptureManager* captureManager = [MTLCaptureManager sharedCaptureManager];
1766        if (captureManager.isCapturing) {
1767            [captureManager stopCapture];
1768        }
1769    }
1770}
1771#endif
1772
1773#ifdef SK_ENABLE_DUMP_GPU
1774#include "src/utils/SkJSONWriter.h"
1775void GrMtlGpu::onDumpJSON(SkJSONWriter* writer) const {
1776    // We are called by the base class, which has already called beginObject(). We choose to nest
1777    // all of our caps information in a named sub-object.
1778    writer->beginObject("Metal GPU");
1779
1780    writer->beginObject("Device");
1781    writer->appendCString("name", fDevice.name.UTF8String);
1782#ifdef SK_BUILD_FOR_MAC
1783    if (@available(macOS 10.11, *)) {
1784        writer->appendBool("isHeadless", fDevice.isHeadless);
1785        writer->appendBool("isLowPower", fDevice.isLowPower);
1786    }
1787    if (@available(macOS 10.13, *)) {
1788        writer->appendBool("isRemovable", fDevice.isRemovable);
1789    }
1790#endif
1791    if (@available(macOS 10.13, iOS 11.0, tvOS 11.0, *)) {
1792        writer->appendU64("registryID", fDevice.registryID);
1793    }
1794#if defined(SK_BUILD_FOR_MAC) && __MAC_OS_X_VERSION_MAX_ALLOWED >= 101500
1795    if (@available(macOS 10.15, *)) {
1796        switch (fDevice.location) {
1797            case MTLDeviceLocationBuiltIn:
1798                writer->appendNString("location", "builtIn");
1799                break;
1800            case MTLDeviceLocationSlot:
1801                writer->appendNString("location", "slot");
1802                break;
1803            case MTLDeviceLocationExternal:
1804                writer->appendNString("location", "external");
1805                break;
1806            case MTLDeviceLocationUnspecified:
1807                writer->appendNString("location", "unspecified");
1808                break;
1809            default:
1810                writer->appendNString("location", "unknown");
1811                break;
1812        }
1813        writer->appendU64("locationNumber", fDevice.locationNumber);
1814        writer->appendU64("maxTransferRate", fDevice.maxTransferRate);
1815    }
1816#endif  // SK_BUILD_FOR_MAC && __MAC_OS_X_VERSION_MAX_ALLOWED >= 101500
1817#if __MAC_OS_X_VERSION_MAX_ALLOWED >= 101500 || __IPHONE_OS_VERSION_MAX_ALLOWED >= 130000
1818    if (@available(macOS 10.15, iOS 13.0, tvOS 13.0, *)) {
1819        writer->appendBool("hasUnifiedMemory", fDevice.hasUnifiedMemory);
1820    }
1821#endif
1822#ifdef SK_BUILD_FOR_MAC
1823#if __MAC_OS_X_VERSION_MAX_ALLOWED >= 101500
1824    if (@available(macOS 10.15, *)) {
1825        writer->appendU64("peerGroupID", fDevice.peerGroupID);
1826        writer->appendU32("peerCount", fDevice.peerCount);
1827        writer->appendU32("peerIndex", fDevice.peerIndex);
1828    }
1829#endif
1830    if (@available(macOS 10.12, *)) {
1831        writer->appendU64("recommendedMaxWorkingSetSize", fDevice.recommendedMaxWorkingSetSize);
1832    }
1833#endif  // SK_BUILD_FOR_MAC
1834    if (@available(macOS 10.13, iOS 11.0, tvOS 11.0, *)) {
1835        writer->appendU64("currentAllocatedSize", fDevice.currentAllocatedSize);
1836        writer->appendU64("maxThreadgroupMemoryLength", fDevice.maxThreadgroupMemoryLength);
1837    }
1838
1839    if (@available(macOS 10.11, iOS 9.0, tvOS 9.0, *)) {
1840        writer->beginObject("maxThreadsPerThreadgroup");
1841        writer->appendU64("width", fDevice.maxThreadsPerThreadgroup.width);
1842        writer->appendU64("height", fDevice.maxThreadsPerThreadgroup.height);
1843        writer->appendU64("depth", fDevice.maxThreadsPerThreadgroup.depth);
1844        writer->endObject();
1845    }
1846
1847    if (@available(macOS 10.13, iOS 11.0, tvOS 11.0, *)) {
1848        writer->appendBool("areProgrammableSamplePositionsSupported",
1849                           fDevice.areProgrammableSamplePositionsSupported);
1850        writer->appendBool("areRasterOrderGroupsSupported",
1851                           fDevice.areRasterOrderGroupsSupported);
1852    }
1853#ifdef SK_BUILD_FOR_MAC
1854    if (@available(macOS 10.11, *)) {
1855        writer->appendBool("isDepth24Stencil8PixelFormatSupported",
1856                           fDevice.isDepth24Stencil8PixelFormatSupported);
1858    }
1859#if __MAC_OS_X_VERSION_MAX_ALLOWED >= 101500
1860    if (@available(macOS 10.15, *)) {
1861        writer->appendBool("areBarycentricCoordsSupported",
1862                           fDevice.areBarycentricCoordsSupported);
1863        writer->appendBool("supportsShaderBarycentricCoordinates",
1864                           fDevice.supportsShaderBarycentricCoordinates);
1865    }
1866#endif
1867#endif  // SK_BUILD_FOR_MAC
1868    if (@available(macOS 10.14, iOS 12.0, tvOS 12.0, *)) {
1869        writer->appendU64("maxBufferLength", fDevice.maxBufferLength);
1870    }
1871    if (@available(macOS 10.13, iOS 11.0, tvOS 11.0, *)) {
1872        switch (fDevice.readWriteTextureSupport) {
1873            case MTLReadWriteTextureTier1:
1874                writer->appendNString("readWriteTextureSupport", "tier1");
1875                break;
1876            case MTLReadWriteTextureTier2:
1877                writer->appendNString("readWriteTextureSupport", "tier2");
1878                break;
1879            case MTLReadWriteTextureTierNone:
1880                writer->appendNString("readWriteTextureSupport", "tierNone");
1881                break;
1882            default:
1883                writer->appendNString("readWriteTextureSupport", "unknown");
1884                break;
1885        }
1886        switch (fDevice.argumentBuffersSupport) {
1887            case MTLArgumentBuffersTier1:
1888                writer->appendNString("argumentBuffersSupport", "tier1");
1889                break;
1890            case MTLArgumentBuffersTier2:
1891                writer->appendNString("argumentBuffersSupport", "tier2");
1892                break;
1893            default:
1894                writer->appendNString("argumentBuffersSupport", "unknown");
1895                break;
1896        }
1897    }
1898    if (@available(macOS 10.14, iOS 12.0, tvOS 12.0, *)) {
1899        writer->appendU64("maxArgumentBufferSamplerCount", fDevice.maxArgumentBufferSamplerCount);
1900    }
1901#ifdef SK_BUILD_FOR_IOS
1902    if (@available(iOS 13.0, tvOS 13.0, *)) {
1903        writer->appendU64("sparseTileSizeInBytes", fDevice.sparseTileSizeInBytes);
1904    }
1905#endif
1906    writer->endObject();
1907
1908    writer->appendCString("queue", fQueue.label.UTF8String);
1909    writer->appendBool("disconnected", fDisconnected);
1910
1911    writer->endObject();
1912}
1913#endif
1914
1915GR_NORETAIN_END
1916