/*
 * Copyright 2021 Google LLC
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/graphite/mtl/MtlCommandBuffer.h"

#include "include/gpu/graphite/BackendSemaphore.h"
#include "include/gpu/graphite/mtl/MtlGraphiteTypes.h"
#include "src/gpu/graphite/ContextUtils.h"
#include "src/gpu/graphite/Log.h"
#include "src/gpu/graphite/RenderPassDesc.h"
#include "src/gpu/graphite/TextureProxy.h"
#include "src/gpu/graphite/UniformManager.h"
#include "src/gpu/graphite/compute/DispatchGroup.h"
#include "src/gpu/graphite/mtl/MtlBlitCommandEncoder.h"
#include "src/gpu/graphite/mtl/MtlBuffer.h"
#include "src/gpu/graphite/mtl/MtlCaps.h"
#include "src/gpu/graphite/mtl/MtlComputeCommandEncoder.h"
#include "src/gpu/graphite/mtl/MtlComputePipeline.h"
#include "src/gpu/graphite/mtl/MtlGraphicsPipeline.h"
#include "src/gpu/graphite/mtl/MtlRenderCommandEncoder.h"
#include "src/gpu/graphite/mtl/MtlSampler.h"
#include "src/gpu/graphite/mtl/MtlSharedContext.h"
#include "src/gpu/graphite/mtl/MtlTexture.h"
#include "src/gpu/mtl/MtlUtilsPriv.h"

namespace skgpu::graphite {

std::unique_ptr<MtlCommandBuffer> MtlCommandBuffer::Make(id<MTLCommandQueue> queue,
                                                         const MtlSharedContext* sharedContext,
                                                         MtlResourceProvider* resourceProvider) {
    auto commandBuffer = std::unique_ptr<MtlCommandBuffer>(
            new MtlCommandBuffer(queue, sharedContext, resourceProvider));
    if (!commandBuffer) {
        return nullptr;
    }
    if (!commandBuffer->createNewMTLCommandBuffer()) {
        return nullptr;
    }
    return commandBuffer;
}

MtlCommandBuffer::MtlCommandBuffer(id<MTLCommandQueue> queue,
                                   const MtlSharedContext* sharedContext,
                                   MtlResourceProvider* resourceProvider)
        : CommandBuffer(Protected::kNo)  // Metal doesn't support protected memory
        , fQueue(queue)
        , fSharedContext(sharedContext)
        , fResourceProvider(resourceProvider) {}

MtlCommandBuffer::~MtlCommandBuffer() {
    SkASSERT(!fActiveRenderCommandEncoder);
    SkASSERT(!fActiveComputeCommandEncoder);
    SkASSERT(!fActiveBlitCommandEncoder);
}

bool MtlCommandBuffer::setNewCommandBufferResources() {
    return this->createNewMTLCommandBuffer();
}

bool MtlCommandBuffer::createNewMTLCommandBuffer() {
    SkASSERT(fCommandBuffer == nil);

    // Inserting a pool here so the autorelease occurs when we return and the
    // only remaining ref is the retain below.
    @autoreleasepool {
        if (@available(macOS 11.0, iOS 14.0, tvOS 14.0, *)) {
            sk_cfp<MTLCommandBufferDescriptor*> desc([[MTLCommandBufferDescriptor alloc] init]);
            (*desc).retainedReferences = NO;
#ifdef SK_ENABLE_MTL_DEBUG_INFO
            (*desc).errorOptions = MTLCommandBufferErrorOptionEncoderExecutionStatus;
#endif
            // We add a retain here because the command buffer is set to autorelease (not alloc or copy)
            fCommandBuffer.reset([[fQueue commandBufferWithDescriptor:desc.get()] retain]);
        } else {
            // We add a retain here because the command buffer is set to autorelease (not alloc or copy)
            fCommandBuffer.reset([[fQueue commandBufferWithUnretainedReferences] retain]);
        }
    }
    return fCommandBuffer != nil;
}

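// Commits the recorded work to the GPU queue. Metal reports many execution errors
// asynchronously after scheduling, so the status check below only catches failures
// that are already known when commit returns.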
bool MtlCommandBuffer::commit() {
    SkASSERT(!fActiveRenderCommandEncoder);
    SkASSERT(!fActiveComputeCommandEncoder);
    this->endBlitCommandEncoder();
    [(*fCommandBuffer) commit];

    if ((*fCommandBuffer).status == MTLCommandBufferStatusError) {
        NSString* description = (*fCommandBuffer).error.localizedDescription;
        const char* errorString = [description UTF8String];
        SKGPU_LOG_E("Failure submitting command buffer: %s", errorString);
    }

    return ((*fCommandBuffer).status != MTLCommandBufferStatusError);
}

void MtlCommandBuffer::onResetCommandBuffer() {
    fCommandBuffer.reset();
    fActiveRenderCommandEncoder.reset();
    fActiveComputeCommandEncoder.reset();
    fActiveBlitCommandEncoder.reset();
    fCurrentIndexBuffer = nil;
    fCurrentIndexBufferOffset = 0;
}

void MtlCommandBuffer::addWaitSemaphores(size_t numWaitSemaphores,
                                         const BackendSemaphore* waitSemaphores) {
    if (!waitSemaphores) {
        SkASSERT(numWaitSemaphores == 0);
        return;
    }

    // Can only insert events with no active encoder
    SkASSERT(!fActiveRenderCommandEncoder);
    SkASSERT(!fActiveComputeCommandEncoder);
    this->endBlitCommandEncoder();
    if (@available(macOS 10.14, iOS 12.0, tvOS 12.0, *)) {
        for (size_t i = 0; i < numWaitSemaphores; ++i) {
            auto semaphore = waitSemaphores[i];
            if (semaphore.isValid() && semaphore.backend() == BackendApi::kMetal) {
                id<MTLEvent> mtlEvent =
                        (__bridge id<MTLEvent>)BackendSemaphores::GetMtlEvent(semaphore);
                [(*fCommandBuffer) encodeWaitForEvent:mtlEvent
                                                value:BackendSemaphores::GetMtlValue(semaphore)];
            }
        }
    }
}

void MtlCommandBuffer::addSignalSemaphores(size_t numSignalSemaphores,
                                           const BackendSemaphore* signalSemaphores) {
    if (!signalSemaphores) {
        SkASSERT(numSignalSemaphores == 0);
        return;
    }

    // Can only insert events with no active encoder
    SkASSERT(!fActiveRenderCommandEncoder);
    SkASSERT(!fActiveComputeCommandEncoder);
    this->endBlitCommandEncoder();

    if (@available(macOS 10.14, iOS 12.0, tvOS 12.0, *)) {
        for (size_t i = 0; i < numSignalSemaphores; ++i) {
            auto semaphore = signalSemaphores[i];
            if (semaphore.isValid() && semaphore.backend() == BackendApi::kMetal) {
                id<MTLEvent> mtlEvent =
                        (__bridge id<MTLEvent>)BackendSemaphores::GetMtlEvent(semaphore);
                [(*fCommandBuffer) encodeSignalEvent:mtlEvent
                                               value:BackendSemaphores::GetMtlValue(semaphore)];
            }
        }
    }
}

bool MtlCommandBuffer::onAddRenderPass(const RenderPassDesc& renderPassDesc,
                                       SkIRect renderPassBounds,
                                       const Texture* colorTexture,
                                       const Texture* resolveTexture,
                                       const Texture* depthStencilTexture,
                                       SkIRect viewport,
                                       const DrawPassList& drawPasses) {
    if (!this->beginRenderPass(renderPassDesc, colorTexture, resolveTexture, depthStencilTexture)) {
        return false;
    }

    this->setViewport(viewport.x(), viewport.y(), viewport.width(), viewport.height(), 0, 1);
    this->updateIntrinsicUniforms(viewport);

    for (const auto& drawPass : drawPasses) {
        this->addDrawPass(drawPass.get());
    }

    this->endRenderPass();
    return true;
}

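// For each dispatch in each DispatchGroup: bind the compute pipeline and its
// buffer/texture/sampler resources, reserve any requested threadgroup memory, and
// then issue either a direct or an indirect threadgroup dispatch.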
bool MtlCommandBuffer::onAddComputePass(DispatchGroupSpan groups) {
    this->beginComputePass();
    for (const auto& group : groups) {
        group->addResourceRefs(this);
        for (const auto& dispatch : group->dispatches()) {
            this->bindComputePipeline(group->getPipeline(dispatch.fPipelineIndex));
            for (const ResourceBinding& binding : dispatch.fBindings) {
                if (const BindBufferInfo* buffer = std::get_if<BindBufferInfo>(&binding.fResource)) {
                    this->bindBuffer(buffer->fBuffer, buffer->fOffset, binding.fIndex);
                } else if (const TextureIndex* texIdx =
                                   std::get_if<TextureIndex>(&binding.fResource)) {
                    SkASSERT(texIdx);
                    this->bindTexture(group->getTexture(texIdx->fValue), binding.fIndex);
                } else {
                    const SamplerIndex* samplerIdx = std::get_if<SamplerIndex>(&binding.fResource);
                    SkASSERT(samplerIdx);
                    this->bindSampler(group->getSampler(samplerIdx->fValue), binding.fIndex);
                }
            }
            SkASSERT(fActiveComputeCommandEncoder);
            for (const ComputeStep::WorkgroupBufferDesc& wgBuf : dispatch.fWorkgroupBuffers) {
                fActiveComputeCommandEncoder->setThreadgroupMemoryLength(
                        SkAlignTo(wgBuf.size, 16),
                        wgBuf.index);
            }
            if (const WorkgroupSize* globalSize =
                        std::get_if<WorkgroupSize>(&dispatch.fGlobalSizeOrIndirect)) {
                this->dispatchThreadgroups(*globalSize, dispatch.fLocalSize);
            } else {
                SkASSERT(std::holds_alternative<BindBufferInfo>(dispatch.fGlobalSizeOrIndirect));
                const BindBufferInfo& indirect =
                        *std::get_if<BindBufferInfo>(&dispatch.fGlobalSizeOrIndirect);
                this->dispatchThreadgroupsIndirect(
                        dispatch.fLocalSize, indirect.fBuffer, indirect.fOffset);
            }
        }
    }
    this->endComputePass();
    return true;
}

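// Translates the RenderPassDesc into an MTLRenderPassDescriptor and creates the
// render command encoder. Metal has no load action that reads from the resolve
// texture, so when that is requested we seed the MSAA attachment ourselves by
// drawing the resolve texture's contents as the first commands of the pass.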
bool MtlCommandBuffer::beginRenderPass(const RenderPassDesc& renderPassDesc,
                                       const Texture* colorTexture,
                                       const Texture* resolveTexture,
                                       const Texture* depthStencilTexture) {
    SkASSERT(!fActiveRenderCommandEncoder);
    SkASSERT(!fActiveComputeCommandEncoder);
    this->endBlitCommandEncoder();

    const static MTLLoadAction mtlLoadAction[] {
        MTLLoadActionLoad,
        MTLLoadActionClear,
        MTLLoadActionDontCare
    };
    static_assert((int)LoadOp::kLoad == 0);
    static_assert((int)LoadOp::kClear == 1);
    static_assert((int)LoadOp::kDiscard == 2);
    static_assert(std::size(mtlLoadAction) == kLoadOpCount);

    const static MTLStoreAction mtlStoreAction[] {
        MTLStoreActionStore,
        MTLStoreActionDontCare
    };
    static_assert((int)StoreOp::kStore == 0);
    static_assert((int)StoreOp::kDiscard == 1);
    static_assert(std::size(mtlStoreAction) == kStoreOpCount);

    sk_cfp<MTLRenderPassDescriptor*> descriptor([[MTLRenderPassDescriptor alloc] init]);
    // Set up color attachment.
    auto& colorInfo = renderPassDesc.fColorAttachment;
    bool loadMSAAFromResolve = false;
    if (colorTexture) {
        // TODO: check Texture matches RenderPassDesc
        auto colorAttachment = (*descriptor).colorAttachments[0];
        colorAttachment.texture = ((const MtlTexture*)colorTexture)->mtlTexture();
        const std::array<float, 4>& clearColor = renderPassDesc.fClearColor;
        colorAttachment.clearColor =
                MTLClearColorMake(clearColor[0], clearColor[1], clearColor[2], clearColor[3]);
        colorAttachment.loadAction = mtlLoadAction[static_cast<int>(colorInfo.fLoadOp)];
        colorAttachment.storeAction = mtlStoreAction[static_cast<int>(colorInfo.fStoreOp)];
        // Set up resolve attachment
        if (resolveTexture) {
            SkASSERT(renderPassDesc.fColorResolveAttachment.fStoreOp == StoreOp::kStore);
            // TODO: check Texture matches RenderPassDesc
            colorAttachment.resolveTexture = ((const MtlTexture*)resolveTexture)->mtlTexture();
            // Inclusion of a resolve texture implies the client wants to finish the
            // renderpass with a resolve.
            if (@available(macOS 10.12, iOS 10.0, tvOS 10.0, *)) {
                SkASSERT(colorAttachment.storeAction == MTLStoreActionDontCare);
                colorAttachment.storeAction = MTLStoreActionMultisampleResolve;
            } else {
                // We expect at least Metal 2
                // TODO: Add error output
                SkASSERT(false);
            }
            // But it also means we have to load the resolve texture into the MSAA color attachment
            loadMSAAFromResolve = renderPassDesc.fColorResolveAttachment.fLoadOp == LoadOp::kLoad;
            // TODO: If the color resolve texture is read-only we can use a private (vs. memoryless)
            // msaa attachment that's coupled to the framebuffer and the StoreAndMultisampleResolve
            // action instead of loading as a draw.
        }
    }

    // Set up stencil/depth attachment
    auto& depthStencilInfo = renderPassDesc.fDepthStencilAttachment;
    if (depthStencilTexture) {
        // TODO: check Texture matches RenderPassDesc
        id<MTLTexture> mtlTexture = ((const MtlTexture*)depthStencilTexture)->mtlTexture();
        if (MtlFormatIsDepth(mtlTexture.pixelFormat)) {
            auto depthAttachment = (*descriptor).depthAttachment;
            depthAttachment.texture = mtlTexture;
            depthAttachment.clearDepth = renderPassDesc.fClearDepth;
            depthAttachment.loadAction =
                     mtlLoadAction[static_cast<int>(depthStencilInfo.fLoadOp)];
            depthAttachment.storeAction =
                     mtlStoreAction[static_cast<int>(depthStencilInfo.fStoreOp)];
        }
        if (MtlFormatIsStencil(mtlTexture.pixelFormat)) {
            auto stencilAttachment = (*descriptor).stencilAttachment;
            stencilAttachment.texture = mtlTexture;
            stencilAttachment.clearStencil = renderPassDesc.fClearStencil;
            stencilAttachment.loadAction =
                     mtlLoadAction[static_cast<int>(depthStencilInfo.fLoadOp)];
            stencilAttachment.storeAction =
                     mtlStoreAction[static_cast<int>(depthStencilInfo.fStoreOp)];
        }
    } else {
        SkASSERT(!depthStencilInfo.fTextureInfo.isValid());
    }

    fActiveRenderCommandEncoder = MtlRenderCommandEncoder::Make(fSharedContext,
                                                                fCommandBuffer.get(),
                                                                descriptor.get());
    this->trackResource(fActiveRenderCommandEncoder);

    if (loadMSAAFromResolve) {
        // Manually load the contents of the resolve texture into the MSAA attachment as a draw,
        // so the actual load op for the MSAA attachment had better have been discard.
        SkASSERT(colorInfo.fLoadOp == LoadOp::kDiscard);
        auto loadPipeline = fResourceProvider->findOrCreateLoadMSAAPipeline(renderPassDesc);
        if (!loadPipeline) {
            SKGPU_LOG_E("Unable to create pipeline to load resolve texture into MSAA attachment");
            return false;
        }
        this->bindGraphicsPipeline(loadPipeline.get());
        // The load msaa pipeline takes no uniforms, no vertex/instance attributes and only uses
        // one texture that does not require a sampler.
        fActiveRenderCommandEncoder->setFragmentTexture(
                ((const MtlTexture*) resolveTexture)->mtlTexture(), 0);
        this->draw(PrimitiveType::kTriangleStrip, 0, 4);
    }

    return true;
}

void MtlCommandBuffer::endRenderPass() {
    SkASSERT(fActiveRenderCommandEncoder);
    fActiveRenderCommandEncoder->endEncoding();
    fActiveRenderCommandEncoder.reset();
    fDrawIsOffscreen = false;
}

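// Replays a recorded DrawPass onto the active render command encoder, translating
// each recorded command into the corresponding encoder call.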
void MtlCommandBuffer::addDrawPass(const DrawPass* drawPass) {
    SkIRect replayPassBounds = drawPass->bounds().makeOffset(fReplayTranslation.x(),
                                                             fReplayTranslation.y());
    if (!SkIRect::Intersects(replayPassBounds, SkIRect::MakeSize(fColorAttachmentSize))) {
        // The entire DrawPass is offscreen given the replay translation so skip adding any
        // commands. When the DrawPass is partially offscreen individual draw commands will be
        // culled while preserving state changing commands.
        return;
    }

    drawPass->addResourceRefs(this);

    for (auto[type, cmdPtr] : drawPass->commands()) {
        // Skip draw commands if they'd be offscreen.
        if (fDrawIsOffscreen) {
            switch (type) {
                case DrawPassCommands::Type::kDraw:
                case DrawPassCommands::Type::kDrawIndexed:
                case DrawPassCommands::Type::kDrawInstanced:
                case DrawPassCommands::Type::kDrawIndexedInstanced:
                    continue;
                default:
                    break;
            }
        }

        switch (type) {
            case DrawPassCommands::Type::kBindGraphicsPipeline: {
                auto bgp = static_cast<DrawPassCommands::BindGraphicsPipeline*>(cmdPtr);
                this->bindGraphicsPipeline(drawPass->getPipeline(bgp->fPipelineIndex));
                break;
            }
            case DrawPassCommands::Type::kSetBlendConstants: {
                auto sbc = static_cast<DrawPassCommands::SetBlendConstants*>(cmdPtr);
                this->setBlendConstants(sbc->fBlendConstants);
                break;
            }
            case DrawPassCommands::Type::kBindUniformBuffer: {
                auto bub = static_cast<DrawPassCommands::BindUniformBuffer*>(cmdPtr);
                this->bindUniformBuffer(bub->fInfo, bub->fSlot);
                break;
            }
            case DrawPassCommands::Type::kBindDrawBuffers: {
                auto bdb = static_cast<DrawPassCommands::BindDrawBuffers*>(cmdPtr);
                this->bindDrawBuffers(
                        bdb->fVertices, bdb->fInstances, bdb->fIndices, bdb->fIndirect);
                break;
            }
            case DrawPassCommands::Type::kBindTexturesAndSamplers: {
                auto bts = static_cast<DrawPassCommands::BindTexturesAndSamplers*>(cmdPtr);
                for (int j = 0; j < bts->fNumTexSamplers; ++j) {
                    this->bindTextureAndSampler(drawPass->getTexture(bts->fTextureIndices[j]),
                                                drawPass->getSampler(bts->fSamplerIndices[j]),
                                                j);
                }
                break;
            }
            case DrawPassCommands::Type::kSetScissor: {
                auto ss = static_cast<DrawPassCommands::SetScissor*>(cmdPtr);
                this->setScissor(ss->fScissor);
                break;
            }
            case DrawPassCommands::Type::kDraw: {
                auto draw = static_cast<DrawPassCommands::Draw*>(cmdPtr);
                this->draw(draw->fType, draw->fBaseVertex, draw->fVertexCount);
                break;
            }
            case DrawPassCommands::Type::kDrawIndexed: {
                auto draw = static_cast<DrawPassCommands::DrawIndexed*>(cmdPtr);
                this->drawIndexed(draw->fType,
                                  draw->fBaseIndex,
                                  draw->fIndexCount,
                                  draw->fBaseVertex);
                break;
            }
            case DrawPassCommands::Type::kDrawInstanced: {
                auto draw = static_cast<DrawPassCommands::DrawInstanced*>(cmdPtr);
                this->drawInstanced(draw->fType,
                                    draw->fBaseVertex,
                                    draw->fVertexCount,
                                    draw->fBaseInstance,
                                    draw->fInstanceCount);
                break;
            }
            case DrawPassCommands::Type::kDrawIndexedInstanced: {
                auto draw = static_cast<DrawPassCommands::DrawIndexedInstanced*>(cmdPtr);
                this->drawIndexedInstanced(draw->fType,
                                           draw->fBaseIndex,
                                           draw->fIndexCount,
                                           draw->fBaseVertex,
                                           draw->fBaseInstance,
                                           draw->fInstanceCount);
                break;
            }
            case DrawPassCommands::Type::kDrawIndirect: {
                auto draw = static_cast<DrawPassCommands::DrawIndirect*>(cmdPtr);
                this->drawIndirect(draw->fType);
                break;
            }
            case DrawPassCommands::Type::kDrawIndexedIndirect: {
                auto draw = static_cast<DrawPassCommands::DrawIndexedIndirect*>(cmdPtr);
                this->drawIndexedIndirect(draw->fType);
                break;
            }
        }
    }
}

MtlBlitCommandEncoder* MtlCommandBuffer::getBlitCommandEncoder() {
    if (fActiveBlitCommandEncoder) {
        return fActiveBlitCommandEncoder.get();
    }

    fActiveBlitCommandEncoder = MtlBlitCommandEncoder::Make(fSharedContext, fCommandBuffer.get());

    if (!fActiveBlitCommandEncoder) {
        return nullptr;
    }

    // We add the ref on the command buffer for the BlitCommandEncoder now so that we don't need
    // to add a ref for every copy we do.
    this->trackResource(fActiveBlitCommandEncoder);
    return fActiveBlitCommandEncoder.get();
}

void MtlCommandBuffer::endBlitCommandEncoder() {
    if (fActiveBlitCommandEncoder) {
        fActiveBlitCommandEncoder->endEncoding();
        fActiveBlitCommandEncoder.reset();
    }
}

void MtlCommandBuffer::bindGraphicsPipeline(const GraphicsPipeline* graphicsPipeline) {
    SkASSERT(fActiveRenderCommandEncoder);

    auto mtlPipeline = static_cast<const MtlGraphicsPipeline*>(graphicsPipeline);
    auto pipelineState = mtlPipeline->mtlPipelineState();
    fActiveRenderCommandEncoder->setRenderPipelineState(pipelineState);
    auto depthStencilState = mtlPipeline->mtlDepthStencilState();
    fActiveRenderCommandEncoder->setDepthStencilState(depthStencilState);
    uint32_t stencilRefValue = mtlPipeline->stencilReferenceValue();
    fActiveRenderCommandEncoder->setStencilReferenceValue(stencilRefValue);

    if (graphicsPipeline->dstReadRequirement() == DstReadRequirement::kTextureCopy) {
        // The last texture binding is reserved for the dstCopy texture, which is not included in
        // the list on each BindTexturesAndSamplers command. We can set it once now and any
        // subsequent BindTexturesAndSamplers commands in a DrawPass will set the other N-1.
        SkASSERT(fDstCopy.first && fDstCopy.second);
        const int textureIndex = graphicsPipeline->numFragTexturesAndSamplers() - 1;
        this->bindTextureAndSampler(fDstCopy.first, fDstCopy.second, textureIndex);
    }
}

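// Each UniformSlot maps to a fixed buffer binding index defined by
// MtlGraphicsPipeline. The buffer is bound to both the vertex and fragment stages
// so that either shader can read the uniform data.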
void MtlCommandBuffer::bindUniformBuffer(const BindBufferInfo& info, UniformSlot slot) {
    SkASSERT(fActiveRenderCommandEncoder);

    id<MTLBuffer> mtlBuffer = info.fBuffer ?
            static_cast<const MtlBuffer*>(info.fBuffer)->mtlBuffer() : nullptr;

    unsigned int bufferIndex;
    switch(slot) {
        case UniformSlot::kRenderStep:
            bufferIndex = MtlGraphicsPipeline::kRenderStepUniformBufferIndex;
            break;
        case UniformSlot::kPaint:
            bufferIndex = MtlGraphicsPipeline::kPaintUniformBufferIndex;
            break;
        case UniformSlot::kGradient:
            bufferIndex = MtlGraphicsPipeline::kGradientBufferIndex;
            break;
    }

    fActiveRenderCommandEncoder->setVertexBuffer(mtlBuffer, info.fOffset, bufferIndex);
    fActiveRenderCommandEncoder->setFragmentBuffer(mtlBuffer, info.fOffset, bufferIndex);
}

void MtlCommandBuffer::bindDrawBuffers(const BindBufferInfo& vertices,
                                       const BindBufferInfo& instances,
                                       const BindBufferInfo& indices,
                                       const BindBufferInfo& indirect) {
    this->bindVertexBuffers(vertices.fBuffer,
                            vertices.fOffset,
                            instances.fBuffer,
                            instances.fOffset);
    this->bindIndexBuffer(indices.fBuffer, indices.fOffset);
    this->bindIndirectBuffer(indirect.fBuffer, indirect.fOffset);
}

void MtlCommandBuffer::bindVertexBuffers(const Buffer* vertexBuffer,
                                         size_t vertexOffset,
                                         const Buffer* instanceBuffer,
                                         size_t instanceOffset) {
    SkASSERT(fActiveRenderCommandEncoder);

    if (vertexBuffer) {
        id<MTLBuffer> mtlBuffer = static_cast<const MtlBuffer*>(vertexBuffer)->mtlBuffer();
        // Metal requires buffer offsets to be aligned to the data type, which is at most 4 bytes
        // since we use [[attribute]] to automatically unpack float components into SIMD arrays.
        SkASSERT((vertexOffset & 0b11) == 0);
        fActiveRenderCommandEncoder->setVertexBuffer(mtlBuffer, vertexOffset,
                                                     MtlGraphicsPipeline::kVertexBufferIndex);
    }
    if (instanceBuffer) {
        id<MTLBuffer> mtlBuffer = static_cast<const MtlBuffer*>(instanceBuffer)->mtlBuffer();
        SkASSERT((instanceOffset & 0b11) == 0);
        fActiveRenderCommandEncoder->setVertexBuffer(mtlBuffer, instanceOffset,
                                                     MtlGraphicsPipeline::kInstanceBufferIndex);
    }
}

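// Metal has no encoder-level index buffer binding; cache the buffer and offset here
// and pass them to drawIndexedPrimitives at draw time.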
void MtlCommandBuffer::bindIndexBuffer(const Buffer* indexBuffer, size_t offset) {
    if (indexBuffer) {
        fCurrentIndexBuffer = static_cast<const MtlBuffer*>(indexBuffer)->mtlBuffer();
        fCurrentIndexBufferOffset = offset;
    } else {
        fCurrentIndexBuffer = nil;
        fCurrentIndexBufferOffset = 0;
    }
}

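// As with the index buffer, the indirect buffer is cached here and consumed by the
// indirect draw calls below.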
void MtlCommandBuffer::bindIndirectBuffer(const Buffer* indirectBuffer, size_t offset) {
    if (indirectBuffer) {
        fCurrentIndirectBuffer = static_cast<const MtlBuffer*>(indirectBuffer)->mtlBuffer();
        fCurrentIndirectBufferOffset = offset;
    } else {
        fCurrentIndirectBuffer = nil;
        fCurrentIndirectBufferOffset = 0;
    }
}

void MtlCommandBuffer::bindTextureAndSampler(const Texture* texture,
                                             const Sampler* sampler,
                                             unsigned int bindIndex) {
    SkASSERT(texture && sampler);
    SkASSERT(fActiveRenderCommandEncoder);

    id<MTLTexture> mtlTexture = ((const MtlTexture*)texture)->mtlTexture();
    id<MTLSamplerState> mtlSamplerState = ((const MtlSampler*)sampler)->mtlSamplerState();
    fActiveRenderCommandEncoder->setFragmentTexture(mtlTexture, bindIndex);
    fActiveRenderCommandEncoder->setFragmentSamplerState(mtlSamplerState, bindIndex);
}

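// If the scissor is empty after applying the replay translation and clip, nothing
// drawn under it can be visible; record that so addDrawPass can skip draw commands.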
void MtlCommandBuffer::setScissor(const Scissor& scissor) {
    SkASSERT(fActiveRenderCommandEncoder);

    SkIRect rect = scissor.getRect(fReplayTranslation, fReplayClip);
    fDrawIsOffscreen = rect.isEmpty();

    fActiveRenderCommandEncoder->setScissorRect({
            static_cast<unsigned int>(rect.x()),
            static_cast<unsigned int>(rect.y()),
            static_cast<unsigned int>(rect.width()),
            static_cast<unsigned int>(rect.height()),
    });
}

void MtlCommandBuffer::setViewport(float x, float y, float width, float height,
                                   float minDepth, float maxDepth) {
    SkASSERT(fActiveRenderCommandEncoder);
    MTLViewport viewport = {x,
                            y,
                            width,
                            height,
                            minDepth,
                            maxDepth};
    fActiveRenderCommandEncoder->setViewport(viewport);
}

void MtlCommandBuffer::updateIntrinsicUniforms(SkIRect viewport) {
    UniformManager intrinsicValues{Layout::kMetal};
    CollectIntrinsicUniforms(fSharedContext->caps(), viewport, fDstCopyBounds, &intrinsicValues);
    SkSpan<const char> bytes = intrinsicValues.finish();
    fActiveRenderCommandEncoder->setVertexBytes(
            bytes.data(), bytes.size_bytes(), MtlGraphicsPipeline::kIntrinsicUniformBufferIndex);
    fActiveRenderCommandEncoder->setFragmentBytes(
            bytes.data(), bytes.size_bytes(), MtlGraphicsPipeline::kIntrinsicUniformBufferIndex);
}

void MtlCommandBuffer::setBlendConstants(float* blendConstants) {
    SkASSERT(fActiveRenderCommandEncoder);

    fActiveRenderCommandEncoder->setBlendColor(blendConstants);
}

static MTLPrimitiveType graphite_to_mtl_primitive(PrimitiveType primitiveType) {
    const static MTLPrimitiveType mtlPrimitiveType[] {
        MTLPrimitiveTypeTriangle,
        MTLPrimitiveTypeTriangleStrip,
        MTLPrimitiveTypePoint,
    };
    static_assert((int)PrimitiveType::kTriangles == 0);
    static_assert((int)PrimitiveType::kTriangleStrip == 1);
    static_assert((int)PrimitiveType::kPoints == 2);

    SkASSERT(primitiveType <= PrimitiveType::kPoints);
    return mtlPrimitiveType[static_cast<int>(primitiveType)];
}

void MtlCommandBuffer::draw(PrimitiveType type,
                            unsigned int baseVertex,
                            unsigned int vertexCount) {
    SkASSERT(fActiveRenderCommandEncoder);

    auto mtlPrimitiveType = graphite_to_mtl_primitive(type);

    fActiveRenderCommandEncoder->drawPrimitives(mtlPrimitiveType, baseVertex, vertexCount);
}

void MtlCommandBuffer::drawIndexed(PrimitiveType type, unsigned int baseIndex,
                                   unsigned int indexCount, unsigned int baseVertex) {
    SkASSERT(fActiveRenderCommandEncoder);

    if (@available(macOS 10.11, iOS 9.0, tvOS 9.0, *)) {
        auto mtlPrimitiveType = graphite_to_mtl_primitive(type);
        size_t indexOffset = fCurrentIndexBufferOffset + sizeof(uint16_t) * baseIndex;
        // Use the "instance" variant with a count of 1 so that we can pass in a base vertex
        // instead of rebinding a vertex buffer offset.
        fActiveRenderCommandEncoder->drawIndexedPrimitives(mtlPrimitiveType, indexCount,
                                                           MTLIndexTypeUInt16, fCurrentIndexBuffer,
                                                           indexOffset, 1, baseVertex, 0);

    } else {
        SKGPU_LOG_E("Skipping unsupported draw call.");
    }
}

void MtlCommandBuffer::drawInstanced(PrimitiveType type, unsigned int baseVertex,
                                     unsigned int vertexCount, unsigned int baseInstance,
                                     unsigned int instanceCount) {
    SkASSERT(fActiveRenderCommandEncoder);

    auto mtlPrimitiveType = graphite_to_mtl_primitive(type);

    // Note: the wrapper takes instanceCount before baseInstance, matching Metal's
    // drawPrimitives:vertexStart:vertexCount:instanceCount:baseInstance: ordering.
    fActiveRenderCommandEncoder->drawPrimitives(mtlPrimitiveType, baseVertex, vertexCount,
                                                instanceCount, baseInstance);
}

void MtlCommandBuffer::drawIndexedInstanced(PrimitiveType type,
                                            unsigned int baseIndex,
                                            unsigned int indexCount,
                                            unsigned int baseVertex,
                                            unsigned int baseInstance,
                                            unsigned int instanceCount) {
    SkASSERT(fActiveRenderCommandEncoder);

    if (@available(macOS 10.11, iOS 9.0, tvOS 9.0, *)) {
        auto mtlPrimitiveType = graphite_to_mtl_primitive(type);
        size_t indexOffset = fCurrentIndexBufferOffset + sizeof(uint16_t) * baseIndex;
        fActiveRenderCommandEncoder->drawIndexedPrimitives(mtlPrimitiveType, indexCount,
                                                           MTLIndexTypeUInt16, fCurrentIndexBuffer,
                                                           indexOffset, instanceCount,
                                                           baseVertex, baseInstance);
    } else {
        SKGPU_LOG_E("Skipping unsupported draw call.");
    }
}

void MtlCommandBuffer::drawIndirect(PrimitiveType type) {
    SkASSERT(fActiveRenderCommandEncoder);
    SkASSERT(fCurrentIndirectBuffer);

    if (@available(macOS 10.11, iOS 9.0, tvOS 9.0, *)) {
        auto mtlPrimitiveType = graphite_to_mtl_primitive(type);
        fActiveRenderCommandEncoder->drawPrimitives(
                mtlPrimitiveType, fCurrentIndirectBuffer, fCurrentIndirectBufferOffset);
    } else {
        SKGPU_LOG_E("Skipping unsupported draw call.");
    }
}

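// Note: unlike the direct indexed draws above, which use 16-bit indices, indexed
// indirect draws are encoded with 32-bit indices.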
void MtlCommandBuffer::drawIndexedIndirect(PrimitiveType type) {
    SkASSERT(fActiveRenderCommandEncoder);
    SkASSERT(fCurrentIndirectBuffer);

    if (@available(macOS 10.11, iOS 9.0, tvOS 9.0, *)) {
        auto mtlPrimitiveType = graphite_to_mtl_primitive(type);
        fActiveRenderCommandEncoder->drawIndexedPrimitives(mtlPrimitiveType,
                                                           MTLIndexTypeUInt32,
                                                           fCurrentIndexBuffer,
                                                           fCurrentIndexBufferOffset,
                                                           fCurrentIndirectBuffer,
                                                           fCurrentIndirectBufferOffset);
    } else {
        SKGPU_LOG_E("Skipping unsupported draw call.");
    }
}

void MtlCommandBuffer::beginComputePass() {
    SkASSERT(!fActiveRenderCommandEncoder);
    SkASSERT(!fActiveComputeCommandEncoder);
    this->endBlitCommandEncoder();
    fActiveComputeCommandEncoder = MtlComputeCommandEncoder::Make(fSharedContext,
                                                                  fCommandBuffer.get());
}

void MtlCommandBuffer::bindComputePipeline(const ComputePipeline* computePipeline) {
    SkASSERT(fActiveComputeCommandEncoder);

    auto mtlPipeline = static_cast<const MtlComputePipeline*>(computePipeline);
    fActiveComputeCommandEncoder->setComputePipelineState(mtlPipeline->mtlPipelineState());
}

void MtlCommandBuffer::bindBuffer(const Buffer* buffer, unsigned int offset, unsigned int index) {
    SkASSERT(fActiveComputeCommandEncoder);

    id<MTLBuffer> mtlBuffer = buffer ? static_cast<const MtlBuffer*>(buffer)->mtlBuffer() : nil;
    fActiveComputeCommandEncoder->setBuffer(mtlBuffer, offset, index);
}

void MtlCommandBuffer::bindTexture(const Texture* texture, unsigned int index) {
    SkASSERT(fActiveComputeCommandEncoder);

    id<MTLTexture> mtlTexture =
            texture ? static_cast<const MtlTexture*>(texture)->mtlTexture() : nil;
    fActiveComputeCommandEncoder->setTexture(mtlTexture, index);
}

void MtlCommandBuffer::bindSampler(const Sampler* sampler, unsigned int index) {
    SkASSERT(fActiveComputeCommandEncoder);

    id<MTLSamplerState> mtlSamplerState =
            sampler ? static_cast<const MtlSampler*>(sampler)->mtlSamplerState() : nil;
    fActiveComputeCommandEncoder->setSamplerState(mtlSamplerState, index);
}

void MtlCommandBuffer::dispatchThreadgroups(const WorkgroupSize& globalSize,
                                            const WorkgroupSize& localSize) {
    SkASSERT(fActiveComputeCommandEncoder);
    fActiveComputeCommandEncoder->dispatchThreadgroups(globalSize, localSize);
}

void MtlCommandBuffer::dispatchThreadgroupsIndirect(const WorkgroupSize& localSize,
                                                    const Buffer* indirectBuffer,
                                                    size_t indirectBufferOffset) {
    SkASSERT(fActiveComputeCommandEncoder);

    id<MTLBuffer> mtlIndirectBuffer = static_cast<const MtlBuffer*>(indirectBuffer)->mtlBuffer();
    fActiveComputeCommandEncoder->dispatchThreadgroupsWithIndirectBuffer(
            mtlIndirectBuffer, indirectBufferOffset, localSize);
}

void MtlCommandBuffer::endComputePass() {
    SkASSERT(fActiveComputeCommandEncoder);
    fActiveComputeCommandEncoder->endEncoding();
    fActiveComputeCommandEncoder.reset();
}

static bool check_max_blit_width(int widthInPixels) {
    if (widthInPixels > 32767) {
        SkASSERT(false); // surfaces should not be this wide anyway
        return false;
    }
    return true;
}

bool MtlCommandBuffer::onCopyBufferToBuffer(const Buffer* srcBuffer,
                                            size_t srcOffset,
                                            const Buffer* dstBuffer,
                                            size_t dstOffset,
                                            size_t size) {
    SkASSERT(!fActiveRenderCommandEncoder);
    SkASSERT(!fActiveComputeCommandEncoder);

    id<MTLBuffer> mtlSrcBuffer = static_cast<const MtlBuffer*>(srcBuffer)->mtlBuffer();
    id<MTLBuffer> mtlDstBuffer = static_cast<const MtlBuffer*>(dstBuffer)->mtlBuffer();

    MtlBlitCommandEncoder* blitCmdEncoder = this->getBlitCommandEncoder();
    if (!blitCmdEncoder) {
        return false;
    }

#ifdef SK_ENABLE_MTL_DEBUG_INFO
    blitCmdEncoder->pushDebugGroup(@"copyBufferToBuffer");
#endif
    blitCmdEncoder->copyBufferToBuffer(mtlSrcBuffer, srcOffset, mtlDstBuffer, dstOffset, size);
#ifdef SK_ENABLE_MTL_DEBUG_INFO
    blitCmdEncoder->popDebugGroup();
#endif
    return true;
}

bool MtlCommandBuffer::onCopyTextureToBuffer(const Texture* texture,
                                             SkIRect srcRect,
                                             const Buffer* buffer,
                                             size_t bufferOffset,
                                             size_t bufferRowBytes) {
    SkASSERT(!fActiveRenderCommandEncoder);
    SkASSERT(!fActiveComputeCommandEncoder);

    if (!check_max_blit_width(srcRect.width())) {
        return false;
    }

    id<MTLTexture> mtlTexture = static_cast<const MtlTexture*>(texture)->mtlTexture();
    id<MTLBuffer> mtlBuffer = static_cast<const MtlBuffer*>(buffer)->mtlBuffer();

    MtlBlitCommandEncoder* blitCmdEncoder = this->getBlitCommandEncoder();
    if (!blitCmdEncoder) {
        return false;
    }

#ifdef SK_ENABLE_MTL_DEBUG_INFO
    blitCmdEncoder->pushDebugGroup(@"copyTextureToBuffer");
#endif
    blitCmdEncoder->copyFromTexture(mtlTexture, srcRect, mtlBuffer, bufferOffset, bufferRowBytes);
#ifdef SK_ENABLE_MTL_DEBUG_INFO
    blitCmdEncoder->popDebugGroup();
#endif
    return true;
}

bool MtlCommandBuffer::onCopyBufferToTexture(const Buffer* buffer,
                                             const Texture* texture,
                                             const BufferTextureCopyData* copyData,
                                             int count) {
    SkASSERT(!fActiveRenderCommandEncoder);
    SkASSERT(!fActiveComputeCommandEncoder);

    id<MTLBuffer> mtlBuffer = static_cast<const MtlBuffer*>(buffer)->mtlBuffer();
    id<MTLTexture> mtlTexture = static_cast<const MtlTexture*>(texture)->mtlTexture();

    MtlBlitCommandEncoder* blitCmdEncoder = this->getBlitCommandEncoder();
    if (!blitCmdEncoder) {
        return false;
    }

#ifdef SK_ENABLE_MTL_DEBUG_INFO
    blitCmdEncoder->pushDebugGroup(@"copyBufferToTexture");
#endif
    for (int i = 0; i < count; ++i) {
        if (!check_max_blit_width(copyData[i].fRect.width())) {
            return false;
        }

        blitCmdEncoder->copyFromBuffer(mtlBuffer,
                                       copyData[i].fBufferOffset,
                                       copyData[i].fBufferRowBytes,
                                       mtlTexture,
                                       copyData[i].fRect,
                                       copyData[i].fMipLevel);
    }

#ifdef SK_ENABLE_MTL_DEBUG_INFO
    blitCmdEncoder->popDebugGroup();
#endif
    return true;
}

bool MtlCommandBuffer::onCopyTextureToTexture(const Texture* src,
                                              SkIRect srcRect,
                                              const Texture* dst,
                                              SkIPoint dstPoint,
                                              int mipLevel) {
    SkASSERT(!fActiveRenderCommandEncoder);
    SkASSERT(!fActiveComputeCommandEncoder);

    id<MTLTexture> srcMtlTexture = static_cast<const MtlTexture*>(src)->mtlTexture();
    id<MTLTexture> dstMtlTexture = static_cast<const MtlTexture*>(dst)->mtlTexture();

    MtlBlitCommandEncoder* blitCmdEncoder = this->getBlitCommandEncoder();
    if (!blitCmdEncoder) {
        return false;
    }

#ifdef SK_ENABLE_MTL_DEBUG_INFO
    blitCmdEncoder->pushDebugGroup(@"copyTextureToTexture");
#endif

    blitCmdEncoder->copyTextureToTexture(srcMtlTexture, srcRect, dstMtlTexture, dstPoint, mipLevel);

#ifdef SK_ENABLE_MTL_DEBUG_INFO
    blitCmdEncoder->popDebugGroup();
#endif
    return true;
}

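// On macOS a managed-mode buffer has both CPU and GPU copies, and GPU-side writes
// must be flushed with a blit before the CPU copy can be read.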
bool MtlCommandBuffer::onSynchronizeBufferToCpu(const Buffer* buffer, bool* outDidResultInWork) {
#ifdef SK_BUILD_FOR_MAC
    SkASSERT(!fActiveRenderCommandEncoder);
    SkASSERT(!fActiveComputeCommandEncoder);

    id<MTLBuffer> mtlBuffer = static_cast<const MtlBuffer*>(buffer)->mtlBuffer();
    if ([mtlBuffer storageMode] != MTLStorageModeManaged) {
        *outDidResultInWork = false;
        return true;
    }

    MtlBlitCommandEncoder* blitCmdEncoder = this->getBlitCommandEncoder();
    if (!blitCmdEncoder) {
        return false;
    }

#ifdef SK_ENABLE_MTL_DEBUG_INFO
    blitCmdEncoder->pushDebugGroup(@"synchronizeToCpu");
#endif
    blitCmdEncoder->synchronizeResource(mtlBuffer);
#ifdef SK_ENABLE_MTL_DEBUG_INFO
    blitCmdEncoder->popDebugGroup();
#endif

    *outDidResultInWork = true;
    return true;
#else   // SK_BUILD_FOR_MAC
    // Explicit synchronization is never necessary on builds that are not macOS since we never use
    // discrete GPUs with managed mode buffers outside of macOS.
    *outDidResultInWork = false;
    return true;
#endif  // SK_BUILD_FOR_MAC
}

bool MtlCommandBuffer::onClearBuffer(const Buffer* buffer, size_t offset, size_t size) {
    SkASSERT(!fActiveRenderCommandEncoder);
    SkASSERT(!fActiveComputeCommandEncoder);

    MtlBlitCommandEncoder* blitCmdEncoder = this->getBlitCommandEncoder();
    if (!blitCmdEncoder) {
        return false;
    }

    id<MTLBuffer> mtlBuffer = static_cast<const MtlBuffer*>(buffer)->mtlBuffer();
    blitCmdEncoder->fillBuffer(mtlBuffer, offset, size, 0);

    return true;
}

} // namespace skgpu::graphite