/*
 * Copyright 2018 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/ganesh/mtl/GrMtlOpsRenderPass.h"

#include "src/gpu/ganesh/GrBackendUtils.h"
#include "src/gpu/ganesh/GrColor.h"
#include "src/gpu/ganesh/GrNativeRect.h"
#include "src/gpu/ganesh/GrRenderTarget.h"
#include "src/gpu/ganesh/mtl/GrMtlCommandBuffer.h"
#include "src/gpu/ganesh/mtl/GrMtlPipelineState.h"
#include "src/gpu/ganesh/mtl/GrMtlPipelineStateBuilder.h"
#include "src/gpu/ganesh/mtl/GrMtlRenderCommandEncoder.h"
#include "src/gpu/ganesh/mtl/GrMtlRenderTarget.h"
#include "src/gpu/ganesh/mtl/GrMtlTexture.h"

#if !__has_feature(objc_arc)
#error This file must be compiled with Arc. Use -fobjc-arc flag
#endif

GR_NORETAIN_BEGIN

// Ops render pass implementation for the Metal backend. Owns a
// MTLRenderPassDescriptor (fRenderPassDesc) and lazily creates render command
// encoders on it; draw state (pipeline state, bound buffers, primitive type)
// is cached in fActive* members between the onBind* calls and the onDraw*
// calls.
GrMtlOpsRenderPass::GrMtlOpsRenderPass(GrMtlGpu* gpu, GrRenderTarget* rt,
                                       sk_sp<GrMtlFramebuffer> framebuffer, GrSurfaceOrigin origin,
                                       const GrOpsRenderPass::LoadAndStoreInfo& colorInfo,
                                       const GrOpsRenderPass::StencilLoadAndStoreInfo& stencilInfo)
        : INHERITED(rt, origin)
        , fGpu(gpu)
        , fFramebuffer(std::move(framebuffer)) {
    // Builds fRenderPassDesc from the load/store ops and may eagerly create an
    // encoder (for initial clears or an MSAA load-from-resolve).
    this->setupRenderPass(colorInfo, stencilInfo);
}

GrMtlOpsRenderPass::~GrMtlOpsRenderPass() {
}

// Hands the accumulated draw bounds to the GPU and releases the encoder.
// fBounds is the union of all drawBounds passed to onBindPipeline (or the
// full target if a load-op clear occurred); it is rounded out to integer
// pixels before submission.
void GrMtlOpsRenderPass::submit() {
    if (!fFramebuffer) {
        return;
    }
    SkIRect iBounds;
    fBounds.roundOut(&iBounds);
    fGpu->submitIndirectCommandBuffer(fRenderTarget, fOrigin, &iBounds);
    fActiveRenderCmdEncoder = nullptr;
}

// Maps a GrPrimitiveType to the corresponding MTLPrimitiveType via a lookup
// table; the static_asserts pin the enum ordering the table depends on.
static MTLPrimitiveType gr_to_mtl_primitive(GrPrimitiveType primitiveType) {
    const static MTLPrimitiveType mtlPrimitiveType[] {
        MTLPrimitiveTypeTriangle,
        MTLPrimitiveTypeTriangleStrip,
        MTLPrimitiveTypePoint,
        MTLPrimitiveTypeLine,
        MTLPrimitiveTypeLineStrip
    };
    static_assert((int)GrPrimitiveType::kTriangles == 0);
    static_assert((int)GrPrimitiveType::kTriangleStrip == 1);
    static_assert((int)GrPrimitiveType::kPoints == 2);
    static_assert((int)GrPrimitiveType::kLines == 3);
    static_assert((int)GrPrimitiveType::kLineStrip == 4);

    SkASSERT(primitiveType <= GrPrimitiveType::kLineStrip);
    return mtlPrimitiveType[static_cast<int>(primitiveType)];
}

// Resolves (or builds) the Metal pipeline state for programInfo, creates the
// render command encoder if one isn't active yet, and pushes all per-pipeline
// dynamic state (fill mode, scissor, primitive type). Returns false if a
// valid pipeline state could not be obtained or an encoder could not be
// created.
bool GrMtlOpsRenderPass::onBindPipeline(const GrProgramInfo& programInfo,
                                        const SkRect& drawBounds) {
    const GrMtlCaps& caps = fGpu->mtlCaps();
    GrProgramDesc programDesc = caps.makeDesc(fRenderTarget, programInfo,
                                              GrCaps::ProgramDescOverrideFlags::kNone);
    if (!programDesc.isValid()) {
        return false;
    }

    fActivePipelineState = fGpu->resourceProvider().findOrCreateCompatiblePipelineState(
            programDesc, programInfo);
    if (!fActivePipelineState) {
        return false;
    }

    fActivePipelineState->setData(fFramebuffer.get(), programInfo);
    // Cached so onDrawIndexed can compute the vertex-buffer byte offset for
    // baseVertex.
    fCurrentVertexStride = programInfo.geomProc().vertexStride();

    if (!fActiveRenderCmdEncoder) {
        // Encoder creation is deferred until the first draw; see the comment
        // at the end of setupRenderPass.
        this->setupRenderCommandEncoder(fActivePipelineState);
        if (!fActiveRenderCmdEncoder) {
            return false;
        }
        // Keep the color attachment alive for the lifetime of the command
        // buffer.
        fGpu->commandBuffer()->addGrSurface(
                sk_ref_sp<GrMtlAttachment>(fFramebuffer->colorAttachment()));
    }

    fActiveRenderCmdEncoder->setRenderPipelineState(
            fActivePipelineState->pipeline()->mtlPipelineState());
#ifdef SK_ENABLE_MTL_DEBUG_INFO
    // The "bindAndDraw" debug group opened here (or in any of the other bind
    // entry points) is closed by the next onDraw* call.
    if (!fDebugGroupActive) {
        fActiveRenderCmdEncoder->pushDebugGroup(@"bindAndDraw");
        fDebugGroupActive = true;
    }
#endif
    fActivePipelineState->setDrawState(fActiveRenderCmdEncoder,
                                       programInfo.pipeline().writeSwizzle(),
                                       programInfo.pipeline().getXferProcessor());
    if (this->gpu()->caps()->wireframeMode() || programInfo.pipeline().isWireframe()) {
        fActiveRenderCmdEncoder->setTriangleFillMode(MTLTriangleFillModeLines);
    } else {
        fActiveRenderCmdEncoder->setTriangleFillMode(MTLTriangleFillModeFill);
    }

    if (!programInfo.pipeline().isScissorTestEnabled()) {
        // "Disable" scissor by setting it to the full pipeline bounds.
        SkISize dimensions = fFramebuffer->colorAttachment()->dimensions();
        GrMtlPipelineState::SetDynamicScissorRectState(fActiveRenderCmdEncoder,
                                                       dimensions, fOrigin,
                                                       SkIRect::MakeWH(dimensions.width(),
                                                                       dimensions.height()));
    }

    fActivePrimitiveType = gr_to_mtl_primitive(programInfo.primitiveType());
    fBounds.join(drawBounds);
    return true;
}

// Applies a scissor rect (in Skia space; converted inside the helper using
// fOrigin). Only legal after a successful onBindPipeline.
void GrMtlOpsRenderPass::onSetScissorRect(const SkIRect& scissor) {
    SkASSERT(fActivePipelineState);
    SkASSERT(fActiveRenderCmdEncoder);
    GrMtlPipelineState::SetDynamicScissorRectState(fActiveRenderCmdEncoder,
                                                   fFramebuffer->colorAttachment()->dimensions(),
                                                   fOrigin, scissor);
}

// Records the textures/samplers for the current pipeline state and binds them
// on the active encoder. Always succeeds (returns true).
bool GrMtlOpsRenderPass::onBindTextures(const GrGeometryProcessor& geomProc,
                                        const GrSurfaceProxy* const geomProcTextures[],
                                        const GrPipeline& pipeline) {
    SkASSERT(fActivePipelineState);
    SkASSERT(fActiveRenderCmdEncoder);
#ifdef SK_ENABLE_MTL_DEBUG_INFO
    if (!fDebugGroupActive) {
        fActiveRenderCmdEncoder->pushDebugGroup(@"bindAndDraw");
        fDebugGroupActive = true;
    }
#endif
    fActivePipelineState->setTextures(geomProc, pipeline, geomProcTextures);
    fActivePipelineState->bindTextures(fActiveRenderCmdEncoder);
    return true;
}

// Full-target clear implemented as a load-op clear on a fresh encoder: the
// clear color/action is written into fRenderPassDesc and a new encoder is
// started (setupRenderCommandEncoder then resets the load action back to
// Load for any subsequent encoders).
void GrMtlOpsRenderPass::onClear(const GrScissorState& scissor, std::array<float, 4> color) {
    // Partial clears are not supported
    SkASSERT(!scissor.enabled());

    // Ideally we should never end up here since all clears should either be done as draws or
    // load ops in metal. However, if a client inserts a wait op we need to handle it.
    auto colorAttachment = fRenderPassDesc.colorAttachments[0];
    colorAttachment.clearColor = MTLClearColorMake(color[0], color[1], color[2], color[3]);
    colorAttachment.loadAction = MTLLoadActionClear;
    // setupResolve may itself create an encoder (MSAA load-from-resolve); if
    // it didn't, start a plain one so the clear load-op actually executes.
    if (!this->setupResolve()) {
        this->setupRenderCommandEncoder(nullptr);
    }
}

// Full-target stencil clear via a load-op clear, analogous to onClear.
// insideStencilMask selects whether the top stencil bit is set or the buffer
// is zeroed.
void GrMtlOpsRenderPass::onClearStencilClip(const GrScissorState& scissor, bool insideStencilMask) {
    // Partial clears are not supported
    SkASSERT(!scissor.enabled());

    GrAttachment* sb = fFramebuffer->stencilAttachment();
    // this should only be called internally when we know we have a
    // stencil buffer.
    SkASSERT(sb);
    int stencilBitCount = GrBackendFormatStencilBits(sb->backendFormat());

    // The contract with the callers does not guarantee that we preserve all bits in the stencil
    // during this clear. Thus we will clear the entire stencil to the desired value.
    auto stencilAttachment = fRenderPassDesc.stencilAttachment;
    if (insideStencilMask) {
        stencilAttachment.clearStencil = (1 << (stencilBitCount - 1));
    } else {
        stencilAttachment.clearStencil = 0;
    }

    stencilAttachment.loadAction = MTLLoadActionClear;
    if (!this->setupResolve()) {
        this->setupRenderCommandEncoder(nullptr);
    }
}

// Performs a deferred texture upload between draws. The upload ends the
// current encoder, so a new one must be prepared afterward.
void GrMtlOpsRenderPass::inlineUpload(GrOpFlushState* state, GrDeferredTextureUploadFn& upload) {
    state->doUpload(upload);

    // If the previous renderCommandEncoder did a resolve without an MSAA store
    // (e.g., if the color attachment is memoryless) we need to copy the contents of
    // the resolve attachment to the MSAA attachment at this point.
    if (!this->setupResolve()) {
        // If setting up for the resolve didn't create an encoder, it's probably reasonable to
        // create a new encoder at this point, though maybe not necessary.
        this->setupRenderCommandEncoder(nullptr);
    }
}

// Sets state every fresh encoder needs regardless of pipeline: winding order
// and a full-attachment viewport. No-op if encoder creation failed.
void GrMtlOpsRenderPass::initRenderState(GrMtlRenderCommandEncoder* encoder) {
    if (!encoder) {
        return;
    }
#ifdef SK_ENABLE_MTL_DEBUG_INFO
    encoder->pushDebugGroup(@"initRenderState");
#endif
    encoder->setFrontFacingWinding(MTLWindingCounterClockwise);
    SkISize colorAttachmentDimensions = fFramebuffer->colorAttachment()->dimensions();
    // Strictly speaking we shouldn't have to set this, as the default viewport is the size of
    // the drawable used to generate the renderCommandEncoder -- but just in case.
    MTLViewport viewport = { 0.0, 0.0,
                             (double) colorAttachmentDimensions.width(),
                             (double) colorAttachmentDimensions.height(),
                             0.0, 1.0 };
    encoder->setViewport(viewport);
#ifdef SK_ENABLE_MTL_DEBUG_INFO
    encoder->popDebugGroup();
#endif
}

// Translates the Ganesh load/store ops for color and stencil into a
// MTLRenderPassDescriptor, then decides whether an encoder must be created
// now (resolve-load or an initial clear) or can be deferred to the first
// draw. The lookup tables depend on the enum orderings pinned by the
// static_asserts.
void GrMtlOpsRenderPass::setupRenderPass(
        const GrOpsRenderPass::LoadAndStoreInfo& colorInfo,
        const GrOpsRenderPass::StencilLoadAndStoreInfo& stencilInfo) {
    const static MTLLoadAction mtlLoadAction[] {
        MTLLoadActionLoad,
        MTLLoadActionClear,
        MTLLoadActionDontCare
    };
    static_assert((int)GrLoadOp::kLoad == 0);
    static_assert((int)GrLoadOp::kClear == 1);
    static_assert((int)GrLoadOp::kDiscard == 2);
    SkASSERT(colorInfo.fLoadOp <= GrLoadOp::kDiscard);
    SkASSERT(stencilInfo.fLoadOp <= GrLoadOp::kDiscard);

    const static MTLStoreAction mtlStoreAction[] {
        MTLStoreActionStore,
        MTLStoreActionDontCare
    };
    static_assert((int)GrStoreOp::kStore == 0);
    static_assert((int)GrStoreOp::kDiscard == 1);
    SkASSERT(colorInfo.fStoreOp <= GrStoreOp::kDiscard);
    SkASSERT(stencilInfo.fStoreOp <= GrStoreOp::kDiscard);

    fRenderPassDesc = [MTLRenderPassDescriptor new];
    auto colorAttachment = fRenderPassDesc.colorAttachments[0];
    auto color = fFramebuffer->colorAttachment();
    colorAttachment.texture = color->mtlTexture();
    const std::array<float, 4>& clearColor = colorInfo.fClearColor;
    colorAttachment.clearColor =
            MTLClearColorMake(clearColor[0], clearColor[1], clearColor[2], clearColor[3]);
    colorAttachment.loadAction = mtlLoadAction[static_cast<int>(colorInfo.fLoadOp)];
    colorAttachment.storeAction = mtlStoreAction[static_cast<int>(colorInfo.fStoreOp)];

    auto stencil = fFramebuffer->stencilAttachment();
    auto mtlStencil = fRenderPassDesc.stencilAttachment;
    if (stencil) {
        mtlStencil.texture = stencil->mtlTexture();
    }
    mtlStencil.clearStencil = 0;
    mtlStencil.loadAction = mtlLoadAction[static_cast<int>(stencilInfo.fLoadOp)];
    mtlStencil.storeAction = mtlStoreAction[static_cast<int>(stencilInfo.fStoreOp)];

    if (!this->setupResolve()) {
        // Manage initial clears
        if (colorInfo.fLoadOp == GrLoadOp::kClear || stencilInfo.fLoadOp == GrLoadOp::kClear) {
            // A clear touches the whole target, so the submitted bounds must
            // cover it entirely.
            fBounds = SkRect::MakeWH(color->dimensions().width(),
                                     color->dimensions().height());
            this->setupRenderCommandEncoder(nullptr);
        } else {
            fBounds.setEmpty();
            // For now, we lazily create the renderCommandEncoder because we may have no draws,
            // and an empty renderCommandEncoder can still produce output. This can cause issues
            // when we clear a texture upon creation -- we'll subsequently discard the contents.
            // This can be removed when that ordering is fixed.
        }
    }
}

// If the framebuffer has a resolve attachment, wires it into the descriptor
// (store action becomes MultisampleResolve) and, when the color load op is
// Load, kicks off an MSAA load-from-resolve which produces an encoder.
// Returns true iff an encoder was created here; any previously active
// encoder reference is dropped unconditionally.
bool GrMtlOpsRenderPass::setupResolve() {
    fActiveRenderCmdEncoder = nullptr;
    auto resolve = fFramebuffer->resolveAttachment();
    if (resolve) {
        auto colorAttachment = fRenderPassDesc.colorAttachments[0];
        colorAttachment.resolveTexture = resolve->mtlTexture();
        // TODO: For framebufferOnly attachments we should do StoreAndMultisampleResolve if
        // the storeAction is Store. But for the moment they don't take this path.
        colorAttachment.storeAction = MTLStoreActionMultisampleResolve;
        if (colorAttachment.loadAction == MTLLoadActionLoad) {
            auto color = fFramebuffer->colorAttachment();
            auto dimensions = color->dimensions();
            // for now use the full bounds
            auto nativeBounds = GrNativeRect::MakeIRectRelativeTo(
                    fOrigin, dimensions.height(), SkIRect::MakeSize(dimensions));
            fActiveRenderCmdEncoder =
                    fGpu->loadMSAAFromResolve(color, resolve, nativeBounds,
                                              fRenderPassDesc.stencilAttachment);
        }
    }

    return (fActiveRenderCmdEncoder != nullptr);
}

// Creates an encoder on the current descriptor (pipelineState may be null,
// e.g. for a bare clear) and downgrades the descriptor's load actions so any
// later encoder on this pass loads prior contents instead of re-clearing.
void GrMtlOpsRenderPass::setupRenderCommandEncoder(GrMtlPipelineState* pipelineState) {
    fActiveRenderCmdEncoder =
            fGpu->commandBuffer()->getRenderCommandEncoder(fRenderPassDesc, pipelineState, this);
    // Any future RenderCommandEncoders we create for this OpsRenderPass should load,
    // unless onClear or onClearStencilClip are explicitly called.
    auto colorAttachment = fRenderPassDesc.colorAttachments[0];
    colorAttachment.loadAction = MTLLoadActionLoad;
    auto stencilAttachment = fRenderPassDesc.stencilAttachment;
    stencilAttachment.loadAction = MTLLoadActionLoad;
}

// Caches the buffers for the upcoming draws. The instance buffer is bound on
// the encoder immediately; the vertex buffer binding is deferred to the
// onDraw* calls (which may need a per-draw offset), and the index buffer is
// passed directly to the indexed draw calls.
void GrMtlOpsRenderPass::onBindBuffers(sk_sp<const GrBuffer> indexBuffer,
                                       sk_sp<const GrBuffer> instanceBuffer,
                                       sk_sp<const GrBuffer> vertexBuffer,
                                       GrPrimitiveRestart primRestart) {
#ifdef SK_ENABLE_MTL_DEBUG_INFO
    if (!fDebugGroupActive) {
        fActiveRenderCmdEncoder->pushDebugGroup(@"bindAndDraw");
        fDebugGroupActive = true;
    }
#endif
    SkASSERT(GrPrimitiveRestart::kNo == primRestart);
    int inputBufferIndex = 0;
    if (vertexBuffer) {
        SkASSERT(!vertexBuffer->isCpuBuffer());
        SkASSERT(!static_cast<const GrGpuBuffer*>(vertexBuffer.get())->isMapped());
        fActiveVertexBuffer = std::move(vertexBuffer);
        fGpu->commandBuffer()->addGrBuffer(fActiveVertexBuffer);
        // Reserve binding slot 0 for the vertex buffer even though it is
        // bound later, so the instance buffer lands at slot 1.
        ++inputBufferIndex;
    }
    if (instanceBuffer) {
        SkASSERT(!instanceBuffer->isCpuBuffer());
        SkASSERT(!static_cast<const GrGpuBuffer*>(instanceBuffer.get())->isMapped());
        this->setVertexBuffer(fActiveRenderCmdEncoder, instanceBuffer.get(), 0, inputBufferIndex++);
        fActiveInstanceBuffer = std::move(instanceBuffer);
        fGpu->commandBuffer()->addGrBuffer(fActiveInstanceBuffer);
    }
    if (indexBuffer) {
        SkASSERT(!indexBuffer->isCpuBuffer());
        SkASSERT(!static_cast<const GrGpuBuffer*>(indexBuffer.get())->isMapped());
        fActiveIndexBuffer = std::move(indexBuffer);
        fGpu->commandBuffer()->addGrBuffer(fActiveIndexBuffer);
    }
}

// Non-indexed, non-instanced draw. Closes the "bindAndDraw" debug group
// opened by the bind calls.
void GrMtlOpsRenderPass::onDraw(int vertexCount, int baseVertex) {
    SkASSERT(fActivePipelineState);
    SkASSERT(nil != fActiveRenderCmdEncoder);
#ifdef SK_ENABLE_MTL_DEBUG_INFO
    if (!fDebugGroupActive) {
        fActiveRenderCmdEncoder->pushDebugGroup(@"bindAndDraw");
        fDebugGroupActive = true;
    }
#endif
    this->setVertexBuffer(fActiveRenderCmdEncoder, fActiveVertexBuffer.get(), 0, 0);

    fActiveRenderCmdEncoder->drawPrimitives(fActivePrimitiveType, baseVertex, vertexCount);
    fGpu->stats()->incNumDraws();
#ifdef SK_ENABLE_MTL_DEBUG_INFO
    SkASSERT(fDebugGroupActive);
    fActiveRenderCmdEncoder->popDebugGroup();
    fDebugGroupActive = false;
#endif
}

// Indexed draw using 16-bit indices. baseVertex is applied as a byte offset
// on the vertex buffer binding (stride * baseVertex) rather than in the draw
// call; minIndexValue/maxIndexValue are unused here.
void GrMtlOpsRenderPass::onDrawIndexed(int indexCount, int baseIndex, uint16_t minIndexValue,
                                       uint16_t maxIndexValue, int baseVertex) {
    SkASSERT(fActivePipelineState);
    SkASSERT(nil != fActiveRenderCmdEncoder);
    SkASSERT(fActiveIndexBuffer);
#ifdef SK_ENABLE_MTL_DEBUG_INFO
    if (!fDebugGroupActive) {
        fActiveRenderCmdEncoder->pushDebugGroup(@"bindAndDraw");
        fDebugGroupActive = true;
    }
#endif
    this->setVertexBuffer(fActiveRenderCmdEncoder, fActiveVertexBuffer.get(),
                          fCurrentVertexStride * baseVertex, 0);

    auto mtlIndexBuffer = static_cast<const GrMtlBuffer*>(fActiveIndexBuffer.get());
    size_t indexOffset = sizeof(uint16_t) * baseIndex;
    id<MTLBuffer> indexBuffer = mtlIndexBuffer->mtlBuffer();
    fActiveRenderCmdEncoder->drawIndexedPrimitives(fActivePrimitiveType, indexCount,
                                                   MTLIndexTypeUInt16, indexBuffer, indexOffset);
    fGpu->stats()->incNumDraws();
#ifdef SK_ENABLE_MTL_DEBUG_INFO
    SkASSERT(fDebugGroupActive);
    fActiveRenderCmdEncoder->popDebugGroup();
    fDebugGroupActive = false;
#endif
}

// Instanced draw. The instanced drawPrimitives overload requires
// macOS 10.11 / iOS 9.0; on older OSes this asserts (the caps should never
// route us here in that case).
void GrMtlOpsRenderPass::onDrawInstanced(int instanceCount, int baseInstance, int vertexCount,
                                         int baseVertex) {
    SkASSERT(fActivePipelineState);
    SkASSERT(nil != fActiveRenderCmdEncoder);
#ifdef SK_ENABLE_MTL_DEBUG_INFO
    if (!fDebugGroupActive) {
        fActiveRenderCmdEncoder->pushDebugGroup(@"bindAndDraw");
        fDebugGroupActive = true;
    }
#endif
    this->setVertexBuffer(fActiveRenderCmdEncoder, fActiveVertexBuffer.get(), 0, 0);

    if (@available(macOS 10.11, iOS 9.0, tvOS 9.0, *)) {
        fActiveRenderCmdEncoder->drawPrimitives(fActivePrimitiveType, baseVertex, vertexCount,
                                                instanceCount, baseInstance);
    } else {
        SkASSERT(false);
    }
    fGpu->stats()->incNumDraws();
#ifdef SK_ENABLE_MTL_DEBUG_INFO
    SkASSERT(fDebugGroupActive);
    fActiveRenderCmdEncoder->popDebugGroup();
    fDebugGroupActive = false;
#endif
}

// Indexed + instanced draw; here baseVertex is passed to the draw call
// itself (unlike onDrawIndexed, which folds it into the binding offset).
void GrMtlOpsRenderPass::onDrawIndexedInstanced(
        int indexCount, int baseIndex, int instanceCount, int baseInstance, int baseVertex) {
    SkASSERT(fActivePipelineState);
    SkASSERT(nil != fActiveRenderCmdEncoder);
    SkASSERT(fActiveIndexBuffer);
#ifdef SK_ENABLE_MTL_DEBUG_INFO
    if (!fDebugGroupActive) {
        fActiveRenderCmdEncoder->pushDebugGroup(@"bindAndDraw");
        fDebugGroupActive = true;
    }
#endif
    this->setVertexBuffer(fActiveRenderCmdEncoder, fActiveVertexBuffer.get(), 0, 0);

    auto mtlIndexBuffer = static_cast<const GrMtlBuffer*>(fActiveIndexBuffer.get());
    size_t indexOffset = sizeof(uint16_t) * baseIndex;
    if (@available(macOS 10.11, iOS 9.0, tvOS 9.0, *)) {
        fActiveRenderCmdEncoder->drawIndexedPrimitives(fActivePrimitiveType, indexCount,
                                                       MTLIndexTypeUInt16,
                                                       mtlIndexBuffer->mtlBuffer(), indexOffset,
                                                       instanceCount, baseVertex, baseInstance);
    } else {
        SkASSERT(false);
    }
    fGpu->stats()->incNumDraws();
#ifdef SK_ENABLE_MTL_DEBUG_INFO
    SkASSERT(fDebugGroupActive);
    fActiveRenderCmdEncoder->popDebugGroup();
    fDebugGroupActive = false;
#endif
}

// Issues drawCount indirect draws, stepping bufferOffset through the
// GrDrawIndirectCommand array one stride at a time (Metal's indirect draw
// takes a single command per call).
void GrMtlOpsRenderPass::onDrawIndirect(const GrBuffer* drawIndirectBuffer,
                                        size_t bufferOffset,
                                        int drawCount) {
    SkASSERT(fGpu->caps()->nativeDrawIndirectSupport());
    SkASSERT(fActivePipelineState);
    SkASSERT(nil != fActiveRenderCmdEncoder);
#ifdef SK_ENABLE_MTL_DEBUG_INFO
    if (!fDebugGroupActive) {
        fActiveRenderCmdEncoder->pushDebugGroup(@"bindAndDraw");
        fDebugGroupActive = true;
    }
#endif
    this->setVertexBuffer(fActiveRenderCmdEncoder, fActiveVertexBuffer.get(), 0, 0);

    auto mtlIndirectBuffer = static_cast<const GrMtlBuffer*>(drawIndirectBuffer);
    const size_t stride = sizeof(GrDrawIndirectCommand);
    while (drawCount >= 1) {
        if (@available(macOS 10.11, iOS 9.0, tvOS 9.0, *)) {
            fActiveRenderCmdEncoder->drawPrimitives(fActivePrimitiveType,
                                                    mtlIndirectBuffer->mtlBuffer(), bufferOffset);
        } else {
            SkASSERT(false);
        }
        drawCount--;
        bufferOffset += stride;
        fGpu->stats()->incNumDraws();
    }
#ifdef SK_ENABLE_MTL_DEBUG_INFO
    SkASSERT(fDebugGroupActive);
    fActiveRenderCmdEncoder->popDebugGroup();
    fDebugGroupActive = false;
#endif
}

// Indexed analogue of onDrawIndirect: one drawIndexedPrimitives call per
// GrDrawIndexedIndirectCommand, advancing bufferOffset each iteration. The
// index buffer is read from offset 0; per-draw index offsets live in the
// indirect commands themselves.
void GrMtlOpsRenderPass::onDrawIndexedIndirect(const GrBuffer* drawIndirectBuffer,
                                               size_t bufferOffset,
                                               int drawCount) {
    SkASSERT(fGpu->caps()->nativeDrawIndirectSupport());
    SkASSERT(fActivePipelineState);
    SkASSERT(nil != fActiveRenderCmdEncoder);
    SkASSERT(fActiveIndexBuffer);
#ifdef SK_ENABLE_MTL_DEBUG_INFO
    if (!fDebugGroupActive) {
        fActiveRenderCmdEncoder->pushDebugGroup(@"bindAndDraw");
        fDebugGroupActive = true;
    }
#endif
    this->setVertexBuffer(fActiveRenderCmdEncoder, fActiveVertexBuffer.get(), 0, 0);

    auto mtlIndexBuffer = static_cast<const GrMtlBuffer*>(fActiveIndexBuffer.get());
    auto mtlIndirectBuffer = static_cast<const GrMtlBuffer*>(drawIndirectBuffer);
    size_t indexOffset = 0;

    const size_t stride = sizeof(GrDrawIndexedIndirectCommand);
    while (drawCount >= 1) {
        if (@available(macOS 10.11, iOS 9.0, tvOS 9.0, *)) {
            fActiveRenderCmdEncoder->drawIndexedPrimitives(fActivePrimitiveType,
                                                           MTLIndexTypeUInt16,
                                                           mtlIndexBuffer->mtlBuffer(),
                                                           indexOffset,
                                                           mtlIndirectBuffer->mtlBuffer(),
                                                           bufferOffset);
        } else {
            SkASSERT(false);
        }
        drawCount--;
        bufferOffset += stride;
        fGpu->stats()->incNumDraws();
    }
#ifdef SK_ENABLE_MTL_DEBUG_INFO
    SkASSERT(fDebugGroupActive);
    fActiveRenderCmdEncoder->popDebugGroup();
    fDebugGroupActive = false;
#endif
}

// Binds a vertex/instance buffer on the encoder at a binding index offset
// past the uniform bindings. No-op for a null buffer. The index must stay
// below 4 -- presumably the backend's reserved buffer-binding budget; TODO
// confirm against GrMtlUniformHandler.
void GrMtlOpsRenderPass::setVertexBuffer(GrMtlRenderCommandEncoder* encoder,
                                         const GrBuffer* buffer,
                                         size_t vertexOffset,
                                         size_t inputBufferIndex) {
    if (!buffer) {
        return;
    }

    // point after the uniforms
    constexpr static int kFirstBufferBindingIdx = GrMtlUniformHandler::kLastUniformBinding + 1;
    int index = inputBufferIndex + kFirstBufferBindingIdx;
    SkASSERT(index < 4);
    auto mtlBuffer = static_cast<const GrMtlBuffer*>(buffer);
    id<MTLBuffer> mtlVertexBuffer = mtlBuffer->mtlBuffer();
    SkASSERT(mtlVertexBuffer);
    size_t offset = vertexOffset;
    encoder->setVertexBuffer(mtlVertexBuffer, offset, index);
}

GR_NORETAIN_END