xref: /aosp_15_r20/external/skia/src/gpu/ganesh/mtl/GrMtlBuffer.mm (revision c8dee2aa9b3f27cf6c858bd81872bdeb2c07ed17)
/*
 * Copyright 2018 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */
7
#include "include/private/gpu/ganesh/GrTypesPriv.h"
#include "src/gpu/ganesh/GrGpuResourcePriv.h"
#include "src/gpu/ganesh/GrStagingBufferManager.h"
#include "src/gpu/ganesh/mtl/GrMtlBuffer.h"
#include "src/gpu/ganesh/mtl/GrMtlCommandBuffer.h"
#include "src/gpu/ganesh/mtl/GrMtlGpu.h"
14
#if !__has_feature(objc_arc)
#error This file must be compiled with Arc. Use -fobjc-arc flag
#endif

// VALIDATE() runs the debug-only invariant check (GrMtlBuffer::validate) in
// SK_DEBUG builds and compiles to a no-op statement otherwise.
#ifdef SK_DEBUG
#define VALIDATE() this->validate()
#else
#define VALIDATE() do {} while(false)
#endif

GR_NORETAIN_BEGIN
26
#ifdef SK_ENABLE_MTL_DEBUG_INFO
// Debug labels attached to MTLBuffers, indexed by GrGpuBufferType. The entry
// order must stay in sync with the GrGpuBufferType enum declaration.
NSString* kBufferTypeNames[kGrGpuBufferTypeCount] = {
    @"Vertex",
    @"Index",
    @"Indirect",
    @"Xfer CPU to GPU",
    @"Xfer GPU to CPU",
    @"Uniform",
};
#endif
37
/// Factory: creates a GrMtlBuffer with a default debug label.
/// @param gpu            Owning Metal GPU context.
/// @param size           Requested buffer size in bytes (may be rounded up).
/// @param intendedType   How the buffer will be used (vertex, index, ...).
/// @param accessPattern  Static buffers get private storage; anything else is
///                       CPU-mappable (see the constructor).
sk_sp<GrMtlBuffer> GrMtlBuffer::Make(GrMtlGpu* gpu,
                                     size_t size,
                                     GrGpuBufferType intendedType,
                                     GrAccessPattern accessPattern) {
    return sk_sp<GrMtlBuffer>(new GrMtlBuffer(gpu,
                                              size,
                                              intendedType,
                                              accessPattern,
                                              /*label=*/"MakeMtlBuffer"));
}
48
// Allocates the underlying MTLBuffer. Dynamic (non-static) buffers are
// CPU-mappable: managed storage on Mac GPUs (flushed with didModifyRange:),
// shared storage elsewhere. Static buffers live in private (GPU-only) memory
// and are updated via staging-buffer blits in onUpdateData.
GrMtlBuffer::GrMtlBuffer(GrMtlGpu* gpu, size_t size, GrGpuBufferType intendedType,
                         GrAccessPattern accessPattern, std::string_view label)
        : INHERITED(gpu, size, intendedType, accessPattern, label)
        , fIsDynamic(accessPattern != kStatic_GrAccessPattern) {
    NSUInteger options = 0;
    if (@available(macOS 10.11, iOS 9.0, tvOS 9.0, *)) {
        if (fIsDynamic) {
#ifdef SK_BUILD_FOR_MAC
            if (gpu->mtlCaps().isMac()) {
                options |= MTLResourceStorageModeManaged;
            } else {
                options |= MTLResourceStorageModeShared;
            }
#else
            options |= MTLResourceStorageModeShared;
#endif
        } else {
            options |= MTLResourceStorageModePrivate;
        }
    }

    // Respect the device's minimum buffer alignment; a zero-sized request
    // yields a nil buffer (messaging nil below is a safe no-op).
    size = SkAlignTo(size, gpu->mtlCaps().getMinBufferAlignment());
    fMtlBuffer = size == 0 ? nil :
            [gpu->device() newBufferWithLength: size
                                       options: options];
#ifdef SK_ENABLE_MTL_DEBUG_INFO
    fMtlBuffer.label = kBufferTypeNames[(int)intendedType];
#endif
    this->registerWithCache(skgpu::Budgeted::kYes);
    VALIDATE();
}
80
// The Metal buffer and map pointer must already have been dropped by
// onRelease()/onAbandon() before destruction.
GrMtlBuffer::~GrMtlBuffer() {
    SkASSERT(!fMtlBuffer);
    SkASSERT(!fMapPtr);
}
85
/// Copies `size` bytes from `src` into this buffer at `offset`.
/// Dynamic buffers are updated by direct memcpy into the mapped contents
/// (followed by a managed-storage flush in internalUnmap). Static buffers are
/// updated with a blit from a staging buffer, which may write extra padding
/// bytes around the region to satisfy the transfer alignment — hence the
/// assert that `preserve` implies an already-aligned offset.
/// @return false if mapping or staging-buffer allocation/encoding fails.
bool GrMtlBuffer::onUpdateData(const void *src, size_t offset, size_t size, bool preserve) {
    if (fIsDynamic) {
        this->internalMap();
        if (!fMapPtr) {
            return false;
        }
        memcpy(SkTAddOffset<void>(fMapPtr, offset), src, size);
        this->internalUnmap(offset, size);
        return true;
    }
    // Update via transfer buffer.

    // We have to respect the transfer alignment. So we may transfer some extra bytes before and
    // after the region to be updated.
    size_t transferAlignment = this->getGpu()->caps()->transferFromBufferToBufferAlignment();
    size_t r = offset%transferAlignment;
    SkASSERT(!preserve || r == 0);  // We can't push extra bytes when preserving.

    offset -= r;
    size_t transferSize = SkAlignTo(size + r, transferAlignment);

    GrStagingBufferManager::Slice slice;
    slice = this->mtlGpu()->stagingBufferManager()->allocateStagingBufferSlice(
            transferSize, this->mtlGpu()->mtlCaps().getMinBufferAlignment());
    if (!slice.fBuffer) {
        return false;
    }
    // Place the payload `r` bytes into the slice so it lands at the original
    // (unaligned) offset after the blit.
    memcpy(SkTAddOffset<void>(slice.fOffsetMapPtr, r), src, size);

    GrMtlCommandBuffer* cmdBuffer = this->mtlGpu()->commandBuffer();
    id<MTLBlitCommandEncoder> GR_NORETAIN blitCmdEncoder = cmdBuffer->getBlitCommandEncoder();
    if (!blitCmdEncoder) {
        return false;
    }
    GrMtlBuffer* mtlBuffer = static_cast<GrMtlBuffer*>(slice.fBuffer);
    id<MTLBuffer> transferBuffer = mtlBuffer->mtlBuffer();
    [blitCmdEncoder copyFromBuffer: transferBuffer
                      sourceOffset: slice.fOffset
                          toBuffer: fMtlBuffer
                 destinationOffset: offset
                              size: transferSize];
    return true;
}
129
// Convenience downcast of the owning GrGpu; only valid while the resource is
// alive (asserted).
inline GrMtlGpu* GrMtlBuffer::mtlGpu() const {
    SkASSERT(!this->wasDestroyed());
    return static_cast<GrMtlGpu*>(this->getGpu());
}
134
// Drops the Metal buffer without GPU-side cleanup (the context is gone).
void GrMtlBuffer::onAbandon() {
    fMtlBuffer = nil;
    fMapPtr = nullptr;
    VALIDATE();
    INHERITED::onAbandon();
}
141
// Releases the Metal buffer; ARC handles the actual dealloc when the last
// reference (e.g. an in-flight command buffer) goes away.
void GrMtlBuffer::onRelease() {
    if (!this->wasDestroyed()) {
        VALIDATE();
        fMtlBuffer = nil;
        fMapPtr = nullptr;
        VALIDATE();
    }
    INHERITED::onRelease();
}
151
// Exposes the buffer contents to the CPU. Only dynamic buffers are mappable;
// for static buffers this is a no-op and fMapPtr stays null.
void GrMtlBuffer::internalMap() {
    if (fIsDynamic) {
        VALIDATE();
        SkASSERT(!this->isMapped());
        fMapPtr = static_cast<char*>(fMtlBuffer.contents);
        VALIDATE();
    }
}
160
// Ends a CPU map. On Mac GPUs (managed storage) the written range must be
// flushed to the GPU with didModifyRange:; shared storage needs no flush.
void GrMtlBuffer::internalUnmap(size_t writtenOffset, size_t writtenSize) {
    SkASSERT(fMtlBuffer);
    if (fIsDynamic) {
        VALIDATE();
        SkASSERT(writtenOffset + writtenSize <= this->size());
        SkASSERT(this->isMapped());
#ifdef SK_BUILD_FOR_MAC
        if (this->mtlGpu()->mtlCaps().isMac() && writtenSize) {
            // We should never write to this type of buffer on the CPU.
            SkASSERT(this->intendedType() != GrGpuBufferType::kXferGpuToCpu);
            [fMtlBuffer didModifyRange: NSMakeRange(writtenOffset, writtenSize)];
        }
#endif
        fMapPtr = nullptr;
    }
}
177
// Map for read or write; the MapType does not affect how Metal maps.
void GrMtlBuffer::onMap(MapType) {
    this->internalMap();
}
181
// A write-discard map may have touched anything, so flush the whole buffer;
// a read map flushes nothing. (Also normalizes the stray `this-> size()`.)
void GrMtlBuffer::onUnmap(MapType type) {
    this->internalUnmap(0, type == MapType::kWriteDiscard ? this->size() : 0);
}
185
/// Zero-fills the whole buffer on the GPU via a blit encoder.
/// Registers this buffer with the command buffer so it stays alive until the
/// GPU work completes. Returns false if no blit encoder is available.
bool GrMtlBuffer::onClearToZero() {
    SkASSERT(fMtlBuffer);
    GrMtlCommandBuffer* cmdBuffer = this->mtlGpu()->commandBuffer();
    id<MTLBlitCommandEncoder> GR_NORETAIN blitCmdEncoder = cmdBuffer->getBlitCommandEncoder();
    if (!blitCmdEncoder) {
        return false;
    }

    NSRange range{0, this->size()};
    [blitCmdEncoder fillBuffer: fMtlBuffer range: range value: 0];

    cmdBuffer->addGrBuffer(sk_ref_sp(this));

    return true;
}
201
#ifdef SK_DEBUG
// Debug invariants: a live buffer has a recognized intended type, and a map
// pointer can only exist while the Metal buffer does.
void GrMtlBuffer::validate() const {
    SkASSERT(fMtlBuffer == nil ||
             this->intendedType() == GrGpuBufferType::kVertex ||
             this->intendedType() == GrGpuBufferType::kIndex ||
             this->intendedType() == GrGpuBufferType::kXferCpuToGpu ||
             this->intendedType() == GrGpuBufferType::kXferGpuToCpu ||
             this->intendedType() == GrGpuBufferType::kDrawIndirect ||
             this->intendedType() == GrGpuBufferType::kUniform);
    SkASSERT((fMapPtr && fMtlBuffer) || !fMapPtr);
}
#endif
214
// Propagates the resource's label to the MTLBuffer (visible in GPU captures),
// prefixed with "_Skia_" to distinguish Skia-owned buffers.
void GrMtlBuffer::onSetLabel() {
    SkASSERT(fMtlBuffer);
    if (!this->getLabel().empty()) {
        NSString* labelStr = @(this->getLabel().c_str());
        fMtlBuffer.label = [@"_Skia_" stringByAppendingString:labelStr];
    }
}
222
GR_NORETAIN_END