/*
 * Copyright 2022 Google LLC
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/graphite/task/ComputeTask.h"

#include "src/gpu/graphite/Buffer.h"
#include "src/gpu/graphite/CommandBuffer.h"
#include "src/gpu/graphite/ResourceProvider.h"
#include "src/gpu/graphite/compute/DispatchGroup.h"

namespace skgpu::graphite {

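// Creates a ref-counted ComputeTask that will encode the given DispatchGroups in order.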
sk_sp<ComputeTask> ComputeTask::Make(DispatchGroupList dispatchGroups) {
    return sk_sp<ComputeTask>(new ComputeTask(std::move(dispatchGroups)));
}

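// Snap each DispatchGroup's optional child task up front. The child tasks are kept
// parallel to fDispatchGroups so they can be interleaved with the compute work in
// prepareResources() and addCommands().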
ComputeTask::ComputeTask(DispatchGroupList dispatchGroups)
        : fDispatchGroups(std::move(dispatchGroups)), fChildTasks(fDispatchGroups.size()) {
    for (auto& group : fDispatchGroups) {
        fChildTasks.push_back(group->snapChildTask());
    }
}

ComputeTask::~ComputeTask() = default;

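// Child tasks are prepared before the DispatchGroups. A child that reports kDiscard is
// dropped here so it gets skipped when commands are recorded; any failure aborts the task.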
Task::Status ComputeTask::prepareResources(ResourceProvider* provider,
                                           ScratchResourceManager* scratchManager,
                                           const RuntimeEffectDictionary* rtd) {
    for (auto& child : fChildTasks) {
        if (child) {
            Status status = child->prepareResources(provider, scratchManager, rtd);
            if (status == Status::kFail) {
                return Status::kFail;
            } else if (status == Status::kDiscard) {
                child.reset();
            }
        }
    }
    for (const auto& group : fDispatchGroups) {
        // TODO: Allow ComputeTasks to instantiate with scratch textures and return them.
        if (!group->prepareResources(provider)) {
            return Status::kFail;
        }
    }
    return Status::kSuccess;
}

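// Encodes the DispatchGroups as compute passes, batching consecutive groups into a
// single pass and breaking the batch wherever a group has a dependent child task.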
Task::Status ComputeTask::addCommands(Context* ctx,
                                      CommandBuffer* commandBuffer,
                                      ReplayTargetData rtd) {
    if (fDispatchGroups.empty()) {
        return Status::kDiscard;
    }
    SkASSERT(fDispatchGroups.size() == fChildTasks.size());
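    // Track the run of consecutive DispatchGroups that will be encoded together as the
    // next compute pass.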
    const std::unique_ptr<DispatchGroup>* currentSpanPtr = &fDispatchGroups[0];
    size_t currentSpanSize = 0u;
    for (int i = 0; i < fDispatchGroups.size(); ++i) {
        // If the next DispatchGroup has a dependent task, then encode the accumulated span as a
        // compute pass now. CommandBuffer encodes each compute pass with a separate encoder, so
        // the dependent task can use a non-compute encoder if needed.
        Task* child = fChildTasks[i].get();
        if (child) {
            if (currentSpanSize > 0u) {
                if (!commandBuffer->addComputePass({currentSpanPtr, currentSpanSize})) {
                    return Status::kFail;
                }
                currentSpanPtr = &fDispatchGroups[i];
                currentSpanSize = 0u;
            }

            Status status = child->addCommands(ctx, commandBuffer, rtd);
            if (status == Status::kFail) {
                return Status::kFail;
            } else if (status == Status::kDiscard) {
                fChildTasks[i].reset();
            }
        }
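        // Whether or not a child task ran, the current group joins the span that will be
        // encoded by the next compute pass.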
        currentSpanSize++;
    }
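    // Encode any DispatchGroups remaining in the final span.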
    return (currentSpanSize == 0u ||
            commandBuffer->addComputePass({currentSpanPtr, currentSpanSize})) ? Status::kSuccess
                                                                              : Status::kFail;
}

}  // namespace skgpu::graphite