/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/c/eager/unified_api_testutil.h"

#include "absl/container/flat_hash_set.h"
#include "tensorflow/c/eager/c_api_experimental.h"
#include "tensorflow/c/eager/c_api_test_util.h"
#include "tensorflow/c/eager/c_api_unified_experimental.h"
#include "tensorflow/c/eager/c_api_unified_experimental_internal.h"
#include "tensorflow/c/tf_status.h"
#include "tensorflow/c/tf_status_helper.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/lib/llvm_rtti/llvm_rtti.h"
#include "tensorflow/core/platform/errors.h"

namespace tensorflow {

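// Creates a tracing (graph) execution context for building a function named
// `fn_name`.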
AbstractContext* BuildFunction(const char* fn_name) {
  std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> status(
      TF_NewStatus(), TF_DeleteStatus);
  TF_ExecutionContext* graph_ctx = TF_CreateFunction(fn_name, status.get());
  return unwrap(graph_ctx);
}

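// Adds one function parameter (graph placeholder) per entry in `inputs`,
// matching each input's dtype and partial shape, and appends the resulting
// handles to `params`. Requires `ctx` to be a tracing context.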
Status CreateParamsForInputs(AbstractContext* ctx,
                             absl::Span<AbstractTensorHandle* const> inputs,
                             std::vector<AbstractTensorHandle*>* params) {
  tracing::TracingTensorHandle* handle = nullptr;
  for (auto input : inputs) {
    PartialTensorShape shape;
    TF_RETURN_IF_ERROR(input->Shape(&shape));
    TF_RETURN_IF_ERROR(dyn_cast<tracing::TracingContext>(ctx)->AddParameter(
        input->DataType(), shape, &handle));
    params->emplace_back(handle);
  }
  return OkStatus();
}

// Runs `model`, wrapping it in a TF function first when `use_function` is
// true; otherwise runs it directly against `ctx`.
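//
// A minimal usage sketch (`MyModel` and `x` are hypothetical; `MyModel` is
// any callable matching the `Model` signature declared in
// unified_api_testutil.h, and `x` is an existing input handle):
//
//   Status MyModel(AbstractContext* ctx,
//                  absl::Span<AbstractTensorHandle* const> inputs,
//                  absl::Span<AbstractTensorHandle*> outputs);
//   ...
//   std::vector<AbstractTensorHandle*> outputs(1);
//   TF_RETURN_IF_ERROR(RunModel(MyModel, ctx, {x}, absl::MakeSpan(outputs),
//                               /*use_function=*/true));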
Status RunModel(Model model, AbstractContext* ctx,
                absl::Span<AbstractTensorHandle* const> inputs,
                absl::Span<AbstractTensorHandle*> outputs, bool use_function) {
  if (use_function) {
    const char* fn_name = "test_fn";
    core::RefCountPtr<AbstractFunction> scoped_func;
    // Returning null tensors from a tf.function is not supported, so we keep
    // track of which indices in the model's outputs are nullptr in this set.
    // The FunctionDef only outputs the non-null tensors. We later pad the
    // function op outputs to have nullptrs at the `null_indices`.
    absl::flat_hash_set<int> null_indices;
    {
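      // Trace `model` in a fresh function (graph) context: the inputs become
      // function parameters and the traced outputs define the function body.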
      AbstractContextPtr func_ctx(BuildFunction(fn_name));
      std::vector<AbstractTensorHandle*> func_inputs;
      func_inputs.reserve(inputs.size());
      TF_RETURN_IF_ERROR(
          CreateParamsForInputs(func_ctx.get(), inputs, &func_inputs));
      std::vector<AbstractTensorHandle*> model_outputs;
      model_outputs.resize(outputs.size());
      TF_RETURN_IF_ERROR(model(func_ctx.get(), absl::MakeSpan(func_inputs),
                               absl::MakeSpan(model_outputs)));
      for (auto func_input : func_inputs) {
        func_input->Unref();
      }
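      // Collect the non-null traced outputs into the function's output list
      // and remember which positions were null so they can be padded later.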
      AbstractFunction* func = nullptr;
      OutputList output_list;
      output_list.expected_num_outputs = 0;
      output_list.outputs.reserve(outputs.size());
      for (int i = 0; i < model_outputs.size(); i++) {
        if (model_outputs[i]) {
          output_list.outputs.emplace_back(model_outputs[i]);
          output_list.expected_num_outputs += 1;
        } else {
          null_indices.insert(i);
        }
      }
      TF_RETURN_IF_ERROR(dyn_cast<tracing::TracingContext>(func_ctx.get())
                             ->Finalize(&output_list, &func));
      scoped_func.reset(func);
      for (auto output : output_list.outputs) {
        output->Unref();
      }
      TF_RETURN_IF_ERROR(ctx->RegisterFunction(func));
    }

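    // Build an op that calls the registered function and execute it on the
    // original (eager) inputs.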
    AbstractOperationPtr fn_op(ctx->CreateOperation());
    TF_RETURN_IF_ERROR(fn_op->Reset(fn_name, /*raw_device_name=*/nullptr));
    for (auto input : inputs) {
      TF_RETURN_IF_ERROR(fn_op->AddInput(input));
    }
    int retvals = outputs.size() - null_indices.size();
    std::vector<AbstractTensorHandle*> fn_outputs(retvals);
    TF_RETURN_IF_ERROR(fn_op->Execute(
        absl::Span<AbstractTensorHandle*>(fn_outputs.data(), fn_outputs.size()),
        &retvals));
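    // Scatter the function's outputs back into `outputs`, leaving nullptr at
    // the positions recorded in `null_indices`.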
    int skipped_indices = 0;
    for (int i = 0; i < outputs.size(); i++) {
      if (!null_indices.contains(i)) {
        outputs[i] = fn_outputs[i - skipped_indices];
      } else {
        skipped_indices += 1;
      }
    }
    TF_RETURN_IF_ERROR(ctx->RemoveFunction(fn_name));
    return OkStatus();
  } else {
    return model(ctx, inputs, outputs);
  }
}

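// Creates an eager (immediate-execution) context, optionally backed by TFRT,
// and returns it in `*ctx`.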
Status BuildImmediateExecutionContext(bool use_tfrt, AbstractContext** ctx) {
  std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> status(
      TF_NewStatus(), TF_DeleteStatus);
  TFE_ContextOptions* opts = TFE_NewContextOptions();
  TFE_ContextOptionsSetTfrt(opts, use_tfrt);
  *ctx = unwrap(TF_NewEagerExecutionContext(opts, status.get()));
  // Delete the options before checking the status so they are not leaked on
  // the error path.
  TFE_DeleteContextOptions(opts);
  TF_RETURN_IF_ERROR(StatusFromTF_Status(status.get()));
  return OkStatus();
}

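// Resolves the eager tensor wrapped by `t` into a concrete TF_Tensor,
// returned in `*result_tensor`; the caller owns the returned tensor and
// should free it with TF_DeleteTensor.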
Status GetValue(AbstractTensorHandle* t, TF_Tensor** result_tensor) {
  std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> status(
      TF_NewStatus(), TF_DeleteStatus);
  TFE_TensorHandle* result_t =
      TF_AbstractTensorGetEagerTensor(wrap(t), status.get());
  TF_RETURN_IF_ERROR(StatusFromTF_Status(status.get()));
  *result_tensor = TFE_TensorHandleResolve(result_t, status.get());
  return StatusFromTF_Status(status.get());
}

}  // namespace tensorflow