// Generated by tfcompile, the TensorFlow graph compiler.  DO NOT EDIT!
//
// This header was generated via ahead-of-time compilation of a TensorFlow
// graph.  An object file corresponding to this header was also generated.
// This header gives access to the functionality in that object file.
//
// clang-format off

#ifndef TFCOMPILE_GENERATED_entry_point_H_  // NOLINT(build/header_guard)
#define TFCOMPILE_GENERATED_entry_point_H_  // NOLINT(build/header_guard)

#include "tensorflow/compiler/xla/xla_data.pb.h"

#include "tensorflow/compiler/tf2xla/xla_compiled_cpu_function.h"
#include "tensorflow/core/platform/types.h"

namespace Eigen { struct ThreadPoolDevice; }
namespace xla { class ExecutableRunOptions; }

// (Implementation detail) Entry point to the function in the object file.
extern "C" void entry_point(
    void* result, const ::xla::ExecutableRunOptions* run_options,
    const void** args, void** temps, XlaCustomCallStatus* status,
    int64_t* profile_counters);

extern "C" char __tfcompile_foo_bar_MyClass_ProgramShapeProto_protobuf_array_contents[];


namespace foo {
namespace bar {

// MyClass represents a computation previously specified in a
// TensorFlow graph, now compiled into executable code. This extends the generic
// XlaCompiledCpuFunction class with statically type-safe arg and result
// methods. Usage example:
//
//   MyClass computation;
//   // ...set args using computation.argN methods
//   CHECK(computation.Run());
//   // ...inspect results using computation.resultN methods
//
// The Run method invokes the actual computation, with inputs read from arg
// buffers, and outputs written to result buffers. Each Run call may also use
// a set of temporary buffers for the computation.
//
// By default each instance of this class manages its own arg, result and temp
// buffers. The AllocMode constructor parameter may be used to modify the
// buffer allocation strategy.
//
// Under the default allocation strategy, this class is thread-compatible:
// o Calls to non-const methods require exclusive access to the object.
// o Concurrent calls to const methods are OK, if those calls are made while it
//   is guaranteed that no thread may call a non-const method.
//
// The logical function signature is:
//   ((unknown): f32[1,2], (unknown): s64[3,4], (unknown): f32[1], (unknown): f32[1], (unknown): s32[5]) -> (u32[5,6], f32[1], s32[5])
//
// Memory stats:
//   arg bytes total:    392
//   arg bytes aligned:  576
//   temp bytes total:   126
//   temp bytes aligned: 512
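//
// An illustrative sketch of the non-default allocation mode, in which the
// caller owns the arg and variable buffers (the buffer name `input` below is
// hypothetical):
//
//   MyClass computation(
//       MyClass::AllocMode::RESULTS_PROFILES_AND_TEMPS_ONLY);
//   float input[1][2] = {{1.0f, 2.0f}};
//   computation.set_arg0_data(&input);
//   // ...also set the remaining arg and variable buffers, then:
//   CHECK(computation.Run());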
class MyClass final : public tensorflow::XlaCompiledCpuFunction {
 public:
  // Number of input arguments for the compiled computation.
  static constexpr size_t kNumArgs = 5;

  // Number of variables for the compiled computation.
  static constexpr size_t kNumVariables = 3;

  // Byte size of each argument buffer. There are kNumArgs entries.
  static const ::int64_t ArgSize(::tensorflow::int32 index) {
    return BufferInfos()[ArgIndexToBufferIndex()[index]].size();
  }

  // Returns static data used to create an XlaCompiledCpuFunction.
  static const tensorflow::XlaCompiledCpuFunction::StaticData& StaticData() {
    static XlaCompiledCpuFunction::StaticData* kStaticData = [](){
      XlaCompiledCpuFunction::StaticData* data =
        new XlaCompiledCpuFunction::StaticData;
      set_static_data_raw_function(data, entry_point);
      set_static_data_buffer_infos(data, BufferInfos());
      set_static_data_num_buffers(data, kNumBuffers);
      set_static_data_arg_index_table(data, ArgIndexToBufferIndex());
      set_static_data_num_args(data, kNumArgs);
      set_static_data_num_variables(data, kNumVariables);
      set_static_data_result_index(data, kResultIndex);
      set_static_data_arg_names(data, StaticArgNames());
      set_static_data_variable_names(data, StaticVariableNames());
      set_static_data_result_names(data, StaticResultNames());
      set_static_data_program_shape(data, StaticProgramShape());
      set_static_data_hlo_profile_printer_data(
          data, StaticHloProfilePrinterData());

      return data;
    }();
    return *kStaticData;
  }

  MyClass(AllocMode alloc_mode =
            AllocMode::ARGS_VARIABLES_RESULTS_PROFILES_AND_TEMPS)
      : XlaCompiledCpuFunction(StaticData(), alloc_mode) {}

  MyClass(const MyClass&) = delete;
  MyClass& operator=(const MyClass&) = delete;

  // Arg methods for managing input buffers. Buffers are in row-major order.
  // There is a set of methods for each positional argument, with the following
  // general form:
  //
  // void set_argN_data(void* data)
  //   Sets the buffer of type T for positional argument N. May be called in
  //   any AllocMode. Must be called before Run to have an effect. Must be
  //   called in AllocMode::RESULTS_PROFILES_AND_TEMPS_ONLY for each positional
  //   argument, to set the argument buffers.
  //
  // T* argN_data()
  //   Returns the buffer of type T for positional argument N.
  //
  // T& argN(...dim indices...)
  //   Returns a reference to the value of type T for positional argument N,
  //   with dim indices specifying which value. No bounds checking is performed
  //   on dim indices.
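  //
  // For example (an illustrative sketch; the buffer name `input` is
  // hypothetical), arg0 is the f32[1,2] feed "myfeed":
  //
  //   float input[1][2] = {{1.0f, 2.0f}};
  //   computation.set_arg0_data(&input);  // or set_arg_myfeed_data(&input)
  //   CHECK(computation.Run());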

  void set_arg0_data(const void* data) {
    set_arg_data(0, data);
  }
  float* arg0_data() {
    return static_cast<float*>(arg_data(0));
  }
  float& arg0(size_t dim0, size_t dim1) {
    return (*static_cast<float(*)[1][2]>(
        arg_data(0)))[dim0][dim1];
  }
  const float* arg0_data() const {
    return static_cast<const float*>(arg_data(0));
  }
  const float& arg0(size_t dim0, size_t dim1) const {
    return (*static_cast<const float(*)[1][2]>(
        arg_data(0)))[dim0][dim1];
  }
  int arg0_size() const {
    return 2 * sizeof(float);
  }
  int arg0_count() const {
    return 2;
  }

  void set_arg_myfeed_data(const void* data) {
    set_arg_data(0, data);
  }
  float* arg_myfeed_data() {
    return static_cast<float*>(arg_data(0));
  }
  float& arg_myfeed(size_t dim0, size_t dim1) {
    return (*static_cast<float(*)[1][2]>(
        arg_data(0)))[dim0][dim1];
  }
  const float* arg_myfeed_data() const {
    return static_cast<const float*>(arg_data(0));
  }
  const float& arg_myfeed(size_t dim0, size_t dim1) const {
    return (*static_cast<const float(*)[1][2]>(
        arg_data(0)))[dim0][dim1];
  }
  int arg_myfeed_size() const {
    return 2 * sizeof(float);
  }
  int arg_myfeed_count() const {
    return 2;
  }

  void set_arg1_data(const void* data) {
    set_arg_data(1, data);
  }
  int64_t* arg1_data() {
    return static_cast<int64_t*>(arg_data(1));
  }
  int64_t& arg1(size_t dim0, size_t dim1) {
    return (*static_cast<int64_t(*)[3][4]>(
        arg_data(1)))[dim0][dim1];
  }
  const int64_t* arg1_data() const {
    return static_cast<const int64_t*>(arg_data(1));
  }
  const int64_t& arg1(size_t dim0, size_t dim1) const {
    return (*static_cast<const int64_t(*)[3][4]>(
        arg_data(1)))[dim0][dim1];
  }
  int arg1_size() const {
    return 12 * sizeof(int64_t);
  }
  int arg1_count() const {
    return 12;
  }

  // Result methods for managing output buffers. Buffers are in row-major order.
  // Must only be called after a successful Run call. There is a set of methods
  // for each positional result, with the following general form:
  //
  // T* resultN_data()
  //   Returns the buffer of type T for positional result N.
  //
  // T& resultN(...dim indices...)
  //   Returns a reference to the value of type T for positional result N,
  //   with dim indices specifying which value. No bounds checking is performed
  //   on dim indices.
  //
  // Unlike the arg methods, there is no set_resultN_data method. The result
  // buffers are managed internally, and may change after each call to Run.
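  //
  // For example (an illustrative sketch), result0 is the u32[5,6] fetch
  // "myfetch":
  //
  //   CHECK(computation.Run());
  //   tensorflow::uint32 v = computation.result0(4, 5);  // last element
  //   const tensorflow::uint32* buf = computation.result_myfetch_data();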

  tensorflow::uint32* result0_data() {
    return static_cast<tensorflow::uint32*>(result_data(0));
  }
  tensorflow::uint32& result0(size_t dim0, size_t dim1) {
    return (*static_cast<tensorflow::uint32(*)[5][6]>(
        result_data(0)))[dim0][dim1];
  }
  const tensorflow::uint32* result0_data() const {
    return static_cast<const tensorflow::uint32*>(result_data(0));
  }
  const tensorflow::uint32& result0(size_t dim0, size_t dim1) const {
    return (*static_cast<const tensorflow::uint32(*)[5][6]>(
        result_data(0)))[dim0][dim1];
  }
  int result0_size() const {
    return 30 * sizeof(tensorflow::uint32);
  }
  int result0_count() const {
    return 30;
  }

  tensorflow::uint32* result_myfetch_data() {
    return static_cast<tensorflow::uint32*>(result_data(0));
  }
  tensorflow::uint32& result_myfetch(size_t dim0, size_t dim1) {
    return (*static_cast<tensorflow::uint32(*)[5][6]>(
        result_data(0)))[dim0][dim1];
  }
  const tensorflow::uint32* result_myfetch_data() const {
    return static_cast<const tensorflow::uint32*>(result_data(0));
  }
  const tensorflow::uint32& result_myfetch(size_t dim0, size_t dim1) const {
    return (*static_cast<const tensorflow::uint32(*)[5][6]>(
        result_data(0)))[dim0][dim1];
  }
  int result_myfetch_size() const {
    return 30 * sizeof(tensorflow::uint32);
  }
  int result_myfetch_count() const {
    return 30;
  }

  // Methods for managing variable buffers. Buffers are in row-major order.
  //
  // For read-write variables we generate the following methods:
  //
  // void set_var_X_data(T* data)
  //   Sets the buffer for variable X.  Must be called before Run if the
  //   allocation mode is RESULTS_PROFILES_AND_TEMPS_ONLY.
  //
  // T* var_X_data()
  //   Returns the buffer of type T for variable X.  If the allocation mode is
  //   RESULTS_PROFILES_AND_TEMPS_ONLY then this buffer is the same as the
  //   buffer passed to set_var_X_data.
  //
  // T& var_X(...dim indices...)
  //   Returns a reference to the value of type T for variable X,
  //   with dim indices specifying which value. No bounds checking is performed
  //   on dim indices.
  //
  // For read-only variables we generate the same set of methods, except that
  // we use `const T` instead of `T`.  We use `const T` to avoid erasing the
  // constness of the buffer passed to `set_var_X_data`; the underlying buffer
  // is not const (and thus the const can safely be const_cast away) unless
  // `set_var_X_data` is called with a pointer to constant storage.
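  //
  // For example (an illustrative sketch; the name `value` is hypothetical),
  // myvar is a read-write f32[1] variable:
  //
  //   float value = 3.0f;
  //   computation.set_var_myvar_data(&value);
  //   CHECK(computation.Run());
  //   float updated = computation.var_myvar();  // variable value after Run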

  void set_var_myvar_readonly_data(const float* data) {
    set_arg_data(2, data);
  }
  const float* var_myvar_readonly_data() {
    return static_cast<const float*>(arg_data(2));
  }
  const float& var_myvar_readonly() {
    return (*static_cast<const float(*)[1]>(
        arg_data(2)))[0];
  }
  const float* var_myvar_readonly_data() const {
    return static_cast<const float*>(arg_data(2));
  }
  const float& var_myvar_readonly() const {
    return (*static_cast<const float(*)[1]>(
        arg_data(2)))[0];
  }
  int var_myvar_readonly_size() const {
    return 1 * sizeof(float);
  }
  int var_myvar_readonly_count() const {
    return 1;
  }

  void set_var_myvar_data(float* data) {
    set_arg_data(3, data);
  }
  float* var_myvar_data() {
    return static_cast<float*>(arg_data(3));
  }
  float& var_myvar() {
    return (*static_cast<float(*)[1]>(
        arg_data(3)))[0];
  }
  const float* var_myvar_data() const {
    return static_cast<const float*>(arg_data(3));
  }
  const float& var_myvar() const {
    return (*static_cast<const float(*)[1]>(
        arg_data(3)))[0];
  }
  int var_myvar_size() const {
    return 1 * sizeof(float);
  }
  int var_myvar_count() const {
    return 1;
  }

  void set_var_myvar2_data(tensorflow::int32* data) {
    set_arg_data(4, data);
  }
  tensorflow::int32* var_myvar2_data() {
    return static_cast<tensorflow::int32*>(arg_data(4));
  }
  tensorflow::int32& var_myvar2(size_t dim0) {
    return (*static_cast<tensorflow::int32(*)[5]>(
        arg_data(4)))[dim0];
  }
  const tensorflow::int32* var_myvar2_data() const {
    return static_cast<const tensorflow::int32*>(arg_data(4));
  }
  const tensorflow::int32& var_myvar2(size_t dim0) const {
    return (*static_cast<const tensorflow::int32(*)[5]>(
        arg_data(4)))[dim0];
  }
  int var_myvar2_size() const {
    return 5 * sizeof(tensorflow::int32);
  }
  int var_myvar2_count() const {
    return 5;
  }

 private:
  // Number of buffers for the compiled computation.
  static constexpr size_t kNumBuffers = 12;

  static const ::xla::cpu_function_runtime::BufferInfo* BufferInfos() {
    static const ::xla::cpu_function_runtime::BufferInfo
      kBufferInfos[kNumBuffers] = {
::xla::cpu_function_runtime::BufferInfo({5ULL, ~0ULL}),
::xla::cpu_function_runtime::BufferInfo({34ULL, 0ULL}),
::xla::cpu_function_runtime::BufferInfo({5ULL, ~0ULL}),
::xla::cpu_function_runtime::BufferInfo({386ULL, 1ULL}),
::xla::cpu_function_runtime::BufferInfo({5ULL, ~0ULL}),
::xla::cpu_function_runtime::BufferInfo({386ULL, 2ULL}),
::xla::cpu_function_runtime::BufferInfo({5ULL, ~0ULL}),
::xla::cpu_function_runtime::BufferInfo({386ULL, 3ULL}),
::xla::cpu_function_runtime::BufferInfo({5ULL, ~0ULL}),
::xla::cpu_function_runtime::BufferInfo({386ULL, 4ULL}),
::xla::cpu_function_runtime::BufferInfo({5ULL, ~0ULL}),
::xla::cpu_function_runtime::BufferInfo({481ULL, ~0ULL})
      };
    return kBufferInfos;
  }

  static const ::tensorflow::int32* ArgIndexToBufferIndex() {
    static constexpr ::tensorflow::int32 kArgIndexToBufferIndex[kNumArgs] = {
1, 3, 5, 7, 9
    };
    return kArgIndexToBufferIndex;
  }

  // The 0-based index of the result tuple in the temporary buffers.
  static constexpr size_t kResultIndex = 11;

  // Array of names of each positional argument, terminated by nullptr.
  static const char** StaticArgNames() {
    static const char* kNames[] = {"myfeed", nullptr};
    return kNames;
  }

  // Array of names of each positional variable, terminated by nullptr.
  static const char** StaticVariableNames() {
    static const char* kNames[] = {"myvar_readonly", "myvar", "myvar2", nullptr};
    return kNames;
  }

  // Array of names of each positional result, terminated by nullptr.
  static const char** StaticResultNames() {
    static const char* kNames[] = {"myfetch", nullptr};
    return kNames;
  }

  // Shape of the args and results.
  static const ::xla::ProgramShapeProto* StaticProgramShape() {
    static const ::xla::ProgramShapeProto* kShape = []() {
    ::xla::ProgramShapeProto* proto = new ::xla::ProgramShapeProto;
    proto->ParseFromArray(&__tfcompile_foo_bar_MyClass_ProgramShapeProto_protobuf_array_contents[0], 133);
    return proto;
  }();
    return kShape;
  }

  // Metadata that can be used to pretty-print profile counters.
  static const ::xla::HloProfilePrinterData* StaticHloProfilePrinterData() {
    static const ::xla::HloProfilePrinterData* kHloProfilePrinterData =
      nullptr;
    return kHloProfilePrinterData;
  }
};

}  // end namespace bar
}  // end namespace foo

#endif  // TFCOMPILE_GENERATED_entry_point_H_

// clang-format on