xref: /aosp_15_r20/external/tensorflow/tensorflow/core/profiler/internal/tfprof_tensor.h (revision b6fb3261f9314811a0f4371741dbb8839866f948)
/* Copyright 2016 The TensorFlow Authors All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

// TFProf representation of a Tensor's value.
// 1. A multi-dimensional tensor is flattened in row-major order and stored
//    in the proto.
// 2. Integers are up-cast to int64 and floats are up-cast to double. Strings
//    are not supported by the TensorFlow CheckPointReader library, though
//    they are supported in the current code.
21 
22 #ifndef TENSORFLOW_CORE_PROFILER_INTERNAL_TFPROF_TENSOR_H_
23 #define TENSORFLOW_CORE_PROFILER_INTERNAL_TFPROF_TENSOR_H_
24 
#include <memory>
#include <sstream>
#include <string>
#include <typeinfo>
#include <vector>

#include "absl/strings/numbers.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/profiler/tfprof_output.pb.h"
32 
33 namespace tensorflow {
34 namespace tfprof {
35 
36 class TFProfTensor {
37  public:
TFProfTensor(std::unique_ptr<Tensor> tensor)38   explicit TFProfTensor(std::unique_ptr<Tensor> tensor)
39       : tensor_(std::move(tensor)) {
40     Build();
41   }
42 
43   // If pointers are provided, they are filled by the method.
44   void Display(string* formatted_str, TFProfTensorProto* tfprof_tensor_pb);
45 
46  private:
47   // Max length of tensor value displayed to CLI.
48   const int64_t kTFProfTenosrMaxDisplayLen = 10000;
49   // Max length after which a latency warning will be printed.
50   const int64_t kTFProfTensorMaxWarnLen = 100000;
51 
52   void Build();
53 
54   template <typename T>
AddValue(const T & value,TFProfTensorProto * dim)55   bool AddValue(const T& value, TFProfTensorProto* dim) {
56     std::ostringstream sstream;
57     sstream << value;
58     if (typeid(value) == typeid(double)) {
59       double double_val = 0.0;
60       CHECK(absl::SimpleAtod(sstream.str(), &double_val));  // Crash OK
61       dim->add_value_double(double_val);
62       absl::StrAppendFormat(&formatted_str_, "%.2f ",
63                             dim->value_double(dim->value_double_size() - 1));
64     } else if (typeid(value) == typeid(int64_t)) {
65       int64_t int64_val = 0;
66       CHECK(absl::SimpleAtoi(sstream.str(), &int64_val));  // Crash OK
67       dim->add_value_int64(int64_val);
68       absl::StrAppendFormat(&formatted_str_, "%d ",
69                             dim->value_int64(dim->value_int64_size() - 1));
70     } else if (typeid(value) == typeid(string)) {
71       dim->add_value_str(sstream.str());
72       absl::StrAppend(&formatted_str_, "'",
73                       dim->value_str(dim->value_str_size() - 1), "' ");
74     } else {
75       CHECK(false) << "Unsupported type: " << typeid(value).name();
76     }
77   }
78 
79   // It assumes the flatten values are stored in row-major, which is mentioned
80   // indirectly at various places:
81   // TODO(xpan): Further verifying it.
82   template <typename T>
BuildOutput(int64_t start,int depth,const std::vector<T> & values,TFProfTensorProto * dim)83   int64_t BuildOutput(int64_t start, int depth, const std::vector<T>& values,
84                       TFProfTensorProto* dim) {
85     formatted_str_ += "[";
86     int64_t nstart = start;
87     if (tensor_->dims() == 0 && values.size() == 1) {
88       std::ostringstream sstream;
89       sstream << values[nstart];
90 
91       if (typeid(values[nstart]) == typeid(double)) {
92         double double_val = 0.0;
93         CHECK(absl::SimpleAtod(sstream.str(), &double_val));  // Crash OK
94         dim->add_value_double(double_val);
95         absl::StrAppendFormat(&formatted_str_, "%.2f ",
96                               dim->value_double(dim->value_double_size() - 1));
97       } else if (typeid(values[nstart]) == typeid(int64_t)) {
98         int64_t int64_val = 0;
99         CHECK(absl::SimpleAtoi(sstream.str(), &int64_val));  // Crash OK
100         dim->add_value_int64(int64_val);
101         absl::StrAppendFormat(&formatted_str_, "%d ",
102                               dim->value_int64(dim->value_int64_size() - 1));
103       } else if (typeid(values[nstart]) == typeid(string)) {
104         dim->add_value_str(sstream.str());
105         absl::StrAppend(&formatted_str_, "'",
106                         dim->value_str(dim->value_str_size() - 1), "' ");
107       } else {
108         CHECK(false) << "Unsupported type: " << typeid(values[nstart]).name();
109       }
110     } else {
111       for (int i = 0; i < tensor_->dim_size(depth); i++) {
112         // Last dimension, pull the values.
113         if (depth == tensor_->dims() - 1) {
114           std::ostringstream sstream;
115           sstream << values[nstart];
116 
117           if (typeid(values[nstart]) == typeid(double)) {
118             double double_val = 0.0;
119             CHECK(absl::SimpleAtod(sstream.str(), &double_val));  // Crash OK
120             dim->add_value_double(double_val);
121             absl::StrAppendFormat(
122                 &formatted_str_, "%.2f ",
123                 dim->value_double(dim->value_double_size() - 1));
124           } else if (typeid(values[nstart]) == typeid(int64_t)) {
125             int64_t int64_val = 0;
126             CHECK(absl::SimpleAtoi(sstream.str(), &int64_val));  // Crash OK
127             dim->add_value_int64(int64_val);
128             absl::StrAppendFormat(
129                 &formatted_str_, "%d ",
130                 dim->value_int64(dim->value_int64_size() - 1));
131           } else if (typeid(values[nstart]) == typeid(string)) {
132             dim->add_value_str(sstream.str());
133             absl::StrAppend(&formatted_str_, "'",
134                             dim->value_str(dim->value_str_size() - 1), "' ");
135           } else {
136             CHECK(false) << "Unsupported type: "
137                          << typeid(values[nstart]).name();
138           }
139           ++nstart;
140         } else {
141           // Not-last dimension. Drill deeper.
142           nstart = BuildOutput<T>(nstart, depth + 1, values, dim);
143         }
144       }
145     }
146     if (formatted_str_.length() > kTFProfTenosrMaxDisplayLen) {
147       formatted_str_ = formatted_str_.substr(0, kTFProfTenosrMaxDisplayLen);
148     }
149     formatted_str_ += "],\n";
150     return nstart;
151   }
152 
153   template <typename T, typename U>
GetValueVec(std::vector<U> * value_vec)154   void GetValueVec(std::vector<U>* value_vec) {
155     // TODO(xpan): Address the huge tensor problem.
156     if (tensor_->NumElements() > kTFProfTensorMaxWarnLen) {
157       absl::FPrintF(stderr, "Showing huge tensor, the tool might halt...\n");
158     }
159     auto values = tensor_->flat<T>();
160     for (int64_t i = 0; i < tensor_->NumElements(); i++) {
161       value_vec->push_back(static_cast<U>(values(i)));
162     }
163   }
164 
165   TFProfTensorProto tfprof_tensor_pb_;
166   std::unique_ptr<Tensor> tensor_;
167   string formatted_str_;
168 };
169 }  // namespace tfprof
170 }  // namespace tensorflow
171 
172 #endif  // TENSORFLOW_CORE_PROFILER_INTERNAL_TFPROF_TENSOR_H_
173