/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

16 #ifndef TENSORFLOW_CORE_PROFILER_CONVERT_OP_METRICS_TO_RECORD_H_
17 #define TENSORFLOW_CORE_PROFILER_CONVERT_OP_METRICS_TO_RECORD_H_
18
19 #include <vector>
20
21 #include "tensorflow/core/profiler/protobuf/op_metrics.pb.h"
22 #include "tensorflow/core/profiler/utils/math_utils.h"
23
24 namespace tensorflow {
25 namespace profiler {
26
27 std::vector<const OpMetrics*> SortedOpMetricsDb(const OpMetricsDb& metrics_db,
28 int max_records = -1);
29
GigaFlopsPerSecondPerCore(const OpMetrics & metrics)30 inline double GigaFlopsPerSecondPerCore(const OpMetrics& metrics) {
31 // flops and time_ps are accumulated across all occurrences on all cores.
32 // time_ps is used instead of self_time_ps because flops for an op includes
33 // the flops executed by children (nested) ops.
34 return SafeDivide(metrics.flops(), PicoToNano(metrics.time_ps()));
35 }
36
GigaBytesPerSecondPerCore(const OpMetrics & metrics)37 inline double GigaBytesPerSecondPerCore(const OpMetrics& metrics) {
38 // bytes_accessed and time_ps are accumulated across all occurrences on all
39 // cores.
40 // time_ps is used instead of self_time_ps because bytes_accessed for an op
41 // includes the bytes accessed by children (nested) ops.
42 return SafeDivide(metrics.bytes_accessed(), PicoToNano(metrics.time_ps()));
43 }
44
GibiBytesPerSecondPerCore(const OpMetrics & metrics)45 inline double GibiBytesPerSecondPerCore(const OpMetrics& metrics) {
46 return GigaToGibi(GigaBytesPerSecondPerCore(metrics));
47 }
48
49 template <typename Record>
SetExecutionTimes(const OpMetrics & metrics,Record * record)50 inline void SetExecutionTimes(const OpMetrics& metrics, Record* record) {
51 record->set_occurrences(metrics.occurrences());
52 record->set_total_time_in_us(PicoToMicro(metrics.time_ps()));
53 record->set_avg_time_in_us(
54 SafeDivide(record->total_time_in_us(), metrics.occurrences()));
55 record->set_total_self_time_in_us(PicoToMicro(metrics.self_time_ps()));
56 record->set_avg_self_time_in_us(
57 SafeDivide(record->total_self_time_in_us(), metrics.occurrences()));
58 }
59
60 template <typename Record>
SetTpuUnitFractions(const OpMetrics & metrics,Record * record)61 inline void SetTpuUnitFractions(const OpMetrics& metrics, Record* record) {
62 record->set_dma_stall_fraction(
63 SafeDivide(metrics.dma_stall_ps(), metrics.time_ps()));
64 }
65
66 template <typename Record>
SetRankAndTimeFractions(double total_time_us,const Record & prev_record,Record * record)67 inline void SetRankAndTimeFractions(double total_time_us,
68 const Record& prev_record, Record* record) {
69 record->set_rank(prev_record.rank() + 1);
70 record->set_total_self_time_as_fraction(
71 SafeDivide(record->total_self_time_in_us(), total_time_us));
72 record->set_cumulative_total_self_time_as_fraction(
73 prev_record.cumulative_total_self_time_as_fraction() +
74 record->total_self_time_as_fraction());
75 }
76
77 template <typename Record>
SetRankAndDeviceTimeFractions(double total_time_us,const Record & prev_record,Record * record)78 inline void SetRankAndDeviceTimeFractions(double total_time_us,
79 const Record& prev_record,
80 Record* record) {
81 record->set_rank(prev_record.rank() + 1);
82 record->set_device_total_self_time_as_fraction(
83 SafeDivide(record->total_self_time_in_us(), total_time_us));
84 record->set_device_cumulative_total_self_time_as_fraction(
85 prev_record.device_cumulative_total_self_time_as_fraction() +
86 record->device_total_self_time_as_fraction());
87 }
88
89 template <typename Record>
SetRankAndHostTimeFractions(double total_time_us,const Record & prev_record,Record * record)90 inline void SetRankAndHostTimeFractions(double total_time_us,
91 const Record& prev_record,
92 Record* record) {
93 record->set_rank(prev_record.rank() + 1);
94 record->set_host_total_self_time_as_fraction(
95 SafeDivide(record->total_self_time_in_us(), total_time_us));
96 record->set_host_cumulative_total_self_time_as_fraction(
97 prev_record.host_cumulative_total_self_time_as_fraction() +
98 record->host_total_self_time_as_fraction());
99 }
100
101 template <typename Record>
SetRooflineMetrics(const OpMetrics & metrics,double ridge_point_operational_intensity,Record * record)102 inline void SetRooflineMetrics(const OpMetrics& metrics,
103 double ridge_point_operational_intensity,
104 Record* record) {
105 using ::tensorflow::profiler::PicoToNano;
106 record->set_measured_flop_rate(GigaFlopsPerSecondPerCore(metrics));
107 record->set_measured_memory_bw(GigaBytesPerSecondPerCore(metrics));
108 record->set_operational_intensity(
109 SafeDivide(metrics.flops(), metrics.bytes_accessed()));
110 record->set_bound_by((metrics.bytes_accessed() != 0)
111 ? ((record->operational_intensity() >=
112 ridge_point_operational_intensity)
113 ? "Compute"
114 : "Memory")
115 : ((metrics.flops() != 0) ? "Compute" : "Unknown"));
116 }
117
118 } // namespace profiler
119 } // namespace tensorflow
120
121 #endif // TENSORFLOW_CORE_PROFILER_CONVERT_OP_METRICS_TO_RECORD_H_
122