1 /* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
2
3 Licensed under the Apache License, Version 2.0 (the "License");
4 you may not use this file except in compliance with the License.
5 You may obtain a copy of the License at
6
7 http://www.apache.org/licenses/LICENSE-2.0
8
9 Unless required by applicable law or agreed to in writing, software
10 distributed under the License is distributed on an "AS IS" BASIS,
11 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 See the License for the specific language governing permissions and
13 limitations under the License.
14 ==============================================================================*/
15 #include "tensorflow/core/common_runtime/scoped_allocator_mgr.h"
16
17 #include "tensorflow/core/common_runtime/dma_helper.h"
18 #include "tensorflow/core/common_runtime/scoped_allocator.h"
19 #include "tensorflow/core/framework/allocator.h"
20 #include "tensorflow/core/framework/tensor.h"
21 #include "tensorflow/core/platform/test.h"
22
23 namespace tensorflow {
24 namespace {
25
26 class ScopedAllocatorMgrTest : public ::testing::Test {
27 public:
ScopedAllocatorMgrTest()28 ScopedAllocatorMgrTest() : sam_("CPU0") {}
29
InitTensor()30 void InitTensor() {
31 backing_tensor_ = Tensor(cpu_allocator(), DT_FLOAT, backing_tensor_shape_);
32 }
33
PopulateFields()34 void PopulateFields() {
35 ScopedAllocatorMgr::PopulateFields(scope_id_, fields_shapes_, DT_FLOAT,
36 &fields_);
37 }
38
AddScopedAllocator(int expected_use_count,int scope_id)39 Status AddScopedAllocator(int expected_use_count, int scope_id) {
40 VLOG(2) << "Adding ScopedAllocator step_id " << step_id_ << " scope_id "
41 << scope_id_ << " #fields " << fields_.size()
42 << " expected_use_count " << expected_use_count;
43 return sam_.AddScopedAllocator(backing_tensor_, step_id_, scope_id,
44 "tensor_shape_599", fields_,
45 expected_use_count);
46 }
47
PrepScopedAllocatorMgr(int expected_use_count)48 Status PrepScopedAllocatorMgr(int expected_use_count) {
49 InitTensor();
50 PopulateFields();
51 return AddScopedAllocator(expected_use_count, scope_id_);
52 }
53
SaveInstances(int num_instances)54 void SaveInstances(int num_instances) {
55 sa_instances_.clear();
56 sa_instances_.resize(num_instances);
57 ScopedAllocatorContainer* sac = sam_.GetContainer(step_id_);
58 for (int i = 0; i < num_instances; i++) {
59 sa_instances_[i] = sac->GetInstance(scope_id_ + 1 + i);
60 }
61 }
62
63 // For the specific case when the backing tensor is of shape
64 // {512 + 9 + 512 + 16} and the fields_shapes are {{512}, {3,3}, {2, 256}}
65 // This method computes the padding between the second and third slice of the
66 // backing tensor. This example is reused across multiple tests.
AlignmentPadding()67 int AlignmentPadding() {
68 int alignment_padding =
69 (Allocator::kAllocatorAlignment -
70 (521 * sizeof(float)) % Allocator::kAllocatorAlignment) %
71 Allocator::kAllocatorAlignment;
72 return alignment_padding;
73 }
74
75 // Debug
PrintShapes()76 void PrintShapes() {
77 VLOG(2) << "tensor_shape=" << backing_tensor_shape_.DebugString();
78 for (int i = 0; i < fields_shapes_.size(); i++) {
79 VLOG(2) << "fields_shapes[" << i
80 << "]=" << fields_shapes_[i].DebugString();
81 }
82 }
83
84 protected:
85 TensorShape backing_tensor_shape_;
86 Tensor backing_tensor_;
87 std::vector<TensorShape> fields_shapes_;
88 std::vector<ScopedAllocator::Field> fields_;
89 ScopedAllocatorMgr sam_;
90 const int step_id_ = 101;
91 const int scope_id_ = 599;
92 std::vector<ScopedAllocatorInstance*> sa_instances_;
93 };
94
TEST_F(ScopedAllocatorMgrTest, ContainerAllocation) {
  // Containers are created lazily, one per step id, and cached.
  ScopedAllocatorContainer* container_101 = sam_.GetContainer(101);
  EXPECT_NE(container_101, nullptr);
  ScopedAllocatorContainer* container_201 = sam_.GetContainer(201);
  EXPECT_NE(container_201, nullptr);
  // Distinct step ids map to distinct containers.
  EXPECT_NE(container_101, container_201);
  // A repeated lookup for the same step id returns the cached container.
  ScopedAllocatorContainer* container_101_again = sam_.GetContainer(101);
  EXPECT_EQ(container_101_again, container_101);
  sam_.Cleanup(101);
  // 201 should be cleaned up by the destructor.
}
106
TEST_F(ScopedAllocatorMgrTest, PopulateFields) {
  backing_tensor_shape_ = TensorShape({512 + 9 + 512 + 16});
  fields_shapes_ = std::vector<TensorShape>({{512}, {3, 3}, {2, 256}});
  InitTensor();
  PopulateFields();
  // Field 0: 512 floats at the start of the backing tensor.
  EXPECT_EQ(fields_[0].offset, 0);
  EXPECT_EQ(fields_[0].bytes_requested, 512 * sizeof(float));
  EXPECT_EQ(fields_[0].scope_id, scope_id_ + 1);
  // Field 1: 3x3 = 9 floats immediately after field 0.
  EXPECT_EQ(fields_[1].offset, 512 * sizeof(float));
  EXPECT_EQ(fields_[1].bytes_requested, 9 * sizeof(float));
  EXPECT_EQ(fields_[1].scope_id, scope_id_ + 2);
  // Field 2: 2x256 = 512 floats, aligned up past the 521 floats before it.
  EXPECT_EQ(fields_[2].offset, 521 * sizeof(float) + AlignmentPadding());
  EXPECT_EQ(fields_[2].bytes_requested, 512 * sizeof(float));
  EXPECT_EQ(fields_[2].scope_id, scope_id_ + 3);
}
122
TEST_F(ScopedAllocatorMgrTest, ContainerAddAllocator) {
  backing_tensor_shape_ = TensorShape({1024});
  fields_shapes_ = std::vector<TensorShape>({{512}, {512}});
  Status s = PrepScopedAllocatorMgr(2);
  EXPECT_TRUE(s.ok());
  // Need to call Allocate and Deallocate in order to use up the expected uses
  // for this allocator. Save the instances for now.
  SaveInstances(fields_shapes_.size());

  // Re-registering under the same allocator scope_id must be rejected.
  s = AddScopedAllocator(2, scope_id_);
  EXPECT_FALSE(s.ok());
  // Even with a fresh allocator scope_id (scope_id_ + 3), registration must
  // fail when a field's scope_id (scope_id_ + 1) is already taken by an
  // existing instance from the first registration.
  fields_[0].scope_id = scope_id_ + 1;
  s = AddScopedAllocator(2, scope_id_ + 3);
  EXPECT_FALSE(s.ok());

  // Cleanup the instances by invoking allocate and deallocate.
  void* ptr0 =
      sa_instances_[0]->AllocateRaw(0 /* alignment */, 512 * sizeof(float));
  void* ptr1 =
      sa_instances_[1]->AllocateRaw(0 /* alignment */, 512 * sizeof(float));
  sa_instances_[0]->DeallocateRaw(ptr0);
  sa_instances_[1]->DeallocateRaw(ptr1);
}
146
TEST_F(ScopedAllocatorMgrTest, AllocatorSuccess) {
  ScopedAllocatorContainer* sac = sam_.GetContainer(step_id_);
  // Nothing is registered yet for scope_id_.
  ScopedAllocator* other = sac->GetAllocator(scope_id_);
  EXPECT_EQ(other, nullptr);
  backing_tensor_shape_ = TensorShape({512 + 9 + 512 + 16});
  fields_shapes_ = std::vector<TensorShape>({{512}, {3, 3}, {2, 256}});
  Status s = PrepScopedAllocatorMgr(3);
  // BUGFIX: the setup status and the registered allocator were previously
  // never checked; a failed registration would make the pointer expectations
  // below meaningless.
  EXPECT_TRUE(s.ok());
  other = sac->GetAllocator(scope_id_);
  EXPECT_NE(other, nullptr);

  // Field 0 occupies the very start of the backing tensor.
  ScopedAllocatorInstance* inst0 = sac->GetInstance(scope_id_ + 1);
  char* ptr0 = static_cast<char*>(inst0->AllocateRaw(0, 512 * sizeof(float)));
  const char* base =
      static_cast<const char*>(DMAHelper::base(&backing_tensor_));
  EXPECT_EQ(ptr0, base);

  // Field 1 starts immediately after field 0's 512 floats.
  ScopedAllocatorInstance* inst1 = sac->GetInstance(scope_id_ + 2);
  char* ptr1 = static_cast<char*>(inst1->AllocateRaw(0, 9 * sizeof(float)));
  EXPECT_EQ(ptr1, ptr0 + (512 * sizeof(float)));

  // Field 2 starts after field 1 plus alignment padding.
  ScopedAllocatorInstance* inst2 = sac->GetInstance(scope_id_ + 3);
  char* ptr2 = static_cast<char*>(inst2->AllocateRaw(0, 512 * sizeof(float)));
  EXPECT_EQ(ptr2, ptr1 + AlignmentPadding() + (9 * sizeof(float)));

  // At this point the scopes should be gone from the container
  EXPECT_EQ(nullptr, sac->GetAllocator(scope_id_));

  // The ScopedAllocatorInstances automatically delete when their memory
  // is returned and they are out of table.
  inst0->DeallocateRaw(ptr0);
  inst1->DeallocateRaw(ptr1);
  inst2->DeallocateRaw(ptr2);
}
179
180 // ScopedAllocator initialization should fail because backing_tensor is not
181 // large enough to hold all the fields
TEST_F(ScopedAllocatorMgrTest, AllocatorInitFail) {
  backing_tensor_shape_ = TensorShape({8});
  InitTensor();
  fields_.resize(1);
  // Hand-build a single field that asks for twice the bytes the backing
  // tensor actually holds.
  ScopedAllocator::Field& field = fields_[0];
  field.scope_id = scope_id_ + 1;
  field.offset = 0;
  field.bytes_requested =
      backing_tensor_shape_.num_elements() * 2 * sizeof(float);
  // fields[0].offset + fields[0].bytes_requested is larger than the size of the
  // backing tensor, so this check should fail
  EXPECT_DEATH(Status s = AddScopedAllocator(1, scope_id_), "");
}
194
195 // ScopedAllocator allocation should fail because we called more times than
196 // expected, or we deallocated a non-existent pointer, or we requested more
197 // or less than the exact size of an instance buffer.
TEST_F(ScopedAllocatorMgrTest, AllocatorFail) {
  backing_tensor_shape_ = TensorShape({1024});
  fields_shapes_ = std::vector<TensorShape>({{512}, {512}});
  Status prep_status = PrepScopedAllocatorMgr(2);
  EXPECT_TRUE(prep_status.ok());
  // Save instances so that we can explicitly delete later on. In normal
  // operation the instances will be automatically deleted after single use, but
  // in this test we are invoking the ScopedAllocator's Alloc/Dealloc interface,
  // so we need to explicitly delete the instances to avoid a memleak.
  SaveInstances(fields_shapes_.size());

  char* first_buf =
      static_cast<char*>(sa_instances_[0]->AllocateRaw(0, 512 * sizeof(float)));
  VLOG(2) << "Should fail because we deallocate ptr="
          << static_cast<void*>(first_buf + 8) << " which we never allocated.";
  EXPECT_DEATH(sa_instances_[0]->DeallocateRaw(first_buf + 8), "");
  VLOG(2) << "Should fail because we allocate smaller than the size of the "
          << "field.";
  EXPECT_EQ(sa_instances_[1]->AllocateRaw(0, 256 * sizeof(float)), nullptr);
  VLOG(2) << "Should fail because we allocate larger than the size of the "
          << "field.";
  EXPECT_EQ(sa_instances_[1]->AllocateRaw(0, 1024 * sizeof(float)), nullptr);
  void* second_buf = sa_instances_[1]->AllocateRaw(0, 512 * sizeof(float));
  VLOG(2) << "Should fail because we exceed expected_use_count.";
  EXPECT_EQ(sa_instances_[0]->AllocateRaw(0, 512 * sizeof(float)), nullptr);
  sa_instances_[0]->DeallocateRaw(first_buf);
  sa_instances_[1]->DeallocateRaw(second_buf);
}
226
227 } // namespace
228 } // namespace tensorflow
229