// Copyright 2023 The Pigweed Authors
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not
// use this file except in compliance with the License. You may obtain a copy of
// the License at
//
//     https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations under
// the License.

#include "pw_bluetooth_sapphire/internal/host/att/database.h"

#include <lib/fit/defer.h>

#include <algorithm>

#include "pw_bluetooth_sapphire/internal/host/att/error.h"
#include "pw_bluetooth_sapphire/internal/host/att/permissions.h"
#include "pw_bluetooth_sapphire/internal/host/common/assert.h"
#include "pw_bluetooth_sapphire/internal/host/common/log.h"

namespace bt::att {
namespace {

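// Comparators for std::lower_bound over the grouping list, which is kept
// sorted by handle: StartLessThan orders groupings by their first handle and
// EndLessThan by their last.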
bool StartLessThan(const AttributeGrouping& grp, const Handle handle) {
  return grp.start_handle() < handle;
}

bool EndLessThan(const AttributeGrouping& grp, const Handle handle) {
  return grp.end_handle() < handle;
}

}  // namespace

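// Iterates over the attributes in the handle range [start, end]. When
// |groups_only| is true only group declaration attributes are visited;
// otherwise iteration may begin partway into a grouping that straddles
// |start|.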
Database::Iterator::Iterator(GroupingList* list,
                             Handle start,
                             Handle end,
                             const UUID* type,
                             bool groups_only)
    : start_(start), end_(end), grp_only_(groups_only), attr_offset_(0u) {
  PW_DCHECK(list);
  grp_end_ = list->end();

  if (type)
    type_filter_ = *type;

  // Initialize the iterator by performing a binary search over the groupings.
  // If we were asked to iterate over groupings only, then look strictly within
  // the range. Otherwise we allow the first grouping to partially overlap the
  // range.
  grp_iter_ = std::lower_bound(
      list->begin(), grp_end_, start_, grp_only_ ? StartLessThan : EndLessThan);

  if (AtEnd())
    return;

  // If the first grouping is out of range then the iterator is done.
  if (grp_iter_->start_handle() > end) {
    MarkEnd();
    return;
  }

  if (start_ > grp_iter_->start_handle()) {
    attr_offset_ = start_ - grp_iter_->start_handle();
  }

  // If the first grouping is inactive or if it doesn't match the current
  // filter then skip ahead.
  if (!grp_iter_->active() ||
      (type_filter_ &&
       grp_iter_->attributes()[attr_offset_].type() != *type_filter_)) {
    Advance();
  }
}

const Attribute* Database::Iterator::get() const {
  if (AtEnd() || !grp_iter_->active())
    return nullptr;

  PW_DCHECK(attr_offset_ < grp_iter_->attributes().size());
  return &grp_iter_->attributes()[attr_offset_];
}

void Database::Iterator::Advance() {
  if (AtEnd())
    return;

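  // Step through the remaining attributes of the current grouping first; once
  // it is exhausted (or when iterating over groupings only), move to the next
  // grouping that passes the range, active/complete, and type-filter checks.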
  do {
    if (!grp_only_ && grp_iter_->active()) {
      // If this grouping has more attributes to look at.
      if (attr_offset_ < grp_iter_->attributes().size() - 1) {
        size_t end_offset = grp_iter_->end_handle() - grp_iter_->start_handle();
        PW_DCHECK(end_offset < grp_iter_->attributes().size());

        // Advance.
        attr_offset_++;

        for (; attr_offset_ <= end_offset; ++attr_offset_) {
          const auto& attr = grp_iter_->attributes()[attr_offset_];

          // If |end_| is within this grouping and we go past it, the iterator
          // is done.
          if (attr.handle() > end_) {
            MarkEnd();
            return;
          }

          // If there is no filter then we're done. Otherwise, loop until an
          // attribute is found that matches the filter.
          if (!type_filter_ || attr.type() == *type_filter_)
            return;
        }
      }

      // We are done with the current grouping. Fall through and move to the
      // next group below.
      attr_offset_ = 0u;
    } else {
      PW_DCHECK(attr_offset_ == 0u);
    }

    // Advance the group.
    grp_iter_++;
    if (AtEnd())
      return;

    if (grp_iter_->start_handle() > end_) {
      MarkEnd();
      return;
    }

    if (!grp_iter_->active() || !grp_iter_->complete())
      continue;

    // If there is no filter then we're done. Otherwise, loop until an
    // attribute is found that matches the filter. (NOTE: the group type is the
    // type of the first attribute).
    if (!type_filter_ || (*type_filter_ == grp_iter_->group_type()))
      return;
  } while (true);
}

Database::Database(Handle range_start, Handle range_end)
    : WeakSelf(this), range_start_(range_start), range_end_(range_end) {
  PW_DCHECK(range_start_ < range_end_);
  PW_DCHECK(range_start_ >= kHandleMin);
  PW_DCHECK(range_end_ <= kHandleMax);
}

Database::Iterator Database::GetIterator(Handle start,
                                         Handle end,
                                         const UUID* type,
                                         bool groups_only) {
  PW_DCHECK(start >= range_start_);
  PW_DCHECK(end <= range_end_);
  PW_DCHECK(start <= end);

  return Iterator(&groupings_, start, end, type, groups_only);
}

AttributeGrouping* Database::NewGrouping(const UUID& group_type,
                                         size_t attr_count,
                                         const ByteBuffer& decl_value) {
  // This method looks for a |pos| before which to insert the new grouping.
  Handle start_handle;
  decltype(groupings_)::iterator pos;

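  // A grouping with |attr_count| attributes spans attr_count + 1 handles, as
  // the group declaration attribute occupies the first handle; the strict
  // comparisons below account for that extra handle.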
  if (groupings_.empty()) {
    if (range_end_ - range_start_ < attr_count)
      return nullptr;

    start_handle = range_start_;
    pos = groupings_.end();
  } else if (groupings_.front().start_handle() - range_start_ > attr_count) {
    // There is room at the head of the list.
    start_handle = range_start_;
    pos = groupings_.begin();
  } else if (range_end_ - groupings_.back().end_handle() > attr_count) {
    // There is room at the tail end of the list.
    start_handle = groupings_.back().end_handle() + 1;
    pos = groupings_.end();
  } else {
    // Linearly search for a gap that fits the new grouping.
    // TODO(armansito): This is suboptimal for long running cases where the
    // database is fragmented. Think about using a better algorithm.

    auto prev = groupings_.begin();
    pos = prev;
    pos++;

    for (; pos != groupings_.end(); ++pos, ++prev) {
      size_t next_avail = pos->start_handle() - prev->end_handle() - 1;
      if (attr_count < next_avail)
        break;
    }

    if (pos == groupings_.end()) {
      bt_log(DEBUG, "att", "attribute database is out of space!");
      return nullptr;
    }

    start_handle = prev->end_handle() + 1;
  }

  auto iter =
      groupings_.emplace(pos, group_type, start_handle, attr_count, decl_value);
  PW_DCHECK(iter != groupings_.end());

  return &*iter;
}

bool Database::RemoveGrouping(Handle start_handle) {
  auto iter = std::lower_bound(
      groupings_.begin(), groupings_.end(), start_handle, StartLessThan);

  if (iter == groupings_.end() || iter->start_handle() != start_handle)
    return false;

  groupings_.erase(iter);
  return true;
}

const Attribute* Database::FindAttribute(Handle handle) {
  if (handle == kInvalidHandle)
    return nullptr;

  // Do a binary search to find the grouping that this handle is in.
  auto iter = std::lower_bound(
      groupings_.begin(), groupings_.end(), handle, EndLessThan);
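  // EndLessThan positions |iter| at the first grouping whose end handle is at
  // least |handle|; the handle lies inside that grouping only if the
  // grouping's start handle does not exceed |handle|.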
  if (iter == groupings_.end() || iter->start_handle() > handle)
    return nullptr;

  if (!iter->active() || !iter->complete())
    return nullptr;

  size_t index = handle - iter->start_handle();
  PW_DCHECK(index < iter->attributes().size());

  return &iter->attributes()[index];
}

void Database::ExecuteWriteQueue(PeerId peer_id,
                                 PrepareWriteQueue write_queue,
                                 const sm::SecurityProperties& security,
                                 WriteCallback callback) {
  PW_CHECK(callback);

  // When destroyed, invokes |callback| with success if it hasn't already been
  // called.
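  // |callback| is expected to be a fit::callback: share() hands out copies
  // that alias the same one-shot target, so invoking any one copy empties the
  // rest.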
  auto deferred_success = fit::defer([client_cb = callback.share()]() mutable {
    if (client_cb) {
      client_cb(fit::ok());
    }
  });

  // Signal success without writing to any attributes if the queue is empty (see
  // Core Spec v5.3, Vol 3, Part F, 3.4.6.3).
  if (write_queue.empty()) {
    return;
  }

  // Continuation that keeps track of all outstanding write requests. This is
  // shared between writes in the queue, causing the captured |deferred_success|
  // to be destroyed only after all writes have completed. |callback| may be
  // called earlier (and consumed) if any error is received.
  fit::function<void(WriteQueueResult)> write_complete_fn =
      [client_cb = std::move(callback),
       d = std::move(deferred_success)](WriteQueueResult result) mutable {
        if (result.is_ok()) {
          return;
        }
        const auto& [handle, error] = result.error_value();
        bt_log(DEBUG,
               "att",
               "execute write result - handle: %#.4x, error: %s",
               handle,
               bt_str(Error(error)));
        if (!client_cb) {
          bt_log(
              TRACE, "att", "ignore execute write result - already responded");
          return;
        }
        client_cb(result);
      };

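  // Pop and execute each prepared write in FIFO order. Validation failures
  // (invalid handle, oversized value, insufficient security) respond
  // immediately and abandon the rest of the queue; asynchronous write errors
  // are reported through |write_complete_fn| as they complete.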
  while (!write_queue.empty()) {
    auto next = std::move(write_queue.front());
    write_queue.pop();

    auto attr_write_cb = [handle = next.handle(),
                          write_complete_function = write_complete_fn.share()](
                             fit::result<ErrorCode> status) {
      if (status.is_error()) {
        write_complete_function(
            fit::error(std::tuple(handle, status.error_value())));
      } else {
        bt_log(DEBUG, "att", "execute write to handle %#.4x - success", handle);
        write_complete_function(fit::ok());
      }
    };

    const auto* attr = FindAttribute(next.handle());
    if (!attr) {
      // The attribute is no longer valid, so we can respond with an error and
      // abort the rest of the queue.
      attr_write_cb(fit::error(ErrorCode::kInvalidHandle));
      break;
    }

    if (next.value().size() > kMaxAttributeValueLength) {
      attr_write_cb(fit::error(ErrorCode::kInvalidAttributeValueLength));
      break;
    }

    fit::result<ErrorCode> status =
        CheckWritePermissions(attr->write_reqs(), security);
    if (status.is_error()) {
      attr_write_cb(status);
      break;
    }

    // TODO(fxbug.dev/42179688): Consider removing the boolean return value in
    // favor of always reporting errors using the callback. That would simplify
    // the pattern here.
    if (!attr->WriteAsync(
            peer_id, next.offset(), next.value(), std::move(attr_write_cb))) {
      write_complete_fn(
          fit::error(std::tuple(next.handle(), ErrorCode::kWriteNotPermitted)));
      break;
    }
  }
}

}  // namespace bt::att