1 /*
2 * Licensed to the Apache Software Foundation (ASF) under one
3 * or more contributor license agreements. See the NOTICE file
4 * distributed with this work for additional information
5 * regarding copyright ownership. The ASF licenses this file
6 * to you under the Apache License, Version 2.0 (the
7 * "License"); you may not use this file except in compliance
8 * with the License. You may obtain a copy of the License at
9 *
10 * http://www.apache.org/licenses/LICENSE-2.0
11 *
12 * Unless required by applicable law or agreed to in writing,
13 * software distributed under the License is distributed on an
14 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
15 * KIND, either express or implied. See the License for the
16 * specific language governing permissions and limitations
17 * under the License.
18 */
19 #include <stdint.h>
20 #include <stdlib.h>
21 #include <assert.h>
22 #include <string.h>
23 #include "os/os.h"
24 #include "os/os_cputime.h"
25 #include "ble/xcvr.h"
26 #include "controller/ble_phy.h"
27 #include "controller/ble_ll.h"
28 #include "controller/ble_ll_sched.h"
29 #include "controller/ble_ll_adv.h"
30 #include "controller/ble_ll_scan.h"
31 #include "controller/ble_ll_xcvr.h"
32 #include "controller/ble_ll_trace.h"
33 #include "ble_ll_conn_priv.h"
34
35 /* XXX: this is temporary. Not sure what I want to do here */
36 struct hal_timer g_ble_ll_sched_timer;
37
38 #ifdef BLE_XCVR_RFCLK
39 /* Settling time of crystal, in ticks */
40 uint8_t g_ble_ll_sched_xtal_ticks;
41 #endif
42
43 uint8_t g_ble_ll_sched_offset_ticks;
44
45 #define BLE_LL_SCHED_ADV_WORST_CASE_USECS \
46 (BLE_LL_SCHED_MAX_ADV_PDU_USECS + BLE_LL_IFS + BLE_LL_SCHED_ADV_MAX_USECS \
47 + XCVR_TX_SCHED_DELAY_USECS)
48
49 #if (BLE_LL_SCHED_DEBUG == 1)
50 int32_t g_ble_ll_sched_max_late;
51 int32_t g_ble_ll_sched_max_early;
52 #endif
53
54 /* XXX: TODO:
55 * 1) Add some accounting to the schedule code to see how late we are
56 * (min/max?)
57 *
58 * 2) Need to determine how we really want to handle the case when we execute
59 * a schedule item but there is a current event. We could:
60 * -> Reschedule the schedule item and let current event finish
61 * -> Kill the current event and run the scheduled item.
62 * -> Disable schedule timer while in an event; could cause us to be late.
63 * -> Wait for current event to finish hoping it does before schedule item.
64 */
65
66 /* Queue for timers */
67 TAILQ_HEAD(ll_sched_qhead, ble_ll_sched_item) g_ble_ll_sched_q;
68
69 #if MYNEWT_VAL(BLE_LL_STRICT_CONN_SCHEDULING)
70 struct ble_ll_sched_obj g_ble_ll_sched_data;
71 #endif
72
73 /**
74 * Checks if two events in the schedule will overlap in time. NOTE: consecutive
75 * schedule items can end and start at the same time.
76 *
77 * @param s1
78 * @param s2
79 *
80 * @return int 0: dont overlap 1:overlap
81 */
82 static int
ble_ll_sched_is_overlap(struct ble_ll_sched_item * s1,struct ble_ll_sched_item * s2)83 ble_ll_sched_is_overlap(struct ble_ll_sched_item *s1,
84 struct ble_ll_sched_item *s2)
85 {
86 int rc;
87
88 rc = 1;
89 if ((int32_t)(s1->start_time - s2->start_time) < 0) {
90 /* Make sure this event does not overlap current event */
91 if ((int32_t)(s1->end_time - s2->start_time) <= 0) {
92 rc = 0;
93 }
94 } else {
95 /* Check for overlap */
96 if ((int32_t)(s1->start_time - s2->end_time) >= 0) {
97 rc = 0;
98 }
99 }
100
101 return rc;
102 }
103
104 /*
105 * Determines if the schedule item overlaps the currently running schedule
106 * item. We only care about connection schedule items
107 */
108 int
ble_ll_sched_overlaps_current(struct ble_ll_sched_item * sch)109 ble_ll_sched_overlaps_current(struct ble_ll_sched_item *sch)
110 {
111 int rc;
112 uint32_t ce_end_time;
113
114 rc = 0;
115 if (ble_ll_state_get() == BLE_LL_STATE_CONNECTION) {
116 ce_end_time = ble_ll_conn_get_ce_end_time();
117 if ((int32_t)(ce_end_time - sch->start_time) > 0) {
118 rc = 1;
119 }
120 }
121 return rc;
122 }
123
124 static int
ble_ll_sched_conn_overlap(struct ble_ll_sched_item * entry)125 ble_ll_sched_conn_overlap(struct ble_ll_sched_item *entry)
126 {
127 int rc;
128 struct ble_ll_conn_sm *connsm;
129
130 /* Should only be advertising or a connection here */
131 if (entry->sched_type == BLE_LL_SCHED_TYPE_CONN) {
132 connsm = (struct ble_ll_conn_sm *)entry->cb_arg;
133 entry->enqueued = 0;
134 TAILQ_REMOVE(&g_ble_ll_sched_q, entry, link);
135 ble_ll_event_send(&connsm->conn_ev_end);
136 rc = 0;
137 } else {
138 rc = -1;
139 }
140
141 return rc;
142 }
143
144 struct ble_ll_sched_item *
ble_ll_sched_insert_if_empty(struct ble_ll_sched_item * sch)145 ble_ll_sched_insert_if_empty(struct ble_ll_sched_item *sch)
146 {
147 struct ble_ll_sched_item *entry;
148
149 entry = TAILQ_FIRST(&g_ble_ll_sched_q);
150 if (!entry) {
151 TAILQ_INSERT_HEAD(&g_ble_ll_sched_q, sch, link);
152 sch->enqueued = 1;
153 }
154 return entry;
155 }
156
/**
 * (Re)schedule the next connection event for a connection. The schedule
 * window is derived from the connection anchor point; any items it overlaps
 * are evicted, unless one of them wins the LRU arbitration (in which case
 * this connection is not scheduled at all).
 *
 * @param connsm Connection whose event is being scheduled
 *
 * @return int 0: scheduled; -1: not scheduled (start time already in the
 *         past, overlaps the currently running event, or an overlapped
 *         entry is "older" per ble_ll_conn_is_lru())
 */
int
ble_ll_sched_conn_reschedule(struct ble_ll_conn_sm *connsm)
{
    int rc;
    os_sr_t sr;
    uint32_t usecs;
    struct ble_ll_sched_item *sch;
    struct ble_ll_sched_item *start_overlap;
    struct ble_ll_sched_item *end_overlap;
    struct ble_ll_sched_item *entry;
    struct ble_ll_conn_sm *tmp;

    /* Get schedule element from connection */
    sch = &connsm->conn_sch;

    /* Set schedule start and end times */
    sch->start_time = connsm->anchor_point - g_ble_ll_sched_offset_ticks;
    if (connsm->conn_role == BLE_LL_CONN_ROLE_SLAVE) {
        /* Slave opens the receive window early by the current window
         * widening to absorb master/slave clock drift; the extra +1 tick
         * covers usecs-to-ticks rounding.
         */
        usecs = connsm->slave_cur_window_widening;
        sch->start_time -= (os_cputime_usecs_to_ticks(usecs) + 1);
        sch->remainder = 0;
    } else {
        sch->remainder = connsm->anchor_point_usecs;
    }
    sch->end_time = connsm->ce_end_time;

    /* Better be past current time or we just leave */
    if ((int32_t)(sch->start_time - os_cputime_get32()) < 0) {
        return -1;
    }

    /* We have to find a place for this schedule */
    OS_ENTER_CRITICAL(sr);

    if (ble_ll_sched_overlaps_current(sch)) {
        OS_EXIT_CRITICAL(sr);
        return -1;
    }

    /* Stop timer since we will add an element */
    os_cputime_timer_stop(&g_ble_ll_sched_timer);

    start_overlap = NULL;
    end_overlap = NULL;
    rc = 0;
    TAILQ_FOREACH(entry, &g_ble_ll_sched_q, link) {
        if (ble_ll_sched_is_overlap(sch, entry)) {
            if (entry->sched_type == BLE_LL_SCHED_TYPE_AUX_SCAN) {
                /* Aux scans always yield to connections; fall through so the
                 * entry is marked for removal below.
                 */
            } else if (!ble_ll_conn_is_lru((struct ble_ll_conn_sm *)sch->cb_arg,
                                    (struct ble_ll_conn_sm *)entry->cb_arg)) {
                /* Only insert if this element is older than all that we
                 * overlap
                 */
                start_overlap = NULL;
                rc = -1;
                break;
            }

            /* Track the first and last overlapped entries for later removal */
            if (start_overlap == NULL) {
                start_overlap = entry;
                end_overlap = entry;
            } else {
                end_overlap = entry;
            }
        } else {
            /* No overlap: insert here if sch ends before this entry starts */
            if ((int32_t)(sch->end_time - entry->start_time) <= 0) {
                rc = 0;
                TAILQ_INSERT_BEFORE(entry, sch, link);
                break;
            }
        }
    }

    if (!rc) {
        if (!entry) {
            /* Walked off the end of the list: sch goes last */
            TAILQ_INSERT_TAIL(&g_ble_ll_sched_q, sch, link);
        }
        sch->enqueued = 1;
    }

    /* Remove first to last overlapped elements, notifying each owner */
    entry = start_overlap;
    while (entry) {
        /* Save the next link before TAILQ_REMOVE invalidates it */
        start_overlap = TAILQ_NEXT(entry,link);
        switch (entry->sched_type) {
        case BLE_LL_SCHED_TYPE_CONN:
            tmp = (struct ble_ll_conn_sm *)entry->cb_arg;
            ble_ll_event_send(&tmp->conn_ev_end);
            break;
        case BLE_LL_SCHED_TYPE_ADV:
            ble_ll_adv_event_rmvd_from_sched((struct ble_ll_adv_sm *)
                                             entry->cb_arg);
            break;
#if MYNEWT_VAL(BLE_LL_CFG_FEAT_LL_EXT_ADV)
        case BLE_LL_SCHED_TYPE_AUX_SCAN:
            ble_ll_scan_end_adv_evt((struct ble_ll_aux_data *)
                                    entry->cb_arg);
            break;
#endif
        default:
            BLE_LL_ASSERT(0);
            break;
        }

        TAILQ_REMOVE(&g_ble_ll_sched_q, entry, link);
        entry->enqueued = 0;

        if (entry == end_overlap) {
            break;
        }
        entry = start_overlap;
    }

#ifdef BLE_XCVR_RFCLK
    entry = TAILQ_FIRST(&g_ble_ll_sched_q);
    if (entry == sch) {
        /* We are now first on the queue: ensure the RF clock settles in
         * time for our start.
         */
        ble_ll_xcvr_rfclk_timer_start(sch->start_time);
    } else {
        sch = entry;
    }
#else
    /* Get first on list */
    sch = TAILQ_FIRST(&g_ble_ll_sched_q);
#endif

    OS_EXIT_CRITICAL(sr);

    /* Restart timer for whatever is now at the head of the queue */
    BLE_LL_ASSERT(sch != NULL);
    os_cputime_timer_start(&g_ble_ll_sched_timer, sch->start_time);

    return rc;
}
292
293 /**
294 * Called to schedule a connection when the current role is master.
295 *
296 * Context: Interrupt
297 *
298 * @param connsm
299 * @param ble_hdr
300 * @param pyld_len
301 *
302 * @return int
303 */
304 #if MYNEWT_VAL(BLE_LL_STRICT_CONN_SCHEDULING)
/**
 * Strict-scheduling variant: the schedule is divided into fixed-size periods
 * within an epoch; the connection is assigned the first unoccupied period at
 * or after the earliest possible transmit time.
 *
 * @param connsm   Connection state machine to schedule (must be non-NULL)
 * @param ble_hdr  Header of the received advertisement; channel/PHY select
 *                 the earliest-start offset
 * @param pyld_len Received payload length (unused in this variant)
 *
 * @return int 0: scheduled; -1: could not schedule
 */
int
ble_ll_sched_master_new(struct ble_ll_conn_sm *connsm,
                        struct ble_mbuf_hdr *ble_hdr, uint8_t pyld_len)
{
    int rc;
    os_sr_t sr;
    uint32_t initial_start;
    uint32_t earliest_start;
    uint32_t earliest_end;
    uint32_t dur;
    uint32_t itvl_t;
    uint32_t adv_rxend;
    int i;
    uint32_t tpp;
    uint32_t tse;
    uint32_t np;
    uint32_t cp;
    uint32_t tick_in_period;

    struct ble_ll_sched_item *entry;
    struct ble_ll_sched_item *sch;

    /* Better have a connsm */
    BLE_LL_ASSERT(connsm != NULL);

    /* Get schedule element from connection */
    rc = -1;
    sch = &connsm->conn_sch;

    /* XXX:
     * The calculations for the 32kHz crystal bear alot of explanation. The
     * earliest possible time that the master can start the connection with a
     * slave is 1.25 msecs from the end of the connection request. The
     * connection request is sent an IFS time from the end of the advertising
     * packet that was received plus the time it takes to send the connection
     * request. At 1 Mbps, this is 1752 usecs, or 57.41 ticks. Using 57 ticks
     * makes us off ~13 usecs. Since we dont want to actually calculate the
     * receive end time tick (this would take too long), we assume the end of
     * the advertising PDU is 'now' (we call os_cputime_get32). We dont know
     * how much time it will take to service the ISR but if we are more than the
     * rx to tx time of the chip we will not be successful transmitting the
     * connect request. All this means is that we presume that the slave will
     * receive the connect request later than we expect but no earlier than
     * 13 usecs before (this is important).
     *
     * The code then attempts to schedule the connection at the
     * earliest time although this may not be possible. When the actual
     * schedule start time is determined, the master has to determine if this
     * time is more than a transmit window offset interval (1.25 msecs). The
     * master has to tell the slave how many transmit window offsets there are
     * from the earliest possible time to when the actual transmit start will
     * occur. Later in this function you will see the calculation. The actual
     * transmission start has to occur within the transmit window. The transmit
     * window interval is in units of 1.25 msecs and has to be at least 1. To
     * make things a bit easier (but less power efficient for the slave), we
     * use a transmit window of 2. We do this because we dont quite know the
     * exact start of the transmission and if we are too early or too late we
     * could miss the transmit window. A final note: the actual transmission
     * start (the anchor point) is sched offset ticks from the schedule start
     * time. We dont add this to the calculation when calculating the window
     * offset. The reason we dont do this is we want to insure we transmit
     * after the window offset we tell the slave. For example, say we think
     * we are transmitting 1253 usecs from the earliest start. This would cause
     * us to send a transmit window offset of 1. Since we are actually
     * transmitting earlier than the slave thinks we could end up transmitting
     * before the window offset. Transmitting later is fine since we have the
     * transmit window to do so. Transmitting before is bad, since the slave
     * wont be listening. We could do better calculation if we wanted to use
     * a transmit window of 1 as opposed to 2, but for now we dont care.
     */
    /* NOTE(review): sch_ticks_per_period is passed through
     * os_cputime_usecs_to_ticks() here even though its name suggests it is
     * already in ticks -- confirm the units of this field.
     */
    dur = os_cputime_usecs_to_ticks(g_ble_ll_sched_data.sch_ticks_per_period);
    adv_rxend = os_cputime_get32();
    if (ble_hdr->rxinfo.channel >= BLE_PHY_NUM_DATA_CHANS) {
        /*
         * We received packet on advertising channel which means this is a legacy
         * PDU on 1 Mbps - we do as described above.
         */
        earliest_start = adv_rxend + 57;
    } else {
        /*
         * The calculations are similar as above.
         *
         * We received packet on data channel which means this is AUX_ADV_IND
         * received on secondary adv channel. We can schedule first packet at
         * the earliest after "T_IFS + AUX_CONNECT_REQ + transmitWindowDelay".
         * AUX_CONNECT_REQ and transmitWindowDelay times vary depending on which
         * PHY we received on.
         *
         */
        if (ble_hdr->rxinfo.phy == BLE_PHY_1M) {
            // 150 + 352 + 2500 = 3002us = 98.37 ticks
            earliest_start = adv_rxend + 98;
        } else if (ble_hdr->rxinfo.phy == BLE_PHY_2M) {
            // 150 + 180 + 2500 = 2830us = 92.73 ticks
            earliest_start = adv_rxend + 93;
        } else if (ble_hdr->rxinfo.phy == BLE_PHY_CODED) {
            // 150 + 2896 + 3750 = 6796us = 222.69 ticks
            earliest_start = adv_rxend + 223;
        } else {
            BLE_LL_ASSERT(0);
        }
    }
    earliest_start += MYNEWT_VAL(BLE_LL_CONN_INIT_MIN_WIN_OFFSET) *
                      BLE_LL_SCHED_32KHZ_TICKS_PER_SLOT;
    itvl_t = connsm->conn_itvl_ticks;

    /* We have to find a place for this schedule */
    OS_ENTER_CRITICAL(sr);

    /*
     * Are there any allocated periods? If not, set epoch start to earliest
     * time
     */
    if (g_ble_ll_sched_data.sch_num_occ_periods == 0) {
        g_ble_ll_sched_data.sch_epoch_start = earliest_start;
        cp = 0;
    } else {
        /*
         * Earliest start must occur on period boundary.
         * (tse = ticks since epoch)
         */
        tpp = g_ble_ll_sched_data.sch_ticks_per_period;
        tse = earliest_start - g_ble_ll_sched_data.sch_epoch_start;
        np = tse / tpp;
        cp = np % BLE_LL_SCHED_PERIODS;
        tick_in_period = tse - (np * tpp);
        if (tick_in_period != 0) {
            /* Not on a boundary: round up to the start of the next period */
            ++cp;
            if (cp == BLE_LL_SCHED_PERIODS) {
                cp = 0;
            }
            earliest_start += (tpp - tick_in_period);
        }

        /* Now find first un-occupied period starting from cp */
        for (i = 0; i < BLE_LL_SCHED_PERIODS; ++i) {
            if (g_ble_ll_sched_data.sch_occ_period_mask & (1 << cp)) {
                ++cp;
                if (cp == BLE_LL_SCHED_PERIODS) {
                    cp = 0;
                }
                earliest_start += tpp;
            } else {
                /* not occupied */
                break;
            }
        }
        /* Should never happen but if it does... */
        if (i == BLE_LL_SCHED_PERIODS) {
            OS_EXIT_CRITICAL(sr);
            return rc;
        }
    }

    sch->start_time = earliest_start;
    initial_start = earliest_start;
    earliest_end = earliest_start + dur;

    if (!ble_ll_sched_insert_if_empty(sch)) {
        /* Nothing in schedule. Schedule as soon as possible */
        rc = 0;
        connsm->tx_win_off = MYNEWT_VAL(BLE_LL_CONN_INIT_MIN_WIN_OFFSET);
    } else {
        os_cputime_timer_stop(&g_ble_ll_sched_timer);
        TAILQ_FOREACH(entry, &g_ble_ll_sched_q, link) {
            /* Set these because overlap function needs them to be set */
            sch->start_time = earliest_start;
            sch->end_time = earliest_end;

            /* We can insert if before entry in list */
            if ((int32_t)(sch->end_time - entry->start_time) <= 0) {
                if ((earliest_start - initial_start) <= itvl_t) {
                    rc = 0;
                    TAILQ_INSERT_BEFORE(entry, sch, link);
                }
                break;
            }

            /* Check for overlapping events */
            if (ble_ll_sched_is_overlap(sch, entry)) {
                /* Earliest start is end of this event since we overlap */
                earliest_start = entry->end_time;
                earliest_end = earliest_start + dur;
            }
        }

        /* Must be able to schedule within one connection interval */
        if (!entry) {
            if ((earliest_start - initial_start) <= itvl_t) {
                rc = 0;
                TAILQ_INSERT_TAIL(&g_ble_ll_sched_q, sch, link);
            }
        }

        if (!rc) {
            /* calculate number of window offsets. Each offset is 1.25 ms */
            sch->enqueued = 1;
            /*
             * NOTE: we dont add sched offset ticks as we want to under-estimate
             * the transmit window slightly since the window size is currently
             * 2 when using a 32768 crystal.
             */
            dur = os_cputime_ticks_to_usecs(earliest_start - initial_start);
            connsm->tx_win_off = dur / BLE_LL_CONN_TX_OFF_USECS;
        }
    }

    if (!rc) {
        sch->start_time = earliest_start;
        sch->end_time = earliest_end;
        /*
         * Since we have the transmit window to transmit in, we dont need
         * to set the anchor point usecs; just transmit to the nearest tick.
         */
        connsm->anchor_point = earliest_start + g_ble_ll_sched_offset_ticks;
        connsm->anchor_point_usecs = 0;
        connsm->ce_end_time = earliest_end;
        /* Mark the chosen period as occupied by this connection */
        connsm->period_occ_mask = (1 << cp);
        g_ble_ll_sched_data.sch_occ_period_mask |= connsm->period_occ_mask;
        ++g_ble_ll_sched_data.sch_num_occ_periods;
    }


    /* Get head of list to restart timer */
    sch = TAILQ_FIRST(&g_ble_ll_sched_q);

    OS_EXIT_CRITICAL(sr);

    os_cputime_timer_start(&g_ble_ll_sched_timer, sch->start_time);

    return rc;
}
537 #else
/**
 * Non-strict variant: reserves BLE_LL_CONN_INIT_SLOTS worth of time at the
 * earliest gap in the schedule no later than one connection interval after
 * the earliest possible start, then derives the transmit window offset.
 *
 * @param connsm   Connection state machine to schedule
 * @param ble_hdr  Header of the received advertisement; channel/PHY select
 *                 the earliest-start offset
 * @param pyld_len Received payload length (unused in this variant)
 *
 * @return int 0: scheduled; -1: could not schedule within one interval
 */
int
ble_ll_sched_master_new(struct ble_ll_conn_sm *connsm,
                        struct ble_mbuf_hdr *ble_hdr, uint8_t pyld_len)
{
    int rc;
    os_sr_t sr;
    uint8_t req_slots;
    uint32_t initial_start;
    uint32_t earliest_start;
    uint32_t earliest_end;
    uint32_t dur;
    uint32_t itvl_t;
    uint32_t adv_rxend;
    struct ble_ll_sched_item *entry;
    struct ble_ll_sched_item *sch;

    /*
     * XXX: TODO this code assumes the advertisement and connect request were
     * sent at 1Mbps.
     */

    /* Get schedule element from connection */
    rc = -1;
    sch = &connsm->conn_sch;
    req_slots = MYNEWT_VAL(BLE_LL_CONN_INIT_SLOTS);

    /* XXX:
     * The calculations for the 32kHz crystal bear alot of explanation. The
     * earliest possible time that the master can start the connection with a
     * slave is 1.25 msecs from the end of the connection request. The
     * connection request is sent an IFS time from the end of the advertising
     * packet that was received plus the time it takes to send the connection
     * request. At 1 Mbps, this is 1752 usecs, or 57.41 ticks. Using 57 ticks
     * makes us off ~13 usecs. Since we dont want to actually calculate the
     * receive end time tick (this would take too long), we assume the end of
     * the advertising PDU is 'now' (we call os_cputime_get32). We dont know
     * how much time it will take to service the ISR but if we are more than the
     * rx to tx time of the chip we will not be successful transmitting the
     * connect request. All this means is that we presume that the slave will
     * receive the connect request later than we expect but no earlier than
     * 13 usecs before (this is important).
     *
     * The code then attempts to schedule the connection at the
     * earliest time although this may not be possible. When the actual
     * schedule start time is determined, the master has to determine if this
     * time is more than a transmit window offset interval (1.25 msecs). The
     * master has to tell the slave how many transmit window offsets there are
     * from the earliest possible time to when the actual transmit start will
     * occur. Later in this function you will see the calculation. The actual
     * transmission start has to occur within the transmit window. The transmit
     * window interval is in units of 1.25 msecs and has to be at least 1. To
     * make things a bit easier (but less power efficient for the slave), we
     * use a transmit window of 2. We do this because we dont quite know the
     * exact start of the transmission and if we are too early or too late we
     * could miss the transmit window. A final note: the actual transmission
     * start (the anchor point) is sched offset ticks from the schedule start
     * time. We dont add this to the calculation when calculating the window
     * offset. The reason we dont do this is we want to insure we transmit
     * after the window offset we tell the slave. For example, say we think
     * we are transmitting 1253 usecs from the earliest start. This would cause
     * us to send a transmit window offset of 1. Since we are actually
     * transmitting earlier than the slave thinks we could end up transmitting
     * before the window offset. Transmitting later is fine since we have the
     * transmit window to do so. Transmitting before is bad, since the slave
     * wont be listening. We could do better calculation if we wanted to use
     * a transmit window of 1 as opposed to 2, but for now we dont care.
     */
    dur = req_slots * BLE_LL_SCHED_32KHZ_TICKS_PER_SLOT;
    adv_rxend = os_cputime_get32();
    if (ble_hdr->rxinfo.channel >= BLE_PHY_NUM_DATA_CHANS) {
        /*
         * We received packet on advertising channel which means this is a legacy
         * PDU on 1 Mbps - we do as described above.
         */
        earliest_start = adv_rxend + 57;
    } else {
        /*
         * The calculations are similar as above.
         *
         * We received packet on data channel which means this is AUX_ADV_IND
         * received on secondary adv channel. We can schedule first packet at
         * the earliest after "T_IFS + AUX_CONNECT_REQ + transmitWindowDelay".
         * AUX_CONNECT_REQ and transmitWindowDelay times vary depending on which
         * PHY we received on.
         *
         */
        if (ble_hdr->rxinfo.phy == BLE_PHY_1M) {
            // 150 + 352 + 2500 = 3002us = 98.37 ticks
            earliest_start = adv_rxend + 98;
        } else if (ble_hdr->rxinfo.phy == BLE_PHY_2M) {
            // 150 + 180 + 2500 = 2830us = 92.73 ticks
            earliest_start = adv_rxend + 93;
        } else if (ble_hdr->rxinfo.phy == BLE_PHY_CODED) {
            // 150 + 2896 + 3750 = 6796us = 222.69 ticks
            earliest_start = adv_rxend + 223;
        } else {
            BLE_LL_ASSERT(0);
        }
    }
    earliest_start += MYNEWT_VAL(BLE_LL_CONN_INIT_MIN_WIN_OFFSET) *
                      BLE_LL_SCHED_32KHZ_TICKS_PER_SLOT;
    earliest_end = earliest_start + dur;
    itvl_t = connsm->conn_itvl_ticks;

    /* We have to find a place for this schedule */
    OS_ENTER_CRITICAL(sr);

    /* The schedule item must occur after current running item (if any) */
    sch->start_time = earliest_start;
    initial_start = earliest_start;

    if (!ble_ll_sched_insert_if_empty(sch)) {
        /* Nothing in schedule. Schedule as soon as possible */
        rc = 0;
        connsm->tx_win_off = MYNEWT_VAL(BLE_LL_CONN_INIT_MIN_WIN_OFFSET);
    } else {
        os_cputime_timer_stop(&g_ble_ll_sched_timer);
        TAILQ_FOREACH(entry, &g_ble_ll_sched_q, link) {
            /* Set these because overlap function needs them to be set */
            sch->start_time = earliest_start;
            sch->end_time = earliest_end;

            /* We can insert if before entry in list */
            if ((int32_t)(sch->end_time - entry->start_time) <= 0) {
                /* Only accept if we slid no more than one interval */
                if ((earliest_start - initial_start) <= itvl_t) {
                    rc = 0;
                    TAILQ_INSERT_BEFORE(entry, sch, link);
                }
                break;
            }

            /* Check for overlapping events */
            if (ble_ll_sched_is_overlap(sch, entry)) {
                /* Earliest start is end of this event since we overlap */
                earliest_start = entry->end_time;
                earliest_end = earliest_start + dur;
            }
        }

        /* Must be able to schedule within one connection interval */
        if (!entry) {
            if ((earliest_start - initial_start) <= itvl_t) {
                rc = 0;
                TAILQ_INSERT_TAIL(&g_ble_ll_sched_q, sch, link);
            }
        }

        if (!rc) {
            /* calculate number of window offsets. Each offset is 1.25 ms */
            sch->enqueued = 1;
            /*
             * NOTE: we dont add sched offset ticks as we want to under-estimate
             * the transmit window slightly since the window size is currently
             * 2 when using a 32768 crystal.
             */
            dur = os_cputime_ticks_to_usecs(earliest_start - initial_start);
            connsm->tx_win_off = dur / BLE_LL_CONN_TX_OFF_USECS;
        }
    }

    if (!rc) {
        sch->start_time = earliest_start;
        sch->end_time = earliest_end;
        /*
         * Since we have the transmit window to transmit in, we dont need
         * to set the anchor point usecs; just transmit to the nearest tick.
         */
        connsm->anchor_point = earliest_start + g_ble_ll_sched_offset_ticks;
        connsm->anchor_point_usecs = 0;
        connsm->ce_end_time = earliest_end;
    }

    /* Get head of list to restart timer */
    sch = TAILQ_FIRST(&g_ble_ll_sched_q);

    OS_EXIT_CRITICAL(sr);

    os_cputime_timer_start(&g_ble_ll_sched_timer, sch->start_time);

    return rc;
}
719 #endif
720
/**
 * Schedules a slave connection for the first time. Overlapping connection
 * items are pre-empted; overlap with any other item type aborts the attempt.
 *
 * Context: Link Layer
 *
 * @param connsm Slave-role connection state machine to schedule
 *
 * @return int 0: scheduled; -1: could not schedule (overlaps the currently
 *         running event or a non-connection schedule item)
 */
int
ble_ll_sched_slave_new(struct ble_ll_conn_sm *connsm)
{
    int rc;
    os_sr_t sr;
    struct ble_ll_sched_item *entry;
    struct ble_ll_sched_item *next_sch;
    struct ble_ll_sched_item *sch;

#ifdef BLE_XCVR_RFCLK
    int first;
    first = 0;
#endif

    /* Get schedule element from connection */
    rc = -1;
    sch = &connsm->conn_sch;

    /* Set schedule start and end times */
    /*
     * XXX: for now, we dont care about anchor point usecs for the slave. It
     * does not matter if we turn on the receiver up to one tick before w
     * need to. We also subtract one extra tick since the conversion from
     * usecs to ticks could be off by up to 1 tick.
     */
    sch->start_time = connsm->anchor_point - g_ble_ll_sched_offset_ticks -
        os_cputime_usecs_to_ticks(connsm->slave_cur_window_widening) - 1;
    sch->end_time = connsm->ce_end_time;
    sch->remainder = 0;

    /* We have to find a place for this schedule */
    OS_ENTER_CRITICAL(sr);

    /* The schedule item must occur after current running item (if any) */
    if (ble_ll_sched_overlaps_current(sch)) {
        OS_EXIT_CRITICAL(sr);
        return rc;
    }

    entry = ble_ll_sched_insert_if_empty(sch);
    if (!entry) {
        /* Nothing in schedule. Schedule as soon as possible */
        rc = 0;
#ifdef BLE_XCVR_RFCLK
        first = 1;
#endif
    } else {
        os_cputime_timer_stop(&g_ble_ll_sched_timer);
        while (1) {
            /* Save next before a possible removal of 'entry' below */
            next_sch = entry->link.tqe_next;
            /* Insert if event ends before next starts */
            if ((int32_t)(sch->end_time - entry->start_time) <= 0) {
                rc = 0;
                TAILQ_INSERT_BEFORE(entry, sch, link);
                break;
            }

            if (ble_ll_sched_is_overlap(sch, entry)) {
                /* If we overlap a connection it is pre-empted (removed and
                 * its end event signalled); any other type aborts with
                 * rc == -1.
                 */
                if (ble_ll_sched_conn_overlap(entry)) {
                    break;
                }
            }

            /* Move to next entry */
            entry = next_sch;

            /* Insert at tail if none left to check */
            if (!entry) {
                rc = 0;
                TAILQ_INSERT_TAIL(&g_ble_ll_sched_q, sch, link);
                break;
            }
        }

        if (!rc) {
            sch->enqueued = 1;
        }
#ifdef BLE_XCVR_RFCLK
        next_sch = TAILQ_FIRST(&g_ble_ll_sched_q);
        if (next_sch == sch) {
            first = 1;
        } else {
            sch = next_sch;
        }
#else
        sch = TAILQ_FIRST(&g_ble_ll_sched_q);
#endif
    }

#ifdef BLE_XCVR_RFCLK
    if (first) {
        /* Our item is first: make sure the RF clock settles in time */
        ble_ll_xcvr_rfclk_timer_start(sch->start_time);
    }
#endif

    OS_EXIT_CRITICAL(sr);

    /* Restart scheduler timer for the head of the queue */
    os_cputime_timer_start(&g_ble_ll_sched_timer, sch->start_time);

    return rc;
}
832
/**
 * Schedule a new advertising event. If the requested window overlaps
 * existing items the event is slid later (same duration) until it fits, so
 * it is never rejected; the callback (if any) is invoked with the actual
 * start time chosen.
 *
 * @param sch Schedule item for the advertising event (start/end pre-set)
 * @param cb  Optional callback invoked with (adv_sm, actual_start, arg)
 * @param arg Opaque argument passed to cb
 *
 * @return int 0 (every path through this function assigns rc = 0)
 */
int
ble_ll_sched_adv_new(struct ble_ll_sched_item *sch, ble_ll_sched_adv_new_cb cb,
                     void *arg)
{
    int rc;
    os_sr_t sr;
    uint32_t adv_start;
    uint32_t duration;
    struct ble_ll_sched_item *entry;
    struct ble_ll_sched_item *orig;

    /* Get length of schedule item */
    duration = sch->end_time - sch->start_time;
    orig = sch;

    OS_ENTER_CRITICAL(sr);
    entry = ble_ll_sched_insert_if_empty(sch);
    if (!entry) {
        rc = 0;
        adv_start = sch->start_time;
    } else {
        /* XXX: no need to stop timer if not first on list. Modify code? */
        os_cputime_timer_stop(&g_ble_ll_sched_timer);
        TAILQ_FOREACH(entry, &g_ble_ll_sched_q, link) {
            /* We can insert if before entry in list */
            if ((int32_t)(sch->end_time - entry->start_time) <= 0) {
                rc = 0;
                TAILQ_INSERT_BEFORE(entry, sch, link);
                break;
            }

            /* Check for overlapping events */
            if (ble_ll_sched_is_overlap(sch, entry)) {
                /* Earliest start is end of this event since we overlap */
                sch->start_time = entry->end_time;
                sch->end_time = sch->start_time + duration;
            }
        }

        if (!entry) {
            /* Walked off the end: advertising event goes last */
            rc = 0;
            TAILQ_INSERT_TAIL(&g_ble_ll_sched_q, sch, link);
        }
        adv_start = sch->start_time;

        if (!rc) {
            sch->enqueued = 1;
        }

        /* Restart with head of list */
        sch = TAILQ_FIRST(&g_ble_ll_sched_q);
    }

    /* Tell the advertiser the start time that was actually assigned */
    if (cb) {
        cb((struct ble_ll_adv_sm *)orig->cb_arg, adv_start, arg);
    }

#ifdef BLE_XCVR_RFCLK
    if (orig == sch) {
        /* Our item is at the head: ensure the RF clock settles in time */
        ble_ll_xcvr_rfclk_timer_start(sch->start_time);
    }
#endif

    OS_EXIT_CRITICAL(sr);

    /* Restart timer */
    BLE_LL_ASSERT(sch != NULL);
    os_cputime_timer_start(&g_ble_ll_sched_timer, sch->start_time);

    return rc;
}
904
/**
 * Reschedule an advertising event, allowing its start to be delayed by up to
 * max_delay_ticks. First tries to place the (duration + max delay) window;
 * on overlap, falls back to fitting the original duration into the first
 * gap, shrinking the available randomization range accordingly. A random
 * delay within the remaining range is then applied to the start time.
 *
 * @param sch             Schedule item (start/end pre-set to desired window)
 * @param start           Out: actual start time assigned (valid when rc==0)
 * @param max_delay_ticks Maximum allowed random delay, in ticks
 *
 * @return int 0: scheduled; -1: no gap found within the allowed delay
 */
int
ble_ll_sched_adv_reschedule(struct ble_ll_sched_item *sch, uint32_t *start,
                            uint32_t max_delay_ticks)
{
    int rc;
    os_sr_t sr;
    uint32_t orig_start;
    uint32_t duration;
    uint32_t rand_ticks;
    struct ble_ll_sched_item *entry;
    struct ble_ll_sched_item *next_sch;
    struct ble_ll_sched_item *before;
    struct ble_ll_sched_item *start_overlap;
    struct ble_ll_sched_item *end_overlap;

    /* Get length of schedule item */
    duration = sch->end_time - sch->start_time;

    /* Add maximum randomization delay to end */
    rand_ticks = max_delay_ticks;
    sch->end_time += max_delay_ticks;

    start_overlap = NULL;
    end_overlap = NULL;
    before = NULL;
    rc = 0;
    OS_ENTER_CRITICAL(sr);

    entry = ble_ll_sched_insert_if_empty(sch);
    if (entry) {
        os_cputime_timer_stop(&g_ble_ll_sched_timer);
        /* Pass 1: find the range of entries the padded window overlaps, or
         * the entry we fit entirely before.
         */
        while (1) {
            next_sch = entry->link.tqe_next;
            if (ble_ll_sched_is_overlap(sch, entry)) {
                if (start_overlap == NULL) {
                    start_overlap = entry;
                    end_overlap = entry;
                } else {
                    end_overlap = entry;
                }
            } else {
                if ((int32_t)(sch->end_time - entry->start_time) <= 0) {
                    before = entry;
                    break;
                }
            }

            entry = next_sch;
            if (entry == NULL) {
                break;
            }
        }

        /*
         * If there is no overlap, we either insert before the 'before' entry
         * or we insert at the end if there is no before entry.
         */
        if (start_overlap == NULL) {
            if (before) {
                TAILQ_INSERT_BEFORE(before, sch, link);
            } else {
                TAILQ_INSERT_TAIL(&g_ble_ll_sched_q, sch, link);
            }
        } else {
            /*
             * This item will overlap with others. See if we can fit it in
             * with original duration.
             */
            before = NULL;
            orig_start = sch->start_time;
            entry = start_overlap;
            /* Pass 2: retry with the un-padded duration, sliding past each
             * overlapped entry in turn.
             */
            sch->end_time = sch->start_time + duration;
            while (1) {
                next_sch = entry->link.tqe_next;
                if ((int32_t)(sch->end_time - entry->start_time) <= 0) {
                    /* Fits in the gap before 'entry'; randomization is
                     * limited to the slack left in this gap.
                     */
                    rand_ticks = entry->start_time - sch->end_time;
                    before = entry;
                    TAILQ_INSERT_BEFORE(before, sch, link);
                    break;
                } else {
                    sch->start_time = entry->end_time;
                    sch->end_time = sch->start_time + duration;
                }

                if (entry == end_overlap) {
                    /* Past the last overlapped entry; unsigned subtraction
                     * wraps to a huge value if we slid beyond the allowed
                     * delay, which the > max_delay_ticks check catches.
                     */
                    rand_ticks = (orig_start + max_delay_ticks) - sch->start_time;
                    if (rand_ticks > max_delay_ticks) {
                        /* No place for advertisement. */
                        rc = -1;
                    } else {
                        if (next_sch == NULL) {
                            TAILQ_INSERT_TAIL(&g_ble_ll_sched_q, sch, link);
                        } else {
                            TAILQ_INSERT_BEFORE(next_sch, sch, link);
                        }
                    }
                    break;
                }
                entry = next_sch;
                BLE_LL_ASSERT(entry != NULL);
            }
        }
    }

    if (!rc) {
        sch->enqueued = 1;
        /* Apply a random delay within the remaining slack */
        if (rand_ticks) {
            sch->start_time += rand() % rand_ticks;
        }
        sch->end_time = sch->start_time + duration;
        *start = sch->start_time;

#ifdef BLE_XCVR_RFCLK
        if (sch == TAILQ_FIRST(&g_ble_ll_sched_q)) {
            /* We are first on the queue: start the RF clock early enough */
            ble_ll_xcvr_rfclk_timer_start(sch->start_time);
        }
#endif
    }

    OS_EXIT_CRITICAL(sr);

    /* Restart scheduler timer for the head of the queue */
    sch = TAILQ_FIRST(&g_ble_ll_sched_q);
    os_cputime_timer_start(&g_ble_ll_sched_timer, sch->start_time);

    return rc;
}
1031
1032 int
ble_ll_sched_adv_resched_pdu(struct ble_ll_sched_item * sch)1033 ble_ll_sched_adv_resched_pdu(struct ble_ll_sched_item *sch)
1034 {
1035 uint8_t lls;
1036 os_sr_t sr;
1037 struct ble_ll_sched_item *entry;
1038
1039 OS_ENTER_CRITICAL(sr);
1040
1041 lls = ble_ll_state_get();
1042 if ((lls == BLE_LL_STATE_ADV) || (lls == BLE_LL_STATE_CONNECTION)) {
1043 goto adv_resched_pdu_fail;
1044 }
1045
1046 entry = ble_ll_sched_insert_if_empty(sch);
1047 if (entry) {
1048 /* If we overlap with the first item, simply re-schedule */
1049 if (ble_ll_sched_is_overlap(sch, entry)) {
1050 goto adv_resched_pdu_fail;
1051 }
1052 os_cputime_timer_stop(&g_ble_ll_sched_timer);
1053 TAILQ_INSERT_BEFORE(entry, sch, link);
1054 }
1055
1056 OS_EXIT_CRITICAL(sr);
1057 os_cputime_timer_start(&g_ble_ll_sched_timer, sch->start_time);
1058 return 0;
1059
1060 adv_resched_pdu_fail:
1061 OS_EXIT_CRITICAL(sr);
1062 return -1;
1063 }
1064
1065 /**
1066 * Remove a schedule element
1067 *
1068 * @param sched_type
1069 *
1070 * @return int 0 - removed, 1 - not in the list
1071 */
1072 int
ble_ll_sched_rmv_elem(struct ble_ll_sched_item * sch)1073 ble_ll_sched_rmv_elem(struct ble_ll_sched_item *sch)
1074 {
1075 os_sr_t sr;
1076 struct ble_ll_sched_item *first;
1077 int rc = 1;
1078
1079 if (!sch) {
1080 return rc;
1081 }
1082
1083 OS_ENTER_CRITICAL(sr);
1084 if (sch->enqueued) {
1085 first = TAILQ_FIRST(&g_ble_ll_sched_q);
1086 if (first == sch) {
1087 os_cputime_timer_stop(&g_ble_ll_sched_timer);
1088 }
1089
1090 TAILQ_REMOVE(&g_ble_ll_sched_q, sch, link);
1091 sch->enqueued = 0;
1092 rc = 0;
1093
1094 if (first == sch) {
1095 first = TAILQ_FIRST(&g_ble_ll_sched_q);
1096 if (first) {
1097 os_cputime_timer_start(&g_ble_ll_sched_timer, first->start_time);
1098 }
1099 }
1100 }
1101 OS_EXIT_CRITICAL(sr);
1102
1103 return rc;
1104 }
1105
1106 void
ble_ll_sched_rmv_elem_type(uint8_t type,sched_remove_cb_func remove_cb)1107 ble_ll_sched_rmv_elem_type(uint8_t type, sched_remove_cb_func remove_cb)
1108 {
1109 os_sr_t sr;
1110 struct ble_ll_sched_item *entry;
1111 struct ble_ll_sched_item *first;
1112
1113 OS_ENTER_CRITICAL(sr);
1114 first = TAILQ_FIRST(&g_ble_ll_sched_q);
1115
1116 TAILQ_FOREACH(entry, &g_ble_ll_sched_q, link) {
1117 if (entry->sched_type == type) {
1118 if (first == entry) {
1119 os_cputime_timer_stop(&g_ble_ll_sched_timer);
1120 first = NULL;
1121 }
1122
1123 TAILQ_REMOVE(&g_ble_ll_sched_q, entry, link);
1124 remove_cb(entry);
1125 entry->enqueued = 0;
1126 }
1127 }
1128
1129 if (!first) {
1130 first = TAILQ_FIRST(&g_ble_ll_sched_q);
1131 os_cputime_timer_start(&g_ble_ll_sched_timer, first->start_time);
1132 }
1133
1134 OS_EXIT_CRITICAL(sr);
1135 }
1136
1137 /**
1138 * Executes a schedule item by calling the schedule callback function.
1139 *
1140 * Context: Interrupt
1141 *
1142 * @param sch Pointer to schedule item
1143 *
1144 * @return int 0: schedule item is not over; otherwise schedule item is done.
1145 */
1146 static int
ble_ll_sched_execute_item(struct ble_ll_sched_item * sch)1147 ble_ll_sched_execute_item(struct ble_ll_sched_item *sch)
1148 {
1149 int rc;
1150 uint8_t lls;
1151
1152 lls = ble_ll_state_get();
1153
1154 ble_ll_trace_u32x3(BLE_LL_TRACE_ID_SCHED, lls, os_cputime_get32(),
1155 sch->start_time);
1156
1157 if (lls == BLE_LL_STATE_STANDBY) {
1158 goto sched;
1159 }
1160
1161 /* If aux scan scheduled and LL is in state when scanner is running
1162 * in 3 states:
1163 * BLE_LL_STATE_SCANNING
1164 * BLE_LL_STATE_INITIATING
1165 * BLE_LL_STATE_STANDBY
1166 *
1167 * Let scanner to decide to disable phy or not.
1168 */
1169 if (sch->sched_type == BLE_LL_SCHED_TYPE_AUX_SCAN) {
1170 if (lls == BLE_LL_STATE_INITIATING || lls == BLE_LL_STATE_SCANNING) {
1171 goto sched;
1172 }
1173 }
1174
1175 /*
1176 * This is either an advertising event or connection event start. If
1177 * we are scanning or initiating just stop it.
1178 */
1179
1180 /* We have to disable the PHY no matter what */
1181 ble_phy_disable();
1182 ble_ll_wfr_disable();
1183
1184 if (lls == BLE_LL_STATE_SCANNING) {
1185 ble_ll_state_set(BLE_LL_STATE_STANDBY);
1186 ble_ll_scan_clean_cur_aux_data();
1187 } else if (lls == BLE_LL_STATE_INITIATING) {
1188 ble_ll_state_set(BLE_LL_STATE_STANDBY);
1189 ble_ll_scan_clean_cur_aux_data();
1190 /* PHY is disabled - make sure we do not wait for AUX_CONNECT_RSP */
1191 ble_ll_conn_reset_pending_aux_conn_rsp();
1192 } else if (lls == BLE_LL_STATE_ADV) {
1193 STATS_INC(ble_ll_stats, sched_state_adv_errs);
1194 ble_ll_adv_halt();
1195 } else {
1196 STATS_INC(ble_ll_stats, sched_state_conn_errs);
1197 ble_ll_conn_event_halt();
1198 }
1199
1200 sched:
1201 BLE_LL_ASSERT(sch->sched_cb);
1202 rc = sch->sched_cb(sch);
1203 return rc;
1204 }
1205
1206 /**
1207 * Run the BLE scheduler. Iterate through all items on the schedule queue.
1208 *
1209 * Context: interrupt (scheduler)
1210 *
1211 * @return int
1212 */
1213 void
ble_ll_sched_run(void * arg)1214 ble_ll_sched_run(void *arg)
1215 {
1216 struct ble_ll_sched_item *sch;
1217
1218 /* Look through schedule queue */
1219 sch = TAILQ_FIRST(&g_ble_ll_sched_q);
1220 if (sch) {
1221 #if (BLE_LL_SCHED_DEBUG == 1)
1222 int32_t dt;
1223
1224 /* Make sure we have passed the start time of the first event */
1225 dt = (int32_t)(os_cputime_get32() - sch->start_time);
1226 if (dt > g_ble_ll_sched_max_late) {
1227 g_ble_ll_sched_max_late = dt;
1228 }
1229 if (dt < g_ble_ll_sched_max_early) {
1230 g_ble_ll_sched_max_early = dt;
1231 }
1232 #endif
1233
1234 /* Remove schedule item and execute the callback */
1235 TAILQ_REMOVE(&g_ble_ll_sched_q, sch, link);
1236 sch->enqueued = 0;
1237 ble_ll_sched_execute_item(sch);
1238
1239 /* Restart if there is an item on the schedule */
1240 sch = TAILQ_FIRST(&g_ble_ll_sched_q);
1241 if (sch) {
1242 os_cputime_timer_start(&g_ble_ll_sched_timer, sch->start_time);
1243 }
1244 }
1245 }
1246
1247 /**
1248 * Called to determine when the next scheduled event will occur.
1249 *
1250 * If there are not scheduled events this function returns 0; otherwise it
1251 * returns 1 and *next_event_time is set to the start time of the next event.
1252 *
1253 * @param next_event_time
1254 *
1255 * @return int 0: No events are scheduled 1: there is an upcoming event
1256 */
1257 int
ble_ll_sched_next_time(uint32_t * next_event_time)1258 ble_ll_sched_next_time(uint32_t *next_event_time)
1259 {
1260 int rc;
1261 os_sr_t sr;
1262 struct ble_ll_sched_item *first;
1263
1264 rc = 0;
1265 OS_ENTER_CRITICAL(sr);
1266 first = TAILQ_FIRST(&g_ble_ll_sched_q);
1267 if (first) {
1268 *next_event_time = first->start_time;
1269 rc = 1;
1270 }
1271 OS_EXIT_CRITICAL(sr);
1272
1273 return rc;
1274 }
1275
1276 #ifdef BLE_XCVR_RFCLK
1277 /**
1278 * Checks to see if we need to restart the cputime timer which starts the
1279 * rf clock settling.
1280 *
1281 * NOTE: Should only be called from the Link Layer task!
1282 *
1283 * Context: Link-Layer task.
1284 *
1285 */
1286 void
ble_ll_sched_rfclk_chk_restart(void)1287 ble_ll_sched_rfclk_chk_restart(void)
1288 {
1289 os_sr_t sr;
1290 uint8_t ll_state;
1291 int32_t time_till_next;
1292 uint32_t next_time;
1293
1294 OS_ENTER_CRITICAL(sr);
1295 ll_state = ble_ll_state_get();
1296 if (ble_ll_sched_next_time(&next_time)) {
1297 /*
1298 * If the time until the next event is too close, no need to start
1299 * the timer. Leave clock on.
1300 */
1301 time_till_next = (int32_t)(next_time - os_cputime_get32());
1302 if (time_till_next > g_ble_ll_data.ll_xtal_ticks) {
1303 /* Restart the rfclk timer based on the next scheduled time */
1304 ble_ll_xcvr_rfclk_timer_start(next_time);
1305
1306 /* Only disable the rfclk if doing nothing */
1307 if (ll_state == BLE_LL_STATE_STANDBY) {
1308 ble_ll_xcvr_rfclk_disable();
1309 }
1310 }
1311 } else {
1312 /*
1313 * Only stop the timer and rfclk if doing nothing currently. If
1314 * in some other state, that state will handle the timer and rfclk
1315 */
1316 if (ll_state == BLE_LL_STATE_STANDBY) {
1317 ble_ll_xcvr_rfclk_stop();
1318 }
1319 }
1320 OS_EXIT_CRITICAL(sr);
1321 }
1322
1323 #endif
1324
1325 #if MYNEWT_VAL(BLE_LL_CFG_FEAT_LL_EXT_ADV)
1326 /**
1327 * Called to check if there is place for a planned scan req.
1328 *
1329 * @param chan
1330 * @param phy_mode
1331 *
1332 * @return int 0: Clear for scan req 1: there is an upcoming event
1333 */
1334 int
ble_ll_sched_scan_req_over_aux_ptr(uint32_t chan,uint8_t phy_mode)1335 ble_ll_sched_scan_req_over_aux_ptr(uint32_t chan, uint8_t phy_mode)
1336 {
1337 struct ble_ll_sched_item *sch;
1338 uint32_t usec_dur;
1339 uint32_t now = os_cputime_get32();
1340
1341 /* Lets calculate roughly how much time we need for scan req and scan rsp */
1342 usec_dur = ble_ll_pdu_tx_time_get(BLE_SCAN_REQ_LEN, phy_mode);
1343 if (chan >= BLE_PHY_NUM_DATA_CHANS) {
1344 usec_dur += ble_ll_pdu_tx_time_get(BLE_SCAN_RSP_MAX_LEN, phy_mode);
1345 } else {
1346 usec_dur += ble_ll_pdu_tx_time_get(BLE_SCAN_RSP_MAX_EXT_LEN, phy_mode);
1347 }
1348
1349 sch = TAILQ_FIRST(&g_ble_ll_sched_q);
1350 while (sch) {
1351 /* Let's check if there is no scheduled item which want to start within
1352 * given usecs.*/
1353 if ((int32_t)(sch->start_time - now + os_cputime_usecs_to_ticks(usec_dur)) > 0) {
1354 /* We are fine. Have time for scan req */
1355 return 0;
1356 }
1357
1358 /* There is something in the scheduler. If it is not aux ptr we assume
1359 * it is more important that scan req
1360 */
1361 if (sch->sched_type != BLE_LL_SCHED_TYPE_AUX_SCAN) {
1362 return 1;
1363 }
1364
1365 ble_ll_scan_end_adv_evt((struct ble_ll_aux_data *)sch->cb_arg);
1366 TAILQ_REMOVE(&g_ble_ll_sched_q, sch, link);
1367 sch->enqueued = 0;
1368 sch = TAILQ_FIRST(&g_ble_ll_sched_q);
1369 }
1370 return 0;
1371 }
1372
/**
 * Called to schedule an aux scan (scan of a secondary advertising channel
 * pointed to by an AUX_PTR).
 *
 * Context: Interrupt
 *
 * @param ble_hdr  Header of the PDU that carried the aux pointer; supplies
 *                 the reception timestamp (beg_cputime/rem_usecs)
 * @param scansm   Scanner state machine
 * @param aux_scan Aux data holding the offset and the embedded sched item
 *
 * @return 0 on success, -1 if the item overlaps an existing schedule entry
 */
int
ble_ll_sched_aux_scan(struct ble_mbuf_hdr *ble_hdr,
                      struct ble_ll_scan_sm *scansm,
                      struct ble_ll_aux_data *aux_scan)
{
    int rc;
    os_sr_t sr;
    uint32_t off_ticks;
    uint32_t off_rem_usecs;
    uint32_t start_time;
    uint32_t start_time_rem_usecs;
    uint32_t end_time;
    uint32_t dur;
    struct ble_ll_sched_item *entry;
    struct ble_ll_sched_item *sch;
    int phy_mode;

    sch = &aux_scan->sch;

    /* Split the aux offset into whole cputime ticks plus leftover usecs */
    off_ticks = os_cputime_usecs_to_ticks(aux_scan->offset);
    off_rem_usecs = aux_scan->offset - os_cputime_ticks_to_usecs(off_ticks);

    /* Absolute start = reception time of the PDU + aux offset */
    start_time = ble_hdr->beg_cputime + off_ticks;
    start_time_rem_usecs = ble_hdr->rem_usecs + off_rem_usecs;
    /* Carry accumulated sub-tick usecs into a whole tick.
     * NOTE(review): assumes ~30 usecs per tick (32768 Hz tick is ~30.5 us)
     * -- confirm this rounding constant. */
    if (start_time_rem_usecs > 30) {
        start_time++;
        start_time_rem_usecs -= 30;
    }
    /* Start the item early to account for scheduling/xcvr setup delay */
    start_time -= g_ble_ll_sched_offset_ticks;

    /* Let's calculate time we reserve for aux packet. For now we assume to wait
     * for fixed number of bytes and handle possible interrupting it in
     * ble_ll_sched_execute_item(). This is because aux packet can be up to
     * 256bytes and we don't want to block sched that long
     */
    phy_mode = ble_ll_phy_to_phy_mode(aux_scan->aux_phy,
                                      BLE_HCI_LE_PHY_CODED_ANY);
    dur = ble_ll_pdu_tx_time_get(BLE_LL_SCHED_AUX_PTR_DFLT_BYTES_NUM, phy_mode);
    end_time = start_time + os_cputime_usecs_to_ticks(dur);

    sch->start_time = start_time;
    sch->remainder = start_time_rem_usecs;
    sch->end_time = end_time;

    OS_ENTER_CRITICAL(sr);

    if (!ble_ll_sched_insert_if_empty(sch)) {
        /* Nothing in schedule. Schedule as soon as possible
         * If we are here it means sch has been added to the scheduler */
        rc = 0;
        goto done;
    }

    /* Try to find slot for aux scan. */
    os_cputime_timer_stop(&g_ble_ll_sched_timer);
    TAILQ_FOREACH(entry, &g_ble_ll_sched_q, link) {
        /* We can insert if before entry in list (signed difference makes
         * the comparison safe across cputime counter wrap-around) */
        if ((int32_t)(sch->end_time - entry->start_time) <= 0) {
            rc = 0;
            TAILQ_INSERT_BEFORE(entry, sch, link);
            sch->enqueued = 1;
            break;
        }

        /* Check for overlapping events. For now drop if it overlaps with
         * anything. We can make it smarter later on
         */
        if (ble_ll_sched_is_overlap(sch, entry)) {
            OS_EXIT_CRITICAL(sr);
            return -1;
        }
    }

    /* TAILQ_FOREACH leaves entry NULL when the loop ran to completion:
     * no slot before an existing item was found, so append at the tail */
    if (!entry) {
        rc = 0;
        TAILQ_INSERT_TAIL(&g_ble_ll_sched_q, sch, link);
        sch->enqueued = 1;
    }

done:

    /* Get head of list to restart timer */
    sch = TAILQ_FIRST(&g_ble_ll_sched_q);

    OS_EXIT_CRITICAL(sr);

    /* Restart timer */
    BLE_LL_ASSERT(sch != NULL);
    os_cputime_timer_start(&g_ble_ll_sched_timer, sch->start_time);

    STATS_INC(ble_ll_stats, aux_scheduled);
    return rc;
}
1477 #endif
1478
1479 #if MYNEWT_VAL(BLE_LL_DIRECT_TEST_MODE) == 1
ble_ll_sched_dtm(struct ble_ll_sched_item * sch)1480 int ble_ll_sched_dtm(struct ble_ll_sched_item *sch)
1481 {
1482 int rc;
1483 os_sr_t sr;
1484 struct ble_ll_sched_item *entry;
1485
1486 OS_ENTER_CRITICAL(sr);
1487
1488 if (!ble_ll_sched_insert_if_empty(sch)) {
1489 /* Nothing in schedule. Schedule as soon as possible
1490 * If we are here it means sch has been added to the scheduler */
1491 rc = 0;
1492 goto done;
1493 }
1494
1495 /* Try to find slot for test. */
1496 os_cputime_timer_stop(&g_ble_ll_sched_timer);
1497 TAILQ_FOREACH(entry, &g_ble_ll_sched_q, link) {
1498 /* We can insert if before entry in list */
1499 if (sch->end_time <= entry->start_time) {
1500 rc = 0;
1501 TAILQ_INSERT_BEFORE(entry, sch, link);
1502 sch->enqueued = 1;
1503 break;
1504 }
1505
1506 /* Check for overlapping events. For now drop if it overlaps with
1507 * anything. We can make it smarter later on
1508 */
1509 if (ble_ll_sched_is_overlap(sch, entry)) {
1510 OS_EXIT_CRITICAL(sr);
1511 return -1;
1512 }
1513 }
1514
1515 if (!entry) {
1516 rc = 0;
1517 TAILQ_INSERT_TAIL(&g_ble_ll_sched_q, sch, link);
1518 sch->enqueued = 1;
1519 }
1520
1521 done:
1522
1523 /* Get head of list to restart timer */
1524 sch = TAILQ_FIRST(&g_ble_ll_sched_q);
1525
1526 #ifdef BLE_XCVR_RFCLK
1527 ble_ll_xcvr_rfclk_timer_start(sch->start_time);
1528 #endif
1529
1530 OS_EXIT_CRITICAL(sr);
1531
1532 /* Restart timer */
1533 BLE_LL_ASSERT(sch != NULL);
1534 os_cputime_timer_start(&g_ble_ll_sched_timer, sch->start_time);
1535
1536 return rc;
1537 }
1538 #endif
/**
 * Stop the scheduler.
 *
 * Only stops the schedule cputime timer; items already on the schedule
 * queue are left in place.
 *
 * Context: Link Layer task
 */
void
ble_ll_sched_stop(void)
{
    os_cputime_timer_stop(&g_ble_ll_sched_timer);
}
1549
/**
 * Initialize the scheduler. Should only be called once and should be called
 * before any of the scheduler API are called.
 *
 * Sets up the debug bookkeeping (if enabled), the global schedule offset,
 * the schedule cputime timer, and (if strict connection scheduling is
 * enabled) the period/epoch tick counts.
 *
 * @return int 0 on success (always succeeds)
 */
int
ble_ll_sched_init(void)
{
    /*
     * Initialize max early to large negative number. This is used
     * to determine the worst-case "early" time the schedule was called. Dont
     * expect this to be less than -3 or -4.
     */
#if (BLE_LL_SCHED_DEBUG == 1)
    g_ble_ll_sched_max_early = -50000;
#endif

    /*
     * This is the offset from the start of the scheduled item until the actual
     * tx/rx should occur, in ticks. We also "round up" to the nearest tick
     * (the extra 30 usecs added before converting accomplishes the rounding).
     */
    g_ble_ll_sched_offset_ticks =
        (uint8_t) os_cputime_usecs_to_ticks(XCVR_TX_SCHED_DELAY_USECS + 30);

    /* Initialize cputimer for the scheduler; ble_ll_sched_run() is the
     * timer expiry callback */
    os_cputime_timer_init(&g_ble_ll_sched_timer, ble_ll_sched_run, NULL);

#if MYNEWT_VAL(BLE_LL_STRICT_CONN_SCHEDULING)
    /* Strict scheduling divides time into fixed periods grouped in epochs */
    memset(&g_ble_ll_sched_data, 0, sizeof(struct ble_ll_sched_obj));
    g_ble_ll_sched_data.sch_ticks_per_period =
        os_cputime_usecs_to_ticks(MYNEWT_VAL(BLE_LL_USECS_PER_PERIOD));
    g_ble_ll_sched_data.sch_ticks_per_epoch = BLE_LL_SCHED_PERIODS *
        g_ble_ll_sched_data.sch_ticks_per_period;
#endif

    return 0;
}
1588