xref: /nrf52832-nimble/packages/NimBLE-latest/nimble/controller/src/ble_ll_sched.c (revision 042d53a763ad75cb1465103098bb88c245d95138)
1*042d53a7SEvalZero /*
2*042d53a7SEvalZero  * Licensed to the Apache Software Foundation (ASF) under one
3*042d53a7SEvalZero  * or more contributor license agreements.  See the NOTICE file
4*042d53a7SEvalZero  * distributed with this work for additional information
5*042d53a7SEvalZero  * regarding copyright ownership.  The ASF licenses this file
6*042d53a7SEvalZero  * to you under the Apache License, Version 2.0 (the
7*042d53a7SEvalZero  * "License"); you may not use this file except in compliance
8*042d53a7SEvalZero  * with the License.  You may obtain a copy of the License at
9*042d53a7SEvalZero  *
10*042d53a7SEvalZero  *  http://www.apache.org/licenses/LICENSE-2.0
11*042d53a7SEvalZero  *
12*042d53a7SEvalZero  * Unless required by applicable law or agreed to in writing,
13*042d53a7SEvalZero  * software distributed under the License is distributed on an
14*042d53a7SEvalZero  * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
15*042d53a7SEvalZero  * KIND, either express or implied.  See the License for the
16*042d53a7SEvalZero  * specific language governing permissions and limitations
17*042d53a7SEvalZero  * under the License.
18*042d53a7SEvalZero  */
19*042d53a7SEvalZero #include <stdint.h>
20*042d53a7SEvalZero #include <stdlib.h>
21*042d53a7SEvalZero #include <assert.h>
22*042d53a7SEvalZero #include <string.h>
23*042d53a7SEvalZero #include "os/os.h"
24*042d53a7SEvalZero #include "os/os_cputime.h"
25*042d53a7SEvalZero #include "ble/xcvr.h"
26*042d53a7SEvalZero #include "controller/ble_phy.h"
27*042d53a7SEvalZero #include "controller/ble_ll.h"
28*042d53a7SEvalZero #include "controller/ble_ll_sched.h"
29*042d53a7SEvalZero #include "controller/ble_ll_adv.h"
30*042d53a7SEvalZero #include "controller/ble_ll_scan.h"
31*042d53a7SEvalZero #include "controller/ble_ll_xcvr.h"
32*042d53a7SEvalZero #include "controller/ble_ll_trace.h"
33*042d53a7SEvalZero #include "ble_ll_conn_priv.h"
34*042d53a7SEvalZero 
/* XXX: this is temporary. Not sure what I want to do here */
/* Single hardware timer driving the scheduler; it is (re)armed with the
 * start time of the head item on the schedule queue.
 */
struct hal_timer g_ble_ll_sched_timer;

#ifdef BLE_XCVR_RFCLK
/* Settling time of crystal, in ticks */
uint8_t g_ble_ll_sched_xtal_ticks;
#endif

/* Ticks between a schedule item's start_time and the actual transmit
 * (anchor) point; subtracted/added in the reschedule/new-connection paths.
 */
uint8_t g_ble_ll_sched_offset_ticks;

/* Worst-case usecs an advertising event can occupy: longest advertising PDU,
 * plus inter-frame space, plus the maximum advertising event time, plus the
 * transceiver TX scheduling delay.
 */
#define BLE_LL_SCHED_ADV_WORST_CASE_USECS       \
    (BLE_LL_SCHED_MAX_ADV_PDU_USECS + BLE_LL_IFS + BLE_LL_SCHED_ADV_MAX_USECS \
     + XCVR_TX_SCHED_DELAY_USECS)

#if (BLE_LL_SCHED_DEBUG == 1)
/* Debug accounting: worst observed late/early execution of schedule items */
int32_t g_ble_ll_sched_max_late;
int32_t g_ble_ll_sched_max_early;
#endif

/* XXX: TODO:
 *  1) Add some accounting to the schedule code to see how late we are
 *  (min/max?)
 *
 *  2) Need to determine how we really want to handle the case when we execute
 *  a schedule item but there is a current event. We could:
 *      -> Reschedule the schedule item and let current event finish
 *      -> Kill the current event and run the scheduled item.
 *      -> Disable schedule timer while in an event; could cause us to be late.
 *      -> Wait for current event to finish hoping it does before schedule item.
 */

/* Queue for timers: time-ordered list of pending schedule items */
TAILQ_HEAD(ll_sched_qhead, ble_ll_sched_item) g_ble_ll_sched_q;

#if MYNEWT_VAL(BLE_LL_STRICT_CONN_SCHEDULING)
/* Strict-scheduling bookkeeping: epoch start, period size and occupancy */
struct ble_ll_sched_obj g_ble_ll_sched_data;
#endif
72*042d53a7SEvalZero 
73*042d53a7SEvalZero /**
74*042d53a7SEvalZero  * Checks if two events in the schedule will overlap in time. NOTE: consecutive
75*042d53a7SEvalZero  * schedule items can end and start at the same time.
76*042d53a7SEvalZero  *
77*042d53a7SEvalZero  * @param s1
78*042d53a7SEvalZero  * @param s2
79*042d53a7SEvalZero  *
80*042d53a7SEvalZero  * @return int 0: dont overlap 1:overlap
81*042d53a7SEvalZero  */
82*042d53a7SEvalZero static int
ble_ll_sched_is_overlap(struct ble_ll_sched_item * s1,struct ble_ll_sched_item * s2)83*042d53a7SEvalZero ble_ll_sched_is_overlap(struct ble_ll_sched_item *s1,
84*042d53a7SEvalZero                         struct ble_ll_sched_item *s2)
85*042d53a7SEvalZero {
86*042d53a7SEvalZero     int rc;
87*042d53a7SEvalZero 
88*042d53a7SEvalZero     rc = 1;
89*042d53a7SEvalZero     if ((int32_t)(s1->start_time - s2->start_time) < 0) {
90*042d53a7SEvalZero         /* Make sure this event does not overlap current event */
91*042d53a7SEvalZero         if ((int32_t)(s1->end_time - s2->start_time) <= 0) {
92*042d53a7SEvalZero             rc = 0;
93*042d53a7SEvalZero         }
94*042d53a7SEvalZero     } else {
95*042d53a7SEvalZero         /* Check for overlap */
96*042d53a7SEvalZero         if ((int32_t)(s1->start_time - s2->end_time) >= 0) {
97*042d53a7SEvalZero             rc = 0;
98*042d53a7SEvalZero         }
99*042d53a7SEvalZero     }
100*042d53a7SEvalZero 
101*042d53a7SEvalZero     return rc;
102*042d53a7SEvalZero }
103*042d53a7SEvalZero 
104*042d53a7SEvalZero /*
105*042d53a7SEvalZero  * Determines if the schedule item overlaps the currently running schedule
106*042d53a7SEvalZero  * item. We only care about connection schedule items
107*042d53a7SEvalZero  */
108*042d53a7SEvalZero int
ble_ll_sched_overlaps_current(struct ble_ll_sched_item * sch)109*042d53a7SEvalZero ble_ll_sched_overlaps_current(struct ble_ll_sched_item *sch)
110*042d53a7SEvalZero {
111*042d53a7SEvalZero     int rc;
112*042d53a7SEvalZero     uint32_t ce_end_time;
113*042d53a7SEvalZero 
114*042d53a7SEvalZero     rc = 0;
115*042d53a7SEvalZero     if (ble_ll_state_get() == BLE_LL_STATE_CONNECTION) {
116*042d53a7SEvalZero         ce_end_time = ble_ll_conn_get_ce_end_time();
117*042d53a7SEvalZero         if ((int32_t)(ce_end_time - sch->start_time) > 0) {
118*042d53a7SEvalZero             rc = 1;
119*042d53a7SEvalZero         }
120*042d53a7SEvalZero     }
121*042d53a7SEvalZero     return rc;
122*042d53a7SEvalZero }
123*042d53a7SEvalZero 
124*042d53a7SEvalZero static int
ble_ll_sched_conn_overlap(struct ble_ll_sched_item * entry)125*042d53a7SEvalZero ble_ll_sched_conn_overlap(struct ble_ll_sched_item *entry)
126*042d53a7SEvalZero {
127*042d53a7SEvalZero     int rc;
128*042d53a7SEvalZero     struct ble_ll_conn_sm *connsm;
129*042d53a7SEvalZero 
130*042d53a7SEvalZero     /* Should only be advertising or a connection here */
131*042d53a7SEvalZero     if (entry->sched_type == BLE_LL_SCHED_TYPE_CONN) {
132*042d53a7SEvalZero         connsm = (struct ble_ll_conn_sm *)entry->cb_arg;
133*042d53a7SEvalZero         entry->enqueued = 0;
134*042d53a7SEvalZero         TAILQ_REMOVE(&g_ble_ll_sched_q, entry, link);
135*042d53a7SEvalZero         ble_ll_event_send(&connsm->conn_ev_end);
136*042d53a7SEvalZero         rc = 0;
137*042d53a7SEvalZero     } else {
138*042d53a7SEvalZero         rc = -1;
139*042d53a7SEvalZero     }
140*042d53a7SEvalZero 
141*042d53a7SEvalZero     return rc;
142*042d53a7SEvalZero }
143*042d53a7SEvalZero 
144*042d53a7SEvalZero struct ble_ll_sched_item *
ble_ll_sched_insert_if_empty(struct ble_ll_sched_item * sch)145*042d53a7SEvalZero ble_ll_sched_insert_if_empty(struct ble_ll_sched_item *sch)
146*042d53a7SEvalZero {
147*042d53a7SEvalZero     struct ble_ll_sched_item *entry;
148*042d53a7SEvalZero 
149*042d53a7SEvalZero     entry = TAILQ_FIRST(&g_ble_ll_sched_q);
150*042d53a7SEvalZero     if (!entry) {
151*042d53a7SEvalZero         TAILQ_INSERT_HEAD(&g_ble_ll_sched_q, sch, link);
152*042d53a7SEvalZero         sch->enqueued = 1;
153*042d53a7SEvalZero     }
154*042d53a7SEvalZero     return entry;
155*042d53a7SEvalZero }
156*042d53a7SEvalZero 
/**
 * Re-insert a connection's schedule item into the schedule queue for its
 * next anchor point, evicting lower-priority overlapping items if needed.
 *
 * The item's window is [anchor_point - offset_ticks, ce_end_time]; a slave
 * additionally widens the start earlier by its current window widening.
 * Overlapped entries are evicted only if this connection is older (LRU) than
 * every overlapped connection; otherwise the reschedule fails. Restarts the
 * scheduler timer for the (possibly new) queue head before returning.
 *
 * Context: assumed callable with interrupts enabled (enters critical section).
 *
 * @param connsm Connection whose conn_sch is to be (re)scheduled.
 *
 * @return int 0: success; -1: start time already passed, overlaps the
 *         currently running connection event, or lost LRU arbitration.
 */
int
ble_ll_sched_conn_reschedule(struct ble_ll_conn_sm *connsm)
{
    int rc;
    os_sr_t sr;
    uint32_t usecs;
    struct ble_ll_sched_item *sch;
    struct ble_ll_sched_item *start_overlap;
    struct ble_ll_sched_item *end_overlap;
    struct ble_ll_sched_item *entry;
    struct ble_ll_conn_sm *tmp;

    /* Get schedule element from connection */
    sch = &connsm->conn_sch;

    /* Set schedule start and end times */
    sch->start_time = connsm->anchor_point - g_ble_ll_sched_offset_ticks;
    if (connsm->conn_role == BLE_LL_CONN_ROLE_SLAVE) {
        /* Slave opens its window early to absorb master clock drift; the +1
         * tick covers the fractional-tick rounding of the usecs conversion.
         */
        usecs = connsm->slave_cur_window_widening;
        sch->start_time -= (os_cputime_usecs_to_ticks(usecs) + 1);
        sch->remainder = 0;
    } else {
        sch->remainder = connsm->anchor_point_usecs;
    }
    sch->end_time = connsm->ce_end_time;

    /* Better be past current time or we just leave */
    if ((int32_t)(sch->start_time - os_cputime_get32()) < 0) {
        return -1;
    }

    /* We have to find a place for this schedule */
    OS_ENTER_CRITICAL(sr);

    if (ble_ll_sched_overlaps_current(sch)) {
        OS_EXIT_CRITICAL(sr);
        return -1;
    }

    /* Stop timer since we will add an element */
    os_cputime_timer_stop(&g_ble_ll_sched_timer);

    /* Walk the time-ordered queue: collect the contiguous run of overlapped
     * entries [start_overlap, end_overlap] to evict, or insert before the
     * first entry that starts after sch ends.
     */
    start_overlap = NULL;
    end_overlap = NULL;
    rc = 0;
    TAILQ_FOREACH(entry, &g_ble_ll_sched_q, link) {
        if (ble_ll_sched_is_overlap(sch, entry)) {
            if (entry->sched_type == BLE_LL_SCHED_TYPE_AUX_SCAN) {
                /* Do nothing, we start_mark overlap below */
            } else if (!ble_ll_conn_is_lru((struct ble_ll_conn_sm *)sch->cb_arg,
                                    (struct ble_ll_conn_sm *)entry->cb_arg)) {
                /* Only insert if this element is older than all that we
                 * overlap
                 */
                start_overlap = NULL;
                rc = -1;
                break;
            }

            /* Extend (or begin) the run of entries to evict */
            if (start_overlap == NULL) {
                start_overlap = entry;
                end_overlap = entry;
            } else {
                end_overlap = entry;
            }
        } else {
            /* First non-overlapping entry that starts after sch ends: this
             * is the insertion point.
             */
            if ((int32_t)(sch->end_time - entry->start_time) <= 0) {
                rc = 0;
                TAILQ_INSERT_BEFORE(entry, sch, link);
                break;
            }
        }
    }

    /* Loop ran off the end without inserting: append at tail */
    if (!rc) {
        if (!entry) {
            TAILQ_INSERT_TAIL(&g_ble_ll_sched_q, sch, link);
        }
        sch->enqueued = 1;
    }

    /* Remove first to last scheduled elements */
    entry = start_overlap;
    while (entry) {
        /* Save successor before unlinking (start_overlap reused as cursor) */
        start_overlap = TAILQ_NEXT(entry, link);
        switch (entry->sched_type) {
        case BLE_LL_SCHED_TYPE_CONN:
            tmp = (struct ble_ll_conn_sm *)entry->cb_arg;
            ble_ll_event_send(&tmp->conn_ev_end);
            break;
        case BLE_LL_SCHED_TYPE_ADV:
            ble_ll_adv_event_rmvd_from_sched((struct ble_ll_adv_sm *)
                                              entry->cb_arg);
            break;
#if MYNEWT_VAL(BLE_LL_CFG_FEAT_LL_EXT_ADV)
        case BLE_LL_SCHED_TYPE_AUX_SCAN:
            ble_ll_scan_end_adv_evt((struct ble_ll_aux_data *)
                                      entry->cb_arg);
            break;
#endif
        default:
            BLE_LL_ASSERT(0);
            break;
        }

        TAILQ_REMOVE(&g_ble_ll_sched_q, entry, link);
        entry->enqueued = 0;

        if (entry == end_overlap) {
            break;
        }
        entry = start_overlap;
    }

#ifdef BLE_XCVR_RFCLK
    /* If our item became the queue head, start the RF clock settle timer */
    entry = TAILQ_FIRST(&g_ble_ll_sched_q);
    if (entry == sch) {
        ble_ll_xcvr_rfclk_timer_start(sch->start_time);
    } else {
        sch = entry;
    }
#else
    /* Get first on list */
    sch = TAILQ_FIRST(&g_ble_ll_sched_q);
#endif

    OS_EXIT_CRITICAL(sr);

    /* Restart timer for the (possibly new) queue head */
    BLE_LL_ASSERT(sch != NULL);
    os_cputime_timer_start(&g_ble_ll_sched_timer, sch->start_time);

    return rc;
}
292*042d53a7SEvalZero 
293*042d53a7SEvalZero /**
294*042d53a7SEvalZero  * Called to schedule a connection when the current role is master.
295*042d53a7SEvalZero  *
296*042d53a7SEvalZero  * Context: Interrupt
297*042d53a7SEvalZero  *
298*042d53a7SEvalZero  * @param connsm
299*042d53a7SEvalZero  * @param ble_hdr
300*042d53a7SEvalZero  * @param pyld_len
301*042d53a7SEvalZero  *
302*042d53a7SEvalZero  * @return int
303*042d53a7SEvalZero  */
304*042d53a7SEvalZero #if MYNEWT_VAL(BLE_LL_STRICT_CONN_SCHEDULING)
305*042d53a7SEvalZero int
ble_ll_sched_master_new(struct ble_ll_conn_sm * connsm,struct ble_mbuf_hdr * ble_hdr,uint8_t pyld_len)306*042d53a7SEvalZero ble_ll_sched_master_new(struct ble_ll_conn_sm *connsm,
307*042d53a7SEvalZero                         struct ble_mbuf_hdr *ble_hdr, uint8_t pyld_len)
308*042d53a7SEvalZero {
309*042d53a7SEvalZero     int rc;
310*042d53a7SEvalZero     os_sr_t sr;
311*042d53a7SEvalZero     uint32_t initial_start;
312*042d53a7SEvalZero     uint32_t earliest_start;
313*042d53a7SEvalZero     uint32_t earliest_end;
314*042d53a7SEvalZero     uint32_t dur;
315*042d53a7SEvalZero     uint32_t itvl_t;
316*042d53a7SEvalZero     uint32_t adv_rxend;
317*042d53a7SEvalZero     int i;
318*042d53a7SEvalZero     uint32_t tpp;
319*042d53a7SEvalZero     uint32_t tse;
320*042d53a7SEvalZero     uint32_t np;
321*042d53a7SEvalZero     uint32_t cp;
322*042d53a7SEvalZero     uint32_t tick_in_period;
323*042d53a7SEvalZero 
324*042d53a7SEvalZero     struct ble_ll_sched_item *entry;
325*042d53a7SEvalZero     struct ble_ll_sched_item *sch;
326*042d53a7SEvalZero 
327*042d53a7SEvalZero     /* Better have a connsm */
328*042d53a7SEvalZero     BLE_LL_ASSERT(connsm != NULL);
329*042d53a7SEvalZero 
330*042d53a7SEvalZero     /* Get schedule element from connection */
331*042d53a7SEvalZero     rc = -1;
332*042d53a7SEvalZero     sch = &connsm->conn_sch;
333*042d53a7SEvalZero 
334*042d53a7SEvalZero     /* XXX:
335*042d53a7SEvalZero      * The calculations for the 32kHz crystal bear alot of explanation. The
336*042d53a7SEvalZero      * earliest possible time that the master can start the connection with a
337*042d53a7SEvalZero      * slave is 1.25 msecs from the end of the connection request. The
338*042d53a7SEvalZero      * connection request is sent an IFS time from the end of the advertising
339*042d53a7SEvalZero      * packet that was received plus the time it takes to send the connection
340*042d53a7SEvalZero      * request. At 1 Mbps, this is 1752 usecs, or 57.41 ticks. Using 57 ticks
341*042d53a7SEvalZero      * makes us off ~13 usecs. Since we dont want to actually calculate the
342*042d53a7SEvalZero      * receive end time tick (this would take too long), we assume the end of
343*042d53a7SEvalZero      * the advertising PDU is 'now' (we call os_cputime_get32). We dont know
344*042d53a7SEvalZero      * how much time it will take to service the ISR but if we are more than the
345*042d53a7SEvalZero      * rx to tx time of the chip we will not be successful transmitting the
346*042d53a7SEvalZero      * connect request. All this means is that we presume that the slave will
347*042d53a7SEvalZero      * receive the connect request later than we expect but no earlier than
348*042d53a7SEvalZero      * 13 usecs before (this is important).
349*042d53a7SEvalZero      *
350*042d53a7SEvalZero      * The code then attempts to schedule the connection at the
351*042d53a7SEvalZero      * earliest time although this may not be possible. When the actual
352*042d53a7SEvalZero      * schedule start time is determined, the master has to determine if this
353*042d53a7SEvalZero      * time is more than a transmit window offset interval (1.25 msecs). The
354*042d53a7SEvalZero      * master has to tell the slave how many transmit window offsets there are
355*042d53a7SEvalZero      * from the earliest possible time to when the actual transmit start will
356*042d53a7SEvalZero      * occur. Later in this function you will see the calculation. The actual
357*042d53a7SEvalZero      * transmission start has to occur within the transmit window. The transmit
358*042d53a7SEvalZero      * window interval is in units of 1.25 msecs and has to be at least 1. To
359*042d53a7SEvalZero      * make things a bit easier (but less power efficient for the slave), we
360*042d53a7SEvalZero      * use a transmit window of 2. We do this because we dont quite know the
361*042d53a7SEvalZero      * exact start of the transmission and if we are too early or too late we
362*042d53a7SEvalZero      * could miss the transmit window. A final note: the actual transmission
363*042d53a7SEvalZero      * start (the anchor point) is sched offset ticks from the schedule start
364*042d53a7SEvalZero      * time. We dont add this to the calculation when calculating the window
365*042d53a7SEvalZero      * offset. The reason we dont do this is we want to insure we transmit
366*042d53a7SEvalZero      * after the window offset we tell the slave. For example, say we think
367*042d53a7SEvalZero      * we are transmitting 1253 usecs from the earliest start. This would cause
368*042d53a7SEvalZero      * us to send a transmit window offset of 1. Since we are actually
369*042d53a7SEvalZero      * transmitting earlier than the slave thinks we could end up transmitting
370*042d53a7SEvalZero      * before the window offset. Transmitting later is fine since we have the
371*042d53a7SEvalZero      * transmit window to do so. Transmitting before is bad, since the slave
372*042d53a7SEvalZero      * wont be listening. We could do better calculation if we wanted to use
373*042d53a7SEvalZero      * a transmit window of 1 as opposed to 2, but for now we dont care.
374*042d53a7SEvalZero      */
375*042d53a7SEvalZero     dur = os_cputime_usecs_to_ticks(g_ble_ll_sched_data.sch_ticks_per_period);
376*042d53a7SEvalZero     adv_rxend = os_cputime_get32();
377*042d53a7SEvalZero     if (ble_hdr->rxinfo.channel >= BLE_PHY_NUM_DATA_CHANS) {
378*042d53a7SEvalZero         /*
379*042d53a7SEvalZero          * We received packet on advertising channel which means this is a legacy
380*042d53a7SEvalZero          * PDU on 1 Mbps - we do as described above.
381*042d53a7SEvalZero          */
382*042d53a7SEvalZero         earliest_start = adv_rxend + 57;
383*042d53a7SEvalZero     } else {
384*042d53a7SEvalZero         /*
385*042d53a7SEvalZero          * The calculations are similar as above.
386*042d53a7SEvalZero          *
387*042d53a7SEvalZero          * We received packet on data channel which means this is AUX_ADV_IND
388*042d53a7SEvalZero          * received on secondary adv channel. We can schedule first packet at
389*042d53a7SEvalZero          * the earliest after "T_IFS + AUX_CONNECT_REQ + transmitWindowDelay".
390*042d53a7SEvalZero          * AUX_CONNECT_REQ and transmitWindowDelay times vary depending on which
391*042d53a7SEvalZero          * PHY we received on.
392*042d53a7SEvalZero          *
393*042d53a7SEvalZero          */
394*042d53a7SEvalZero         if (ble_hdr->rxinfo.phy == BLE_PHY_1M) {
395*042d53a7SEvalZero             // 150 + 352 + 2500 = 3002us = 98.37 ticks
396*042d53a7SEvalZero             earliest_start = adv_rxend + 98;
397*042d53a7SEvalZero         } else if (ble_hdr->rxinfo.phy == BLE_PHY_2M) {
398*042d53a7SEvalZero             // 150 + 180 + 2500 = 2830us = 92.73 ticks
399*042d53a7SEvalZero             earliest_start = adv_rxend + 93;
400*042d53a7SEvalZero         } else if (ble_hdr->rxinfo.phy == BLE_PHY_CODED) {
401*042d53a7SEvalZero             // 150 + 2896 + 3750 = 6796us = 222.69 ticks
402*042d53a7SEvalZero             earliest_start = adv_rxend + 223;
403*042d53a7SEvalZero         } else {
404*042d53a7SEvalZero             BLE_LL_ASSERT(0);
405*042d53a7SEvalZero         }
406*042d53a7SEvalZero     }
407*042d53a7SEvalZero     earliest_start += MYNEWT_VAL(BLE_LL_CONN_INIT_MIN_WIN_OFFSET) *
408*042d53a7SEvalZero                       BLE_LL_SCHED_32KHZ_TICKS_PER_SLOT;
409*042d53a7SEvalZero     itvl_t = connsm->conn_itvl_ticks;
410*042d53a7SEvalZero 
411*042d53a7SEvalZero     /* We have to find a place for this schedule */
412*042d53a7SEvalZero     OS_ENTER_CRITICAL(sr);
413*042d53a7SEvalZero 
414*042d53a7SEvalZero     /*
415*042d53a7SEvalZero      * Are there any allocated periods? If not, set epoch start to earliest
416*042d53a7SEvalZero      * time
417*042d53a7SEvalZero      */
418*042d53a7SEvalZero     if (g_ble_ll_sched_data.sch_num_occ_periods == 0) {
419*042d53a7SEvalZero         g_ble_ll_sched_data.sch_epoch_start = earliest_start;
420*042d53a7SEvalZero         cp = 0;
421*042d53a7SEvalZero     } else {
422*042d53a7SEvalZero         /*
423*042d53a7SEvalZero          * Earliest start must occur on period boundary.
424*042d53a7SEvalZero          * (tse = ticks since epoch)
425*042d53a7SEvalZero          */
426*042d53a7SEvalZero         tpp = g_ble_ll_sched_data.sch_ticks_per_period;
427*042d53a7SEvalZero         tse = earliest_start - g_ble_ll_sched_data.sch_epoch_start;
428*042d53a7SEvalZero         np = tse / tpp;
429*042d53a7SEvalZero         cp = np % BLE_LL_SCHED_PERIODS;
430*042d53a7SEvalZero         tick_in_period = tse - (np * tpp);
431*042d53a7SEvalZero         if (tick_in_period != 0) {
432*042d53a7SEvalZero             ++cp;
433*042d53a7SEvalZero             if (cp == BLE_LL_SCHED_PERIODS) {
434*042d53a7SEvalZero                 cp = 0;
435*042d53a7SEvalZero             }
436*042d53a7SEvalZero             earliest_start += (tpp - tick_in_period);
437*042d53a7SEvalZero         }
438*042d53a7SEvalZero 
439*042d53a7SEvalZero         /* Now find first un-occupied period starting from cp */
440*042d53a7SEvalZero         for (i = 0; i < BLE_LL_SCHED_PERIODS; ++i) {
441*042d53a7SEvalZero             if (g_ble_ll_sched_data.sch_occ_period_mask & (1 << cp)) {
442*042d53a7SEvalZero                 ++cp;
443*042d53a7SEvalZero                 if (cp == BLE_LL_SCHED_PERIODS) {
444*042d53a7SEvalZero                     cp = 0;
445*042d53a7SEvalZero                 }
446*042d53a7SEvalZero                 earliest_start += tpp;
447*042d53a7SEvalZero             } else {
448*042d53a7SEvalZero                 /* not occupied */
449*042d53a7SEvalZero                 break;
450*042d53a7SEvalZero             }
451*042d53a7SEvalZero         }
452*042d53a7SEvalZero         /* Should never happen but if it does... */
453*042d53a7SEvalZero         if (i == BLE_LL_SCHED_PERIODS) {
454*042d53a7SEvalZero             OS_EXIT_CRITICAL(sr);
455*042d53a7SEvalZero             return rc;
456*042d53a7SEvalZero         }
457*042d53a7SEvalZero     }
458*042d53a7SEvalZero 
459*042d53a7SEvalZero     sch->start_time = earliest_start;
460*042d53a7SEvalZero     initial_start = earliest_start;
461*042d53a7SEvalZero     earliest_end = earliest_start + dur;
462*042d53a7SEvalZero 
463*042d53a7SEvalZero     if (!ble_ll_sched_insert_if_empty(sch)) {
464*042d53a7SEvalZero         /* Nothing in schedule. Schedule as soon as possible */
465*042d53a7SEvalZero         rc = 0;
466*042d53a7SEvalZero         connsm->tx_win_off = MYNEWT_VAL(BLE_LL_CONN_INIT_MIN_WIN_OFFSET);
467*042d53a7SEvalZero     } else {
468*042d53a7SEvalZero         os_cputime_timer_stop(&g_ble_ll_sched_timer);
469*042d53a7SEvalZero         TAILQ_FOREACH(entry, &g_ble_ll_sched_q, link) {
470*042d53a7SEvalZero             /* Set these because overlap function needs them to be set */
471*042d53a7SEvalZero             sch->start_time = earliest_start;
472*042d53a7SEvalZero             sch->end_time = earliest_end;
473*042d53a7SEvalZero 
474*042d53a7SEvalZero             /* We can insert if before entry in list */
475*042d53a7SEvalZero             if ((int32_t)(sch->end_time - entry->start_time) <= 0) {
476*042d53a7SEvalZero                 if ((earliest_start - initial_start) <= itvl_t) {
477*042d53a7SEvalZero                     rc = 0;
478*042d53a7SEvalZero                     TAILQ_INSERT_BEFORE(entry, sch, link);
479*042d53a7SEvalZero                 }
480*042d53a7SEvalZero                 break;
481*042d53a7SEvalZero             }
482*042d53a7SEvalZero 
483*042d53a7SEvalZero             /* Check for overlapping events */
484*042d53a7SEvalZero             if (ble_ll_sched_is_overlap(sch, entry)) {
485*042d53a7SEvalZero                 /* Earliest start is end of this event since we overlap */
486*042d53a7SEvalZero                 earliest_start = entry->end_time;
487*042d53a7SEvalZero                 earliest_end = earliest_start + dur;
488*042d53a7SEvalZero             }
489*042d53a7SEvalZero         }
490*042d53a7SEvalZero 
491*042d53a7SEvalZero         /* Must be able to schedule within one connection interval */
492*042d53a7SEvalZero         if (!entry) {
493*042d53a7SEvalZero             if ((earliest_start - initial_start) <= itvl_t) {
494*042d53a7SEvalZero                 rc = 0;
495*042d53a7SEvalZero                 TAILQ_INSERT_TAIL(&g_ble_ll_sched_q, sch, link);
496*042d53a7SEvalZero             }
497*042d53a7SEvalZero         }
498*042d53a7SEvalZero 
499*042d53a7SEvalZero         if (!rc) {
500*042d53a7SEvalZero             /* calculate number of window offsets. Each offset is 1.25 ms */
501*042d53a7SEvalZero             sch->enqueued = 1;
502*042d53a7SEvalZero             /*
503*042d53a7SEvalZero              * NOTE: we dont add sched offset ticks as we want to under-estimate
504*042d53a7SEvalZero              * the transmit window slightly since the window size is currently
505*042d53a7SEvalZero              * 2 when using a 32768 crystal.
506*042d53a7SEvalZero              */
507*042d53a7SEvalZero             dur = os_cputime_ticks_to_usecs(earliest_start - initial_start);
508*042d53a7SEvalZero             connsm->tx_win_off = dur / BLE_LL_CONN_TX_OFF_USECS;
509*042d53a7SEvalZero         }
510*042d53a7SEvalZero     }
511*042d53a7SEvalZero 
512*042d53a7SEvalZero     if (!rc) {
513*042d53a7SEvalZero         sch->start_time = earliest_start;
514*042d53a7SEvalZero         sch->end_time = earliest_end;
515*042d53a7SEvalZero         /*
516*042d53a7SEvalZero          * Since we have the transmit window to transmit in, we dont need
517*042d53a7SEvalZero          * to set the anchor point usecs; just transmit to the nearest tick.
518*042d53a7SEvalZero          */
519*042d53a7SEvalZero         connsm->anchor_point = earliest_start + g_ble_ll_sched_offset_ticks;
520*042d53a7SEvalZero         connsm->anchor_point_usecs = 0;
521*042d53a7SEvalZero         connsm->ce_end_time = earliest_end;
522*042d53a7SEvalZero         connsm->period_occ_mask = (1 << cp);
523*042d53a7SEvalZero         g_ble_ll_sched_data.sch_occ_period_mask |= connsm->period_occ_mask;
524*042d53a7SEvalZero         ++g_ble_ll_sched_data.sch_num_occ_periods;
525*042d53a7SEvalZero     }
526*042d53a7SEvalZero 
527*042d53a7SEvalZero 
528*042d53a7SEvalZero     /* Get head of list to restart timer */
529*042d53a7SEvalZero     sch = TAILQ_FIRST(&g_ble_ll_sched_q);
530*042d53a7SEvalZero 
531*042d53a7SEvalZero     OS_EXIT_CRITICAL(sr);
532*042d53a7SEvalZero 
533*042d53a7SEvalZero     os_cputime_timer_start(&g_ble_ll_sched_timer, sch->start_time);
534*042d53a7SEvalZero 
535*042d53a7SEvalZero     return rc;
536*042d53a7SEvalZero }
537*042d53a7SEvalZero #else
538*042d53a7SEvalZero int
ble_ll_sched_master_new(struct ble_ll_conn_sm * connsm,struct ble_mbuf_hdr * ble_hdr,uint8_t pyld_len)539*042d53a7SEvalZero ble_ll_sched_master_new(struct ble_ll_conn_sm *connsm,
540*042d53a7SEvalZero                         struct ble_mbuf_hdr *ble_hdr, uint8_t pyld_len)
541*042d53a7SEvalZero {
542*042d53a7SEvalZero     int rc;
543*042d53a7SEvalZero     os_sr_t sr;
544*042d53a7SEvalZero     uint8_t req_slots;
545*042d53a7SEvalZero     uint32_t initial_start;
546*042d53a7SEvalZero     uint32_t earliest_start;
547*042d53a7SEvalZero     uint32_t earliest_end;
548*042d53a7SEvalZero     uint32_t dur;
549*042d53a7SEvalZero     uint32_t itvl_t;
550*042d53a7SEvalZero     uint32_t adv_rxend;
551*042d53a7SEvalZero     struct ble_ll_sched_item *entry;
552*042d53a7SEvalZero     struct ble_ll_sched_item *sch;
553*042d53a7SEvalZero 
554*042d53a7SEvalZero     /*
555*042d53a7SEvalZero      * XXX: TODO this code assumes the advertisement and connect request were
556*042d53a7SEvalZero      * sent at 1Mbps.
557*042d53a7SEvalZero      */
558*042d53a7SEvalZero 
559*042d53a7SEvalZero     /* Get schedule element from connection */
560*042d53a7SEvalZero     rc = -1;
561*042d53a7SEvalZero     sch = &connsm->conn_sch;
562*042d53a7SEvalZero     req_slots = MYNEWT_VAL(BLE_LL_CONN_INIT_SLOTS);
563*042d53a7SEvalZero 
564*042d53a7SEvalZero     /* XXX:
565*042d53a7SEvalZero      * The calculations for the 32kHz crystal bear alot of explanation. The
566*042d53a7SEvalZero      * earliest possible time that the master can start the connection with a
567*042d53a7SEvalZero      * slave is 1.25 msecs from the end of the connection request. The
568*042d53a7SEvalZero      * connection request is sent an IFS time from the end of the advertising
569*042d53a7SEvalZero      * packet that was received plus the time it takes to send the connection
570*042d53a7SEvalZero      * request. At 1 Mbps, this is 1752 usecs, or 57.41 ticks. Using 57 ticks
571*042d53a7SEvalZero      * makes us off ~13 usecs. Since we dont want to actually calculate the
572*042d53a7SEvalZero      * receive end time tick (this would take too long), we assume the end of
573*042d53a7SEvalZero      * the advertising PDU is 'now' (we call os_cputime_get32). We dont know
574*042d53a7SEvalZero      * how much time it will take to service the ISR but if we are more than the
575*042d53a7SEvalZero      * rx to tx time of the chip we will not be successful transmitting the
576*042d53a7SEvalZero      * connect request. All this means is that we presume that the slave will
577*042d53a7SEvalZero      * receive the connect request later than we expect but no earlier than
578*042d53a7SEvalZero      * 13 usecs before (this is important).
579*042d53a7SEvalZero      *
580*042d53a7SEvalZero      * The code then attempts to schedule the connection at the
581*042d53a7SEvalZero      * earliest time although this may not be possible. When the actual
582*042d53a7SEvalZero      * schedule start time is determined, the master has to determine if this
583*042d53a7SEvalZero      * time is more than a transmit window offset interval (1.25 msecs). The
584*042d53a7SEvalZero      * master has to tell the slave how many transmit window offsets there are
585*042d53a7SEvalZero      * from the earliest possible time to when the actual transmit start will
586*042d53a7SEvalZero      * occur. Later in this function you will see the calculation. The actual
587*042d53a7SEvalZero      * transmission start has to occur within the transmit window. The transmit
588*042d53a7SEvalZero      * window interval is in units of 1.25 msecs and has to be at least 1. To
589*042d53a7SEvalZero      * make things a bit easier (but less power efficient for the slave), we
590*042d53a7SEvalZero      * use a transmit window of 2. We do this because we dont quite know the
591*042d53a7SEvalZero      * exact start of the transmission and if we are too early or too late we
592*042d53a7SEvalZero      * could miss the transmit window. A final note: the actual transmission
593*042d53a7SEvalZero      * start (the anchor point) is sched offset ticks from the schedule start
594*042d53a7SEvalZero      * time. We dont add this to the calculation when calculating the window
595*042d53a7SEvalZero      * offset. The reason we dont do this is we want to insure we transmit
596*042d53a7SEvalZero      * after the window offset we tell the slave. For example, say we think
597*042d53a7SEvalZero      * we are transmitting 1253 usecs from the earliest start. This would cause
598*042d53a7SEvalZero      * us to send a transmit window offset of 1. Since we are actually
599*042d53a7SEvalZero      * transmitting earlier than the slave thinks we could end up transmitting
600*042d53a7SEvalZero      * before the window offset. Transmitting later is fine since we have the
601*042d53a7SEvalZero      * transmit window to do so. Transmitting before is bad, since the slave
602*042d53a7SEvalZero      * wont be listening. We could do better calculation if we wanted to use
603*042d53a7SEvalZero      * a transmit window of 1 as opposed to 2, but for now we dont care.
604*042d53a7SEvalZero      */
605*042d53a7SEvalZero     dur = req_slots * BLE_LL_SCHED_32KHZ_TICKS_PER_SLOT;
606*042d53a7SEvalZero     adv_rxend = os_cputime_get32();
607*042d53a7SEvalZero     if (ble_hdr->rxinfo.channel >= BLE_PHY_NUM_DATA_CHANS) {
608*042d53a7SEvalZero         /*
609*042d53a7SEvalZero          * We received packet on advertising channel which means this is a legacy
610*042d53a7SEvalZero          * PDU on 1 Mbps - we do as described above.
611*042d53a7SEvalZero          */
612*042d53a7SEvalZero         earliest_start = adv_rxend + 57;
613*042d53a7SEvalZero     } else {
614*042d53a7SEvalZero         /*
615*042d53a7SEvalZero          * The calculations are similar as above.
616*042d53a7SEvalZero          *
617*042d53a7SEvalZero          * We received packet on data channel which means this is AUX_ADV_IND
618*042d53a7SEvalZero          * received on secondary adv channel. We can schedule first packet at
619*042d53a7SEvalZero          * the earliest after "T_IFS + AUX_CONNECT_REQ + transmitWindowDelay".
620*042d53a7SEvalZero          * AUX_CONNECT_REQ and transmitWindowDelay times vary depending on which
621*042d53a7SEvalZero          * PHY we received on.
622*042d53a7SEvalZero          *
623*042d53a7SEvalZero          */
624*042d53a7SEvalZero         if (ble_hdr->rxinfo.phy == BLE_PHY_1M) {
625*042d53a7SEvalZero             // 150 + 352 + 2500 = 3002us = 98.37 ticks
626*042d53a7SEvalZero             earliest_start = adv_rxend + 98;
627*042d53a7SEvalZero         } else if (ble_hdr->rxinfo.phy == BLE_PHY_2M) {
628*042d53a7SEvalZero             // 150 + 180 + 2500 = 2830us = 92.73 ticks
629*042d53a7SEvalZero             earliest_start = adv_rxend + 93;
630*042d53a7SEvalZero         } else if (ble_hdr->rxinfo.phy == BLE_PHY_CODED) {
631*042d53a7SEvalZero             // 150 + 2896 + 3750 = 6796us = 222.69 ticks
632*042d53a7SEvalZero             earliest_start = adv_rxend + 223;
633*042d53a7SEvalZero         } else {
634*042d53a7SEvalZero             BLE_LL_ASSERT(0);
635*042d53a7SEvalZero         }
636*042d53a7SEvalZero     }
637*042d53a7SEvalZero     earliest_start += MYNEWT_VAL(BLE_LL_CONN_INIT_MIN_WIN_OFFSET) *
638*042d53a7SEvalZero                       BLE_LL_SCHED_32KHZ_TICKS_PER_SLOT;
639*042d53a7SEvalZero     earliest_end = earliest_start + dur;
640*042d53a7SEvalZero     itvl_t = connsm->conn_itvl_ticks;
641*042d53a7SEvalZero 
642*042d53a7SEvalZero     /* We have to find a place for this schedule */
643*042d53a7SEvalZero     OS_ENTER_CRITICAL(sr);
644*042d53a7SEvalZero 
645*042d53a7SEvalZero     /* The schedule item must occur after current running item (if any) */
646*042d53a7SEvalZero     sch->start_time = earliest_start;
647*042d53a7SEvalZero     initial_start = earliest_start;
648*042d53a7SEvalZero 
649*042d53a7SEvalZero     if (!ble_ll_sched_insert_if_empty(sch)) {
650*042d53a7SEvalZero         /* Nothing in schedule. Schedule as soon as possible */
651*042d53a7SEvalZero         rc = 0;
652*042d53a7SEvalZero         connsm->tx_win_off = MYNEWT_VAL(BLE_LL_CONN_INIT_MIN_WIN_OFFSET);
653*042d53a7SEvalZero     } else {
654*042d53a7SEvalZero         os_cputime_timer_stop(&g_ble_ll_sched_timer);
655*042d53a7SEvalZero         TAILQ_FOREACH(entry, &g_ble_ll_sched_q, link) {
656*042d53a7SEvalZero             /* Set these because overlap function needs them to be set */
657*042d53a7SEvalZero             sch->start_time = earliest_start;
658*042d53a7SEvalZero             sch->end_time = earliest_end;
659*042d53a7SEvalZero 
660*042d53a7SEvalZero             /* We can insert if before entry in list */
661*042d53a7SEvalZero             if ((int32_t)(sch->end_time - entry->start_time) <= 0) {
662*042d53a7SEvalZero                 if ((earliest_start - initial_start) <= itvl_t) {
663*042d53a7SEvalZero                     rc = 0;
664*042d53a7SEvalZero                     TAILQ_INSERT_BEFORE(entry, sch, link);
665*042d53a7SEvalZero                 }
666*042d53a7SEvalZero                 break;
667*042d53a7SEvalZero             }
668*042d53a7SEvalZero 
669*042d53a7SEvalZero             /* Check for overlapping events */
670*042d53a7SEvalZero             if (ble_ll_sched_is_overlap(sch, entry)) {
671*042d53a7SEvalZero                 /* Earliest start is end of this event since we overlap */
672*042d53a7SEvalZero                 earliest_start = entry->end_time;
673*042d53a7SEvalZero                 earliest_end = earliest_start + dur;
674*042d53a7SEvalZero             }
675*042d53a7SEvalZero         }
676*042d53a7SEvalZero 
677*042d53a7SEvalZero         /* Must be able to schedule within one connection interval */
678*042d53a7SEvalZero         if (!entry) {
679*042d53a7SEvalZero             if ((earliest_start - initial_start) <= itvl_t) {
680*042d53a7SEvalZero                 rc = 0;
681*042d53a7SEvalZero                 TAILQ_INSERT_TAIL(&g_ble_ll_sched_q, sch, link);
682*042d53a7SEvalZero             }
683*042d53a7SEvalZero         }
684*042d53a7SEvalZero 
685*042d53a7SEvalZero         if (!rc) {
686*042d53a7SEvalZero             /* calculate number of window offsets. Each offset is 1.25 ms */
687*042d53a7SEvalZero             sch->enqueued = 1;
688*042d53a7SEvalZero             /*
689*042d53a7SEvalZero              * NOTE: we dont add sched offset ticks as we want to under-estimate
690*042d53a7SEvalZero              * the transmit window slightly since the window size is currently
691*042d53a7SEvalZero              * 2 when using a 32768 crystal.
692*042d53a7SEvalZero              */
693*042d53a7SEvalZero             dur = os_cputime_ticks_to_usecs(earliest_start - initial_start);
694*042d53a7SEvalZero             connsm->tx_win_off = dur / BLE_LL_CONN_TX_OFF_USECS;
695*042d53a7SEvalZero         }
696*042d53a7SEvalZero     }
697*042d53a7SEvalZero 
698*042d53a7SEvalZero     if (!rc) {
699*042d53a7SEvalZero         sch->start_time = earliest_start;
700*042d53a7SEvalZero         sch->end_time = earliest_end;
701*042d53a7SEvalZero         /*
702*042d53a7SEvalZero          * Since we have the transmit window to transmit in, we dont need
703*042d53a7SEvalZero          * to set the anchor point usecs; just transmit to the nearest tick.
704*042d53a7SEvalZero          */
705*042d53a7SEvalZero         connsm->anchor_point = earliest_start + g_ble_ll_sched_offset_ticks;
706*042d53a7SEvalZero         connsm->anchor_point_usecs = 0;
707*042d53a7SEvalZero         connsm->ce_end_time = earliest_end;
708*042d53a7SEvalZero     }
709*042d53a7SEvalZero 
710*042d53a7SEvalZero     /* Get head of list to restart timer */
711*042d53a7SEvalZero     sch = TAILQ_FIRST(&g_ble_ll_sched_q);
712*042d53a7SEvalZero 
713*042d53a7SEvalZero     OS_EXIT_CRITICAL(sr);
714*042d53a7SEvalZero 
715*042d53a7SEvalZero     os_cputime_timer_start(&g_ble_ll_sched_timer, sch->start_time);
716*042d53a7SEvalZero 
717*042d53a7SEvalZero     return rc;
718*042d53a7SEvalZero }
719*042d53a7SEvalZero #endif
720*042d53a7SEvalZero 
721*042d53a7SEvalZero /**
722*042d53a7SEvalZero  * Schedules a slave connection for the first time.
723*042d53a7SEvalZero  *
724*042d53a7SEvalZero  * Context: Link Layer
725*042d53a7SEvalZero  *
726*042d53a7SEvalZero  * @param connsm
727*042d53a7SEvalZero  *
728*042d53a7SEvalZero  * @return int
729*042d53a7SEvalZero  */
int
ble_ll_sched_slave_new(struct ble_ll_conn_sm *connsm)
{
    int rc;
    os_sr_t sr;
    struct ble_ll_sched_item *entry;
    struct ble_ll_sched_item *next_sch;
    struct ble_ll_sched_item *sch;

#ifdef BLE_XCVR_RFCLK
    /* Set when the new item ends up at the head of the schedule queue, in
     * which case the RF clock timer must be (re)started for it. */
    int first;
    first = 0;
#endif

    /* Get schedule element from connection */
    rc = -1;
    sch = &connsm->conn_sch;

    /* Set schedule start and end times */
    /*
     * XXX: for now, we dont care about anchor point usecs for the slave. It
     * does not matter if we turn on the receiver up to one tick before we
     * need to. We also subtract one extra tick since the conversion from
     * usecs to ticks could be off by up to 1 tick.
     */
    sch->start_time = connsm->anchor_point - g_ble_ll_sched_offset_ticks -
        os_cputime_usecs_to_ticks(connsm->slave_cur_window_widening) - 1;
    sch->end_time = connsm->ce_end_time;
    sch->remainder = 0;

    /* We have to find a place for this schedule */
    OS_ENTER_CRITICAL(sr);

    /* The schedule item must occur after current running item (if any) */
    if (ble_ll_sched_overlaps_current(sch)) {
        OS_EXIT_CRITICAL(sr);
        return rc;
    }

    entry = ble_ll_sched_insert_if_empty(sch);
    if (!entry) {
        /* Nothing in schedule. Schedule as soon as possible */
        rc = 0;
#ifdef BLE_XCVR_RFCLK
        first = 1;
#endif
    } else {
        os_cputime_timer_stop(&g_ble_ll_sched_timer);
        while (1) {
            /* Save next entry before a potential insertion changes links */
            next_sch = entry->link.tqe_next;
            /* Insert if event ends before next starts */
            if ((int32_t)(sch->end_time - entry->start_time) <= 0) {
                rc = 0;
                TAILQ_INSERT_BEFORE(entry, sch, link);
                break;
            }

            if (ble_ll_sched_is_overlap(sch, entry)) {
                /* If we overlap with a connection, we re-schedule */
                if (ble_ll_sched_conn_overlap(entry)) {
                    /* Overlapped an existing connection; give up here
                     * (rc stays -1 and the item is not inserted). */
                    break;
                }
            }

            /* Move to next entry */
            entry = next_sch;

            /* Insert at tail if none left to check */
            if (!entry) {
                rc = 0;
                TAILQ_INSERT_TAIL(&g_ble_ll_sched_q, sch, link);
                break;
            }
        }

        if (!rc) {
            sch->enqueued = 1;
        }
#ifdef BLE_XCVR_RFCLK
        /* Arm the RF clock only if this item became the new queue head;
         * otherwise retarget 'sch' to the head for the sched timer below. */
        next_sch = TAILQ_FIRST(&g_ble_ll_sched_q);
        if (next_sch == sch) {
            first = 1;
        } else {
            sch = next_sch;
        }
#else
        /* Restart the schedule timer using the head of the list */
        sch = TAILQ_FIRST(&g_ble_ll_sched_q);
#endif
    }

#ifdef BLE_XCVR_RFCLK
    if (first) {
        ble_ll_xcvr_rfclk_timer_start(sch->start_time);
    }
#endif

    OS_EXIT_CRITICAL(sr);

    os_cputime_timer_start(&g_ble_ll_sched_timer, sch->start_time);

    return rc;
}
832*042d53a7SEvalZero 
/**
 * Schedules an advertising event, pushing its start time past any already
 * scheduled items it overlaps with, and notifies the caller of the start
 * time actually obtained via 'cb' (invoked while still in the critical
 * section).
 *
 * @param sch  Schedule item to insert; start/end times must already be set.
 * @param cb   Optional callback invoked with (sch->cb_arg, actual start, arg).
 * @param arg  Opaque argument passed through to 'cb'.
 *
 * @return int 0 on success (note: every path through this function sets
 *         rc to 0 before it is read).
 */
int
ble_ll_sched_adv_new(struct ble_ll_sched_item *sch, ble_ll_sched_adv_new_cb cb,
                     void *arg)
{
    int rc;
    os_sr_t sr;
    uint32_t adv_start;
    uint32_t duration;
    struct ble_ll_sched_item *entry;
    struct ble_ll_sched_item *orig;

    /* Get length of schedule item */
    duration = sch->end_time - sch->start_time;
    orig = sch;

    OS_ENTER_CRITICAL(sr);
    entry = ble_ll_sched_insert_if_empty(sch);
    if (!entry) {
        rc = 0;
        adv_start = sch->start_time;
    } else {
        /* XXX: no need to stop timer if not first on list. Modify code? */
        os_cputime_timer_stop(&g_ble_ll_sched_timer);
        TAILQ_FOREACH(entry, &g_ble_ll_sched_q, link) {
            /* We can insert if before entry in list */
            if ((int32_t)(sch->end_time - entry->start_time) <= 0) {
                rc = 0;
                TAILQ_INSERT_BEFORE(entry, sch, link);
                break;
            }

            /* Check for overlapping events */
            if (ble_ll_sched_is_overlap(sch, entry)) {
                /* Earliest start is end of this event since we overlap */
                sch->start_time = entry->end_time;
                sch->end_time = sch->start_time + duration;
            }
        }

        /* Reached end of list without finding a slot before an entry */
        if (!entry) {
            rc = 0;
            TAILQ_INSERT_TAIL(&g_ble_ll_sched_q, sch, link);
        }
        adv_start = sch->start_time;

        if (!rc) {
            sch->enqueued = 1;
        }

        /* Restart with head of list */
        sch = TAILQ_FIRST(&g_ble_ll_sched_q);
    }

    if (cb) {
        cb((struct ble_ll_adv_sm *)orig->cb_arg, adv_start, arg);
    }

#ifdef BLE_XCVR_RFCLK
    /* Only start the RF clock if the new item is the queue head */
    if (orig == sch) {
        ble_ll_xcvr_rfclk_timer_start(sch->start_time);
    }
#endif

    OS_EXIT_CRITICAL(sr);

    /* Restart timer */
    BLE_LL_ASSERT(sch != NULL);
    os_cputime_timer_start(&g_ble_ll_sched_timer, sch->start_time);

    return rc;
}
904*042d53a7SEvalZero 
/**
 * Reschedules an advertising event, allowing its start time to be delayed by
 * up to 'max_delay_ticks' (random advDelay) to find a free slot.
 *
 * First pass: scan the queue with the end time padded by the full delay,
 * recording the run of overlapping entries (start_overlap..end_overlap).
 * Second pass (only if overlaps were found): walk that run with the original
 * duration, sliding the item past each overlapping entry until it fits.
 * On success a random delay within the remaining slack is applied.
 *
 * @param sch              Item to (re)schedule; start/end set by caller.
 * @param start            Out: actual (randomized) start time on success.
 * @param max_delay_ticks  Maximum allowed random delay, in cputime ticks.
 *
 * @return int 0: scheduled; -1: no room within the allowed window.
 */
int
ble_ll_sched_adv_reschedule(struct ble_ll_sched_item *sch, uint32_t *start,
                            uint32_t max_delay_ticks)
{
    int rc;
    os_sr_t sr;
    uint32_t orig_start;
    uint32_t duration;
    uint32_t rand_ticks;
    struct ble_ll_sched_item *entry;
    struct ble_ll_sched_item *next_sch;
    struct ble_ll_sched_item *before;
    struct ble_ll_sched_item *start_overlap;
    struct ble_ll_sched_item *end_overlap;

    /* Get length of schedule item */
    duration = sch->end_time - sch->start_time;

    /* Add maximum randomization delay to end */
    rand_ticks = max_delay_ticks;
    sch->end_time += max_delay_ticks;

    start_overlap = NULL;
    end_overlap = NULL;
    before = NULL;
    rc = 0;
    OS_ENTER_CRITICAL(sr);

    entry = ble_ll_sched_insert_if_empty(sch);
    if (entry) {
        os_cputime_timer_stop(&g_ble_ll_sched_timer);
        /* Pass 1: find the contiguous run of entries the padded item
         * overlaps, or the first entry it fits entirely before. */
        while (1) {
            next_sch = entry->link.tqe_next;
            if (ble_ll_sched_is_overlap(sch, entry)) {
                if (start_overlap == NULL) {
                    start_overlap = entry;
                    end_overlap = entry;
                } else {
                    end_overlap = entry;
                }
            } else {
                if ((int32_t)(sch->end_time - entry->start_time) <= 0) {
                    before = entry;
                    break;
                }
            }

            entry = next_sch;
            if (entry == NULL) {
                break;
            }
        }

        /*
         * If there is no overlap, we either insert before the 'before' entry
         * or we insert at the end if there is no before entry.
         */
        if (start_overlap == NULL) {
            if (before) {
                TAILQ_INSERT_BEFORE(before, sch, link);
            } else {
                TAILQ_INSERT_TAIL(&g_ble_ll_sched_q, sch, link);
            }
        } else {
            /*
             * This item will overlap with others. See if we can fit it in
             * with original duration.
             */
            before = NULL;
            orig_start = sch->start_time;
            entry = start_overlap;
            /* Shrink back to the original (unpadded) duration */
            sch->end_time = sch->start_time + duration;
            while (1) {
                next_sch = entry->link.tqe_next;
                if ((int32_t)(sch->end_time - entry->start_time) <= 0) {
                    /* Fits before this entry; slack is the gap to it.
                     * NOTE(review): this gap is not clamped to
                     * max_delay_ticks, so the randomized delay below can
                     * apparently exceed the requested maximum — confirm
                     * whether a min() with max_delay_ticks is intended. */
                    rand_ticks = entry->start_time - sch->end_time;
                    before = entry;
                    TAILQ_INSERT_BEFORE(before, sch, link);
                    break;
                } else {
                    /* Slide item to start right after this entry */
                    sch->start_time = entry->end_time;
                    sch->end_time = sch->start_time + duration;
                }

                if (entry == end_overlap) {
                    /* Remaining slack after sliding past the whole run */
                    rand_ticks = (orig_start + max_delay_ticks) - sch->start_time;
                    if (rand_ticks > max_delay_ticks) {
                        /* No place for advertisement. */
                        rc = -1;
                    } else {
                        if (next_sch == NULL) {
                            TAILQ_INSERT_TAIL(&g_ble_ll_sched_q, sch, link);
                        } else {
                            TAILQ_INSERT_BEFORE(next_sch, sch, link);
                        }
                    }
                    break;
                }
                entry = next_sch;
                BLE_LL_ASSERT(entry != NULL);
            }
        }
    }

    if (!rc) {
        sch->enqueued = 1;
        /* Apply random advDelay within the available slack */
        if (rand_ticks) {
            sch->start_time += rand() % rand_ticks;
        }
        sch->end_time = sch->start_time + duration;
        *start = sch->start_time;

#ifdef BLE_XCVR_RFCLK
        if (sch == TAILQ_FIRST(&g_ble_ll_sched_q)) {
            ble_ll_xcvr_rfclk_timer_start(sch->start_time);
        }
#endif
    }

    OS_EXIT_CRITICAL(sr);

    sch = TAILQ_FIRST(&g_ble_ll_sched_q);
    os_cputime_timer_start(&g_ble_ll_sched_timer, sch->start_time);

    return rc;
}
1031*042d53a7SEvalZero 
1032*042d53a7SEvalZero int
ble_ll_sched_adv_resched_pdu(struct ble_ll_sched_item * sch)1033*042d53a7SEvalZero ble_ll_sched_adv_resched_pdu(struct ble_ll_sched_item *sch)
1034*042d53a7SEvalZero {
1035*042d53a7SEvalZero     uint8_t lls;
1036*042d53a7SEvalZero     os_sr_t sr;
1037*042d53a7SEvalZero     struct ble_ll_sched_item *entry;
1038*042d53a7SEvalZero 
1039*042d53a7SEvalZero     OS_ENTER_CRITICAL(sr);
1040*042d53a7SEvalZero 
1041*042d53a7SEvalZero     lls = ble_ll_state_get();
1042*042d53a7SEvalZero     if ((lls == BLE_LL_STATE_ADV) || (lls == BLE_LL_STATE_CONNECTION)) {
1043*042d53a7SEvalZero         goto adv_resched_pdu_fail;
1044*042d53a7SEvalZero     }
1045*042d53a7SEvalZero 
1046*042d53a7SEvalZero     entry = ble_ll_sched_insert_if_empty(sch);
1047*042d53a7SEvalZero     if (entry) {
1048*042d53a7SEvalZero         /* If we overlap with the first item, simply re-schedule */
1049*042d53a7SEvalZero         if (ble_ll_sched_is_overlap(sch, entry)) {
1050*042d53a7SEvalZero             goto adv_resched_pdu_fail;
1051*042d53a7SEvalZero         }
1052*042d53a7SEvalZero         os_cputime_timer_stop(&g_ble_ll_sched_timer);
1053*042d53a7SEvalZero         TAILQ_INSERT_BEFORE(entry, sch, link);
1054*042d53a7SEvalZero     }
1055*042d53a7SEvalZero 
1056*042d53a7SEvalZero     OS_EXIT_CRITICAL(sr);
1057*042d53a7SEvalZero     os_cputime_timer_start(&g_ble_ll_sched_timer, sch->start_time);
1058*042d53a7SEvalZero     return 0;
1059*042d53a7SEvalZero 
1060*042d53a7SEvalZero adv_resched_pdu_fail:
1061*042d53a7SEvalZero     OS_EXIT_CRITICAL(sr);
1062*042d53a7SEvalZero     return -1;
1063*042d53a7SEvalZero }
1064*042d53a7SEvalZero 
1065*042d53a7SEvalZero /**
1066*042d53a7SEvalZero  * Remove a schedule element
1067*042d53a7SEvalZero  *
1068*042d53a7SEvalZero  * @param sched_type
1069*042d53a7SEvalZero  *
1070*042d53a7SEvalZero  * @return int 0 - removed, 1 - not in the list
1071*042d53a7SEvalZero  */
1072*042d53a7SEvalZero int
ble_ll_sched_rmv_elem(struct ble_ll_sched_item * sch)1073*042d53a7SEvalZero ble_ll_sched_rmv_elem(struct ble_ll_sched_item *sch)
1074*042d53a7SEvalZero {
1075*042d53a7SEvalZero     os_sr_t sr;
1076*042d53a7SEvalZero     struct ble_ll_sched_item *first;
1077*042d53a7SEvalZero     int rc = 1;
1078*042d53a7SEvalZero 
1079*042d53a7SEvalZero     if (!sch) {
1080*042d53a7SEvalZero         return rc;
1081*042d53a7SEvalZero     }
1082*042d53a7SEvalZero 
1083*042d53a7SEvalZero     OS_ENTER_CRITICAL(sr);
1084*042d53a7SEvalZero     if (sch->enqueued) {
1085*042d53a7SEvalZero         first = TAILQ_FIRST(&g_ble_ll_sched_q);
1086*042d53a7SEvalZero         if (first == sch) {
1087*042d53a7SEvalZero             os_cputime_timer_stop(&g_ble_ll_sched_timer);
1088*042d53a7SEvalZero         }
1089*042d53a7SEvalZero 
1090*042d53a7SEvalZero         TAILQ_REMOVE(&g_ble_ll_sched_q, sch, link);
1091*042d53a7SEvalZero         sch->enqueued = 0;
1092*042d53a7SEvalZero         rc = 0;
1093*042d53a7SEvalZero 
1094*042d53a7SEvalZero         if (first == sch) {
1095*042d53a7SEvalZero             first = TAILQ_FIRST(&g_ble_ll_sched_q);
1096*042d53a7SEvalZero             if (first) {
1097*042d53a7SEvalZero                 os_cputime_timer_start(&g_ble_ll_sched_timer, first->start_time);
1098*042d53a7SEvalZero             }
1099*042d53a7SEvalZero         }
1100*042d53a7SEvalZero     }
1101*042d53a7SEvalZero     OS_EXIT_CRITICAL(sr);
1102*042d53a7SEvalZero 
1103*042d53a7SEvalZero     return rc;
1104*042d53a7SEvalZero }
1105*042d53a7SEvalZero 
1106*042d53a7SEvalZero void
ble_ll_sched_rmv_elem_type(uint8_t type,sched_remove_cb_func remove_cb)1107*042d53a7SEvalZero ble_ll_sched_rmv_elem_type(uint8_t type, sched_remove_cb_func remove_cb)
1108*042d53a7SEvalZero {
1109*042d53a7SEvalZero     os_sr_t sr;
1110*042d53a7SEvalZero     struct ble_ll_sched_item *entry;
1111*042d53a7SEvalZero     struct ble_ll_sched_item *first;
1112*042d53a7SEvalZero 
1113*042d53a7SEvalZero     OS_ENTER_CRITICAL(sr);
1114*042d53a7SEvalZero     first = TAILQ_FIRST(&g_ble_ll_sched_q);
1115*042d53a7SEvalZero 
1116*042d53a7SEvalZero     TAILQ_FOREACH(entry, &g_ble_ll_sched_q, link) {
1117*042d53a7SEvalZero         if (entry->sched_type == type) {
1118*042d53a7SEvalZero             if (first == entry) {
1119*042d53a7SEvalZero                 os_cputime_timer_stop(&g_ble_ll_sched_timer);
1120*042d53a7SEvalZero                 first = NULL;
1121*042d53a7SEvalZero             }
1122*042d53a7SEvalZero 
1123*042d53a7SEvalZero             TAILQ_REMOVE(&g_ble_ll_sched_q, entry, link);
1124*042d53a7SEvalZero             remove_cb(entry);
1125*042d53a7SEvalZero             entry->enqueued = 0;
1126*042d53a7SEvalZero         }
1127*042d53a7SEvalZero     }
1128*042d53a7SEvalZero 
1129*042d53a7SEvalZero     if (!first) {
1130*042d53a7SEvalZero         first = TAILQ_FIRST(&g_ble_ll_sched_q);
1131*042d53a7SEvalZero         os_cputime_timer_start(&g_ble_ll_sched_timer, first->start_time);
1132*042d53a7SEvalZero     }
1133*042d53a7SEvalZero 
1134*042d53a7SEvalZero     OS_EXIT_CRITICAL(sr);
1135*042d53a7SEvalZero }
1136*042d53a7SEvalZero 
1137*042d53a7SEvalZero /**
1138*042d53a7SEvalZero  * Executes a schedule item by calling the schedule callback function.
1139*042d53a7SEvalZero  *
1140*042d53a7SEvalZero  * Context: Interrupt
1141*042d53a7SEvalZero  *
1142*042d53a7SEvalZero  * @param sch Pointer to schedule item
1143*042d53a7SEvalZero  *
1144*042d53a7SEvalZero  * @return int 0: schedule item is not over; otherwise schedule item is done.
1145*042d53a7SEvalZero  */
1146*042d53a7SEvalZero static int
ble_ll_sched_execute_item(struct ble_ll_sched_item * sch)1147*042d53a7SEvalZero ble_ll_sched_execute_item(struct ble_ll_sched_item *sch)
1148*042d53a7SEvalZero {
1149*042d53a7SEvalZero     int rc;
1150*042d53a7SEvalZero     uint8_t lls;
1151*042d53a7SEvalZero 
1152*042d53a7SEvalZero     lls = ble_ll_state_get();
1153*042d53a7SEvalZero 
1154*042d53a7SEvalZero     ble_ll_trace_u32x3(BLE_LL_TRACE_ID_SCHED, lls, os_cputime_get32(),
1155*042d53a7SEvalZero                        sch->start_time);
1156*042d53a7SEvalZero 
1157*042d53a7SEvalZero     if (lls == BLE_LL_STATE_STANDBY) {
1158*042d53a7SEvalZero         goto sched;
1159*042d53a7SEvalZero     }
1160*042d53a7SEvalZero 
1161*042d53a7SEvalZero     /* If aux scan scheduled and LL is in state when scanner is running
1162*042d53a7SEvalZero      * in 3 states:
1163*042d53a7SEvalZero      * BLE_LL_STATE_SCANNING
1164*042d53a7SEvalZero      * BLE_LL_STATE_INITIATING
1165*042d53a7SEvalZero      * BLE_LL_STATE_STANDBY
1166*042d53a7SEvalZero      *
1167*042d53a7SEvalZero      * Let scanner to decide to disable phy or not.
1168*042d53a7SEvalZero      */
1169*042d53a7SEvalZero     if (sch->sched_type == BLE_LL_SCHED_TYPE_AUX_SCAN) {
1170*042d53a7SEvalZero         if (lls == BLE_LL_STATE_INITIATING || lls == BLE_LL_STATE_SCANNING) {
1171*042d53a7SEvalZero             goto sched;
1172*042d53a7SEvalZero         }
1173*042d53a7SEvalZero     }
1174*042d53a7SEvalZero 
1175*042d53a7SEvalZero     /*
1176*042d53a7SEvalZero      * This is either an advertising event or connection event start. If
1177*042d53a7SEvalZero      * we are scanning or initiating just stop it.
1178*042d53a7SEvalZero      */
1179*042d53a7SEvalZero 
1180*042d53a7SEvalZero     /* We have to disable the PHY no matter what */
1181*042d53a7SEvalZero     ble_phy_disable();
1182*042d53a7SEvalZero     ble_ll_wfr_disable();
1183*042d53a7SEvalZero 
1184*042d53a7SEvalZero     if (lls == BLE_LL_STATE_SCANNING) {
1185*042d53a7SEvalZero         ble_ll_state_set(BLE_LL_STATE_STANDBY);
1186*042d53a7SEvalZero         ble_ll_scan_clean_cur_aux_data();
1187*042d53a7SEvalZero     } else if (lls == BLE_LL_STATE_INITIATING) {
1188*042d53a7SEvalZero         ble_ll_state_set(BLE_LL_STATE_STANDBY);
1189*042d53a7SEvalZero         ble_ll_scan_clean_cur_aux_data();
1190*042d53a7SEvalZero         /* PHY is disabled - make sure we do not wait for AUX_CONNECT_RSP */
1191*042d53a7SEvalZero         ble_ll_conn_reset_pending_aux_conn_rsp();
1192*042d53a7SEvalZero     } else if (lls == BLE_LL_STATE_ADV) {
1193*042d53a7SEvalZero         STATS_INC(ble_ll_stats, sched_state_adv_errs);
1194*042d53a7SEvalZero         ble_ll_adv_halt();
1195*042d53a7SEvalZero     } else {
1196*042d53a7SEvalZero         STATS_INC(ble_ll_stats, sched_state_conn_errs);
1197*042d53a7SEvalZero         ble_ll_conn_event_halt();
1198*042d53a7SEvalZero     }
1199*042d53a7SEvalZero 
1200*042d53a7SEvalZero sched:
1201*042d53a7SEvalZero     BLE_LL_ASSERT(sch->sched_cb);
1202*042d53a7SEvalZero     rc = sch->sched_cb(sch);
1203*042d53a7SEvalZero     return rc;
1204*042d53a7SEvalZero }
1205*042d53a7SEvalZero 
1206*042d53a7SEvalZero /**
1207*042d53a7SEvalZero  * Run the BLE scheduler. Iterate through all items on the schedule queue.
1208*042d53a7SEvalZero  *
1209*042d53a7SEvalZero  * Context: interrupt (scheduler)
1210*042d53a7SEvalZero  *
1211*042d53a7SEvalZero  * @return int
1212*042d53a7SEvalZero  */
1213*042d53a7SEvalZero void
ble_ll_sched_run(void * arg)1214*042d53a7SEvalZero ble_ll_sched_run(void *arg)
1215*042d53a7SEvalZero {
1216*042d53a7SEvalZero     struct ble_ll_sched_item *sch;
1217*042d53a7SEvalZero 
1218*042d53a7SEvalZero     /* Look through schedule queue */
1219*042d53a7SEvalZero     sch = TAILQ_FIRST(&g_ble_ll_sched_q);
1220*042d53a7SEvalZero     if (sch) {
1221*042d53a7SEvalZero #if (BLE_LL_SCHED_DEBUG == 1)
1222*042d53a7SEvalZero         int32_t dt;
1223*042d53a7SEvalZero 
1224*042d53a7SEvalZero         /* Make sure we have passed the start time of the first event */
1225*042d53a7SEvalZero         dt = (int32_t)(os_cputime_get32() - sch->start_time);
1226*042d53a7SEvalZero         if (dt > g_ble_ll_sched_max_late) {
1227*042d53a7SEvalZero             g_ble_ll_sched_max_late = dt;
1228*042d53a7SEvalZero         }
1229*042d53a7SEvalZero         if (dt < g_ble_ll_sched_max_early) {
1230*042d53a7SEvalZero             g_ble_ll_sched_max_early = dt;
1231*042d53a7SEvalZero         }
1232*042d53a7SEvalZero #endif
1233*042d53a7SEvalZero 
1234*042d53a7SEvalZero         /* Remove schedule item and execute the callback */
1235*042d53a7SEvalZero         TAILQ_REMOVE(&g_ble_ll_sched_q, sch, link);
1236*042d53a7SEvalZero         sch->enqueued = 0;
1237*042d53a7SEvalZero         ble_ll_sched_execute_item(sch);
1238*042d53a7SEvalZero 
1239*042d53a7SEvalZero         /* Restart if there is an item on the schedule */
1240*042d53a7SEvalZero         sch = TAILQ_FIRST(&g_ble_ll_sched_q);
1241*042d53a7SEvalZero         if (sch) {
1242*042d53a7SEvalZero             os_cputime_timer_start(&g_ble_ll_sched_timer, sch->start_time);
1243*042d53a7SEvalZero         }
1244*042d53a7SEvalZero     }
1245*042d53a7SEvalZero }
1246*042d53a7SEvalZero 
1247*042d53a7SEvalZero /**
1248*042d53a7SEvalZero  * Called to determine when the next scheduled event will occur.
1249*042d53a7SEvalZero  *
1250*042d53a7SEvalZero  * If there are not scheduled events this function returns 0; otherwise it
1251*042d53a7SEvalZero  * returns 1 and *next_event_time is set to the start time of the next event.
1252*042d53a7SEvalZero  *
1253*042d53a7SEvalZero  * @param next_event_time
1254*042d53a7SEvalZero  *
1255*042d53a7SEvalZero  * @return int 0: No events are scheduled 1: there is an upcoming event
1256*042d53a7SEvalZero  */
1257*042d53a7SEvalZero int
ble_ll_sched_next_time(uint32_t * next_event_time)1258*042d53a7SEvalZero ble_ll_sched_next_time(uint32_t *next_event_time)
1259*042d53a7SEvalZero {
1260*042d53a7SEvalZero     int rc;
1261*042d53a7SEvalZero     os_sr_t sr;
1262*042d53a7SEvalZero     struct ble_ll_sched_item *first;
1263*042d53a7SEvalZero 
1264*042d53a7SEvalZero     rc = 0;
1265*042d53a7SEvalZero     OS_ENTER_CRITICAL(sr);
1266*042d53a7SEvalZero     first = TAILQ_FIRST(&g_ble_ll_sched_q);
1267*042d53a7SEvalZero     if (first) {
1268*042d53a7SEvalZero         *next_event_time = first->start_time;
1269*042d53a7SEvalZero         rc = 1;
1270*042d53a7SEvalZero     }
1271*042d53a7SEvalZero     OS_EXIT_CRITICAL(sr);
1272*042d53a7SEvalZero 
1273*042d53a7SEvalZero     return rc;
1274*042d53a7SEvalZero }
1275*042d53a7SEvalZero 
1276*042d53a7SEvalZero #ifdef BLE_XCVR_RFCLK
1277*042d53a7SEvalZero /**
1278*042d53a7SEvalZero  * Checks to see if we need to restart the cputime timer which starts the
1279*042d53a7SEvalZero  * rf clock settling.
1280*042d53a7SEvalZero  *
1281*042d53a7SEvalZero  * NOTE: Should only be called from the Link Layer task!
1282*042d53a7SEvalZero  *
1283*042d53a7SEvalZero  * Context: Link-Layer task.
1284*042d53a7SEvalZero  *
1285*042d53a7SEvalZero  */
1286*042d53a7SEvalZero void
ble_ll_sched_rfclk_chk_restart(void)1287*042d53a7SEvalZero ble_ll_sched_rfclk_chk_restart(void)
1288*042d53a7SEvalZero {
1289*042d53a7SEvalZero     os_sr_t sr;
1290*042d53a7SEvalZero     uint8_t ll_state;
1291*042d53a7SEvalZero     int32_t time_till_next;
1292*042d53a7SEvalZero     uint32_t next_time;
1293*042d53a7SEvalZero 
1294*042d53a7SEvalZero     OS_ENTER_CRITICAL(sr);
1295*042d53a7SEvalZero     ll_state = ble_ll_state_get();
1296*042d53a7SEvalZero     if (ble_ll_sched_next_time(&next_time)) {
1297*042d53a7SEvalZero         /*
1298*042d53a7SEvalZero          * If the time until the next event is too close, no need to start
1299*042d53a7SEvalZero          * the timer. Leave clock on.
1300*042d53a7SEvalZero          */
1301*042d53a7SEvalZero         time_till_next = (int32_t)(next_time - os_cputime_get32());
1302*042d53a7SEvalZero         if (time_till_next > g_ble_ll_data.ll_xtal_ticks) {
1303*042d53a7SEvalZero             /* Restart the rfclk timer based on the next scheduled time */
1304*042d53a7SEvalZero             ble_ll_xcvr_rfclk_timer_start(next_time);
1305*042d53a7SEvalZero 
1306*042d53a7SEvalZero             /* Only disable the rfclk if doing nothing */
1307*042d53a7SEvalZero             if (ll_state == BLE_LL_STATE_STANDBY) {
1308*042d53a7SEvalZero                 ble_ll_xcvr_rfclk_disable();
1309*042d53a7SEvalZero             }
1310*042d53a7SEvalZero         }
1311*042d53a7SEvalZero     } else {
1312*042d53a7SEvalZero         /*
1313*042d53a7SEvalZero          * Only stop the timer and rfclk if doing nothing currently. If
1314*042d53a7SEvalZero          * in some other state, that state will handle the timer and rfclk
1315*042d53a7SEvalZero          */
1316*042d53a7SEvalZero         if (ll_state == BLE_LL_STATE_STANDBY) {
1317*042d53a7SEvalZero             ble_ll_xcvr_rfclk_stop();
1318*042d53a7SEvalZero         }
1319*042d53a7SEvalZero     }
1320*042d53a7SEvalZero     OS_EXIT_CRITICAL(sr);
1321*042d53a7SEvalZero }
1322*042d53a7SEvalZero 
1323*042d53a7SEvalZero #endif
1324*042d53a7SEvalZero 
1325*042d53a7SEvalZero #if MYNEWT_VAL(BLE_LL_CFG_FEAT_LL_EXT_ADV)
1326*042d53a7SEvalZero /**
1327*042d53a7SEvalZero  * Called to check if there is place for a planned scan req.
1328*042d53a7SEvalZero  *
1329*042d53a7SEvalZero  * @param chan
1330*042d53a7SEvalZero  * @param phy_mode
1331*042d53a7SEvalZero  *
1332*042d53a7SEvalZero  * @return int 0: Clear for scan req 1: there is an upcoming event
1333*042d53a7SEvalZero  */
1334*042d53a7SEvalZero int
ble_ll_sched_scan_req_over_aux_ptr(uint32_t chan,uint8_t phy_mode)1335*042d53a7SEvalZero ble_ll_sched_scan_req_over_aux_ptr(uint32_t chan, uint8_t phy_mode)
1336*042d53a7SEvalZero {
1337*042d53a7SEvalZero     struct ble_ll_sched_item *sch;
1338*042d53a7SEvalZero     uint32_t usec_dur;
1339*042d53a7SEvalZero     uint32_t now = os_cputime_get32();
1340*042d53a7SEvalZero 
1341*042d53a7SEvalZero     /* Lets calculate roughly how much time we need for scan req and scan rsp */
1342*042d53a7SEvalZero     usec_dur = ble_ll_pdu_tx_time_get(BLE_SCAN_REQ_LEN, phy_mode);
1343*042d53a7SEvalZero     if (chan >=  BLE_PHY_NUM_DATA_CHANS) {
1344*042d53a7SEvalZero         usec_dur += ble_ll_pdu_tx_time_get(BLE_SCAN_RSP_MAX_LEN, phy_mode);
1345*042d53a7SEvalZero     } else {
1346*042d53a7SEvalZero         usec_dur += ble_ll_pdu_tx_time_get(BLE_SCAN_RSP_MAX_EXT_LEN, phy_mode);
1347*042d53a7SEvalZero     }
1348*042d53a7SEvalZero 
1349*042d53a7SEvalZero     sch = TAILQ_FIRST(&g_ble_ll_sched_q);
1350*042d53a7SEvalZero     while (sch) {
1351*042d53a7SEvalZero         /* Let's check if there is no scheduled item which want to start within
1352*042d53a7SEvalZero          * given usecs.*/
1353*042d53a7SEvalZero         if ((int32_t)(sch->start_time - now + os_cputime_usecs_to_ticks(usec_dur)) > 0) {
1354*042d53a7SEvalZero             /* We are fine. Have time for scan req */
1355*042d53a7SEvalZero             return 0;
1356*042d53a7SEvalZero         }
1357*042d53a7SEvalZero 
1358*042d53a7SEvalZero         /* There is something in the scheduler. If it is not aux ptr we assume
1359*042d53a7SEvalZero          * it is more important that scan req
1360*042d53a7SEvalZero          */
1361*042d53a7SEvalZero         if (sch->sched_type != BLE_LL_SCHED_TYPE_AUX_SCAN) {
1362*042d53a7SEvalZero             return 1;
1363*042d53a7SEvalZero         }
1364*042d53a7SEvalZero 
1365*042d53a7SEvalZero         ble_ll_scan_end_adv_evt((struct ble_ll_aux_data *)sch->cb_arg);
1366*042d53a7SEvalZero         TAILQ_REMOVE(&g_ble_ll_sched_q, sch, link);
1367*042d53a7SEvalZero         sch->enqueued = 0;
1368*042d53a7SEvalZero         sch = TAILQ_FIRST(&g_ble_ll_sched_q);
1369*042d53a7SEvalZero     }
1370*042d53a7SEvalZero     return 0;
1371*042d53a7SEvalZero }
1372*042d53a7SEvalZero 
1373*042d53a7SEvalZero /**
1374*042d53a7SEvalZero  * Called to schedule a aux scan.
1375*042d53a7SEvalZero  *
1376*042d53a7SEvalZero  * Context: Interrupt
1377*042d53a7SEvalZero  *
1378*042d53a7SEvalZero  * @param ble_hdr
1379*042d53a7SEvalZero  * @param scansm
1380*042d53a7SEvalZero  * @param aux_scan
1381*042d53a7SEvalZero  *
1382*042d53a7SEvalZero  * @return 0 on success, 1 otherwise
1383*042d53a7SEvalZero  */
1384*042d53a7SEvalZero int
ble_ll_sched_aux_scan(struct ble_mbuf_hdr * ble_hdr,struct ble_ll_scan_sm * scansm,struct ble_ll_aux_data * aux_scan)1385*042d53a7SEvalZero ble_ll_sched_aux_scan(struct ble_mbuf_hdr *ble_hdr,
1386*042d53a7SEvalZero                       struct ble_ll_scan_sm *scansm,
1387*042d53a7SEvalZero                       struct ble_ll_aux_data *aux_scan)
1388*042d53a7SEvalZero {
1389*042d53a7SEvalZero     int rc;
1390*042d53a7SEvalZero     os_sr_t sr;
1391*042d53a7SEvalZero     uint32_t off_ticks;
1392*042d53a7SEvalZero     uint32_t off_rem_usecs;
1393*042d53a7SEvalZero     uint32_t start_time;
1394*042d53a7SEvalZero     uint32_t start_time_rem_usecs;
1395*042d53a7SEvalZero     uint32_t end_time;
1396*042d53a7SEvalZero     uint32_t dur;
1397*042d53a7SEvalZero     struct ble_ll_sched_item *entry;
1398*042d53a7SEvalZero     struct ble_ll_sched_item *sch;
1399*042d53a7SEvalZero     int phy_mode;
1400*042d53a7SEvalZero 
1401*042d53a7SEvalZero     sch = &aux_scan->sch;
1402*042d53a7SEvalZero 
1403*042d53a7SEvalZero     off_ticks = os_cputime_usecs_to_ticks(aux_scan->offset);
1404*042d53a7SEvalZero     off_rem_usecs = aux_scan->offset - os_cputime_ticks_to_usecs(off_ticks);
1405*042d53a7SEvalZero 
1406*042d53a7SEvalZero     start_time = ble_hdr->beg_cputime + off_ticks;
1407*042d53a7SEvalZero     start_time_rem_usecs = ble_hdr->rem_usecs + off_rem_usecs;
1408*042d53a7SEvalZero     if (start_time_rem_usecs > 30) {
1409*042d53a7SEvalZero         start_time++;
1410*042d53a7SEvalZero         start_time_rem_usecs -= 30;
1411*042d53a7SEvalZero     }
1412*042d53a7SEvalZero     start_time -= g_ble_ll_sched_offset_ticks;
1413*042d53a7SEvalZero 
1414*042d53a7SEvalZero     /* Let's calculate time we reserve for aux packet. For now we assume to wait
1415*042d53a7SEvalZero      * for fixed number of bytes and handle possible interrupting it in
1416*042d53a7SEvalZero      * ble_ll_sched_execute_item(). This is because aux packet can be up to
1417*042d53a7SEvalZero      * 256bytes and we don't want to block sched that long
1418*042d53a7SEvalZero      */
1419*042d53a7SEvalZero     phy_mode = ble_ll_phy_to_phy_mode(aux_scan->aux_phy,
1420*042d53a7SEvalZero                                       BLE_HCI_LE_PHY_CODED_ANY);
1421*042d53a7SEvalZero     dur = ble_ll_pdu_tx_time_get(BLE_LL_SCHED_AUX_PTR_DFLT_BYTES_NUM, phy_mode);
1422*042d53a7SEvalZero     end_time = start_time + os_cputime_usecs_to_ticks(dur);
1423*042d53a7SEvalZero 
1424*042d53a7SEvalZero     sch->start_time = start_time;
1425*042d53a7SEvalZero     sch->remainder = start_time_rem_usecs;
1426*042d53a7SEvalZero     sch->end_time = end_time;
1427*042d53a7SEvalZero 
1428*042d53a7SEvalZero     OS_ENTER_CRITICAL(sr);
1429*042d53a7SEvalZero 
1430*042d53a7SEvalZero     if (!ble_ll_sched_insert_if_empty(sch)) {
1431*042d53a7SEvalZero         /* Nothing in schedule. Schedule as soon as possible
1432*042d53a7SEvalZero          * If we are here it means sch has been added to the scheduler */
1433*042d53a7SEvalZero         rc = 0;
1434*042d53a7SEvalZero         goto done;
1435*042d53a7SEvalZero     }
1436*042d53a7SEvalZero 
1437*042d53a7SEvalZero     /* Try to find slot for aux scan. */
1438*042d53a7SEvalZero     os_cputime_timer_stop(&g_ble_ll_sched_timer);
1439*042d53a7SEvalZero     TAILQ_FOREACH(entry, &g_ble_ll_sched_q, link) {
1440*042d53a7SEvalZero         /* We can insert if before entry in list */
1441*042d53a7SEvalZero         if ((int32_t)(sch->end_time - entry->start_time) <= 0) {
1442*042d53a7SEvalZero             rc = 0;
1443*042d53a7SEvalZero             TAILQ_INSERT_BEFORE(entry, sch, link);
1444*042d53a7SEvalZero             sch->enqueued = 1;
1445*042d53a7SEvalZero             break;
1446*042d53a7SEvalZero         }
1447*042d53a7SEvalZero 
1448*042d53a7SEvalZero         /* Check for overlapping events. For now drop if it overlaps with
1449*042d53a7SEvalZero          * anything. We can make it smarter later on
1450*042d53a7SEvalZero          */
1451*042d53a7SEvalZero         if (ble_ll_sched_is_overlap(sch, entry)) {
1452*042d53a7SEvalZero             OS_EXIT_CRITICAL(sr);
1453*042d53a7SEvalZero             return -1;
1454*042d53a7SEvalZero         }
1455*042d53a7SEvalZero     }
1456*042d53a7SEvalZero 
1457*042d53a7SEvalZero     if (!entry) {
1458*042d53a7SEvalZero         rc = 0;
1459*042d53a7SEvalZero         TAILQ_INSERT_TAIL(&g_ble_ll_sched_q, sch, link);
1460*042d53a7SEvalZero         sch->enqueued = 1;
1461*042d53a7SEvalZero     }
1462*042d53a7SEvalZero 
1463*042d53a7SEvalZero done:
1464*042d53a7SEvalZero 
1465*042d53a7SEvalZero     /* Get head of list to restart timer */
1466*042d53a7SEvalZero     sch = TAILQ_FIRST(&g_ble_ll_sched_q);
1467*042d53a7SEvalZero 
1468*042d53a7SEvalZero     OS_EXIT_CRITICAL(sr);
1469*042d53a7SEvalZero 
1470*042d53a7SEvalZero     /* Restart timer */
1471*042d53a7SEvalZero     BLE_LL_ASSERT(sch != NULL);
1472*042d53a7SEvalZero     os_cputime_timer_start(&g_ble_ll_sched_timer, sch->start_time);
1473*042d53a7SEvalZero 
1474*042d53a7SEvalZero     STATS_INC(ble_ll_stats, aux_scheduled);
1475*042d53a7SEvalZero     return rc;
1476*042d53a7SEvalZero }
1477*042d53a7SEvalZero #endif
1478*042d53a7SEvalZero 
1479*042d53a7SEvalZero #if MYNEWT_VAL(BLE_LL_DIRECT_TEST_MODE) == 1
ble_ll_sched_dtm(struct ble_ll_sched_item * sch)1480*042d53a7SEvalZero int ble_ll_sched_dtm(struct ble_ll_sched_item *sch)
1481*042d53a7SEvalZero {
1482*042d53a7SEvalZero     int rc;
1483*042d53a7SEvalZero     os_sr_t sr;
1484*042d53a7SEvalZero     struct ble_ll_sched_item *entry;
1485*042d53a7SEvalZero 
1486*042d53a7SEvalZero     OS_ENTER_CRITICAL(sr);
1487*042d53a7SEvalZero 
1488*042d53a7SEvalZero     if (!ble_ll_sched_insert_if_empty(sch)) {
1489*042d53a7SEvalZero         /* Nothing in schedule. Schedule as soon as possible
1490*042d53a7SEvalZero          * If we are here it means sch has been added to the scheduler */
1491*042d53a7SEvalZero         rc = 0;
1492*042d53a7SEvalZero         goto done;
1493*042d53a7SEvalZero     }
1494*042d53a7SEvalZero 
1495*042d53a7SEvalZero     /* Try to find slot for test. */
1496*042d53a7SEvalZero     os_cputime_timer_stop(&g_ble_ll_sched_timer);
1497*042d53a7SEvalZero     TAILQ_FOREACH(entry, &g_ble_ll_sched_q, link) {
1498*042d53a7SEvalZero         /* We can insert if before entry in list */
1499*042d53a7SEvalZero         if (sch->end_time <= entry->start_time) {
1500*042d53a7SEvalZero             rc = 0;
1501*042d53a7SEvalZero             TAILQ_INSERT_BEFORE(entry, sch, link);
1502*042d53a7SEvalZero             sch->enqueued = 1;
1503*042d53a7SEvalZero             break;
1504*042d53a7SEvalZero         }
1505*042d53a7SEvalZero 
1506*042d53a7SEvalZero         /* Check for overlapping events. For now drop if it overlaps with
1507*042d53a7SEvalZero          * anything. We can make it smarter later on
1508*042d53a7SEvalZero          */
1509*042d53a7SEvalZero         if (ble_ll_sched_is_overlap(sch, entry)) {
1510*042d53a7SEvalZero             OS_EXIT_CRITICAL(sr);
1511*042d53a7SEvalZero             return -1;
1512*042d53a7SEvalZero         }
1513*042d53a7SEvalZero     }
1514*042d53a7SEvalZero 
1515*042d53a7SEvalZero     if (!entry) {
1516*042d53a7SEvalZero         rc = 0;
1517*042d53a7SEvalZero         TAILQ_INSERT_TAIL(&g_ble_ll_sched_q, sch, link);
1518*042d53a7SEvalZero         sch->enqueued = 1;
1519*042d53a7SEvalZero     }
1520*042d53a7SEvalZero 
1521*042d53a7SEvalZero done:
1522*042d53a7SEvalZero 
1523*042d53a7SEvalZero     /* Get head of list to restart timer */
1524*042d53a7SEvalZero     sch = TAILQ_FIRST(&g_ble_ll_sched_q);
1525*042d53a7SEvalZero 
1526*042d53a7SEvalZero #ifdef BLE_XCVR_RFCLK
1527*042d53a7SEvalZero     ble_ll_xcvr_rfclk_timer_start(sch->start_time);
1528*042d53a7SEvalZero #endif
1529*042d53a7SEvalZero 
1530*042d53a7SEvalZero     OS_EXIT_CRITICAL(sr);
1531*042d53a7SEvalZero 
1532*042d53a7SEvalZero     /* Restart timer */
1533*042d53a7SEvalZero     BLE_LL_ASSERT(sch != NULL);
1534*042d53a7SEvalZero     os_cputime_timer_start(&g_ble_ll_sched_timer, sch->start_time);
1535*042d53a7SEvalZero 
1536*042d53a7SEvalZero     return rc;
1537*042d53a7SEvalZero }
1538*042d53a7SEvalZero #endif
/**
 * Stop the scheduler.
 *
 * Stops the scheduler cputime timer so no pending schedule item will fire.
 * Note: this does NOT flush the schedule queue; enqueued items remain linked.
 *
 * Context: Link Layer task
 */
void
ble_ll_sched_stop(void)
{
    os_cputime_timer_stop(&g_ble_ll_sched_timer);
}
1549*042d53a7SEvalZero 
/**
 * Initialize the scheduler. Should only be called once and should be called
 * before any of the scheduler API are called.
 *
 * @return int 0 on success (always succeeds)
 */
int
ble_ll_sched_init(void)
{
    /*
     * Initialize max early to large negative number. This is used
     * to determine the worst-case "early" time the schedule was called. Dont
     * expect this to be less than -3 or -4.
     */
#if (BLE_LL_SCHED_DEBUG == 1)
    g_ble_ll_sched_max_early = -50000;
#endif

    /*
     * This is the offset from the start of the scheduled item until the actual
     * tx/rx should occur, in ticks. We also "round up" to the nearest tick
     * by adding 30 us (~one cputime tick) before converting.
     */
    g_ble_ll_sched_offset_ticks =
        (uint8_t) os_cputime_usecs_to_ticks(XCVR_TX_SCHED_DELAY_USECS + 30);

    /* Initialize cputimer for the scheduler; fires ble_ll_sched_run() */
    os_cputime_timer_init(&g_ble_ll_sched_timer, ble_ll_sched_run, NULL);

#if MYNEWT_VAL(BLE_LL_STRICT_CONN_SCHEDULING)
    /* Strict scheduling: carve cputime into fixed-size periods grouped
     * into epochs, per the configured usecs-per-period value. */
    memset(&g_ble_ll_sched_data, 0, sizeof(struct ble_ll_sched_obj));
    g_ble_ll_sched_data.sch_ticks_per_period =
        os_cputime_usecs_to_ticks(MYNEWT_VAL(BLE_LL_USECS_PER_PERIOD));
    g_ble_ll_sched_data.sch_ticks_per_epoch = BLE_LL_SCHED_PERIODS *
        g_ble_ll_sched_data.sch_ticks_per_period;
#endif

    return 0;
}
1588