/*
 * Copyright (C) 2014 BlueKitchen GmbH
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the copyright holders nor the names of
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 * 4. Any redistribution, use, or modification is done solely for
 *    personal benefit and not for any commercial purpose or for
 *    monetary gain.
 *
 * THIS SOFTWARE IS PROVIDED BY BLUEKITCHEN GMBH AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL MATTHIAS
 * RINGWALD OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
 * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Please inquire about commercial licensing options at
 * [email protected]
 *
 */

#define __BTSTACK_FILE__ "hci_transport_em9304_spi.c"

#include "btstack_config.h"
#include "btstack_em9304_spi.h"

// EM9304 SPI Driver
static const btstack_em9304_spi_t * btstack_em9304_spi;

/////////////////////////
// em9304 engine
#include "btstack_ring_buffer.h"
#include "btstack_debug.h"
#include "btstack_util.h"
#include "hci.h"
#include "hci_transport.h"

static void em9304_spi_engine_process(void);

#define STS_SLAVE_READY 0xc0

#define EM9304_SPI_HEADER_TX 0x42
#define EM9304_SPI_HEADER_RX 0x81

#define SPI_EM9304_RX_BUFFER_SIZE    64
#define SPI_EM9304_TX_BUFFER_SIZE    64
#define SPI_EM9304_RING_BUFFER_SIZE 128

// state
static volatile enum {
    SPI_EM9304_IDLE,
    SPI_EM9304_RX_W4_READ_COMMAND_SENT,
    SPI_EM9304_RX_READ_COMMAND_SENT,
    SPI_EM9304_RX_W4_STS2_RECEIVED,
    SPI_EM9304_RX_STS2_RECEIVED,
    SPI_EM9304_RX_W4_DATA_RECEIVED,
    SPI_EM9304_RX_DATA_RECEIVED,
    SPI_EM9304_TX_W4_RDY,
    SPI_EM9304_TX_W4_WRITE_COMMAND_SENT,
    SPI_EM9304_TX_WRITE_COMMAND_SENT,
    SPI_EM9304_TX_W4_STS2_RECEIVED,
    SPI_EM9304_TX_STS2_RECEIVED,
    SPI_EM9304_TX_W4_DATA_SENT,
    SPI_EM9304_TX_DATA_SENT,
} em9304_spi_engine_state;

static uint16_t em9304_spi_engine_rx_request_len;
static uint16_t em9304_spi_engine_tx_request_len;

static btstack_ring_buffer_t em9304_spi_engine_rx_ring_buffer;
static uint8_t em9304_spi_engine_rx_ring_buffer_storage[SPI_EM9304_RING_BUFFER_SIZE];

static const uint8_t * em9304_spi_engine_tx_data;
static uint16_t        em9304_spi_engine_tx_size;

static uint8_t * em9304_spi_engine_rx_buffer;
static uint16_t  em9304_spi_engine_rx_len;
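// Transaction flow implemented by the state machine below (summary derived from the code):
// - RX: when the EM9304 signals RDY and the ring buffer has room, assert chip select,
//   send EM9304_SPI_HEADER_RX, read STS2 (number of buffered bytes), read that many
//   bytes, deassert chip select, and copy the data into the pending HCI read.
// - TX: assert chip select, wait for RDY, send EM9304_SPI_HEADER_TX, read STS2
//   (number of bytes the EM9304 can accept), transmit up to that many bytes, deassert
//   chip select, and repeat until the outgoing block has been sent completely.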
// handlers
static void (*em9304_spi_engine_rx_done_handler)(void);
static void (*em9304_spi_engine_tx_done_handler)(void);

// TODO: get rid of alignment requirement
union {
    uint32_t words[1];
    uint8_t  bytes[1];
} sCommand;

union {
    uint32_t words[1];
    uint8_t  bytes[1];
} sStas;

union {
    uint32_t words[SPI_EM9304_RX_BUFFER_SIZE/4];
    uint8_t  bytes[SPI_EM9304_RX_BUFFER_SIZE];
} em9304_spi_engine_spi_rx_buffer;

static void em9304_spi_engine_ready_callback(void){
    em9304_spi_engine_process();
}

static void em9304_spi_engine_transfer_done(void){
    switch (em9304_spi_engine_state){
        case SPI_EM9304_RX_W4_READ_COMMAND_SENT:
            em9304_spi_engine_state = SPI_EM9304_RX_READ_COMMAND_SENT;
            break;
        case SPI_EM9304_RX_W4_STS2_RECEIVED:
            em9304_spi_engine_state = SPI_EM9304_RX_STS2_RECEIVED;
            break;
        case SPI_EM9304_RX_W4_DATA_RECEIVED:
            em9304_spi_engine_state = SPI_EM9304_RX_DATA_RECEIVED;
            break;
        case SPI_EM9304_TX_W4_WRITE_COMMAND_SENT:
            em9304_spi_engine_state = SPI_EM9304_TX_WRITE_COMMAND_SENT;
            break;
        case SPI_EM9304_TX_W4_STS2_RECEIVED:
            em9304_spi_engine_state = SPI_EM9304_TX_STS2_RECEIVED;
            break;
        case SPI_EM9304_TX_W4_DATA_SENT:
            em9304_spi_engine_state = SPI_EM9304_TX_DATA_SENT;
            break;
        default:
            return;
    }
    em9304_spi_engine_process();
}

static void em9304_spi_engine_transfer_rx_data(void){
    while (1){
        int bytes_available = btstack_ring_buffer_bytes_available(&em9304_spi_engine_rx_ring_buffer);
        log_debug("transfer_rx_data: ring buffer has %u -> hci buffer needs %u", bytes_available, em9304_spi_engine_rx_len);

        if (!bytes_available) break;
        if (!em9304_spi_engine_rx_len) break;

        int bytes_to_copy = btstack_min(bytes_available, em9304_spi_engine_rx_len);
        uint32_t bytes_read;
        btstack_ring_buffer_read(&em9304_spi_engine_rx_ring_buffer, em9304_spi_engine_rx_buffer, bytes_to_copy, &bytes_read);
        em9304_spi_engine_rx_buffer += bytes_read;
        em9304_spi_engine_rx_len    -= bytes_read;

        if (em9304_spi_engine_rx_len == 0){
            (*em9304_spi_engine_rx_done_handler)();
            break;
        }
    }
}

static void em9304_spi_engine_start_tx_transaction(void){
    // state = wait for RDY
    em9304_spi_engine_state = SPI_EM9304_TX_W4_RDY;

    // chip select
    btstack_em9304_spi->set_chip_select(1);

    // enable IRQ
    btstack_em9304_spi->set_ready_callback(&em9304_spi_engine_ready_callback);
}

static void em9304_spi_engine_process(void){
    uint16_t max_bytes_to_send;

    switch (em9304_spi_engine_state){
        case SPI_EM9304_IDLE:
            if (btstack_em9304_spi->get_ready()){
                // RDY -> data available
                if (btstack_ring_buffer_bytes_free(&em9304_spi_engine_rx_ring_buffer) >= SPI_EM9304_RX_BUFFER_SIZE) {

                    // disable interrupt again
                    btstack_em9304_spi->set_ready_callback(NULL);
                    // enable chip select
                    btstack_em9304_spi->set_chip_select(1);

                    // send read command
                    em9304_spi_engine_state = SPI_EM9304_RX_W4_READ_COMMAND_SENT;
                    sCommand.bytes[0] = EM9304_SPI_HEADER_RX;
                    btstack_em9304_spi->transmit(sCommand.bytes, 1);
                }
            } else if (em9304_spi_engine_tx_size){
                em9304_spi_engine_start_tx_transaction();
            } else if (em9304_spi_engine_rx_len){
                // no data ready, no data to send, but read request -> enable IRQ
                btstack_em9304_spi->set_ready_callback(&em9304_spi_engine_ready_callback);
            }
            break;

        case SPI_EM9304_RX_READ_COMMAND_SENT:
            em9304_spi_engine_state = SPI_EM9304_RX_W4_STS2_RECEIVED;
            btstack_em9304_spi->receive(sStas.bytes, 1);
            break;

        case SPI_EM9304_RX_STS2_RECEIVED:
            // check slave status
            log_debug("RX: STS2 0x%02X", sStas.bytes[0]);

            // read data and send '0's
            em9304_spi_engine_state = SPI_EM9304_RX_W4_DATA_RECEIVED;
            em9304_spi_engine_rx_request_len = sStas.bytes[0];
            btstack_em9304_spi->receive(em9304_spi_engine_spi_rx_buffer.bytes, em9304_spi_engine_rx_request_len);
            break;

        case SPI_EM9304_RX_DATA_RECEIVED:

            // chip deselect & done
            btstack_em9304_spi->set_chip_select(0);
            em9304_spi_engine_state = SPI_EM9304_IDLE;

            // move data into ring buffer
            btstack_ring_buffer_write(&em9304_spi_engine_rx_ring_buffer, em9304_spi_engine_spi_rx_buffer.bytes, em9304_spi_engine_rx_request_len);
            em9304_spi_engine_rx_request_len = 0;

            // deliver new data
            em9304_spi_engine_transfer_rx_data();
            break;

        case SPI_EM9304_TX_W4_RDY:
            // check if ready
            if (!btstack_em9304_spi->get_ready()) break;

            // disable interrupt again
            btstack_em9304_spi->set_ready_callback(NULL);

            // send write command
            em9304_spi_engine_state = SPI_EM9304_TX_W4_WRITE_COMMAND_SENT;
            sCommand.bytes[0] = EM9304_SPI_HEADER_TX;
            btstack_em9304_spi->transmit(sCommand.bytes, 1);
            break;

        case SPI_EM9304_TX_WRITE_COMMAND_SENT:
            em9304_spi_engine_state = SPI_EM9304_TX_W4_STS2_RECEIVED;
            btstack_em9304_spi->receive(sStas.bytes, 1);
            break;

        case SPI_EM9304_TX_STS2_RECEIVED:
            // check slave status and em9304 rx buffer space
            log_debug("TX: STS2 0x%02X", sStas.bytes[0]);
            max_bytes_to_send = sStas.bytes[0];
            if (max_bytes_to_send == 0){
                // chip deselect & retry
                btstack_em9304_spi->set_chip_select(0);
                em9304_spi_engine_state = SPI_EM9304_IDLE;
                break;
            }

            // number bytes to send
            em9304_spi_engine_tx_request_len = btstack_min(em9304_spi_engine_tx_size, max_bytes_to_send);

            // send command
            em9304_spi_engine_state = SPI_EM9304_TX_W4_DATA_SENT;
            btstack_em9304_spi->transmit( (uint8_t*) em9304_spi_engine_tx_data, em9304_spi_engine_tx_request_len);
            break;

        case SPI_EM9304_TX_DATA_SENT:
            // chip deselect & done
            btstack_em9304_spi->set_chip_select(0);
            em9304_spi_engine_state = SPI_EM9304_IDLE;

            // chunk processed
            em9304_spi_engine_tx_size -= em9304_spi_engine_tx_request_len;
            em9304_spi_engine_tx_data += em9304_spi_engine_tx_request_len;
            em9304_spi_engine_tx_request_len = 0;

            // handle TX Complete
            if (em9304_spi_engine_tx_size){
                // more data to send
                em9304_spi_engine_start_tx_transaction();
            } else {
                // notify higher layer
                (*em9304_spi_engine_tx_done_handler)();

                // re-enable irq if read pending
                if (em9304_spi_engine_rx_len){
                    // no data ready, no data to send, but read request -> enable IRQ
                    btstack_em9304_spi->set_ready_callback(&em9304_spi_engine_ready_callback);
                }
            }
            break;

        default:
            break;
    }
}

static void em9304_spi_engine_init(void){
    btstack_em9304_spi->open();
    btstack_em9304_spi->set_transfer_done_callback(&em9304_spi_engine_transfer_done);
    btstack_ring_buffer_init(&em9304_spi_engine_rx_ring_buffer, &em9304_spi_engine_rx_ring_buffer_storage[0], SPI_EM9304_RING_BUFFER_SIZE);
}

static void em9304_spi_engine_close(void){
    btstack_em9304_spi->close();
}

static void em9304_spi_engine_set_block_received( void (*the_block_handler)(void)){
    em9304_spi_engine_rx_done_handler = the_block_handler;
}

static void em9304_spi_engine_set_block_sent( void (*the_block_handler)(void)){
    em9304_spi_engine_tx_done_handler = the_block_handler;
}

static void em9304_spi_engine_send_block(const uint8_t *buffer, uint16_t length){
    em9304_spi_engine_tx_data = buffer;
    em9304_spi_engine_tx_size = length;
    em9304_spi_engine_process();
}

static void em9304_spi_engine_receive_block(uint8_t *buffer, uint16_t length){
    log_debug("em9304_spi_engine_receive_block: len %u, ring buffer has %u, UART_RX_LEN %u", length, btstack_ring_buffer_bytes_available(&em9304_spi_engine_rx_ring_buffer), em9304_spi_engine_rx_len);
    em9304_spi_engine_rx_buffer = buffer;
    em9304_spi_engine_rx_len    = length;
    em9304_spi_engine_transfer_rx_data();
    em9304_spi_engine_process();
}

//////////////////////////////////////////////////////////////////////////////

// assert pre-buffer for packet type is available
#if !defined(HCI_OUTGOING_PRE_BUFFER_SIZE) || (HCI_OUTGOING_PRE_BUFFER_SIZE == 0)
#error HCI_OUTGOING_PRE_BUFFER_SIZE not defined. Please update hci.h
#endif

static void dummy_handler(uint8_t packet_type, uint8_t *packet, uint16_t size);

typedef enum {
    H4_W4_PACKET_TYPE,
    H4_W4_EVENT_HEADER,
    H4_W4_ACL_HEADER,
    H4_W4_PAYLOAD,
} H4_STATE;

typedef enum {
    TX_IDLE = 1,
    TX_W4_PACKET_SENT,
} TX_STATE;

// write state
static TX_STATE tx_state;

static uint8_t packet_sent_event[] = { HCI_EVENT_TRANSPORT_PACKET_SENT, 0};

static void (*packet_handler)(uint8_t packet_type, uint8_t *packet, uint16_t size) = dummy_handler;

// packet reader state machine
static H4_STATE h4_state;
static int bytes_to_read;
static int read_pos;

// incoming packet buffer
static uint8_t hci_packet_with_pre_buffer[HCI_INCOMING_PRE_BUFFER_SIZE + 1 + HCI_PACKET_BUFFER_SIZE]; // packet type + max(acl header + acl payload, event header + event data)
static uint8_t * hci_packet = &hci_packet_with_pre_buffer[HCI_INCOMING_PRE_BUFFER_SIZE];

static void hci_transport_em9304_spi_reset_statemachine(void){
    h4_state = H4_W4_PACKET_TYPE;
    read_pos = 0;
    bytes_to_read = 1;
}

static void hci_transport_em9304_spi_trigger_next_read(void){
    // log_info("hci_transport_em9304_spi_trigger_next_read: %u bytes", bytes_to_read);
    em9304_spi_engine_receive_block(&hci_packet[read_pos], bytes_to_read);
}

static void hci_transport_em9304_spi_block_read(void){

    read_pos += bytes_to_read;

    switch (h4_state) {
        case H4_W4_PACKET_TYPE:
            switch (hci_packet[0]){
                case HCI_EVENT_PACKET:
                    bytes_to_read = HCI_EVENT_HEADER_SIZE;
                    h4_state = H4_W4_EVENT_HEADER;
                    break;
                case HCI_ACL_DATA_PACKET:
                    bytes_to_read = HCI_ACL_HEADER_SIZE;
                    h4_state = H4_W4_ACL_HEADER;
                    break;
                default:
                    log_error("hci_transport_h4: invalid packet type 0x%02x", hci_packet[0]);
                    hci_transport_em9304_spi_reset_statemachine();
                    break;
            }
            break;

        case H4_W4_EVENT_HEADER:
            bytes_to_read = hci_packet[2];
            h4_state = H4_W4_PAYLOAD;
            break;

        case H4_W4_ACL_HEADER:
            bytes_to_read = little_endian_read_16( hci_packet, 3);
            // check ACL length
            if (HCI_ACL_HEADER_SIZE + bytes_to_read > HCI_PACKET_BUFFER_SIZE){
                log_error("hci_transport_h4: invalid ACL payload len %d - only space for %u", bytes_to_read, HCI_PACKET_BUFFER_SIZE - HCI_ACL_HEADER_SIZE);
                hci_transport_em9304_spi_reset_statemachine();
                break;
            }
            h4_state = H4_W4_PAYLOAD;
            break;

        case H4_W4_PAYLOAD:
            packet_handler(hci_packet[0], &hci_packet[1], read_pos-1);
            hci_transport_em9304_spi_reset_statemachine();
            break;
        default:
            break;
    }

    hci_transport_em9304_spi_trigger_next_read();
}

static void hci_transport_em9304_spi_block_sent(void){
    switch (tx_state){
        case TX_W4_PACKET_SENT:
            // packet fully sent, reset state
            tx_state = TX_IDLE;
            // notify upper stack that it can send again
            packet_handler(HCI_EVENT_PACKET, &packet_sent_event[0], sizeof(packet_sent_event));
            break;
        default:
            break;
    }
}

static int hci_transport_em9304_spi_can_send_now(uint8_t packet_type){
    return tx_state == TX_IDLE;
}

static int hci_transport_em9304_spi_send_packet(uint8_t packet_type, uint8_t * packet, int size){
    // store packet type before actual data and increase size
    size++;
    packet--;
    *packet = packet_type;

    // start sending
    tx_state = TX_W4_PACKET_SENT;
    em9304_spi_engine_send_block(packet, size);
    return 0;
}

static void hci_transport_em9304_spi_init(const void * transport_config){
}

static int hci_transport_em9304_spi_open(void){

    // setup SPI engine
    em9304_spi_engine_init();
    em9304_spi_engine_set_block_received(&hci_transport_em9304_spi_block_read);
    em9304_spi_engine_set_block_sent(&hci_transport_em9304_spi_block_sent);
    // setup H4 RX
    hci_transport_em9304_spi_reset_statemachine();
    hci_transport_em9304_spi_trigger_next_read();
    // setup H4 TX
    tx_state = TX_IDLE;
    return 0;
}

static int hci_transport_em9304_spi_close(void){
    em9304_spi_engine_close();
    return 0;
}

static void hci_transport_em9304_spi_register_packet_handler(void (*handler)(uint8_t packet_type, uint8_t *packet, uint16_t size)){
    packet_handler = handler;
}

static void dummy_handler(uint8_t packet_type, uint8_t *packet, uint16_t size){
}

// --- end of HCI transport implementation ---------

static const hci_transport_t hci_transport_em9304_spi = {
    /* const char * name; */                                        "H4",
    /* void   (*init) (const void *transport_config); */            &hci_transport_em9304_spi_init,
    /* int    (*open)(void); */                                     &hci_transport_em9304_spi_open,
    /* int    (*close)(void); */                                    &hci_transport_em9304_spi_close,
    /* void   (*register_packet_handler)(void (*handler)(...); */   &hci_transport_em9304_spi_register_packet_handler,
    /* int    (*can_send_packet_now)(uint8_t packet_type); */       &hci_transport_em9304_spi_can_send_now,
    /* int    (*send_packet)(...); */                               &hci_transport_em9304_spi_send_packet,
    /* int    (*set_baudrate)(uint32_t baudrate); */                NULL,
    /* void   (*reset_link)(void); */                               NULL,
    /* void   (*set_sco_config)(uint16_t voice_setting, int num_connections); */ NULL,
};
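// Note on the vtable above: set_baudrate and set_sco_config are left NULL because
// there is no UART baud rate to configure on an SPI transport and the EM9304 is an
// LE-only controller, so SCO configuration does not apply; reset_link is likewise
// not needed here.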
// configure and return h4 singleton
const hci_transport_t * hci_transport_em9304_spi_instance(const btstack_em9304_spi_t * em9304_spi_driver) {
    btstack_em9304_spi = em9304_spi_driver;
    return &hci_transport_em9304_spi;
}
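
// Usage sketch (not part of this file): a port typically wires the transport up roughly
// as shown below. The platform SPI driver accessor btstack_em9304_spi_embedded_instance()
// and its header name are assumptions - substitute the btstack_em9304_spi_t
// implementation provided by your port.
//
//     #include "hci.h"
//     #include "hci_transport_em9304_spi.h"
//     #include "btstack_em9304_spi_embedded.h"   // assumed platform driver header
//
//     void setup_hci(void){
//         // obtain transport singleton, backed by the platform SPI driver
//         const hci_transport_t * transport =
//             hci_transport_em9304_spi_instance(btstack_em9304_spi_embedded_instance());
//         // this transport ignores its config argument (see empty init above)
//         hci_init(transport, NULL);
//     }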