/*
 * Copyright (c) 2006-2018, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2011-07-25     weety        first version
 */

#include <rtthread.h>
#include <drivers/mmcsd_core.h>
#include <drivers/sd.h>
#include <drivers/mmc.h>
#include <drivers/sdio.h>

#define DBG_ENABLE
#define DBG_SECTION_NAME               "SDIO"
#ifdef RT_SDIO_DEBUG
#define DBG_LEVEL                      DBG_LOG
#else
#define DBG_LEVEL                      DBG_INFO
#endif /* RT_SDIO_DEBUG */
#define DBG_COLOR
#include <rtdbg.h>

#ifndef RT_MMCSD_STACK_SIZE
#define RT_MMCSD_STACK_SIZE 1024
#endif
#ifndef RT_MMCSD_THREAD_PREORITY
#if (RT_THREAD_PRIORITY_MAX == 32)
#define RT_MMCSD_THREAD_PREORITY  0x16
#else
#define RT_MMCSD_THREAD_PREORITY  0x40
#endif
#endif

//static struct rt_semaphore mmcsd_sem;
static struct rt_thread mmcsd_detect_thread;
static rt_uint8_t mmcsd_stack[RT_MMCSD_STACK_SIZE];
static struct rt_mailbox mmcsd_detect_mb;
static rt_uint32_t mmcsd_detect_mb_pool[4];
static struct rt_mailbox mmcsd_hotpluge_mb;
static rt_uint32_t mmcsd_hotpluge_mb_pool[4];

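/*
 * Take/release the bus lock of a host. The bus lock serializes all
 * command and data traffic on the host between the detect thread,
 * the block driver and SDIO function drivers.
 */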
void mmcsd_host_lock(struct rt_mmcsd_host *host)
{
    rt_mutex_take(&host->bus_lock, RT_WAITING_FOREVER);
}

void mmcsd_host_unlock(struct rt_mmcsd_host *host)
{
    rt_mutex_release(&host->bus_lock);
}

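/*
 * Called by the host controller driver once a request has finished;
 * wakes up the thread blocked in mmcsd_send_request().
 */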
void mmcsd_req_complete(struct rt_mmcsd_host *host)
{
    rt_sem_release(&host->sem_ack);
}

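/*
 * Hand a request to the host controller driver and block on sem_ack
 * until mmcsd_req_complete() signals completion. The command is
 * retried while it reports an error and retries remain.
 */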
void mmcsd_send_request(struct rt_mmcsd_host *host, struct rt_mmcsd_req *req)
{
    do {
        req->cmd->retries--;
        req->cmd->err = 0;
        req->cmd->mrq = req;
        if (req->data)
        {
            req->cmd->data = req->data;
            req->data->err = 0;
            req->data->mrq = req;
            if (req->stop)
            {
                req->data->stop = req->stop;
                req->stop->err = 0;
                req->stop->mrq = req;
            }
        }
        host->ops->request(host, req);

        rt_sem_take(&host->sem_ack, RT_WAITING_FOREVER);

    } while (req->cmd->err && (req->cmd->retries > 0));
}

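/*
 * Synchronously issue a single command without a data phase and return
 * cmd->err. A minimal usage sketch (illustrative only; it assumes
 * SEND_STATUS and the response/command flags are defined as in the
 * mmcsd headers, and that the caller already holds the host lock):
 *
 *     struct rt_mmcsd_cmd cmd;
 *
 *     rt_memset(&cmd, 0, sizeof(cmd));
 *     cmd.cmd_code = SEND_STATUS;
 *     cmd.arg      = card->rca << 16;
 *     cmd.flags    = RESP_R1 | CMD_AC;
 *     if (mmcsd_send_cmd(card->host, &cmd, 3) == 0)
 *         status = cmd.resp[0];
 */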
rt_int32_t mmcsd_send_cmd(struct rt_mmcsd_host *host,
                          struct rt_mmcsd_cmd  *cmd,
                          int                   retries)
{
    struct rt_mmcsd_req req;

    rt_memset(&req, 0, sizeof(struct rt_mmcsd_req));
    rt_memset(cmd->resp, 0, sizeof(cmd->resp));
    cmd->retries = retries;

    req.cmd = cmd;
    cmd->data = RT_NULL;

    mmcsd_send_request(host, &req);

    return cmd->err;
}

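/*
 * CMD0 (GO_IDLE_STATE): reset the card to idle state. For non-SPI
 * hosts the chip-select line is driven high around the command so the
 * card does not accidentally enter SPI mode.
 */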
rt_int32_t mmcsd_go_idle(struct rt_mmcsd_host *host)
{
    rt_int32_t err;
    struct rt_mmcsd_cmd cmd;

    if (!controller_is_spi(host))
    {
        mmcsd_set_chip_select(host, MMCSD_CS_HIGH);
        mmcsd_delay_ms(1);
    }

    rt_memset(&cmd, 0, sizeof(struct rt_mmcsd_cmd));

    cmd.cmd_code = GO_IDLE_STATE;
    cmd.arg = 0;
    cmd.flags = RESP_SPI_R1 | RESP_NONE | CMD_BC;

    err = mmcsd_send_cmd(host, &cmd, 0);

    mmcsd_delay_ms(1);

    if (!controller_is_spi(host))
    {
        mmcsd_set_chip_select(host, MMCSD_CS_IGNORE);
        mmcsd_delay_ms(1);
    }

    return err;
}

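/*
 * SPI-mode CMD58 (READ_OCR): read the card's OCR register; bit 30 of
 * the argument advertises host support for high-capacity cards.
 */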
rt_int32_t mmcsd_spi_read_ocr(struct rt_mmcsd_host *host,
                              rt_int32_t            high_capacity,
                              rt_uint32_t          *ocr)
{
    struct rt_mmcsd_cmd cmd;
    rt_int32_t err;

    rt_memset(&cmd, 0, sizeof(struct rt_mmcsd_cmd));

    cmd.cmd_code = SPI_READ_OCR;
    cmd.arg = high_capacity ? (1 << 30) : 0;
    cmd.flags = RESP_SPI_R3;

    err = mmcsd_send_cmd(host, &cmd, 0);

    *ocr = cmd.resp[1];

    return err;
}

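/*
 * CMD2 (ALL_SEND_CID): ask the card in identification state to send
 * its 128-bit CID over the command line.
 */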
rt_int32_t mmcsd_all_get_cid(struct rt_mmcsd_host *host, rt_uint32_t *cid)
{
    rt_int32_t err;
    struct rt_mmcsd_cmd cmd;

    rt_memset(&cmd, 0, sizeof(struct rt_mmcsd_cmd));

    cmd.cmd_code = ALL_SEND_CID;
    cmd.arg = 0;
    cmd.flags = RESP_R2 | CMD_BCR;

    err = mmcsd_send_cmd(host, &cmd, 3);
    if (err)
        return err;

    rt_memcpy(cid, cmd.resp, sizeof(rt_uint32_t) * 4);

    return 0;
}

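/*
 * Read the CID of the selected card. Native hosts use CMD10 (SEND_CID)
 * addressed by RCA; SPI hosts transfer the 16-byte CID register as a
 * data block instead.
 */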
rt_int32_t mmcsd_get_cid(struct rt_mmcsd_host *host, rt_uint32_t *cid)
{
    rt_int32_t err, i;
    struct rt_mmcsd_req req;
    struct rt_mmcsd_cmd cmd;
    struct rt_mmcsd_data data;
    rt_uint32_t *buf = RT_NULL;

    if (!controller_is_spi(host))
    {
        if (!host->card)
            return -RT_ERROR;
        rt_memset(&cmd, 0, sizeof(struct rt_mmcsd_cmd));

        cmd.cmd_code = SEND_CID;
        cmd.arg = host->card->rca << 16;
        cmd.flags = RESP_R2 | CMD_AC;
        err = mmcsd_send_cmd(host, &cmd, 3);
        if (err)
            return err;

        rt_memcpy(cid, cmd.resp, sizeof(rt_uint32_t) * 4);

        return 0;
    }

    buf = (rt_uint32_t *)rt_malloc(16);
    if (!buf)
    {
        LOG_E("allocate memory failed!");

        return -RT_ENOMEM;
    }

    rt_memset(&req, 0, sizeof(struct rt_mmcsd_req));
    rt_memset(&cmd, 0, sizeof(struct rt_mmcsd_cmd));
    rt_memset(&data, 0, sizeof(struct rt_mmcsd_data));

    req.cmd = &cmd;
    req.data = &data;

    cmd.cmd_code = SEND_CID;
    cmd.arg = 0;

    /* NOTE HACK: the RESP_SPI_R1 is always correct here, but we
     * rely on callers to never use this with "native" calls for reading
     * CSD or CID. Native versions of those commands use the R2 type,
     * not R1 plus a data block.
     */
    cmd.flags = RESP_SPI_R1 | RESP_R1 | CMD_ADTC;

    data.blksize = 16;
    data.blks = 1;
    data.flags = DATA_DIR_READ;
    data.buf = buf;
    /*
     * The spec states that CSD and CID accesses have a timeout
     * of 64 clock cycles.
     */
    data.timeout_ns = 0;
    data.timeout_clks = 64;

    mmcsd_send_request(host, &req);

    if (cmd.err || data.err)
    {
        rt_free(buf);

        return -RT_ERROR;
    }

    for (i = 0; i < 4; i++)
        cid[i] = buf[i];
    rt_free(buf);

    return 0;
}

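/*
 * Read the CSD of a card. Native hosts use CMD9 (SEND_CSD) addressed
 * by RCA; SPI hosts transfer the 16-byte CSD register as a data block.
 */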
rt_int32_t mmcsd_get_csd(struct rt_mmcsd_card *card, rt_uint32_t *csd)
{
    rt_int32_t err, i;
    struct rt_mmcsd_req req;
    struct rt_mmcsd_cmd cmd;
    struct rt_mmcsd_data data;
    rt_uint32_t *buf = RT_NULL;

    if (!controller_is_spi(card->host))
    {
        rt_memset(&cmd, 0, sizeof(struct rt_mmcsd_cmd));

        cmd.cmd_code = SEND_CSD;
        cmd.arg = card->rca << 16;
        cmd.flags = RESP_R2 | CMD_AC;
        err = mmcsd_send_cmd(card->host, &cmd, 3);
        if (err)
            return err;

        rt_memcpy(csd, cmd.resp, sizeof(rt_uint32_t) * 4);

        return 0;
    }

    buf = (rt_uint32_t *)rt_malloc(16);
    if (!buf)
    {
        LOG_E("allocate memory failed!");

        return -RT_ENOMEM;
    }

    rt_memset(&req, 0, sizeof(struct rt_mmcsd_req));
    rt_memset(&cmd, 0, sizeof(struct rt_mmcsd_cmd));
    rt_memset(&data, 0, sizeof(struct rt_mmcsd_data));

    req.cmd = &cmd;
    req.data = &data;

    cmd.cmd_code = SEND_CSD;
    cmd.arg = 0;

    /* NOTE HACK: the RESP_SPI_R1 is always correct here, but we
     * rely on callers to never use this with "native" calls for reading
     * CSD or CID. Native versions of those commands use the R2 type,
     * not R1 plus a data block.
     */
    cmd.flags = RESP_SPI_R1 | RESP_R1 | CMD_ADTC;

    data.blksize = 16;
    data.blks = 1;
    data.flags = DATA_DIR_READ;
    data.buf = buf;

    /*
     * The spec states that CSD and CID accesses have a timeout
     * of 64 clock cycles.
     */
    data.timeout_ns = 0;
    data.timeout_clks = 64;

    mmcsd_send_request(card->host, &req);

    if (cmd.err || data.err)
    {
        rt_free(buf);

        return -RT_ERROR;
    }

    for (i = 0; i < 4; i++)
        csd[i] = buf[i];
    rt_free(buf);

    return 0;
}

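/*
 * CMD7 (SELECT/DESELECT_CARD): select the given card by its RCA, or
 * deselect all cards when card is RT_NULL (argument 0, no response).
 */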
static rt_int32_t _mmcsd_select_card(struct rt_mmcsd_host *host,
                                     struct rt_mmcsd_card *card)
{
    rt_int32_t err;
    struct rt_mmcsd_cmd cmd;

    rt_memset(&cmd, 0, sizeof(struct rt_mmcsd_cmd));

    cmd.cmd_code = SELECT_CARD;

    if (card)
    {
        cmd.arg = card->rca << 16;
        cmd.flags = RESP_R1 | CMD_AC;
    }
    else
    {
        cmd.arg = 0;
        cmd.flags = RESP_NONE | CMD_AC;
    }

    err = mmcsd_send_cmd(host, &cmd, 3);
    if (err)
        return err;

    return 0;
}

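/*
 * Convenience wrappers around _mmcsd_select_card() for selecting and
 * deselecting a card on its host.
 */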
rt_int32_t mmcsd_select_card(struct rt_mmcsd_card *card)
{
    return _mmcsd_select_card(card->host, card);
}

rt_int32_t mmcsd_deselect_cards(struct rt_mmcsd_card *card)
{
    return _mmcsd_select_card(card->host, RT_NULL);
}

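/*
 * SPI-mode CMD59 (CRC_ON_OFF): enable or disable CRC checking on the
 * SPI bus and remember the setting in host->spi_use_crc.
 */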
rt_int32_t mmcsd_spi_use_crc(struct rt_mmcsd_host *host, rt_int32_t use_crc)
{
    struct rt_mmcsd_cmd cmd;
    rt_int32_t err;

    rt_memset(&cmd, 0, sizeof(struct rt_mmcsd_cmd));

    cmd.cmd_code = SPI_CRC_ON_OFF;
    cmd.flags = RESP_SPI_R1;
    cmd.arg = use_crc;

    err = mmcsd_send_cmd(host, &cmd, 0);
    if (!err)
        host->spi_use_crc = use_crc;

    return err;
}

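/*
 * Push the cached I/O configuration (clock, bus mode, power mode,
 * chip select, vdd, bus width) down to the host controller driver.
 */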
rt_inline void mmcsd_set_iocfg(struct rt_mmcsd_host *host)
{
    struct rt_mmcsd_io_cfg *io_cfg = &host->io_cfg;

    mmcsd_dbg("clock %uHz busmode %u powermode %u cs %u Vdd %u "
              "width %u \n",
              io_cfg->clock, io_cfg->bus_mode,
              io_cfg->power_mode, io_cfg->chip_select, io_cfg->vdd,
              io_cfg->bus_width);

    host->ops->set_iocfg(host, io_cfg);
}

/*
 * Control chip select pin on a host.
 */
void mmcsd_set_chip_select(struct rt_mmcsd_host *host, rt_int32_t mode)
{
    host->io_cfg.chip_select = mode;
    mmcsd_set_iocfg(host);
}

/*
 * Set the host clock to the requested frequency "clk"
 * (warn if it is below the host's minimum frequency).
 */
void mmcsd_set_clock(struct rt_mmcsd_host *host, rt_uint32_t clk)
{
    if (clk < host->freq_min)
    {
        LOG_W("clock too low!");
    }

    host->io_cfg.clock = clk;
    mmcsd_set_iocfg(host);
}

/*
 * Change the bus mode (open drain/push-pull) of a host.
 */
void mmcsd_set_bus_mode(struct rt_mmcsd_host *host, rt_uint32_t mode)
{
    host->io_cfg.bus_mode = mode;
    mmcsd_set_iocfg(host);
}

/*
 * Change data bus width of a host.
 */
void mmcsd_set_bus_width(struct rt_mmcsd_host *host, rt_uint32_t width)
{
    host->io_cfg.bus_width = width;
    mmcsd_set_iocfg(host);
}

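/*
 * Compute the data timeout for a transfer from the card's CSD timing
 * parameters (TAAC/NSAC scaled by the write factor for writes), with
 * fixed values for SDIO and an upper clamp for SD/SDHC cards.
 */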
void mmcsd_set_data_timeout(struct rt_mmcsd_data       *data,
                            const struct rt_mmcsd_card *card)
{
    rt_uint32_t mult;

    if (card->card_type == CARD_TYPE_SDIO)
    {
        data->timeout_ns = 1000000000; /* SDIO card 1s */
        data->timeout_clks = 0;

        return;
    }

    /*
     * SD cards use a 100 multiplier rather than 10
     */
    mult = (card->card_type == CARD_TYPE_SD) ? 100 : 10;

    /*
     * Scale up the multiplier (and therefore the timeout) by
     * the r2w factor for writes.
     */
    if (data->flags & DATA_DIR_WRITE)
        mult <<= card->csd.r2w_factor;

    data->timeout_ns = card->tacc_ns * mult;
    data->timeout_clks = card->tacc_clks * mult;

    /*
     * SD cards also have an upper limit on the timeout.
     */
    if (card->card_type == CARD_TYPE_SD)
    {
        rt_uint32_t timeout_us, limit_us;

        timeout_us = data->timeout_ns / 1000;
        timeout_us += data->timeout_clks * 1000 /
                      (card->host->io_cfg.clock / 1000);

        if (data->flags & DATA_DIR_WRITE)
            /*
             * The limit is really 250 ms, but that is
             * insufficient for some crappy cards.
             */
            limit_us = 300000;
        else
            limit_us = 100000;

        /*
         * SDHC cards always use these fixed values.
         */
        if (timeout_us > limit_us || card->flags & CARD_FLAG_SDHC)
        {
            data->timeout_ns = limit_us * 1000; /* clamp to the fixed limit */
            data->timeout_clks = 0;
        }
    }

    if (controller_is_spi(card->host))
    {
        if (data->flags & DATA_DIR_WRITE)
        {
            if (data->timeout_ns < 1000000000)
                data->timeout_ns = 1000000000; /* 1s */
        }
        else
        {
            if (data->timeout_ns < 100000000)
                data->timeout_ns = 100000000; /* 100ms */
        }
    }
}

/*
 * Mask off any voltages we don't support and select
 * the lowest voltage
 */
rt_uint32_t mmcsd_select_voltage(struct rt_mmcsd_host *host, rt_uint32_t ocr)
{
    int bit;
    extern int __rt_ffs(int value);

    ocr &= host->valid_ocr;

    bit = __rt_ffs(ocr);
    if (bit)
    {
        bit -= 1;

        ocr &= 3 << bit;

        host->io_cfg.vdd = bit;
        mmcsd_set_iocfg(host);
    }
    else
    {
        LOG_W("host doesn't support card's voltages!");
        ocr = 0;
    }

    return ocr;
}

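/*
 * Power-up sequence for a slot: apply the highest supported voltage,
 * start the clock at the host's minimum frequency and wait long enough
 * for the supply and clock to become stable before identification.
 */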
static void mmcsd_power_up(struct rt_mmcsd_host *host)
{
    int bit = __rt_fls(host->valid_ocr) - 1;

    host->io_cfg.vdd = bit;
    if (controller_is_spi(host))
    {
        host->io_cfg.chip_select = MMCSD_CS_HIGH;
        host->io_cfg.bus_mode = MMCSD_BUSMODE_PUSHPULL;
    }
    else
    {
        host->io_cfg.chip_select = MMCSD_CS_IGNORE;
        host->io_cfg.bus_mode = MMCSD_BUSMODE_OPENDRAIN;
    }
    host->io_cfg.power_mode = MMCSD_POWER_UP;
    host->io_cfg.bus_width = MMCSD_BUS_WIDTH_1;
    mmcsd_set_iocfg(host);

    /*
     * This delay should be sufficient to allow the power supply
     * to reach the minimum voltage.
     */
    mmcsd_delay_ms(10);

    host->io_cfg.clock = host->freq_min;
    host->io_cfg.power_mode = MMCSD_POWER_ON;
    mmcsd_set_iocfg(host);

    /*
     * This delay must be at least 74 clock cycles, or 1 ms, or the
     * time required to reach a stable voltage.
     */
    mmcsd_delay_ms(10);
}

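/*
 * Power down a slot and return the I/O configuration to its reset
 * defaults (clock off, 1-bit bus, open-drain for native hosts).
 */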
static void mmcsd_power_off(struct rt_mmcsd_host *host)
{
    host->io_cfg.clock = 0;
    host->io_cfg.vdd = 0;
    if (!controller_is_spi(host))
    {
        host->io_cfg.bus_mode = MMCSD_BUSMODE_OPENDRAIN;
        host->io_cfg.chip_select = MMCSD_CS_IGNORE;
    }
    host->io_cfg.power_mode = MMCSD_POWER_OFF;
    host->io_cfg.bus_width = MMCSD_BUS_WIDTH_1;
    mmcsd_set_iocfg(host);
}

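/*
 * Block until a card-detect change is reported (or the timeout, in OS
 * ticks, expires) and return MMCSD_HOST_PLUGED, MMCSD_HOST_UNPLUGED or
 * -RT_ETIMEOUT. A minimal usage sketch (illustrative only), e.g. in
 * application code that waits for a card before mounting it:
 *
 *     if (mmcsd_wait_cd_changed(RT_WAITING_FOREVER) == MMCSD_HOST_PLUGED)
 *     {
 *         // the detect thread has registered the card's block device
 *     }
 */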
int mmcsd_wait_cd_changed(rt_int32_t timeout)
{
    struct rt_mmcsd_host *host;
    if (rt_mb_recv(&mmcsd_hotpluge_mb, (rt_ubase_t *)&host, timeout) == RT_EOK)
    {
        if (host->card == RT_NULL)
        {
            return MMCSD_HOST_UNPLUGED;
        }
        else
        {
            return MMCSD_HOST_PLUGED;
        }
    }
    return -RT_ETIMEOUT;
}
RTM_EXPORT(mmcsd_wait_cd_changed);

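/*
 * Called by host controller drivers (typically from a card-detect
 * interrupt) to ask the detect thread to rescan this host.
 */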
void mmcsd_change(struct rt_mmcsd_host *host)
{
    rt_mb_send(&mmcsd_detect_mb, (rt_ubase_t)host);
}

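/*
 * Card detect thread: on insertion it powers up the host and probes
 * for SDIO, SD and MMC cards in turn; on removal it unregisters and
 * frees the card. Hotplug events are forwarded to mmcsd_hotpluge_mb
 * for mmcsd_wait_cd_changed().
 */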
void mmcsd_detect(void *param)
{
    struct rt_mmcsd_host *host;
    rt_uint32_t ocr;
    rt_int32_t err;

    while (1)
    {
        if (rt_mb_recv(&mmcsd_detect_mb, (rt_ubase_t *)&host, RT_WAITING_FOREVER) == RT_EOK)
        {
            if (host->card == RT_NULL)
            {
                mmcsd_host_lock(host);
                mmcsd_power_up(host);
                mmcsd_go_idle(host);

                mmcsd_send_if_cond(host, host->valid_ocr);

                err = sdio_io_send_op_cond(host, 0, &ocr);
                if (!err)
                {
                    if (init_sdio(host, ocr))
                        mmcsd_power_off(host);
                    mmcsd_host_unlock(host);
                    continue;
                }

                /*
                 * detect SD card
                 */
                err = mmcsd_send_app_op_cond(host, 0, &ocr);
                if (!err)
                {
                    if (init_sd(host, ocr))
                        mmcsd_power_off(host);
                    mmcsd_host_unlock(host);
                    rt_mb_send(&mmcsd_hotpluge_mb, (rt_ubase_t)host);
                    continue;
                }

                /*
                 * detect mmc card
                 */
                err = mmc_send_op_cond(host, 0, &ocr);
                if (!err)
                {
                    if (init_mmc(host, ocr))
                        mmcsd_power_off(host);
                    mmcsd_host_unlock(host);
                    rt_mb_send(&mmcsd_hotpluge_mb, (rt_ubase_t)host);
                    continue;
                }
                mmcsd_host_unlock(host);
            }
            else
            {
                /* card removed */
                mmcsd_host_lock(host);
                if (host->card->sdio_function_num != 0)
                {
                    LOG_W("unsupported sdio card plug out!");
                }
                else
                {
                    rt_mmcsd_blk_remove(host->card);
                    rt_free(host->card);

                    host->card = RT_NULL;
                }
                mmcsd_host_unlock(host);
                rt_mb_send(&mmcsd_hotpluge_mb, (rt_ubase_t)host);
            }
        }
    }
}

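/*
 * Allocate and pre-initialize a host descriptor. A host controller
 * driver fills in the returned structure and then notifies the core.
 * A rough sketch (illustrative only; the ops table, frequencies, OCR
 * mask and flags depend on the actual controller):
 *
 *     struct rt_mmcsd_host *host = mmcsd_alloc_host();
 *     if (host != RT_NULL)
 *     {
 *         host->ops       = &my_host_ops;   // request()/set_iocfg()/...
 *         host->freq_min  = 400 * 1000;
 *         host->freq_max  = 25 * 1000 * 1000;
 *         host->valid_ocr = VDD_32_33 | VDD_33_34;
 *         host->flags     = MMCSD_BUSWIDTH_4 | MMCSD_MUTBLKWRITE;
 *         host->private_data = my_controller;
 *         mmcsd_change(host);               // trigger card detection
 *     }
 */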
struct rt_mmcsd_host *mmcsd_alloc_host(void)
{
    struct rt_mmcsd_host *host;

    host = rt_malloc(sizeof(struct rt_mmcsd_host));
    if (!host)
    {
        LOG_E("alloc host failed");

        return RT_NULL;
    }

    rt_memset(host, 0, sizeof(struct rt_mmcsd_host));

    host->max_seg_size = 65535;
    host->max_dma_segs = 1;
    host->max_blk_size = 512;
    host->max_blk_count = 4096;

    rt_mutex_init(&host->bus_lock, "sd_bus_lock", RT_IPC_FLAG_FIFO);
    rt_sem_init(&host->sem_ack, "sd_ack", 0, RT_IPC_FLAG_FIFO);

    return host;
}

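/*
 * Release the synchronization objects and memory allocated by
 * mmcsd_alloc_host().
 */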
void mmcsd_free_host(struct rt_mmcsd_host *host)
{
    rt_mutex_detach(&host->bus_lock);
    rt_sem_detach(&host->sem_ack);
    rt_free(host);
}

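/*
 * Core initialization: set up the detect/hotplug mailboxes, start the
 * detect thread and initialize the SDIO bus layer. Registered with
 * INIT_PREV_EXPORT so it runs during early component initialization.
 */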
int rt_mmcsd_core_init(void)
{
    rt_err_t ret;

    /* initialize mailbox and create detect SD card thread */
    ret = rt_mb_init(&mmcsd_detect_mb, "mmcsdmb",
                     &mmcsd_detect_mb_pool[0], sizeof(mmcsd_detect_mb_pool) / sizeof(mmcsd_detect_mb_pool[0]),
                     RT_IPC_FLAG_FIFO);
    RT_ASSERT(ret == RT_EOK);

    ret = rt_mb_init(&mmcsd_hotpluge_mb, "mmcsdhotplugmb",
                     &mmcsd_hotpluge_mb_pool[0], sizeof(mmcsd_hotpluge_mb_pool) / sizeof(mmcsd_hotpluge_mb_pool[0]),
                     RT_IPC_FLAG_FIFO);
    RT_ASSERT(ret == RT_EOK);

    ret = rt_thread_init(&mmcsd_detect_thread, "mmcsd_detect", mmcsd_detect, RT_NULL,
                         &mmcsd_stack[0], RT_MMCSD_STACK_SIZE, RT_MMCSD_THREAD_PREORITY, 20);
    if (ret == RT_EOK)
    {
        rt_thread_startup(&mmcsd_detect_thread);
    }

    rt_sdio_init();

    return 0;
}
INIT_PREV_EXPORT(rt_mmcsd_core_init);