/*
 * Copyright (c) 2006-2018, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2011-07-25     weety        first version
 */

#include <rtthread.h>
#include <dfs_fs.h>

#include <drivers/mmcsd_core.h>

#define DBG_ENABLE
#define DBG_SECTION_NAME               "SDIO"
#ifdef RT_SDIO_DEBUG
#define DBG_LEVEL                      DBG_LOG
#else
#define DBG_LEVEL                      DBG_INFO
#endif /* RT_SDIO_DEBUG */
#define DBG_COLOR
#include <rtdbg.h>

static rt_list_t blk_devices = RT_LIST_OBJECT_INIT(blk_devices);

#define BLK_MIN(a, b) ((a) < (b) ? (a) : (b))

struct mmcsd_blk_device
{
    struct rt_mmcsd_card *card;
    rt_list_t list;
    struct rt_device dev;
    struct dfs_partition part;
    struct rt_device_blk_geometry geometry;
    rt_size_t max_req_size;
};

#ifndef RT_MMCSD_MAX_PARTITION
#define RT_MMCSD_MAX_PARTITION 16
#endif

rt_int32_t mmcsd_num_wr_blocks(struct rt_mmcsd_card *card)
{
    rt_int32_t err;
    rt_uint32_t blocks;

    struct rt_mmcsd_req req;
    struct rt_mmcsd_cmd cmd;
    struct rt_mmcsd_data data;
    rt_uint32_t timeout_us;

    rt_memset(&cmd, 0, sizeof(struct rt_mmcsd_cmd));

    cmd.cmd_code = APP_CMD;
    cmd.arg = card->rca << 16;
    cmd.flags = RESP_SPI_R1 | RESP_R1 | CMD_AC;

    err = mmcsd_send_cmd(card->host, &cmd, 0);
    if (err)
        return -RT_ERROR;
    if (!controller_is_spi(card->host) && !(cmd.resp[0] & R1_APP_CMD))
        return -RT_ERROR;

    rt_memset(&cmd, 0, sizeof(struct rt_mmcsd_cmd));

    cmd.cmd_code = SD_APP_SEND_NUM_WR_BLKS;
    cmd.arg = 0;
    cmd.flags = RESP_SPI_R1 | RESP_R1 | CMD_ADTC;

    rt_memset(&data, 0, sizeof(struct rt_mmcsd_data));

    data.timeout_ns = card->tacc_ns * 100;
    data.timeout_clks = card->tacc_clks * 100;

    timeout_us = data.timeout_ns / 1000;
    timeout_us += data.timeout_clks * 1000 /
                  (card->host->io_cfg.clock / 1000);

    if (timeout_us > 100000)
    {
        data.timeout_ns = 100000000;
        data.timeout_clks = 0;
    }

    data.blksize = 4;
    data.blks = 1;
    data.flags = DATA_DIR_READ;
    data.buf = &blocks;

    rt_memset(&req, 0, sizeof(struct rt_mmcsd_req));

    req.cmd = &cmd;
    req.data = &data;

    mmcsd_send_request(card->host, &req);

    if (cmd.err || data.err)
        return -RT_ERROR;

    return blocks;
}

static rt_err_t rt_mmcsd_req_blk(struct rt_mmcsd_card *card,
                                 rt_uint32_t sector,
                                 void *buf,
                                 rt_size_t blks,
                                 rt_uint8_t dir)
{
    struct rt_mmcsd_cmd cmd, stop;
    struct rt_mmcsd_data data;
    struct rt_mmcsd_req req;
    struct rt_mmcsd_host *host = card->host;
    rt_uint32_t r_cmd, w_cmd;

    mmcsd_host_lock(host);
    rt_memset(&req, 0, sizeof(struct rt_mmcsd_req));
    rt_memset(&cmd, 0, sizeof(struct rt_mmcsd_cmd));
    rt_memset(&stop, 0, sizeof(struct rt_mmcsd_cmd));
    rt_memset(&data, 0, sizeof(struct rt_mmcsd_data));
    req.cmd = &cmd;
    req.data = &data;

    cmd.arg = sector;
    if (!(card->flags & CARD_FLAG_SDHC))
    {
        cmd.arg <<= 9;
    }
    cmd.flags = RESP_SPI_R1 | RESP_R1 | CMD_ADTC;

    data.blksize = SECTOR_SIZE;
    data.blks = blks;

    if (blks > 1)
    {
        if (!controller_is_spi(card->host) || !dir)
        {
            req.stop = &stop;
            stop.cmd_code = STOP_TRANSMISSION;
            stop.arg = 0;
            stop.flags = RESP_SPI_R1B | RESP_R1B | CMD_AC;
        }
        r_cmd = READ_MULTIPLE_BLOCK;
        w_cmd = WRITE_MULTIPLE_BLOCK;
    }
    else
    {
        req.stop = RT_NULL;
        r_cmd = READ_SINGLE_BLOCK;
        w_cmd = WRITE_BLOCK;
    }

    if (!dir)
    {
        cmd.cmd_code = r_cmd;
        data.flags |= DATA_DIR_READ;
    }
    else
    {
        cmd.cmd_code = w_cmd;
        data.flags |= DATA_DIR_WRITE;
    }

    mmcsd_set_data_timeout(&data, card);
    data.buf = buf;
    mmcsd_send_request(host, &req);

    if (!controller_is_spi(card->host) && dir != 0)
    {
        do
        {
            rt_int32_t err;

            cmd.cmd_code = SEND_STATUS;
            cmd.arg = card->rca << 16;
            cmd.flags = RESP_R1 | CMD_AC;
            err = mmcsd_send_cmd(card->host, &cmd, 5);
            if (err)
            {
                LOG_E("error %d requesting status", err);
                break;
            }
            /*
             * Some cards mishandle the status bits,
             * so make sure to check both the busy
             * indication and the card state.
             */
        } while (!(cmd.resp[0] & R1_READY_FOR_DATA) ||
                 (R1_CURRENT_STATE(cmd.resp[0]) == 7)); /* 7: programming state */
    }

    mmcsd_host_unlock(host);

    if (cmd.err || data.err || stop.err)
    {
        LOG_E("mmcsd request blocks error");
        LOG_E("%d,%d,%d, 0x%08x,0x%08x",
              cmd.err, data.err, stop.err, data.flags, sector);

        return -RT_ERROR;
    }

    return RT_EOK;
}

static rt_err_t rt_mmcsd_init(rt_device_t dev)
{
    return RT_EOK;
}

static rt_err_t rt_mmcsd_open(rt_device_t dev, rt_uint16_t oflag)
{
    return RT_EOK;
}

static rt_err_t rt_mmcsd_close(rt_device_t dev)
{
    return RT_EOK;
}

static rt_err_t rt_mmcsd_control(rt_device_t dev, int cmd, void *args)
{
    struct mmcsd_blk_device *blk_dev = (struct mmcsd_blk_device *)dev->user_data;

    switch (cmd)
    {
    case RT_DEVICE_CTRL_BLK_GETGEOME:
        rt_memcpy(args, &blk_dev->geometry, sizeof(struct rt_device_blk_geometry));
        break;
    default:
        break;
    }

    return RT_EOK;
}

static rt_size_t rt_mmcsd_read(rt_device_t dev,
                               rt_off_t pos,
                               void *buffer,
                               rt_size_t size)
{
    rt_err_t err = 0;
    rt_size_t offset = 0;
    rt_size_t req_size = 0;
    rt_size_t remain_size = size;
    void *rd_ptr = (void *)buffer;
    struct mmcsd_blk_device *blk_dev;
    struct dfs_partition *part;

    /* check the device handle before dereferencing its user data */
    if (dev == RT_NULL)
    {
        rt_set_errno(-EINVAL);
        return 0;
    }

    blk_dev = (struct mmcsd_blk_device *)dev->user_data;
    part = &blk_dev->part;

    /* pos and size are counted in sectors of SECTOR_SIZE bytes */
    rt_sem_take(part->lock, RT_WAITING_FOREVER);
    while (remain_size)
    {
        req_size = (remain_size > blk_dev->max_req_size) ? blk_dev->max_req_size : remain_size;
        err = rt_mmcsd_req_blk(blk_dev->card, part->offset + pos + offset, rd_ptr, req_size, 0);
        if (err)
            break;
        offset += req_size;
        rd_ptr = (void *)((rt_uint8_t *)rd_ptr + (req_size << 9));
        remain_size -= req_size;
    }
    rt_sem_release(part->lock);

    if (err)
    {
        rt_set_errno(-EIO);
        return 0;
    }
    return size - remain_size;
}

static rt_size_t rt_mmcsd_write(rt_device_t dev,
                                rt_off_t pos,
                                const void *buffer,
                                rt_size_t size)
{
    rt_err_t err = 0;
    rt_size_t offset = 0;
    rt_size_t req_size = 0;
    rt_size_t remain_size = size;
    void *wr_ptr = (void *)buffer;
    struct mmcsd_blk_device *blk_dev;
    struct dfs_partition *part;

    /* check the device handle before dereferencing its user data */
    if (dev == RT_NULL)
    {
        rt_set_errno(-EINVAL);
        return 0;
    }

    blk_dev = (struct mmcsd_blk_device *)dev->user_data;
    part = &blk_dev->part;

    /* pos and size are counted in sectors of SECTOR_SIZE bytes */
    rt_sem_take(part->lock, RT_WAITING_FOREVER);
    while (remain_size)
    {
        req_size = (remain_size > blk_dev->max_req_size) ? blk_dev->max_req_size : remain_size;
        err = rt_mmcsd_req_blk(blk_dev->card, part->offset + pos + offset, wr_ptr, req_size, 1);
        if (err)
            break;
        offset += req_size;
        wr_ptr = (void *)((rt_uint8_t *)wr_ptr + (req_size << 9));
        remain_size -= req_size;
    }
    rt_sem_release(part->lock);

    if (err)
    {
        rt_set_errno(-EIO);

        return 0;
    }
    return size - remain_size;
}

static rt_int32_t mmcsd_set_blksize(struct rt_mmcsd_card *card)
{
    struct rt_mmcsd_cmd cmd;
    int err;

    /* Block-addressed cards ignore MMC_SET_BLOCKLEN. */
    if (card->flags & CARD_FLAG_SDHC)
        return 0;

    mmcsd_host_lock(card->host);
    cmd.cmd_code = SET_BLOCKLEN;
    cmd.arg = 512;
    cmd.flags = RESP_SPI_R1 | RESP_R1 | CMD_AC;
    err = mmcsd_send_cmd(card->host, &cmd, 5);
    mmcsd_host_unlock(card->host);

    if (err)
    {
        LOG_E("MMCSD: unable to set block size to %d: %d", cmd.arg, err);

        return -RT_ERROR;
    }

    return 0;
}

#ifdef RT_USING_DEVICE_OPS
const static struct rt_device_ops mmcsd_blk_ops =
{
    rt_mmcsd_init,
    rt_mmcsd_open,
    rt_mmcsd_close,
    rt_mmcsd_read,
    rt_mmcsd_write,
    rt_mmcsd_control
};
#endif

rt_int32_t rt_mmcsd_blk_probe(struct rt_mmcsd_card *card)
{
    rt_int32_t err = 0;
    rt_uint8_t i, status;
    rt_uint8_t *sector;
    char dname[8];
    char sname[12];
    struct mmcsd_blk_device *blk_dev = RT_NULL;

    err = mmcsd_set_blksize(card);
    if (err)
    {
        return err;
    }

    LOG_I("probe mmcsd block device!");

    /* read the first sector to get the partition table */
    sector = (rt_uint8_t *)rt_malloc(SECTOR_SIZE);
    if (sector == RT_NULL)
    {
        LOG_E("allocate partition sector buffer failed!");

        return -RT_ENOMEM;
    }

    status = rt_mmcsd_req_blk(card, 0, sector, 1, 0);
    if (status == RT_EOK)
    {
        for (i = 0; i < RT_MMCSD_MAX_PARTITION; i++)
        {
            blk_dev = rt_calloc(1, sizeof(struct mmcsd_blk_device));
            if (!blk_dev)
            {
                LOG_E("mmcsd:malloc memory failed!");
                break;
            }

            /* the largest request (in sectors) the host can handle, limited by
             * both its DMA segment capability and its block-count capability */
            blk_dev->max_req_size = BLK_MIN((card->host->max_dma_segs *
                                             card->host->max_seg_size) >> 9,
                                            (card->host->max_blk_count *
                                             card->host->max_blk_size) >> 9);

            /* get the i-th partition entry */
            status = dfs_filesystem_get_partition(&blk_dev->part, sector, i);
            if (status == RT_EOK)
            {
                rt_snprintf(dname, sizeof(dname), "sd%d", i);
                rt_snprintf(sname, sizeof(sname), "sem_sd%d", i);
                blk_dev->part.lock = rt_sem_create(sname, 1, RT_IPC_FLAG_FIFO);

                /* register mmcsd device */
                blk_dev->dev.type = RT_Device_Class_Block;
#ifdef RT_USING_DEVICE_OPS
                blk_dev->dev.ops = &mmcsd_blk_ops;
#else
                blk_dev->dev.init = rt_mmcsd_init;
                blk_dev->dev.open = rt_mmcsd_open;
                blk_dev->dev.close = rt_mmcsd_close;
                blk_dev->dev.read = rt_mmcsd_read;
                blk_dev->dev.write = rt_mmcsd_write;
                blk_dev->dev.control = rt_mmcsd_control;
#endif
                blk_dev->dev.user_data = blk_dev;

                blk_dev->card = card;

                blk_dev->geometry.bytes_per_sector = 1 << 9;
                blk_dev->geometry.block_size = card->card_blksize;
                blk_dev->geometry.sector_count = blk_dev->part.size;

                rt_device_register(&blk_dev->dev, dname,
                                   RT_DEVICE_FLAG_RDWR | RT_DEVICE_FLAG_REMOVABLE | RT_DEVICE_FLAG_STANDALONE);
                rt_list_insert_after(&blk_devices, &blk_dev->list);
            }
            else
            {
                if (i == 0)
                {
                    /* there is no partition table */
                    blk_dev->part.offset = 0;
                    blk_dev->part.size = 0;
                    blk_dev->part.lock = rt_sem_create("sem_sd0", 1, RT_IPC_FLAG_FIFO);

                    /* register mmcsd device */
                    blk_dev->dev.type = RT_Device_Class_Block;
#ifdef RT_USING_DEVICE_OPS
                    blk_dev->dev.ops = &mmcsd_blk_ops;
#else
                    blk_dev->dev.init = rt_mmcsd_init;
                    blk_dev->dev.open = rt_mmcsd_open;
                    blk_dev->dev.close = rt_mmcsd_close;
                    blk_dev->dev.read = rt_mmcsd_read;
                    blk_dev->dev.write = rt_mmcsd_write;
                    blk_dev->dev.control = rt_mmcsd_control;
#endif
                    blk_dev->dev.user_data = blk_dev;

                    blk_dev->card = card;

                    blk_dev->geometry.bytes_per_sector = 1 << 9;
                    blk_dev->geometry.block_size = card->card_blksize;
                    /* card_capacity is in KiB; convert to 512-byte sectors */
                    blk_dev->geometry.sector_count =
                        card->card_capacity * (1024 / 512);

                    rt_device_register(&blk_dev->dev, "sd0",
                                       RT_DEVICE_FLAG_RDWR | RT_DEVICE_FLAG_REMOVABLE | RT_DEVICE_FLAG_STANDALONE);
                    rt_list_insert_after(&blk_devices, &blk_dev->list);

                    break;
                }
                else
                {
                    rt_free(blk_dev);
                    blk_dev = RT_NULL;
                    break;
                }
            }

#ifdef RT_USING_DFS_MNTTABLE
            if (blk_dev)
            {
                LOG_I("try to mount file system!");
                /* try to mount file system on this block device */
                dfs_mount_device(&(blk_dev->dev));
            }
#endif
        }
    }
    else
    {
        LOG_E("read mmcsd first sector failed");
        err = -RT_ERROR;
    }

    /* release sector buffer */
    rt_free(sector);

    return err;
}

void rt_mmcsd_blk_remove(struct rt_mmcsd_card *card)
{
    rt_list_t *l, *n;
    struct mmcsd_blk_device *blk_dev;

    /* use a safe iterator so the current node can be removed while walking the list */
    for (l = (&blk_devices)->next, n = l->next; l != &blk_devices; l = n, n = n->next)
    {
        blk_dev = (struct mmcsd_blk_device *)rt_list_entry(l, struct mmcsd_blk_device, list);
        if (blk_dev->card == card)
        {
            /* unmount the file system on this device, if any */
            const char *mounted_path = dfs_filesystem_get_mounted_path(&(blk_dev->dev));
            if (mounted_path)
            {
                dfs_unmount(mounted_path);
            }

            rt_device_unregister(&blk_dev->dev);
            rt_list_remove(&blk_dev->list);
            rt_free(blk_dev);
        }
    }
}

/*
 * This function will initialize block device on the mmc/sd.
 *
 * @deprecated since 2.1.0, this function does not need to be invoked
 * in the system initialization.
 */
int rt_mmcsd_blk_init(void)
{
    /* nothing */
    return 0;
}