/*
 * Copyright (c) 2006-2018, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author        Notes
 * 2018-09-10     heyuanjie87   first version
 */

#include <rtdevice.h>

#define MTDTONAND(x)  ((rt_nand_t*)(x))
#define NOTALIGNED(x) (((x) & (chip->page_size - 1)) != 0)
#ifndef min
#define min(a, b)     ((a) > (b) ? (b) : (a))
#endif

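/*
 * Fill chip->oob_poi (pre-set to 0xff) with the caller's OOB data according
 * to desc->mode: at desc->ooboffs for PLACE_OOB/RAW, or into the free
 * (non-ECC) area described by chip->freelayout for AUTO_OOB. Returns the
 * advanced source pointer, or NULL for an unknown mode.
 */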
static uint8_t *nand_fill_oob(rt_nand_t *chip, uint8_t *oob, size_t len, struct mtd_io_desc *desc)
{
    rt_memset(chip->oob_poi, 0xff, chip->oobsize);

    switch (desc->mode)
    {
    case MTD_OPM_PLACE_OOB:
    case MTD_OPM_RAW:
        rt_memcpy(chip->oob_poi + desc->ooboffs, oob, len);
        return oob + len;

    case MTD_OPM_AUTO_OOB:
    {
        const struct mtd_oob_region *free = chip->freelayout;
        uint32_t boffs;
        size_t bytes;

        bytes = min(len, free->length);
        boffs = free->offset;

        rt_memcpy(chip->oob_poi + boffs, oob, bytes);
        oob += bytes;

        return oob;
    }
    }

    return NULL;
}

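/*
 * Copy OOB data out of chip->oob_poi into the caller's buffer: from
 * desc->ooboffs for PLACE_OOB/RAW, or gathered from the free areas in
 * chip->freelayout (skipping desc->ooboffs bytes) for AUTO_OOB.
 * Returns the advanced destination pointer, or NULL for an unknown mode.
 */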
static uint8_t *nand_transfer_oob(rt_nand_t *chip, uint8_t *oob, struct mtd_io_desc *desc, size_t len)
{
    switch (desc->mode)
    {
    case MTD_OPM_PLACE_OOB:
    case MTD_OPM_RAW:
        rt_memcpy(oob, chip->oob_poi + desc->ooboffs, len);
        return oob + len;

    case MTD_OPM_AUTO_OOB:
    {
        struct mtd_oob_region *free = (struct mtd_oob_region *)chip->freelayout;
        uint32_t boffs = 0, roffs = desc->ooboffs;
        size_t bytes = 0;

        for (; free->length && len; free++, len -= bytes)
        {
            /* Read request not from offset 0? */
            if (roffs)
            {
                if (roffs >= free->length)
                {
                    roffs -= free->length;
                    continue;
                }
                boffs = free->offset + roffs;
                bytes = min(len, (free->length - roffs));
                roffs = 0;
            }
            else
            {
                bytes = min(len, free->length);
                boffs = free->offset;
            }

            rt_memcpy(oob, chip->oob_poi + boffs, bytes);
            oob += bytes;
        }

        return oob;
    }
    }

    return NULL;
}

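/* Read one page of data (and the OOB area if requested) without ECC. */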
static int nand_read_page_raw(rt_nand_t *chip, uint8_t *buf, int oob_required, int page)
{
    chip->ops->read_buf(chip, buf, chip->page_size);

    if (oob_required)
        chip->ops->read_buf(chip, chip->oob_poi, chip->oobsize);

    return 0;
}

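/* Write one page of data (and the OOB area if requested) without ECC. */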
static int nand_write_page_raw(rt_nand_t *chip, const uint8_t *buf, int oob_required, int page)
{
    chip->ops->write_buf(chip, buf, chip->page_size);

    if (oob_required)
        chip->ops->write_buf(chip, chip->oob_poi, chip->oobsize);

    return 0;
}

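/*
 * Write one page with hardware ECC: program the data in ECC-step-sized
 * chunks with the controller's ECC engine enabled, collect the calculated
 * ECC bytes into the ECC region of chip->oob_poi, then write the OOB area.
 */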
static int nand_write_page_hwecc(rt_nand_t *chip, const uint8_t *buf, int oob_required, int page)
{
    uint16_t i;
    uint16_t stepsize = chip->ecc.stepsize;
    uint16_t eccbytes = chip->ecc.bytes;
    uint16_t eccsteps = chip->ecc._step;
    uint16_t eccpos = chip->ecc.layout->offset;
    uint8_t *ecc_calc = chip->buffers.ecccalc;
    const uint8_t *p = buf;

    for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += stepsize)
    {
        chip->ops->cmdfunc(chip, NAND_CMD_ECC_EN, 0, 0);
        chip->ops->write_buf(chip, p, stepsize);
        chip->ecc.calculate(chip, p, &ecc_calc[i]);
        chip->ops->cmdfunc(chip, NAND_CMD_ECC_DIS, 0, 0);
    }

    rt_memcpy(&chip->oob_poi[eccpos], ecc_calc, chip->ecc.layout->length);

    chip->ops->write_buf(chip, chip->oob_poi, chip->oobsize);

    return 0;
}

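/*
 * Read one page with hardware ECC: read the data in ECC-step-sized chunks
 * with the ECC engine enabled, read the stored ECC bytes from the OOB area,
 * then run chip->ecc.correct() on every step. Returns 0 on success or -1
 * if any step could not be corrected.
 */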
static int nand_read_page_hwecc(rt_nand_t *chip, uint8_t *buf, int oob_required, int page)
{
    uint16_t i;
    uint16_t eccsize = chip->ecc.stepsize;
    uint16_t eccbytes = chip->ecc.bytes;
    uint16_t eccsteps = chip->ecc._step;
    uint16_t eccpos = chip->ecc.layout->offset;
    uint8_t *p = buf;
    uint8_t *ecc_calc = chip->buffers.ecccalc;
    uint8_t *ecc_code = chip->buffers.ecccode;
    int ret = 0;

    for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize)
    {
        chip->ops->cmdfunc(chip, NAND_CMD_ECC_EN, 0, 0);
        chip->ops->read_buf(chip, p, eccsize);
        chip->ecc.calculate(chip, p, &ecc_calc[i]);
        chip->ops->cmdfunc(chip, NAND_CMD_ECC_DIS, 0, 0);
    }

    chip->ops->read_buf(chip, chip->oob_poi, chip->oobsize);
    rt_memcpy(ecc_code, &chip->oob_poi[eccpos], chip->ecc.layout->length);

    eccsteps = chip->ecc._step;
    p = buf;

    for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize)
    {
        int stat;

        stat = chip->ecc.correct(chip, p, &ecc_code[i], &ecc_calc[i]);
        if (stat != 0)
            ret = -1;
    }

    return ret;
}

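/*
 * Program one page: issue the program setup command, write the page either
 * raw or through the ECC-aware write_page callback, then issue the program
 * confirm command and return its status.
 */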
static int nand_write_page(rt_nand_t *chip, const uint8_t *buf,
                           int oob_required, int page, int raw)
{
    int status;

    chip->ops->cmdfunc(chip, NAND_CMD_PAGE_WR0, page, 0x00);

    if (raw)
    {
        nand_write_page_raw(chip, buf, oob_required, page);
    }
    else
    {
        chip->write_page(chip, buf, oob_required, page);
    }

    status = chip->ops->cmdfunc(chip, NAND_CMD_PAGE_WR1, -1, -1);

    return status;
}

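/*
 * Read data (and optionally OOB) from page-aligned offset 'from' as
 * described by desc, page by page. An unaligned tail length is read into
 * an internally allocated page buffer and only the requested bytes are
 * copied out. Returns 0 on success, -EINVAL for an unaligned offset,
 * -ENOMEM if the bounce buffer cannot be allocated, or -EBADMSG on an
 * uncorrectable ECC error.
 */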
static int nand_do_read_desc(rt_nand_t *chip, loff_t from, struct mtd_io_desc *desc)
{
    int page, bytes;
    char oob_required;
    char ecc_fail = 0;
    int ret = 0;
    uint32_t readlen = desc->datlen;
    uint16_t oobreadlen = desc->ooblen;
    uint16_t max_oobsize = desc->mode == MTD_OPM_AUTO_OOB ?
                           chip->freelayout->length : chip->oobsize;

    uint8_t *oob, *buf, *notalign = 0;

    /* Reject reads that are not page aligned */
    if (NOTALIGNED(from))
    {
        return -EINVAL;
    }

    buf = desc->datbuf;
    if (NOTALIGNED(desc->datlen) && !chip->pagebuf)
    {
        chip->pagebuf = rt_malloc(chip->page_size);
        if (!chip->pagebuf)
            return -ENOMEM;
    }

    page = (int)(from / chip->page_size);

    oob = desc->oobbuf;
    oob_required = oob ? 1 : 0;

    while (1)
    {
        bytes = min(chip->page_size, readlen);

        chip->ops->cmdfunc(chip, NAND_CMD_PAGE_RD, page, 0x00);
        if (NOTALIGNED(bytes))
        {
            /* partial page: read into the bounce buffer first */
            notalign = buf;
            buf = chip->pagebuf;
        }
        /*
         * Now read the page into the buffer. The read methods return 0 on
         * success or a non-zero value on an uncorrectable ECC error.
         */
        if (desc->mode == MTD_OPM_RAW)
        {
            ret = nand_read_page_raw(chip, buf, oob_required, page);
        }
        else
        {
            ret = chip->read_page(chip, buf, oob_required, page);
        }

        if (ret != 0)
        {
            ret = -EBADMSG;
            break;
        }

        if (oob)
        {
            int toread = min(oobreadlen, max_oobsize);

            if (toread)
            {
                oob = nand_transfer_oob(chip, oob, desc, toread);
                oobreadlen -= toread;
            }
        }

        if (notalign)
        {
            /* copy only the requested tail bytes back to the caller */
            rt_memcpy(notalign, buf, bytes);
        }

        buf += bytes;
        readlen -= bytes;

        if (!readlen)
            break;

        page++;
    }

    desc->datretlen = desc->datlen - (size_t)readlen;
    if (oob)
        desc->oobretlen = desc->ooblen - oobreadlen;

    return ret;
}

/*
 * Write data (and optionally OOB) with ECC to page-aligned offset 'to' as
 * described by desc, page by page. An unaligned tail length is padded with
 * 0xff in a temporary page buffer before programming.
 */
static int nand_do_write_desc(rt_nand_t *chip, loff_t to, struct mtd_io_desc *desc)
{
    int page;
    uint16_t writelen = desc->datlen;
    uint16_t oob_required = desc->oobbuf ? 1 : 0;
    uint16_t oobwritelen = desc->ooblen;
    uint16_t oobmaxlen = desc->mode == MTD_OPM_AUTO_OOB ?
                         chip->freelayout->length : chip->oobsize;

    uint8_t *oob = desc->oobbuf;
    uint8_t *buf = desc->datbuf;
    int ret;

    if (!writelen)
        return 0;

    /* Reject writes that are not page aligned */
    if (NOTALIGNED(to))
    {
        return -EINVAL;
    }

    page = (int)(to / chip->page_size);

    /* Don't allow multipage oob writes with offset */
    if (oob && desc->ooboffs && (desc->ooboffs + desc->ooblen > oobmaxlen))
    {
        ret = -EINVAL;
        goto err_out;
    }

    if (NOTALIGNED(desc->datlen) && !chip->pagebuf)
    {
        chip->pagebuf = rt_malloc(chip->page_size);
        if (!chip->pagebuf)
            return -ENOMEM;
    }

    while (1)
    {
        uint16_t bytes = min(chip->page_size, writelen);

        if (oob)
        {
            size_t len = min(oobwritelen, oobmaxlen);
            oob = nand_fill_oob(chip, oob, len, desc);
            oobwritelen -= len;
        }
        else
        {
            /* We still need to erase leftover OOB data */
            rt_memset(chip->oob_poi, 0xff, chip->oobsize);
        }

        if (NOTALIGNED(bytes))
        {
            /* partial page: pad the bounce buffer with 0xff */
            uint8_t *dbtmp = buf;
            buf = chip->pagebuf;
            rt_memset(&buf[bytes], 0xff, chip->page_size - bytes);
            rt_memcpy(buf, dbtmp, bytes);
        }
        ret = nand_write_page(chip, buf, oob_required, page, (desc->mode == MTD_OPM_RAW));
        if (ret)
            break;

        writelen -= bytes;
        if (!writelen)
            break;

        buf += bytes;
        page++;
    }

    desc->datretlen = desc->datlen - writelen;
    if (oob)
        desc->oobretlen = desc->ooblen;

err_out:

    return ret;
}

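/* Read the whole OOB area of a page into chip->oob_poi. */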
static int nand_read_oob_std(rt_nand_t *chip, int page)
{
    chip->ops->cmdfunc(chip, NAND_CMD_PAGE_RD, page, chip->page_size);
    chip->ops->read_buf(chip, chip->oob_poi, chip->oobsize);

    return 0;
}

/*
 * read one page of OOB
 */
static int nand_only_read_oob(rt_nand_t *chip, loff_t from, struct mtd_io_desc *desc)
{
    int page;
    int readlen = desc->ooblen;
    int len;
    uint8_t *buf = desc->oobbuf;
    int ret = 0;

    if (desc->mode == MTD_OPM_AUTO_OOB)
        len = chip->freelayout->length;
    else
        len = chip->oobsize;

    if (desc->ooboffs >= len) /* attempt to start read outside oob */
    {
        return -EINVAL;
    }

    page = (int)(from / chip->page_size);

    ret = nand_read_oob_std(chip, page);
    if (ret == 0)
    {
        len = min(len, readlen);
        buf = nand_transfer_oob(chip, buf, desc, len);
        desc->oobretlen = len;
    }

    return ret;
}

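/* Program chip->oob_poi into the OOB area of a page; returns -EIO on failure. */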
static int nand_write_oob_std(rt_nand_t *chip, int page)
{
    int status;

    chip->ops->cmdfunc(chip, NAND_CMD_PAGE_WR0, page, chip->page_size);
    chip->ops->write_buf(chip, chip->oob_poi, chip->oobsize);
    /* Send command to program the OOB data */
    status = chip->ops->cmdfunc(chip, NAND_CMD_PAGE_WR1, -1, -1);

    return (status & NAND_STATUS_FAIL) ? -EIO : 0;
}

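/*
 * Write only the OOB area of one page. The request must fit inside the
 * OOB space available for desc->mode and must not be empty.
 */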
static int nand_only_write_oob(rt_nand_t *chip, loff_t to, struct mtd_io_desc *desc)
{
    int page, ret, len;

    if (desc->mode == MTD_OPM_AUTO_OOB)
        len = chip->freelayout->length;
    else
        len = chip->oobsize;

    /* Do not allow write past end of page */
    if ((desc->ooboffs + desc->ooblen) > len)
    {
        return -EINVAL;
    }

    if (desc->ooblen == 0)
    {
        return -EINVAL;
    }

    /* get page */
    page = (int)(to / chip->page_size);

    nand_fill_oob(chip, desc->oobbuf, desc->ooblen, desc);

    ret = nand_write_oob_std(chip, page);
    if (ret == 0)
        desc->oobretlen = len;

    return ret;
}

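/*
 * Erase whole blocks starting at 'addr', one erase command per block.
 * Stops at the first failure and returns the number of bytes left
 * unerased (0 means complete success).
 */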
static int nand_erase(rt_mtd_t *mtd, loff_t addr, size_t size)
{
    rt_nand_t *chip;
    int status;
    int page;
    uint32_t blksize;

    chip = MTDTONAND(mtd);
    blksize = mtd->block_size;
    page = addr / chip->page_size;

    while (size >= blksize)
    {
        status = chip->ops->cmdfunc(chip, NAND_CMD_BLK_ERASE, page, 0);
        if (status & NAND_STATUS_FAIL)
        {
            break;
        }
        size -= blksize;
        page += chip->pages_pb;
    }

    return size;
}

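/*
 * MTD read entry point: dispatch to an OOB-only read or a data(+OOB) read
 * depending on whether a data buffer was supplied. Unknown OOB modes are
 * rejected with -ENOTSUP.
 */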
static int nand_read(rt_mtd_t *mtd, loff_t from, struct mtd_io_desc *desc)
{
    int ret = -ENOTSUP;
    rt_nand_t *chip;

    chip = MTDTONAND(mtd);

    switch (desc->mode)
    {
    case MTD_OPM_PLACE_OOB:
    case MTD_OPM_AUTO_OOB:
    case MTD_OPM_RAW:
        break;

    default:
        goto out;
    }

    if (!desc->datbuf || !desc->datlen)
        ret = nand_only_read_oob(chip, from, desc);
    else
        ret = nand_do_read_desc(chip, from, desc);

out:

    return ret;
}

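/*
 * MTD write entry point: dispatch to an OOB-only write or a data(+OOB)
 * write depending on whether a data buffer was supplied. Unknown OOB modes
 * are rejected with -ENOTSUP.
 */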
static int nand_write(rt_mtd_t *mtd, loff_t to, struct mtd_io_desc *desc)
{
    int ret = -ENOTSUP;
    rt_nand_t *chip;

    chip = MTDTONAND(mtd);

    switch (desc->mode)
    {
    case MTD_OPM_PLACE_OOB:
    case MTD_OPM_AUTO_OOB:
    case MTD_OPM_RAW:
        break;

    default:
        goto out;
    }

    if (!desc->datbuf || !desc->datlen)
        ret = nand_only_write_oob(chip, to, desc);
    else
        ret = nand_do_write_desc(chip, to, desc);

out:

    return ret;
}

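/*
 * Check a block for the bad-block marker. Uses the driver's isbad() hook
 * when provided; otherwise any value other than 0xFF in the first OOB byte
 * of the block's first page marks the block as bad.
 */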
static int nand_block_isbad(rt_mtd_t *mtd, uint32_t blk)
{
    int ret;
    rt_nand_t *chip = MTDTONAND(mtd);

    if (chip->ops->isbad)
    {
        ret = chip->ops->isbad(chip, blk);
    }
    else
    {
        int page;

        page = blk * chip->pages_pb;
        nand_read_oob_std(chip, page);
        ret = chip->oob_poi[0] != 0xFF;
    }

    return ret;
}

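/*
 * Mark a block bad. Uses the driver's markbad() hook when provided;
 * otherwise writes 0x00 into the first OOB byte of the block's first page.
 */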
static int nand_block_markbad(rt_mtd_t *mtd, uint32_t blk)
{
    int ret;
    rt_nand_t *chip;

    chip = MTDTONAND(mtd);

    if (chip->ops->markbad)
    {
        ret = chip->ops->markbad(chip, blk);
    }
    else
    {
        int page;

        page = blk * chip->pages_pb;
        rt_memset(chip->oob_poi, 0xff, chip->oobsize);
        chip->oob_poi[0] = 0;
        ret = nand_write_oob_std(chip, page);
    }

    return ret;
}

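/* Common MTD operations installed by rt_mtd_nand_init(). */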
static const struct mtd_ops _ops =
{
    nand_erase,
    nand_read,
    nand_write,
    nand_block_isbad,
    nand_block_markbad,
};

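/*
 * Initialize the generic NAND MTD layer for one chip: allocate the OOB
 * shadow buffer plus the two ECC work buffers from a single allocation,
 * fill in the geometry fields, install the common MTD ops, and select the
 * raw or hardware-ECC page accessors from nand->ecc.mode.
 * Returns 0 on success, -ENOMEM on allocation failure, or -1 for an
 * unsupported ECC mode.
 *
 * A minimal driver-side sketch (my_nand_ops, my_free_layout, my_ecc_* and
 * the geometry values below are illustrative assumptions, not part of this
 * file):
 *
 *     static rt_nand_t nand;
 *
 *     nand.ops           = &my_nand_ops;      // driver command/IO hooks
 *     nand.freelayout    = &my_free_layout;   // free OOB bytes for AUTO_OOB
 *     nand.ecc.mode      = NAND_ECCM_HW;
 *     nand.ecc.stepsize  = 512;               // bytes covered per ECC step
 *     nand.ecc.bytes     = 3;                 // ECC bytes produced per step
 *     nand.ecc.layout    = &my_ecc_layout;    // where ECC lives in the OOB
 *     nand.ecc.calculate = my_ecc_calculate;
 *     nand.ecc.correct   = my_ecc_correct;
 *
 *     rt_mtd_nand_init(&nand, 64 * 2048, 2048, 64); // 128 KiB block, 2 KiB page, 64 B OOB
 */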
int rt_mtd_nand_init(rt_nand_t *nand, int blk_size, int page_size, int oob_size)
{
    uint8_t *buf;

    buf = rt_malloc(oob_size * 3);
    if (buf == RT_NULL)
        return -ENOMEM;

    nand->oob_poi = buf;
    buf += oob_size;
    nand->buffers.ecccalc = buf;
    buf += oob_size;
    nand->buffers.ecccode = buf;
    nand->pagebuf = 0; /* allocated on first unaligned access */

    nand->pages_pb = blk_size / page_size;
    nand->ecc._step = page_size / nand->ecc.stepsize;
    nand->page_size = page_size;
    nand->oobsize = oob_size;

    nand->parent.type = MTD_TYPE_NAND;
    nand->parent.ops = &_ops;
    nand->parent.sector_size = page_size;
    nand->parent.block_size = blk_size;
    nand->parent.oob_size = oob_size;

    switch (nand->ecc.mode)
    {
    case NAND_ECCM_NONE:
    {
        nand->read_page = nand_read_page_raw;
        nand->write_page = nand_write_page_raw;
    } break;
    case NAND_ECCM_HW:
    {
        nand->read_page = nand_read_page_hwecc;
        nand->write_page = nand_write_page_hwecc;
    } break;
    default:
    {
        rt_free(nand->oob_poi); /* oob_poi points at the start of the allocation */
        return -1;
    }
    }

    return 0;
}
