Lines matching +full:tx +full:-crci (excerpts from the Qualcomm QPIC NAND common DMA helpers; elided code is marked with "...")

// SPDX-License-Identifier: GPL-2.0-only
...
#include <linux/dma-mapping.h>
...
#include <linux/mtd/nand-qpic-common.h>
/**
 * qcom_free_bam_transaction() - Frees the BAM transaction memory
 * ...
 */
	struct bam_transaction *bam_txn = nandc->bam_txn;
	...
/**
 * qcom_alloc_bam_transaction() - allocate BAM transaction
 * ...
 */
	unsigned int num_cw = nandc->max_cwperpage;
	...
		((sizeof(*bam_txn->bam_ce) * QPIC_PER_CW_CMD_ELEMENTS) +
		 (sizeof(*bam_txn->cmd_sgl) * QPIC_PER_CW_CMD_SGL) +
		 (sizeof(*bam_txn->data_sgl) * QPIC_PER_CW_DATA_SGL));
	...
	/* carve the single allocation into the three per-codeword arrays */
	bam_txn->bam_ce = bam_txn_buf;
	bam_txn_buf +=
		sizeof(*bam_txn->bam_ce) * QPIC_PER_CW_CMD_ELEMENTS * num_cw;

	bam_txn->cmd_sgl = bam_txn_buf;
	bam_txn_buf +=
		sizeof(*bam_txn->cmd_sgl) * QPIC_PER_CW_CMD_SGL * num_cw;

	bam_txn->data_sgl = bam_txn_buf;
	...
	init_completion(&bam_txn->txn_done);
/**
 * qcom_clear_bam_transaction() - Clears the BAM transaction
 * ...
 */
	struct bam_transaction *bam_txn = nandc->bam_txn;

	if (!nandc->props->supports_bam)
		return;

	memset(&bam_txn->bam_positions, 0, sizeof(bam_txn->bam_positions));
	bam_txn->last_data_desc = NULL;

	sg_init_table(bam_txn->cmd_sgl, nandc->max_cwperpage *
		      QPIC_PER_CW_CMD_SGL);
	sg_init_table(bam_txn->data_sgl, nandc->max_cwperpage *
		      QPIC_PER_CW_DATA_SGL);

	reinit_completion(&bam_txn->txn_done);
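
Taken together, the three helpers above form the BAM transaction lifecycle: size and allocate once the codeword count is known, clear before every operation, free on teardown or resize. A minimal caller sketch (hypothetical code, not part of this file; the cwperpage bookkeeping is an assumption borrowed from the NAND driver):

	/* grow the transaction to the largest page geometry seen so far */
	nandc->max_cwperpage = max_t(unsigned int, nandc->max_cwperpage,
				     cwperpage);
	nandc->bam_txn = qcom_alloc_bam_transaction(nandc);
	if (!nandc->bam_txn)
		return -ENOMEM;

	/* before each NAND operation */
	qcom_clear_bam_transaction(nandc);

	/* on teardown, or before reallocating for a larger geometry */
	qcom_free_bam_transaction(nandc);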
/**
 * qcom_qpic_bam_dma_done() - Callback for DMA descriptor completion
 * ...
 */
	complete(&bam_txn->txn_done);
/**
 * qcom_nandc_dev_to_mem() - sync the register read buffer for CPU or device access
 * ...
 */
	if (!nandc->props->supports_bam)
		return;

	if (is_cpu)
		dma_sync_single_for_cpu(nandc->dev, nandc->reg_read_dma,
					MAX_REG_RD *
					sizeof(*nandc->reg_read_buf),
					DMA_FROM_DEVICE);
	else
		dma_sync_single_for_device(nandc->dev, nandc->reg_read_dma,
					   MAX_REG_RD *
					   sizeof(*nandc->reg_read_buf),
					   DMA_FROM_DEVICE);
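
The is_cpu flag decides the sync direction. A hedged sketch of the read path: once the DMA descriptors have completed, the register buffer must be handed back to the CPU before parsing it (flash_status and the placement of the submit call are illustrative assumptions):

	ret = qcom_submit_descs(nandc);	/* defined further below */
	if (ret)
		return ret;

	/* give the CPU a coherent view of reg_read_buf before reading it */
	qcom_nandc_dev_to_mem(nandc, true);
	flash_status = le32_to_cpu(nandc->reg_read_buf[0]);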
/**
 * qcom_prepare_bam_async_desc() - Prepare DMA descriptor
 * ...
 */
	struct bam_transaction *bam_txn = nandc->bam_txn;
	...
	if (!desc)
		return -ENOMEM;

	if (chan == nandc->cmd_chan) {
		sgl = &bam_txn->cmd_sgl[bam_txn->cmd_sgl_start];
		sgl_cnt = bam_txn->cmd_sgl_pos - bam_txn->cmd_sgl_start;
		bam_txn->cmd_sgl_start = bam_txn->cmd_sgl_pos;
		...
		desc->dir = DMA_TO_DEVICE;
	} else if (chan == nandc->tx_chan) {
		sgl = &bam_txn->data_sgl[bam_txn->tx_sgl_start];
		sgl_cnt = bam_txn->tx_sgl_pos - bam_txn->tx_sgl_start;
		bam_txn->tx_sgl_start = bam_txn->tx_sgl_pos;
		...
		desc->dir = DMA_TO_DEVICE;
	} else {
		sgl = &bam_txn->data_sgl[bam_txn->rx_sgl_start];
		sgl_cnt = bam_txn->rx_sgl_pos - bam_txn->rx_sgl_start;
		bam_txn->rx_sgl_start = bam_txn->rx_sgl_pos;
		...
		desc->dir = DMA_FROM_DEVICE;
	}

	sg_mark_end(sgl + sgl_cnt - 1);
	ret = dma_map_sg(nandc->dev, sgl, sgl_cnt, desc->dir);
	if (!ret) {
		dev_err(nandc->dev, "failure in mapping desc\n");
		...
		return -ENOMEM;
	}

	desc->sgl_cnt = sgl_cnt;
	desc->bam_sgl = sgl;
	...
	if (!dma_desc) {
		dev_err(nandc->dev, "failure in prep desc\n");
		dma_unmap_sg(nandc->dev, sgl, sgl_cnt, desc->dir);
		...
		return -EINVAL;
	}

	desc->dma_desc = dma_desc;

	/* remember the last descriptor per type for callback wiring */
	if (chan == nandc->cmd_chan)
		bam_txn->last_cmd_desc = dma_desc;
	else
		bam_txn->last_data_desc = dma_desc;

	list_add_tail(&desc->node, &nandc->desc_list);
/**
 * qcom_prep_bam_dma_desc_cmd() - Prepares the command descriptor for BAM DMA
 * ...
 */
	struct bam_transaction *bam_txn = nandc->bam_txn;

	bam_ce_buffer = &bam_txn->bam_ce[bam_txn->bam_ce_pos];
	...
	bam_txn->bam_ce_pos += size;

	/* when flags ask for a new SGL entry, close out the accumulated CEs */
	...
		bam_ce_buffer = &bam_txn->bam_ce[bam_txn->bam_ce_start];
		bam_ce_size = (bam_txn->bam_ce_pos -
			       bam_txn->bam_ce_start) *
			       sizeof(struct bam_cmd_element);
		sg_set_buf(&bam_txn->cmd_sgl[bam_txn->cmd_sgl_pos],
			   bam_ce_buffer, bam_ce_size);
		bam_txn->cmd_sgl_pos++;
		bam_txn->bam_ce_start = bam_txn->bam_ce_pos;
	...
		ret = qcom_prepare_bam_async_desc(nandc, nandc->cmd_chan,
						  ...);
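
The effect of the packing scheme, sketched from the caller's side (hypothetical; the register names, the nandc->regs staging fields, their adjacency, and the exact NAND_BAM_NEXT_SGL semantics are assumptions based on the QPIC NAND driver): back-to-back register writes accumulate command elements, and the flag closes them into one command scatterlist entry:

	/* both writes are packed into the same command SGL entry ... */
	qcom_write_reg_dma(nandc, &nandc->regs->addr0, NAND_ADDR0, 2, 0);
	/* ... which NAND_BAM_NEXT_SGL then finalizes */
	qcom_write_reg_dma(nandc, &nandc->regs->cfg0, NAND_DEV0_CFG0, 3,
			   NAND_BAM_NEXT_SGL);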
/**
 * qcom_prep_bam_dma_desc_data() - Prepares the data descriptor for BAM DMA
 * ...
 */
	struct bam_transaction *bam_txn = nandc->bam_txn;

	if (read) {
		sg_set_buf(&bam_txn->data_sgl[bam_txn->rx_sgl_pos],
			   vaddr, size);
		bam_txn->rx_sgl_pos++;
	} else {
		sg_set_buf(&bam_txn->data_sgl[bam_txn->tx_sgl_pos],
			   vaddr, size);
		bam_txn->tx_sgl_pos++;
		...
		ret = qcom_prepare_bam_async_desc(nandc, nandc->tx_chan,
						  ...);
	}
/**
 * qcom_prep_adm_dma_desc() - Prepare a descriptor for ADM DMA
 * ...
 */
	...
		return -ENOMEM;

	sgl = &desc->adm_sgl;
	...
	if (read) {
		...
		desc->dir = DMA_FROM_DEVICE;
	} else {
		...
		desc->dir = DMA_TO_DEVICE;
	}

	ret = dma_map_sg(nandc->dev, sgl, 1, desc->dir);
	if (!ret) {
		ret = -ENOMEM;
		goto err;
	}
	...
	if (read) {
		...
		slave_conf.src_addr = nandc->base_dma + reg_off;
		if (nandc->data_crci) {
			periph_conf.crci = nandc->data_crci;
			...
		}
	} else {
		...
		slave_conf.dst_addr = nandc->base_dma + reg_off;
		if (nandc->cmd_crci) {
			periph_conf.crci = nandc->cmd_crci;
			...
		}
	}

	ret = dmaengine_slave_config(nandc->chan, &slave_conf);
	if (ret) {
		dev_err(nandc->dev, "failed to configure dma channel\n");
		goto err;
	}

	dma_desc = dmaengine_prep_slave_sg(nandc->chan, sgl, 1, dir_eng, 0);
	if (!dma_desc) {
		dev_err(nandc->dev, "failed to prepare desc\n");
		ret = -EINVAL;
		goto err;
	}

	desc->dma_desc = dma_desc;

	list_add_tail(&desc->node, &nandc->desc_list);
/**
 * qcom_read_reg_dma() - read a given number of registers to the reg_read_buf pointer
 * ...
 */
	vaddr = nandc->reg_read_buf + nandc->reg_read_pos;
	nandc->reg_read_pos += num_regs;
	...
	if (nandc->props->supports_bam)
	...
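
The elided body dispatches between the two preparation helpers above. Roughly (a sketch, not the verbatim source; the exact argument lists and the ADM flow-control rule are assumptions):

	if (nandc->props->supports_bam)
		return qcom_prep_bam_dma_desc_cmd(nandc, true, first, vaddr,
						  num_regs, flags);

	/* flow_control: assumed true for CRCI-paced register reads on ADM */
	return qcom_prep_adm_dma_desc(nandc, true, first, vaddr,
				      num_regs * sizeof(u32), flow_control);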
/**
 * qcom_write_reg_dma() - write a given number of registers
 * ...
 */
	...
	if (nandc->props->supports_bam)
	...
/**
 * qcom_read_data_dma() - transfer data from the controller into memory
 * ...
 */
	if (nandc->props->supports_bam)
	...
/**
 * qcom_write_data_dma() - transfer data from memory to the controller
 * ...
 */
	if (nandc->props->supports_bam)
	...
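
Putting the four wrappers together, a controller operation is staged as register writes, register reads, and data transfers, then executed in one shot by qcom_submit_descs() below. A hypothetical single-codeword read sketch (register names, flag usage, and the nandc->regs fields are assumptions modeled on the QPIC NAND driver):

	qcom_clear_read_regs(nandc);
	qcom_clear_bam_transaction(nandc);

	/* stage command/address registers prepared in nandc->regs */
	qcom_write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, 3,
			   NAND_BAM_NEXT_SGL);
	qcom_write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1,
			   NAND_BAM_NEXT_SGL);

	/* status lands in reg_read_buf, payload in the data buffer */
	qcom_read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
	qcom_read_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer, 512, 0);

	ret = qcom_submit_descs(nandc);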
/**
 * qcom_submit_descs() - submit the prepared DMA descriptors
 * ...
 */
	struct bam_transaction *bam_txn = nandc->bam_txn;
	...
	if (nandc->props->supports_bam) {
		/* flush SGL entries not yet turned into descriptors */
		if (bam_txn->rx_sgl_pos > bam_txn->rx_sgl_start) {
			ret = qcom_prepare_bam_async_desc(nandc, nandc->rx_chan, 0);
			...
		}

		if (bam_txn->tx_sgl_pos > bam_txn->tx_sgl_start) {
			ret = qcom_prepare_bam_async_desc(nandc, nandc->tx_chan,
							  ...);
			...
		}

		if (bam_txn->cmd_sgl_pos > bam_txn->cmd_sgl_start) {
			ret = qcom_prepare_bam_async_desc(nandc, nandc->cmd_chan,
							  ...);
			...
		}
	}

	list_for_each_entry(desc, &nandc->desc_list, node)
		cookie = dmaengine_submit(desc->dma_desc);

	if (nandc->props->supports_bam) {
		/* completion is signalled from the last command descriptor */
		bam_txn->last_cmd_desc->callback = qcom_qpic_bam_dma_done;
		bam_txn->last_cmd_desc->callback_param = bam_txn;

		dma_async_issue_pending(nandc->tx_chan);
		dma_async_issue_pending(nandc->rx_chan);
		dma_async_issue_pending(nandc->cmd_chan);

		if (!wait_for_completion_timeout(&bam_txn->txn_done,
						 ...))
			ret = -ETIMEDOUT;
	} else {
		if (dma_sync_wait(nandc->chan, cookie) != DMA_COMPLETE)
			ret = -ETIMEDOUT;
	}
	...
	list_for_each_entry_safe(desc, n, &nandc->desc_list, node) {
		list_del(&desc->node);

		if (nandc->props->supports_bam)
			dma_unmap_sg(nandc->dev, desc->bam_sgl,
				     desc->sgl_cnt, desc->dir);
		else
			dma_unmap_sg(nandc->dev, &desc->adm_sgl, 1,
				     desc->dir);
		...
	}
/**
 * qcom_clear_read_regs() - reset the read register buffer
 * ...
 */
	nandc->reg_read_pos = 0;
	...
/**
 * qcom_nandc_unalloc() - release the QPIC NAND controller's DMA resources
 * ...
 */
	if (nandc->props->supports_bam) {
		if (!dma_mapping_error(nandc->dev, nandc->reg_read_dma))
			dma_unmap_single(nandc->dev, nandc->reg_read_dma,
					 MAX_REG_RD *
					 sizeof(*nandc->reg_read_buf),
					 DMA_FROM_DEVICE);

		if (nandc->tx_chan)
			dma_release_channel(nandc->tx_chan);

		if (nandc->rx_chan)
			dma_release_channel(nandc->rx_chan);

		if (nandc->cmd_chan)
			dma_release_channel(nandc->cmd_chan);
	} else {
		if (nandc->chan)
			dma_release_channel(nandc->chan);
	}
/**
 * qcom_nandc_alloc() - Allocate QPIC NAND controller resources
 * ...
 */
	ret = dma_set_coherent_mask(nandc->dev, DMA_BIT_MASK(32));
	if (ret) {
		dev_err(nandc->dev, "failed to set DMA mask\n");
		return ret;
	}

	/*
	 * ...
	 * data like ID and status, and performing read-copy-write operations
	 * ...
	 */
	nandc->buf_size = 532;

	nandc->data_buffer = devm_kzalloc(nandc->dev, nandc->buf_size, GFP_KERNEL);
	if (!nandc->data_buffer)
		return -ENOMEM;

	nandc->regs = devm_kzalloc(nandc->dev, sizeof(*nandc->regs), GFP_KERNEL);
	if (!nandc->regs)
		return -ENOMEM;

	nandc->reg_read_buf = devm_kcalloc(nandc->dev, MAX_REG_RD,
					   sizeof(*nandc->reg_read_buf),
					   GFP_KERNEL);
	if (!nandc->reg_read_buf)
		return -ENOMEM;

	if (nandc->props->supports_bam) {
		nandc->reg_read_dma =
			dma_map_single(nandc->dev, nandc->reg_read_buf,
				       MAX_REG_RD *
				       sizeof(*nandc->reg_read_buf),
				       DMA_FROM_DEVICE);
		if (dma_mapping_error(nandc->dev, nandc->reg_read_dma)) {
			dev_err(nandc->dev, "failed to DMA MAP reg buffer\n");
			return -EIO;
		}

		nandc->tx_chan = dma_request_chan(nandc->dev, "tx");
		if (IS_ERR(nandc->tx_chan)) {
			ret = PTR_ERR(nandc->tx_chan);
			nandc->tx_chan = NULL;
			dev_err_probe(nandc->dev, ret,
				      "tx DMA channel request failed\n");
			goto unalloc;
		}

		nandc->rx_chan = dma_request_chan(nandc->dev, "rx");
		if (IS_ERR(nandc->rx_chan)) {
			ret = PTR_ERR(nandc->rx_chan);
			nandc->rx_chan = NULL;
			dev_err_probe(nandc->dev, ret,
				      "rx DMA channel request failed\n");
			goto unalloc;
		}

		nandc->cmd_chan = dma_request_chan(nandc->dev, "cmd");
		if (IS_ERR(nandc->cmd_chan)) {
			ret = PTR_ERR(nandc->cmd_chan);
			nandc->cmd_chan = NULL;
			dev_err_probe(nandc->dev, ret,
				      "cmd DMA channel request failed\n");
			goto unalloc;
		}
		...
		nandc->max_cwperpage = 1;
		nandc->bam_txn = qcom_alloc_bam_transaction(nandc);
		if (!nandc->bam_txn) {
			dev_err(nandc->dev,
				"failed to allocate bam transaction\n");
			ret = -ENOMEM;
			goto unalloc;
		}
	} else {
		nandc->chan = dma_request_chan(nandc->dev, "rxtx");
		if (IS_ERR(nandc->chan)) {
			ret = PTR_ERR(nandc->chan);
			nandc->chan = NULL;
			dev_err_probe(nandc->dev, ret,
				      "rxtx DMA channel request failed\n");
			goto unalloc;
		}
	}

	INIT_LIST_HEAD(&nandc->desc_list);
	INIT_LIST_HEAD(&nandc->host_list);
	...
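
A probe-time usage sketch (hypothetical; example_probe() and example_setup_hosts() are invented stand-ins): qcom_nandc_alloc() is paired with qcom_nandc_unalloc() on every failure and teardown path, and unalloc tolerates partially initialized state thanks to the NULL and mapping-error checks above:

	static int example_probe(struct qcom_nand_controller *nandc)
	{
		int ret;

		ret = qcom_nandc_alloc(nandc);
		if (ret)
			return ret;

		ret = example_setup_hosts(nandc);	/* hypothetical */
		if (ret)
			qcom_nandc_unalloc(nandc);

		return ret;
	}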