Lines matching "no", "-", "wp"

1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
9 #include <linux/dma-direction.h>
10 #include <linux/dma-mapping.h>
23 return mhi_cntrl->read_reg(mhi_cntrl, base + offset, out); in mhi_read_reg()
50 while (retry--) { in mhi_poll_reg_field()
61 return -ETIMEDOUT; in mhi_poll_reg_field()
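
mhi_poll_reg_field() above follows a bounded-retry poll pattern: re-read the register, test the field, and give up with -ETIMEDOUT once the retry budget runs out (the real helper also delays between attempts). A stand-alone sketch of that pattern, assuming illustrative demo_* names rather than the driver's register accessors:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/* Poll a condition up to 'retry' times; the callback stands in for reading a
 * register and comparing the field of interest. */
static int demo_poll(bool (*cond)(void *), void *ctx, int retry)
{
	while (retry--) {
		if (cond(ctx))
			return 0;
		/* the real helper delays between attempts */
	}
	return -ETIMEDOUT;
}

static bool demo_cond(void *ctx)
{
	int *reads_left = ctx;

	return --(*reads_left) == 0;	/* "field matches" after a few reads */
}

int main(void)
{
	int reads_left = 3;

	/* Succeeds (prints 0); with a smaller retry budget it would time out */
	printf("result: %d\n", demo_poll(demo_cond, &reads_left, 10));
	return 0;
}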
67 mhi_cntrl->write_reg(mhi_cntrl, base + offset, val); in mhi_write_reg()
100 if (db_cfg->db_mode) { in mhi_db_brstmode()
101 db_cfg->db_val = db_val; in mhi_db_brstmode()
103 db_cfg->db_mode = 0; in mhi_db_brstmode()
112 db_cfg->db_val = db_val; in mhi_db_brstmode_disable()
118 struct mhi_ring *ring = &mhi_event->ring; in mhi_ring_er_db()
120 mhi_event->db_cfg.process_db(mhi_event->mhi_cntrl, &mhi_event->db_cfg, in mhi_ring_er_db()
121 ring->db_addr, le64_to_cpu(*ring->ctxt_wp)); in mhi_ring_er_db()
127 struct mhi_ring *ring = &mhi_cmd->ring; in mhi_ring_cmd_db()
129 db = ring->iommu_base + (ring->wp - ring->base); in mhi_ring_cmd_db()
130 *ring->ctxt_wp = cpu_to_le64(db); in mhi_ring_cmd_db()
131 mhi_write_db(mhi_cntrl, ring->db_addr, db); in mhi_ring_cmd_db()
137 struct mhi_ring *ring = &mhi_chan->tre_ring; in mhi_ring_chan_db()
140 db = ring->iommu_base + (ring->wp - ring->base); in mhi_ring_chan_db()
147 *ring->ctxt_wp = cpu_to_le64(db); in mhi_ring_chan_db()
149 mhi_chan->db_cfg.process_db(mhi_cntrl, &mhi_chan->db_cfg, in mhi_ring_chan_db()
150 ring->db_addr, db); in mhi_ring_chan_db()
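
The doorbell helpers above (mhi_ring_cmd_db() and mhi_ring_chan_db()) compute the same value: the device-visible DMA address of the current write pointer, obtained by rebasing the host-virtual offset (wp - base) onto the ring's iommu_base, which is then stored in the shared ring context and written to the doorbell register. A minimal user-space sketch of just that arithmetic; struct demo_ring and demo_ring_db() are illustrative names, not the driver's:

#include <stdint.h>
#include <stdio.h>

struct demo_ring {
	uint64_t iommu_base;	/* DMA (device-visible) address of the ring */
	char *base;		/* host virtual address of the ring */
	char *wp;		/* host virtual write pointer */
};

/* Rebase the host write pointer onto the ring's DMA address space; this is
 * the value published to the shared context and the doorbell register. */
static uint64_t demo_ring_db(const struct demo_ring *ring)
{
	return ring->iommu_base + (uint64_t)(ring->wp - ring->base);
}

int main(void)
{
	char backing[16 * 16];	/* 16 elements of 16 bytes each */
	struct demo_ring ring = {
		.iommu_base = 0x80000000ULL,
		.base = backing,
		.wp = backing + 3 * 16,	/* write pointer at element 3 */
	};

	/* Prints 0x80000030: the ring offset rebased onto the DMA address */
	printf("doorbell value: 0x%llx\n",
	       (unsigned long long)demo_ring_db(&ring));
	return 0;
}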
156 int ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_EXECENV, &exec); in mhi_get_exec_env()
165 int ret = mhi_read_reg_field(mhi_cntrl, mhi_cntrl->regs, MHISTATUS, in mhi_get_mhi_state()
173 if (mhi_cntrl->reset) { in mhi_soc_reset()
174 mhi_cntrl->reset(mhi_cntrl); in mhi_soc_reset()
179 mhi_write_reg(mhi_cntrl, mhi_cntrl->regs, MHI_SOC_RESET_REQ_OFFSET, in mhi_soc_reset()
187 buf_info->p_addr = dma_map_single(mhi_cntrl->cntrl_dev, in mhi_map_single_no_bb()
188 buf_info->v_addr, buf_info->len, in mhi_map_single_no_bb()
189 buf_info->dir); in mhi_map_single_no_bb()
190 if (dma_mapping_error(mhi_cntrl->cntrl_dev, buf_info->p_addr)) in mhi_map_single_no_bb()
191 return -ENOMEM; in mhi_map_single_no_bb()
199 void *buf = dma_alloc_coherent(mhi_cntrl->cntrl_dev, buf_info->len, in mhi_map_single_use_bb()
200 &buf_info->p_addr, GFP_ATOMIC); in mhi_map_single_use_bb()
203 return -ENOMEM; in mhi_map_single_use_bb()
205 if (buf_info->dir == DMA_TO_DEVICE) in mhi_map_single_use_bb()
206 memcpy(buf, buf_info->v_addr, buf_info->len); in mhi_map_single_use_bb()
208 buf_info->bb_addr = buf; in mhi_map_single_use_bb()
216 dma_unmap_single(mhi_cntrl->cntrl_dev, buf_info->p_addr, buf_info->len, in mhi_unmap_single_no_bb()
217 buf_info->dir); in mhi_unmap_single_no_bb()
223 if (buf_info->dir == DMA_FROM_DEVICE) in mhi_unmap_single_use_bb()
224 memcpy(buf_info->v_addr, buf_info->bb_addr, buf_info->len); in mhi_unmap_single_use_bb()
226 dma_free_coherent(mhi_cntrl->cntrl_dev, buf_info->len, in mhi_unmap_single_use_bb()
227 buf_info->bb_addr, buf_info->p_addr); in mhi_unmap_single_use_bb()
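
mhi_map_single_use_bb() and mhi_unmap_single_use_bb() above implement a bounce buffer: a coherent DMA buffer is allocated, outbound (DMA_TO_DEVICE) data is copied into it before the transfer, and inbound (DMA_FROM_DEVICE) data is copied back to the client buffer afterwards. The sketch below models that copy discipline in plain C, with malloc()/free() standing in for dma_alloc_coherent()/dma_free_coherent(); all demo_* names are illustrative:

#include <stdlib.h>
#include <string.h>

enum demo_dir { DEMO_TO_DEVICE, DEMO_FROM_DEVICE };

struct demo_buf_info {
	void *v_addr;	/* client buffer */
	void *bb_addr;	/* bounce buffer handed to the "device" */
	size_t len;
	enum demo_dir dir;
};

static int demo_map_use_bb(struct demo_buf_info *b)
{
	b->bb_addr = malloc(b->len);	/* stands in for dma_alloc_coherent() */
	if (!b->bb_addr)
		return -1;
	if (b->dir == DEMO_TO_DEVICE)	/* outbound: stage the payload */
		memcpy(b->bb_addr, b->v_addr, b->len);
	return 0;
}

static void demo_unmap_use_bb(struct demo_buf_info *b)
{
	if (b->dir == DEMO_FROM_DEVICE)	/* inbound: copy the result back */
		memcpy(b->v_addr, b->bb_addr, b->len);
	free(b->bb_addr);		/* stands in for dma_free_coherent() */
}

int main(void)
{
	char payload[] = "hello";
	struct demo_buf_info b = {
		.v_addr = payload, .len = sizeof(payload),
		.dir = DEMO_TO_DEVICE,
	};

	if (demo_map_use_bb(&b) == 0)	/* ... "device" consumes b.bb_addr ... */
		demo_unmap_use_bb(&b);
	return 0;
}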
235 if (ring->wp < ring->rp) { in get_nr_avail_ring_elements()
236 nr_el = ((ring->rp - ring->wp) / ring->el_size) - 1; in get_nr_avail_ring_elements()
238 nr_el = (ring->rp - ring->base) / ring->el_size; in get_nr_avail_ring_elements()
239 nr_el += ((ring->base + ring->len - ring->wp) / in get_nr_avail_ring_elements()
240 ring->el_size) - 1; in get_nr_avail_ring_elements()
248 return (addr - ring->iommu_base) + ring->base; in mhi_to_virtual()
254 ring->wp += ring->el_size; in mhi_add_ring_element()
255 if (ring->wp >= (ring->base + ring->len)) in mhi_add_ring_element()
256 ring->wp = ring->base; in mhi_add_ring_element()
264 ring->rp += ring->el_size; in mhi_del_ring_element()
265 if (ring->rp >= (ring->base + ring->len)) in mhi_del_ring_element()
266 ring->rp = ring->base; in mhi_del_ring_element()
273 return addr >= ring->iommu_base && addr < ring->iommu_base + ring->len && in is_valid_ring_ptr()
274 !(addr & (sizeof(struct mhi_ring_element) - 1)); in is_valid_ring_ptr()
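
The ring helpers above (get_nr_avail_ring_elements(), mhi_add_ring_element(), mhi_del_ring_element()) treat each ring as a circular buffer: rp and wp advance by el_size and wrap at base + len, and one slot is always kept unused so that rp == wp unambiguously means "empty" rather than "full". A minimal stand-alone model of that arithmetic, using illustrative demo_* names rather than the driver's types:

#include <assert.h>
#include <stddef.h>

struct demo_ring {
	char *base;		/* start of ring memory */
	size_t len;		/* total size in bytes */
	size_t el_size;		/* size of one element */
	char *rp;		/* read pointer (consumer side) */
	char *wp;		/* write pointer (producer side) */
};

/* One slot is always left empty so that rp == wp means "empty", never "full" */
static size_t demo_avail(const struct demo_ring *r)
{
	if (r->wp < r->rp)
		return (r->rp - r->wp) / r->el_size - 1;
	return (r->rp - r->base) / r->el_size +
	       (r->base + r->len - r->wp) / r->el_size - 1;
}

/* Advance rp or wp by one element, wrapping at the end of the ring */
static void demo_advance(const struct demo_ring *r, char **ptr)
{
	*ptr += r->el_size;
	if (*ptr >= r->base + r->len)
		*ptr = r->base;
}

int main(void)
{
	char mem[8 * 4];	/* 8 elements of 4 bytes */
	struct demo_ring r = { mem, sizeof(mem), 4, mem, mem };

	assert(demo_avail(&r) == 7);	/* empty ring: N - 1 usable slots */
	demo_advance(&r, &r.wp);	/* produce one element */
	assert(demo_avail(&r) == 6);
	demo_advance(&r, &r.rp);	/* consume it */
	assert(demo_avail(&r) == 7);
	return 0;
}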
284 if (dev->bus != &mhi_bus_type) in mhi_destroy_device()
288 mhi_cntrl = mhi_dev->mhi_cntrl; in mhi_destroy_device()
291 if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER) in mhi_destroy_device()
294 ul_chan = mhi_dev->ul_chan; in mhi_destroy_device()
295 dl_chan = mhi_dev->dl_chan; in mhi_destroy_device()
309 * be sure that there will be no instances of mhi_dev left after in mhi_destroy_device()
313 if (ee != MHI_EE_MAX && !(ul_chan->ee_mask & BIT(ee))) in mhi_destroy_device()
316 put_device(&ul_chan->mhi_dev->dev); in mhi_destroy_device()
320 if (ee != MHI_EE_MAX && !(dl_chan->ee_mask & BIT(ee))) in mhi_destroy_device()
323 put_device(&dl_chan->mhi_dev->dev); in mhi_destroy_device()
326 dev_dbg(&mhi_cntrl->mhi_dev->dev, "destroy device for chan:%s\n", in mhi_destroy_device()
327 mhi_dev->name); in mhi_destroy_device()
339 struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; in mhi_get_free_desc_count()
341 mhi_dev->ul_chan : mhi_dev->dl_chan; in mhi_get_free_desc_count()
342 struct mhi_ring *tre_ring = &mhi_chan->tre_ring; in mhi_get_free_desc_count()
352 if (!mhi_dev->dev.driver) in mhi_notify()
355 mhi_drv = to_mhi_driver(mhi_dev->dev.driver); in mhi_notify()
357 if (mhi_drv->status_cb) in mhi_notify()
358 mhi_drv->status_cb(mhi_dev, cb_reason); in mhi_notify()
367 struct device *dev = &mhi_cntrl->mhi_dev->dev; in mhi_create_devices()
370 mhi_chan = mhi_cntrl->mhi_chan; in mhi_create_devices()
371 for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) { in mhi_create_devices()
372 if (!mhi_chan->configured || mhi_chan->mhi_dev || in mhi_create_devices()
373 !(mhi_chan->ee_mask & BIT(mhi_cntrl->ee))) in mhi_create_devices()
379 mhi_dev->dev_type = MHI_DEVICE_XFER; in mhi_create_devices()
380 switch (mhi_chan->dir) { in mhi_create_devices()
382 mhi_dev->ul_chan = mhi_chan; in mhi_create_devices()
383 mhi_dev->ul_chan_id = mhi_chan->chan; in mhi_create_devices()
387 mhi_dev->dl_chan = mhi_chan; in mhi_create_devices()
388 mhi_dev->dl_chan_id = mhi_chan->chan; in mhi_create_devices()
392 put_device(&mhi_dev->dev); in mhi_create_devices()
396 get_device(&mhi_dev->dev); in mhi_create_devices()
397 mhi_chan->mhi_dev = mhi_dev; in mhi_create_devices()
400 if ((i + 1) < mhi_cntrl->max_chan && mhi_chan[1].configured) { in mhi_create_devices()
401 if (!strcmp(mhi_chan[1].name, mhi_chan->name)) { in mhi_create_devices()
404 if (mhi_chan->dir == DMA_TO_DEVICE) { in mhi_create_devices()
405 mhi_dev->ul_chan = mhi_chan; in mhi_create_devices()
406 mhi_dev->ul_chan_id = mhi_chan->chan; in mhi_create_devices()
408 mhi_dev->dl_chan = mhi_chan; in mhi_create_devices()
409 mhi_dev->dl_chan_id = mhi_chan->chan; in mhi_create_devices()
411 get_device(&mhi_dev->dev); in mhi_create_devices()
412 mhi_chan->mhi_dev = mhi_dev; in mhi_create_devices()
417 mhi_dev->name = mhi_chan->name; in mhi_create_devices()
418 dev_set_name(&mhi_dev->dev, "%s_%s", in mhi_create_devices()
419 dev_name(&mhi_cntrl->mhi_dev->dev), in mhi_create_devices()
420 mhi_dev->name); in mhi_create_devices()
423 if (mhi_dev->dl_chan && mhi_dev->dl_chan->wake_capable) in mhi_create_devices()
424 device_init_wakeup(&mhi_dev->dev, true); in mhi_create_devices()
426 ret = device_add(&mhi_dev->dev); in mhi_create_devices()
428 put_device(&mhi_dev->dev); in mhi_create_devices()
435 struct mhi_controller *mhi_cntrl = mhi_event->mhi_cntrl; in mhi_irq_handler()
437 struct mhi_ring *ev_ring = &mhi_event->ring; in mhi_irq_handler()
446 if (!mhi_cntrl->mhi_ctxt) { in mhi_irq_handler()
447 dev_dbg(&mhi_cntrl->mhi_dev->dev, in mhi_irq_handler()
452 er_ctxt = &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index]; in mhi_irq_handler()
453 ptr = le64_to_cpu(er_ctxt->rp); in mhi_irq_handler()
456 dev_err(&mhi_cntrl->mhi_dev->dev, in mhi_irq_handler()
464 if (ev_ring->rp == dev_rp) in mhi_irq_handler()
468 if (mhi_event->cl_manage) { in mhi_irq_handler()
469 struct mhi_chan *mhi_chan = mhi_event->mhi_chan; in mhi_irq_handler()
470 struct mhi_device *mhi_dev = mhi_chan->mhi_dev; in mhi_irq_handler()
475 tasklet_schedule(&mhi_event->task); in mhi_irq_handler()
484 struct device *dev = &mhi_cntrl->mhi_dev->dev; in mhi_intvec_threaded_handler()
489 write_lock_irq(&mhi_cntrl->pm_lock); in mhi_intvec_threaded_handler()
490 if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) { in mhi_intvec_threaded_handler()
491 write_unlock_irq(&mhi_cntrl->pm_lock); in mhi_intvec_threaded_handler()
504 write_unlock_irq(&mhi_cntrl->pm_lock); in mhi_intvec_threaded_handler()
512 if (mhi_cntrl->rddm_image && mhi_is_active(mhi_cntrl)) { in mhi_intvec_threaded_handler()
513 mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_RDDM); in mhi_intvec_threaded_handler()
514 mhi_cntrl->ee = ee; in mhi_intvec_threaded_handler()
515 wake_up_all(&mhi_cntrl->state_event); in mhi_intvec_threaded_handler()
521 mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_FATAL_ERROR); in mhi_intvec_threaded_handler()
522 mhi_cntrl->ee = ee; in mhi_intvec_threaded_handler()
523 wake_up_all(&mhi_cntrl->state_event); in mhi_intvec_threaded_handler()
527 wake_up_all(&mhi_cntrl->state_event); in mhi_intvec_threaded_handler()
542 wake_up_all(&mhi_cntrl->state_event); in mhi_intvec_handler()
550 /* Update the WP */ in mhi_recycle_ev_ring_element()
551 ring->wp += ring->el_size; in mhi_recycle_ev_ring_element()
553 if (ring->wp >= (ring->base + ring->len)) in mhi_recycle_ev_ring_element()
554 ring->wp = ring->base; in mhi_recycle_ev_ring_element()
556 *ring->ctxt_wp = cpu_to_le64(ring->iommu_base + (ring->wp - ring->base)); in mhi_recycle_ev_ring_element()
559 ring->rp += ring->el_size; in mhi_recycle_ev_ring_element()
560 if (ring->rp >= (ring->base + ring->len)) in mhi_recycle_ev_ring_element()
561 ring->rp = ring->base; in mhi_recycle_ev_ring_element()
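
mhi_recycle_ev_ring_element() above hands one consumed event slot back to the device: the write pointer and read pointer both advance with wrap-around, and the new write pointer is published in the shared ring context as a DMA address (the driver stores it little-endian via cpu_to_le64()). A stand-alone sketch of that sequence, again with illustrative demo_* names:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct demo_ev_ring {
	uint64_t iommu_base;	/* DMA address of the ring */
	char *base;		/* host virtual base */
	size_t len, el_size;
	char *rp, *wp;
	volatile uint64_t *ctxt_wp;	/* write pointer slot shared with the device */
};

/* Advance a pointer by one element, wrapping at the end of the ring */
static char *demo_advance(const struct demo_ev_ring *r, char *p)
{
	p += r->el_size;
	return (p >= r->base + r->len) ? r->base : p;
}

/* Recycle one consumed event: move wp and rp forward and publish the new wp
 * to the shared context as a device-visible DMA address. */
static void demo_recycle(struct demo_ev_ring *r)
{
	r->wp = demo_advance(r, r->wp);
	*r->ctxt_wp = r->iommu_base + (uint64_t)(r->wp - r->base);
	r->rp = demo_advance(r, r->rp);
}

int main(void)
{
	char mem[4 * 16];	/* 4 elements of 16 bytes */
	uint64_t ctxt_wp = 0;
	struct demo_ev_ring r = {
		.iommu_base = 0x1000, .base = mem, .len = sizeof(mem),
		.el_size = 16, .rp = mem, .wp = mem + 3 * 16, .ctxt_wp = &ctxt_wp,
	};

	demo_recycle(&r);	/* wp wraps from the last element back to base */
	printf("ctxt_wp = 0x%llx\n", (unsigned long long)ctxt_wp);	/* 0x1000 */
	return 0;
}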
572 struct device *dev = &mhi_cntrl->mhi_dev->dev; in parse_xfer_event()
578 buf_ring = &mhi_chan->buf_ring; in parse_xfer_event()
579 tre_ring = &mhi_chan->tre_ring; in parse_xfer_event()
582 -EOVERFLOW : 0; in parse_xfer_event()
591 write_lock_irqsave(&mhi_chan->lock, flags); in parse_xfer_event()
593 read_lock_bh(&mhi_chan->lock); in parse_xfer_event()
595 if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED) in parse_xfer_event()
610 dev_err(&mhi_cntrl->mhi_dev->dev, in parse_xfer_event()
618 if (dev_rp >= (tre_ring->base + tre_ring->len)) in parse_xfer_event()
619 dev_rp = tre_ring->base; in parse_xfer_event()
621 result.dir = mhi_chan->dir; in parse_xfer_event()
623 local_rp = tre_ring->rp; in parse_xfer_event()
625 buf_info = buf_ring->rp; in parse_xfer_event()
630 xfer_len = buf_info->len; in parse_xfer_event()
632 /* Unmap if it's not pre-mapped by client */ in parse_xfer_event()
633 if (likely(!buf_info->pre_mapped)) in parse_xfer_event()
634 mhi_cntrl->unmap_single(mhi_cntrl, buf_info); in parse_xfer_event()
636 result.buf_addr = buf_info->cb_buf; in parse_xfer_event()
640 min_t(u16, xfer_len, buf_info->len); in parse_xfer_event()
643 local_rp = tre_ring->rp; in parse_xfer_event()
645 read_unlock_bh(&mhi_chan->lock); in parse_xfer_event()
648 mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result); in parse_xfer_event()
650 if (mhi_chan->dir == DMA_TO_DEVICE) { in parse_xfer_event()
651 atomic_dec(&mhi_cntrl->pending_pkts); in parse_xfer_event()
653 mhi_cntrl->runtime_put(mhi_cntrl); in parse_xfer_event()
657 * Recycle the buffer if buffer is pre-allocated, in parse_xfer_event()
661 if (mhi_chan->pre_alloc) { in parse_xfer_event()
662 if (mhi_queue_buf(mhi_chan->mhi_dev, in parse_xfer_event()
663 mhi_chan->dir, in parse_xfer_event()
664 buf_info->cb_buf, in parse_xfer_event()
665 buf_info->len, MHI_EOT)) { in parse_xfer_event()
668 mhi_chan->chan); in parse_xfer_event()
669 kfree(buf_info->cb_buf); in parse_xfer_event()
673 read_lock_bh(&mhi_chan->lock); in parse_xfer_event()
682 mhi_chan->db_cfg.db_mode = 1; in parse_xfer_event()
683 read_lock_irqsave(&mhi_cntrl->pm_lock, pm_lock_flags); in parse_xfer_event()
684 if (tre_ring->wp != tre_ring->rp && in parse_xfer_event()
688 read_unlock_irqrestore(&mhi_cntrl->pm_lock, pm_lock_flags); in parse_xfer_event()
699 write_unlock_irqrestore(&mhi_chan->lock, flags); in parse_xfer_event()
701 read_unlock_bh(&mhi_chan->lock); in parse_xfer_event()
717 buf_ring = &mhi_chan->buf_ring; in parse_rsc_event()
718 tre_ring = &mhi_chan->tre_ring; in parse_rsc_event()
725 WARN_ON(cookie >= buf_ring->len); in parse_rsc_event()
727 buf_info = buf_ring->base + cookie; in parse_rsc_event()
730 -EOVERFLOW : 0; in parse_rsc_event()
733 result.bytes_xferd = min_t(u16, xfer_len, buf_info->len); in parse_rsc_event()
734 result.buf_addr = buf_info->cb_buf; in parse_rsc_event()
735 result.dir = mhi_chan->dir; in parse_rsc_event()
737 read_lock_bh(&mhi_chan->lock); in parse_rsc_event()
739 if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED) in parse_rsc_event()
742 WARN_ON(!buf_info->used); in parse_rsc_event()
745 mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result); in parse_rsc_event()
751 * receive, so even though completion event is different we can re-use in parse_rsc_event()
755 * Last descriptor host queue is D (WP) and first descriptor in parse_rsc_event()
762 buf_info->used = false; in parse_rsc_event()
765 read_unlock_bh(&mhi_chan->lock); in parse_rsc_event()
774 struct mhi_cmd *cmd_ring = &mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING]; in mhi_process_cmd_completion()
775 struct mhi_ring *mhi_ring = &cmd_ring->ring; in mhi_process_cmd_completion()
781 dev_err(&mhi_cntrl->mhi_dev->dev, in mhi_process_cmd_completion()
790 if (chan < mhi_cntrl->max_chan && in mhi_process_cmd_completion()
791 mhi_cntrl->mhi_chan[chan].configured) { in mhi_process_cmd_completion()
792 mhi_chan = &mhi_cntrl->mhi_chan[chan]; in mhi_process_cmd_completion()
793 write_lock_bh(&mhi_chan->lock); in mhi_process_cmd_completion()
794 mhi_chan->ccs = MHI_TRE_GET_EV_CODE(tre); in mhi_process_cmd_completion()
795 complete(&mhi_chan->completion); in mhi_process_cmd_completion()
796 write_unlock_bh(&mhi_chan->lock); in mhi_process_cmd_completion()
798 dev_err(&mhi_cntrl->mhi_dev->dev, in mhi_process_cmd_completion()
810 struct mhi_ring *ev_ring = &mhi_event->ring; in mhi_process_ctrl_ev_ring()
812 &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index]; in mhi_process_ctrl_ev_ring()
814 struct device *dev = &mhi_cntrl->mhi_dev->dev; in mhi_process_ctrl_ev_ring()
817 dma_addr_t ptr = le64_to_cpu(er_ctxt->rp); in mhi_process_ctrl_ev_ring()
824 if (unlikely(MHI_EVENT_ACCESS_INVALID(mhi_cntrl->pm_state))) in mhi_process_ctrl_ev_ring()
825 return -EIO; in mhi_process_ctrl_ev_ring()
828 dev_err(&mhi_cntrl->mhi_dev->dev, in mhi_process_ctrl_ev_ring()
830 return -EIO; in mhi_process_ctrl_ev_ring()
834 local_rp = ev_ring->rp; in mhi_process_ctrl_ev_ring()
846 link_info = &mhi_cntrl->mhi_link_info; in mhi_process_ctrl_ev_ring()
847 write_lock_irq(&mhi_cntrl->pm_lock); in mhi_process_ctrl_ev_ring()
848 link_info->target_link_speed = in mhi_process_ctrl_ev_ring()
850 link_info->target_link_width = in mhi_process_ctrl_ev_ring()
852 write_unlock_irq(&mhi_cntrl->pm_lock); in mhi_process_ctrl_ev_ring()
854 mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_BW_REQ); in mhi_process_ctrl_ev_ring()
881 write_lock_irq(&mhi_cntrl->pm_lock); in mhi_process_ctrl_ev_ring()
884 write_unlock_irq(&mhi_cntrl->pm_lock); in mhi_process_ctrl_ev_ring()
918 mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_RDDM); in mhi_process_ctrl_ev_ring()
919 write_lock_irq(&mhi_cntrl->pm_lock); in mhi_process_ctrl_ev_ring()
920 mhi_cntrl->ee = event; in mhi_process_ctrl_ev_ring()
921 write_unlock_irq(&mhi_cntrl->pm_lock); in mhi_process_ctrl_ev_ring()
922 wake_up_all(&mhi_cntrl->state_event); in mhi_process_ctrl_ev_ring()
936 WARN_ON(chan >= mhi_cntrl->max_chan); in mhi_process_ctrl_ev_ring()
942 if (chan < mhi_cntrl->max_chan) { in mhi_process_ctrl_ev_ring()
943 mhi_chan = &mhi_cntrl->mhi_chan[chan]; in mhi_process_ctrl_ev_ring()
944 if (!mhi_chan->configured) in mhi_process_ctrl_ev_ring()
955 local_rp = ev_ring->rp; in mhi_process_ctrl_ev_ring()
957 ptr = le64_to_cpu(er_ctxt->rp); in mhi_process_ctrl_ev_ring()
959 dev_err(&mhi_cntrl->mhi_dev->dev, in mhi_process_ctrl_ev_ring()
961 return -EIO; in mhi_process_ctrl_ev_ring()
968 read_lock_bh(&mhi_cntrl->pm_lock); in mhi_process_ctrl_ev_ring()
973 read_unlock_bh(&mhi_cntrl->pm_lock); in mhi_process_ctrl_ev_ring()
983 struct mhi_ring *ev_ring = &mhi_event->ring; in mhi_process_data_event_ring()
985 &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index]; in mhi_process_data_event_ring()
989 dma_addr_t ptr = le64_to_cpu(er_ctxt->rp); in mhi_process_data_event_ring()
991 if (unlikely(MHI_EVENT_ACCESS_INVALID(mhi_cntrl->pm_state))) in mhi_process_data_event_ring()
992 return -EIO; in mhi_process_data_event_ring()
995 dev_err(&mhi_cntrl->mhi_dev->dev, in mhi_process_data_event_ring()
997 return -EIO; in mhi_process_data_event_ring()
1001 local_rp = ev_ring->rp; in mhi_process_data_event_ring()
1010 WARN_ON(chan >= mhi_cntrl->max_chan); in mhi_process_data_event_ring()
1016 if (chan < mhi_cntrl->max_chan && in mhi_process_data_event_ring()
1017 mhi_cntrl->mhi_chan[chan].configured) { in mhi_process_data_event_ring()
1018 mhi_chan = &mhi_cntrl->mhi_chan[chan]; in mhi_process_data_event_ring()
1022 event_quota--; in mhi_process_data_event_ring()
1025 event_quota--; in mhi_process_data_event_ring()
1030 local_rp = ev_ring->rp; in mhi_process_data_event_ring()
1032 ptr = le64_to_cpu(er_ctxt->rp); in mhi_process_data_event_ring()
1034 dev_err(&mhi_cntrl->mhi_dev->dev, in mhi_process_data_event_ring()
1036 return -EIO; in mhi_process_data_event_ring()
1042 read_lock_bh(&mhi_cntrl->pm_lock); in mhi_process_data_event_ring()
1047 read_unlock_bh(&mhi_cntrl->pm_lock); in mhi_process_data_event_ring()
1055 struct mhi_controller *mhi_cntrl = mhi_event->mhi_cntrl; in mhi_ev_task()
1058 spin_lock_bh(&mhi_event->lock); in mhi_ev_task()
1059 mhi_event->process_event(mhi_cntrl, mhi_event, U32_MAX); in mhi_ev_task()
1060 spin_unlock_bh(&mhi_event->lock); in mhi_ev_task()
1066 struct mhi_controller *mhi_cntrl = mhi_event->mhi_cntrl; in mhi_ctrl_ev_task()
1067 struct device *dev = &mhi_cntrl->mhi_dev->dev; in mhi_ctrl_ev_task()
1073 * We can check PM state w/o a lock here because there is no way in mhi_ctrl_ev_task()
1074 * PM state can change from reg access valid to no access while this in mhi_ctrl_ev_task()
1077 if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) { in mhi_ctrl_ev_task()
1089 ret = mhi_event->process_event(mhi_cntrl, mhi_event, U32_MAX); in mhi_ctrl_ev_task()
1092 * We received an IRQ but no events to process, maybe device went to in mhi_ctrl_ev_task()
1096 write_lock_irq(&mhi_cntrl->pm_lock); in mhi_ctrl_ev_task()
1103 write_unlock_irq(&mhi_cntrl->pm_lock); in mhi_ctrl_ev_task()
1112 void *tmp = ring->wp + ring->el_size; in mhi_is_ring_full()
1114 if (tmp >= (ring->base + ring->len)) in mhi_is_ring_full()
1115 tmp = ring->base; in mhi_is_ring_full()
1117 return (tmp == ring->rp); in mhi_is_ring_full()
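
mhi_is_ring_full() above is the producer-side counterpart of the availability check earlier in the file: the ring counts as full when advancing the write pointer by one element (with wrap-around) would land on the read pointer, which is why one element is always sacrificed. A self-contained sketch with illustrative demo_* names:

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>

struct demo_ring {
	char *base;
	size_t len, el_size;
	char *rp, *wp;
};

/* Full when one more element would make wp catch up with rp */
static bool demo_ring_full(const struct demo_ring *r)
{
	char *next = r->wp + r->el_size;

	if (next >= r->base + r->len)
		next = r->base;
	return next == r->rp;
}

int main(void)
{
	char mem[4 * 8];	/* 4 elements of 8 bytes */
	struct demo_ring r = { mem, sizeof(mem), 8, mem, mem };

	assert(!demo_ring_full(&r));	/* empty ring */
	r.wp = mem + 3 * 8;		/* three elements queued */
	assert(demo_ring_full(&r));	/* next write would collide with rp */
	return 0;
}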
1123 struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; in mhi_queue()
1124 struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? mhi_dev->ul_chan : in mhi_queue()
1125 mhi_dev->dl_chan; in mhi_queue()
1126 struct mhi_ring *tre_ring = &mhi_chan->tre_ring; in mhi_queue()
1130 if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))) in mhi_queue()
1131 return -EIO; in mhi_queue()
1135 return -EAGAIN; in mhi_queue()
1141 read_lock_irqsave(&mhi_cntrl->pm_lock, flags); in mhi_queue()
1144 * for host->device buffer, balanced put is done on buffer completion in mhi_queue()
1145 * for device->host buffer, balanced put is after ringing the DB in mhi_queue()
1147 mhi_cntrl->runtime_get(mhi_cntrl); in mhi_queue()
1150 mhi_cntrl->wake_toggle(mhi_cntrl); in mhi_queue()
1152 if (mhi_chan->dir == DMA_TO_DEVICE) in mhi_queue()
1153 atomic_inc(&mhi_cntrl->pending_pkts); in mhi_queue()
1159 mhi_cntrl->runtime_put(mhi_cntrl); in mhi_queue()
1161 read_unlock_irqrestore(&mhi_cntrl->pm_lock, flags); in mhi_queue()
1169 struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? mhi_dev->ul_chan : in mhi_queue_skb()
1170 mhi_dev->dl_chan; in mhi_queue_skb()
1173 buf_info.v_addr = skb->data; in mhi_queue_skb()
1177 if (unlikely(mhi_chan->pre_alloc)) in mhi_queue_skb()
1178 return -EINVAL; in mhi_queue_skb()
1187 struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? mhi_dev->ul_chan : in mhi_queue_dma()
1188 mhi_dev->dl_chan; in mhi_queue_dma()
1191 buf_info.p_addr = mhi_buf->dma_addr; in mhi_queue_dma()
1196 if (unlikely(mhi_chan->pre_alloc)) in mhi_queue_dma()
1197 return -EINVAL; in mhi_queue_dma()
1212 /* Protect accesses for reading and incrementing WP */ in mhi_gen_tre()
1213 write_lock_bh(&mhi_chan->lock); in mhi_gen_tre()
1215 if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED) { in mhi_gen_tre()
1216 ret = -ENODEV; in mhi_gen_tre()
1220 buf_ring = &mhi_chan->buf_ring; in mhi_gen_tre()
1221 tre_ring = &mhi_chan->tre_ring; in mhi_gen_tre()
1223 buf_info = buf_ring->wp; in mhi_gen_tre()
1224 WARN_ON(buf_info->used); in mhi_gen_tre()
1225 buf_info->pre_mapped = info->pre_mapped; in mhi_gen_tre()
1226 if (info->pre_mapped) in mhi_gen_tre()
1227 buf_info->p_addr = info->p_addr; in mhi_gen_tre()
1229 buf_info->v_addr = info->v_addr; in mhi_gen_tre()
1230 buf_info->cb_buf = info->cb_buf; in mhi_gen_tre()
1231 buf_info->wp = tre_ring->wp; in mhi_gen_tre()
1232 buf_info->dir = mhi_chan->dir; in mhi_gen_tre()
1233 buf_info->len = info->len; in mhi_gen_tre()
1235 if (!info->pre_mapped) { in mhi_gen_tre()
1236 ret = mhi_cntrl->map_single(mhi_cntrl, buf_info); in mhi_gen_tre()
1244 bei = !!(mhi_chan->intmod); in mhi_gen_tre()
1246 mhi_tre = tre_ring->wp; in mhi_gen_tre()
1247 mhi_tre->ptr = MHI_TRE_DATA_PTR(buf_info->p_addr); in mhi_gen_tre()
1248 mhi_tre->dword[0] = MHI_TRE_DATA_DWORD0(info->len); in mhi_gen_tre()
1249 mhi_tre->dword[1] = MHI_TRE_DATA_DWORD1(bei, eot, eob, chain); in mhi_gen_tre()
1252 /* increment WP */ in mhi_gen_tre()
1257 write_unlock_bh(&mhi_chan->lock); in mhi_gen_tre()
1277 struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; in mhi_queue_is_full()
1279 mhi_dev->ul_chan : mhi_dev->dl_chan; in mhi_queue_is_full()
1280 struct mhi_ring *tre_ring = &mhi_chan->tre_ring; in mhi_queue_is_full()
1291 struct mhi_cmd *mhi_cmd = &mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING]; in mhi_send_cmd()
1292 struct mhi_ring *ring = &mhi_cmd->ring; in mhi_send_cmd()
1293 struct device *dev = &mhi_cntrl->mhi_dev->dev; in mhi_send_cmd()
1297 chan = mhi_chan->chan; in mhi_send_cmd()
1299 spin_lock_bh(&mhi_cmd->lock); in mhi_send_cmd()
1301 spin_unlock_bh(&mhi_cmd->lock); in mhi_send_cmd()
1302 return -ENOMEM; in mhi_send_cmd()
1306 cmd_tre = ring->wp; in mhi_send_cmd()
1309 cmd_tre->ptr = MHI_TRE_CMD_RESET_PTR; in mhi_send_cmd()
1310 cmd_tre->dword[0] = MHI_TRE_CMD_RESET_DWORD0; in mhi_send_cmd()
1311 cmd_tre->dword[1] = MHI_TRE_CMD_RESET_DWORD1(chan); in mhi_send_cmd()
1314 cmd_tre->ptr = MHI_TRE_CMD_STOP_PTR; in mhi_send_cmd()
1315 cmd_tre->dword[0] = MHI_TRE_CMD_STOP_DWORD0; in mhi_send_cmd()
1316 cmd_tre->dword[1] = MHI_TRE_CMD_STOP_DWORD1(chan); in mhi_send_cmd()
1319 cmd_tre->ptr = MHI_TRE_CMD_START_PTR; in mhi_send_cmd()
1320 cmd_tre->dword[0] = MHI_TRE_CMD_START_DWORD0; in mhi_send_cmd()
1321 cmd_tre->dword[1] = MHI_TRE_CMD_START_DWORD1(chan); in mhi_send_cmd()
1330 read_lock_bh(&mhi_cntrl->pm_lock); in mhi_send_cmd()
1333 read_unlock_bh(&mhi_cntrl->pm_lock); in mhi_send_cmd()
1334 spin_unlock_bh(&mhi_cmd->lock); in mhi_send_cmd()
1343 struct device *dev = &mhi_chan->mhi_dev->dev; in mhi_update_channel_state()
1350 write_lock_irq(&mhi_chan->lock); in mhi_update_channel_state()
1351 if (mhi_chan->ch_state != MHI_CH_STATE_STOP && in mhi_update_channel_state()
1352 mhi_chan->ch_state != MHI_CH_STATE_ENABLED && in mhi_update_channel_state()
1353 mhi_chan->ch_state != MHI_CH_STATE_SUSPENDED) { in mhi_update_channel_state()
1354 write_unlock_irq(&mhi_chan->lock); in mhi_update_channel_state()
1355 return -EINVAL; in mhi_update_channel_state()
1357 mhi_chan->ch_state = MHI_CH_STATE_DISABLED; in mhi_update_channel_state()
1358 write_unlock_irq(&mhi_chan->lock); in mhi_update_channel_state()
1363 if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED) in mhi_update_channel_state()
1364 return -EINVAL; in mhi_update_channel_state()
1369 if (mhi_chan->ch_state != MHI_CH_STATE_STOP && in mhi_update_channel_state()
1370 mhi_chan->ch_state != MHI_CH_STATE_DISABLED) in mhi_update_channel_state()
1371 return -EINVAL; in mhi_update_channel_state()
1377 mhi_chan->chan, TO_CH_STATE_TYPE_STR(to_state)); in mhi_update_channel_state()
1378 return -EINVAL; in mhi_update_channel_state()
1382 ret = mhi_device_get_sync(mhi_cntrl->mhi_dev); in mhi_update_channel_state()
1385 mhi_cntrl->runtime_get(mhi_cntrl); in mhi_update_channel_state()
1387 reinit_completion(&mhi_chan->completion); in mhi_update_channel_state()
1391 mhi_chan->chan, TO_CH_STATE_TYPE_STR(to_state)); in mhi_update_channel_state()
1395 ret = wait_for_completion_timeout(&mhi_chan->completion, in mhi_update_channel_state()
1396 msecs_to_jiffies(mhi_cntrl->timeout_ms)); in mhi_update_channel_state()
1397 if (!ret || mhi_chan->ccs != MHI_EV_CC_SUCCESS) { in mhi_update_channel_state()
1400 mhi_chan->chan, TO_CH_STATE_TYPE_STR(to_state)); in mhi_update_channel_state()
1401 ret = -EIO; in mhi_update_channel_state()
1408 write_lock_irq(&mhi_chan->lock); in mhi_update_channel_state()
1409 mhi_chan->ch_state = (to_state == MHI_CH_STATE_TYPE_START) ? in mhi_update_channel_state()
1411 write_unlock_irq(&mhi_chan->lock); in mhi_update_channel_state()
1416 mhi_cntrl->runtime_put(mhi_cntrl); in mhi_update_channel_state()
1417 mhi_device_put(mhi_cntrl->mhi_dev); in mhi_update_channel_state()
1426 struct device *dev = &mhi_chan->mhi_dev->dev; in mhi_unprepare_channel()
1428 mutex_lock(&mhi_chan->mutex); in mhi_unprepare_channel()
1430 if (!(BIT(mhi_cntrl->ee) & mhi_chan->ee_mask)) { in mhi_unprepare_channel()
1432 TO_MHI_EXEC_STR(mhi_cntrl->ee), mhi_chan->ee_mask); in mhi_unprepare_channel()
1436 /* no more processing events for this channel */ in mhi_unprepare_channel()
1441 mhi_chan->chan); in mhi_unprepare_channel()
1444 write_lock_irq(&mhi_chan->lock); in mhi_unprepare_channel()
1445 mhi_chan->ch_state = MHI_CH_STATE_DISABLED; in mhi_unprepare_channel()
1446 write_unlock_irq(&mhi_chan->lock); in mhi_unprepare_channel()
1448 if (!mhi_chan->offload_ch) { in mhi_unprepare_channel()
1452 dev_dbg(dev, "%d: successfully reset\n", mhi_chan->chan); in mhi_unprepare_channel()
1454 mutex_unlock(&mhi_chan->mutex); in mhi_unprepare_channel()
1461 struct device *dev = &mhi_chan->mhi_dev->dev; in mhi_prepare_channel()
1463 if (!(BIT(mhi_cntrl->ee) & mhi_chan->ee_mask)) { in mhi_prepare_channel()
1465 TO_MHI_EXEC_STR(mhi_cntrl->ee), mhi_chan->ee_mask); in mhi_prepare_channel()
1466 return -ENOTCONN; in mhi_prepare_channel()
1469 mutex_lock(&mhi_chan->mutex); in mhi_prepare_channel()
1472 if (!mhi_chan->offload_ch) { in mhi_prepare_channel()
1483 if (mhi_chan->dir == DMA_FROM_DEVICE) in mhi_prepare_channel()
1484 mhi_chan->pre_alloc = !!(flags & MHI_CH_INBOUND_ALLOC_BUFS); in mhi_prepare_channel()
1486 /* Pre-allocate buffer for xfer ring */ in mhi_prepare_channel()
1487 if (mhi_chan->pre_alloc) { in mhi_prepare_channel()
1489 &mhi_chan->tre_ring); in mhi_prepare_channel()
1490 size_t len = mhi_cntrl->buffer_len; in mhi_prepare_channel()
1492 while (nr_el--) { in mhi_prepare_channel()
1498 ret = -ENOMEM; in mhi_prepare_channel()
1513 read_lock_bh(&mhi_cntrl->pm_lock); in mhi_prepare_channel()
1515 read_lock_irq(&mhi_chan->lock); in mhi_prepare_channel()
1517 read_unlock_irq(&mhi_chan->lock); in mhi_prepare_channel()
1519 read_unlock_bh(&mhi_cntrl->pm_lock); in mhi_prepare_channel()
1522 mutex_unlock(&mhi_chan->mutex); in mhi_prepare_channel()
1527 if (!mhi_chan->offload_ch) in mhi_prepare_channel()
1531 mutex_unlock(&mhi_chan->mutex); in mhi_prepare_channel()
1536 mutex_unlock(&mhi_chan->mutex); in mhi_prepare_channel()
1550 struct device *dev = &mhi_cntrl->mhi_dev->dev; in mhi_mark_stale_events()
1556 ev_ring = &mhi_event->ring; in mhi_mark_stale_events()
1559 spin_lock_irqsave(&mhi_event->lock, flags); in mhi_mark_stale_events()
1561 ptr = le64_to_cpu(er_ctxt->rp); in mhi_mark_stale_events()
1563 dev_err(&mhi_cntrl->mhi_dev->dev, in mhi_mark_stale_events()
1565 dev_rp = ev_ring->rp; in mhi_mark_stale_events()
1570 local_rp = ev_ring->rp; in mhi_mark_stale_events()
1574 local_rp->dword[1] = MHI_TRE_EV_DWORD1(chan, in mhi_mark_stale_events()
1577 if (local_rp == (ev_ring->base + ev_ring->len)) in mhi_mark_stale_events()
1578 local_rp = ev_ring->base; in mhi_mark_stale_events()
1582 spin_unlock_irqrestore(&mhi_event->lock, flags); in mhi_mark_stale_events()
1592 buf_ring = &mhi_chan->buf_ring; in mhi_reset_data_chan()
1593 tre_ring = &mhi_chan->tre_ring; in mhi_reset_data_chan()
1594 result.transaction_status = -ENOTCONN; in mhi_reset_data_chan()
1596 while (tre_ring->rp != tre_ring->wp) { in mhi_reset_data_chan()
1597 struct mhi_buf_info *buf_info = buf_ring->rp; in mhi_reset_data_chan()
1599 if (mhi_chan->dir == DMA_TO_DEVICE) { in mhi_reset_data_chan()
1600 atomic_dec(&mhi_cntrl->pending_pkts); in mhi_reset_data_chan()
1602 mhi_cntrl->runtime_put(mhi_cntrl); in mhi_reset_data_chan()
1605 if (!buf_info->pre_mapped) in mhi_reset_data_chan()
1606 mhi_cntrl->unmap_single(mhi_cntrl, buf_info); in mhi_reset_data_chan()
1611 if (mhi_chan->pre_alloc) { in mhi_reset_data_chan()
1612 kfree(buf_info->cb_buf); in mhi_reset_data_chan()
1614 result.buf_addr = buf_info->cb_buf; in mhi_reset_data_chan()
1615 mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result); in mhi_reset_data_chan()
1624 int chan = mhi_chan->chan; in mhi_reset_chan()
1627 if (mhi_chan->offload_ch) in mhi_reset_chan()
1630 read_lock_bh(&mhi_cntrl->pm_lock); in mhi_reset_chan()
1631 mhi_event = &mhi_cntrl->mhi_event[mhi_chan->er_index]; in mhi_reset_chan()
1632 er_ctxt = &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_chan->er_index]; in mhi_reset_chan()
1638 read_unlock_bh(&mhi_cntrl->pm_lock); in mhi_reset_chan()
1644 struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; in __mhi_prepare_for_transfer()
1648 mhi_chan = dir ? mhi_dev->dl_chan : mhi_dev->ul_chan; in __mhi_prepare_for_transfer()
1660 for (--dir; dir >= 0; dir--) { in __mhi_prepare_for_transfer()
1661 mhi_chan = dir ? mhi_dev->dl_chan : mhi_dev->ul_chan; in __mhi_prepare_for_transfer()
1685 struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; in mhi_unprepare_from_transfer()
1690 mhi_chan = dir ? mhi_dev->ul_chan : mhi_dev->dl_chan; in mhi_unprepare_from_transfer()
1701 struct device *dev = &mhi_cntrl->mhi_dev->dev; in mhi_get_channel_doorbell_offset()
1702 void __iomem *base = mhi_cntrl->regs; in mhi_get_channel_doorbell_offset()
1708 return -EIO; in mhi_get_channel_doorbell_offset()