Lines matching the identifier "md" (full-word search, "full:md") in drivers/net/wwan/t7xx/t7xx_modem_ops.c

83 struct t7xx_modem *md = t7xx_dev->md; in t7xx_pci_mhccif_isr() local
89 ctl = md->fsm_ctl; in t7xx_pci_mhccif_isr()
92 "MHCCIF interrupt received before initializing MD monitor\n"); in t7xx_pci_mhccif_isr()
96 spin_lock_bh(&md->exp_lock); in t7xx_pci_mhccif_isr()
98 md->exp_id |= int_sta; in t7xx_pci_mhccif_isr()
99 if (md->exp_id & D2H_INT_EXCEPTION_INIT) { in t7xx_pci_mhccif_isr()
104 md->exp_id &= ~D2H_INT_EXCEPTION_INIT; in t7xx_pci_mhccif_isr()
107 } else if (md->exp_id & D2H_INT_PORT_ENUM) { in t7xx_pci_mhccif_isr()
108 md->exp_id &= ~D2H_INT_PORT_ENUM; in t7xx_pci_mhccif_isr()
115 if ((md->exp_id & D2H_INT_ASYNC_MD_HK) && !(mask & D2H_INT_ASYNC_MD_HK)) { in t7xx_pci_mhccif_isr()
116 md->exp_id &= ~D2H_INT_ASYNC_MD_HK; in t7xx_pci_mhccif_isr()
117 queue_work(md->handshake_wq, &md->handshake_work); in t7xx_pci_mhccif_isr()
120 spin_unlock_bh(&md->exp_lock); in t7xx_pci_mhccif_isr()
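
Read together, the t7xx_pci_mhccif_isr() hits show the driver's interrupt bookkeeping: new status bits are OR-ed into md->exp_id under exp_lock, individual events are then tested and cleared, and the async-handshake event is additionally gated on its MHCCIF mask. Below is a minimal standalone C model of that accumulate/test-and-clear pattern; the EV_* names and the pending word are illustrative stand-ins, not the driver's D2H_INT_* masks.

#include <stdio.h>
#include <stdint.h>

/* Illustrative event bits; the driver's real masks are D2H_INT_*. */
#define EV_EXCEPTION  0x01u
#define EV_PORT_ENUM  0x02u
#define EV_ASYNC_HK   0x04u

static uint32_t pending;  /* models md->exp_id; the driver guards it with exp_lock */

static void isr_model(uint32_t int_sta, uint32_t mask)
{
	pending |= int_sta;                     /* accumulate new status bits */

	if (pending & EV_EXCEPTION) {
		pending &= ~EV_EXCEPTION;       /* consume the event bit */
		puts("handle exception init");
	} else if (pending & EV_PORT_ENUM) {
		pending &= ~EV_PORT_ENUM;
		puts("handle port enum");
	}

	/* dispatch the async handshake only when its source is unmasked */
	if ((pending & EV_ASYNC_HK) && !(mask & EV_ASYNC_HK)) {
		pending &= ~EV_ASYNC_HK;
		puts("queue handshake work");
	}
}

int main(void)
{
	isr_model(EV_PORT_ENUM | EV_ASYNC_HK, EV_ASYNC_HK); /* async masked: stays pending */
	isr_model(0, 0);                                    /* unmasked now: dispatched */
	return 0;
}

Note how a masked event is not lost: it stays latched in the pending word and is dispatched on a later pass once the mask is cleared, which is what lets t7xx_md_event_notify() pick up handshake bits the ISR left behind.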
249 modem = t7xx_dev->md; in t7xx_rgu_isr_handler()
309 /* We do not want to get CLDMA IRQ when MD is in t7xx_cldma_exception()
331 static void t7xx_md_exception(struct t7xx_modem *md, enum hif_ex_stage stage) in t7xx_md_exception() argument
333 struct t7xx_pci_dev *t7xx_dev = md->t7xx_dev; in t7xx_md_exception()
338 t7xx_port_proxy_reset(md->port_prox); in t7xx_md_exception()
341 t7xx_cldma_exception(md->md_ctrl[CLDMA_ID_MD], stage); in t7xx_md_exception()
342 t7xx_cldma_exception(md->md_ctrl[CLDMA_ID_AP], stage); in t7xx_md_exception()
350 static int t7xx_wait_hif_ex_hk_event(struct t7xx_modem *md, int event_id) in t7xx_wait_hif_ex_hk_event() argument
355 if (md->exp_id & event_id) in t7xx_wait_hif_ex_hk_event()
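
t7xx_wait_hif_ex_hk_event() polls md->exp_id until the requested event bit appears. A hedged sketch of that poll-until-set-with-timeout shape as a standalone helper follows; wait_for_bits, the delays, and the return codes are illustrative, since the driver's actual timeout constants are not visible in these hits.

#include <stdint.h>
#include <unistd.h>

/* Poll a status word until any requested bit is set, or give up.
 * A model of the t7xx_wait_hif_ex_hk_event() shape, not the driver code. */
static int wait_for_bits(const volatile uint32_t *word, uint32_t bits,
			 unsigned int timeout_ms, unsigned int poll_ms)
{
	unsigned int waited = 0;

	while (waited < timeout_ms) {
		if (*word & bits)
			return 0;               /* event observed */
		usleep(poll_ms * 1000);
		waited += poll_ms;
	}
	return -1;                              /* timed out */
}

int main(void)
{
	volatile uint32_t status = 0x1;         /* bit already set: returns 0 */
	return wait_for_bits(&status, 0x1, 100, 10);
}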
367 /* Register the MHCCIF ISR for MD exception, port enum and in t7xx_md_sys_sw_init()
410 /* Parse MD runtime data query */ in t7xx_prepare_device_rt_data()
472 t7xx_port_enum_msg_handler(ctl->md, rt_feature->data); in t7xx_parse_host_rt_data()
478 static int t7xx_core_reset(struct t7xx_modem *md) in t7xx_core_reset() argument
480 struct device *dev = &md->t7xx_dev->pdev->dev; in t7xx_core_reset()
481 struct t7xx_fsm_ctl *ctl = md->fsm_ctl; in t7xx_core_reset()
483 md->core_md.ready = false; in t7xx_core_reset()
490 if (md->core_md.handshake_ongoing) { in t7xx_core_reset()
497 md->core_md.handshake_ongoing = false; in t7xx_core_reset()
501 static void t7xx_core_hk_handler(struct t7xx_modem *md, struct t7xx_sys_info *core_info, in t7xx_core_hk_handler() argument
507 struct device *dev = &md->t7xx_dev->pdev->dev; in t7xx_core_hk_handler()
567 struct t7xx_modem *md = container_of(work, struct t7xx_modem, handshake_work); in t7xx_md_hk_wq() local
568 struct t7xx_fsm_ctl *ctl = md->fsm_ctl; in t7xx_md_hk_wq()
572 t7xx_cldma_switch_cfg(md->md_ctrl[CLDMA_ID_MD], CLDMA_SHARED_Q_CFG); in t7xx_md_hk_wq()
573 t7xx_cldma_start(md->md_ctrl[CLDMA_ID_MD]); in t7xx_md_hk_wq()
575 md->core_md.handshake_ongoing = true; in t7xx_md_hk_wq()
576 t7xx_core_hk_handler(md, &md->core_md, ctl, FSM_EVENT_MD_HS2, FSM_EVENT_MD_HS2_EXIT); in t7xx_md_hk_wq()
581 struct t7xx_modem *md = container_of(work, struct t7xx_modem, ap_handshake_work); in t7xx_ap_hk_wq() local
582 struct t7xx_fsm_ctl *ctl = md->fsm_ctl; in t7xx_ap_hk_wq()
586 t7xx_cldma_stop(md->md_ctrl[CLDMA_ID_AP]); in t7xx_ap_hk_wq()
587 t7xx_cldma_switch_cfg(md->md_ctrl[CLDMA_ID_AP], CLDMA_SHARED_Q_CFG); in t7xx_ap_hk_wq()
588 t7xx_cldma_start(md->md_ctrl[CLDMA_ID_AP]); in t7xx_ap_hk_wq()
589 md->core_ap.handshake_ongoing = true; in t7xx_ap_hk_wq()
590 t7xx_core_hk_handler(md, &md->core_ap, ctl, FSM_EVENT_AP_HS2, FSM_EVENT_AP_HS2_EXIT); in t7xx_ap_hk_wq()
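
Both handshake workers follow the same shape: recover the owning modem with container_of(), reconfigure and (re)start the relevant CLDMA controller, mark the handshake in flight, and hand off to t7xx_core_hk_handler() with the matching FSM events. A compact standalone model of the container_of()-based work-item pattern is sketched below; the struct and function names are invented for illustration.

#include <stddef.h>
#include <stdio.h>
#include <stdbool.h>

/* Minimal work item: a real workqueue stores and later invokes func. */
struct work_item {
	void (*func)(struct work_item *w);
};

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct modem_model {
	bool handshake_ongoing;
	struct work_item md_hk_work;    /* the AP worker differs only in which */
	struct work_item ap_hk_work;    /* CLDMA controller and events it uses */
};

static void md_hk_worker(struct work_item *w)
{
	struct modem_model *m = container_of(w, struct modem_model, md_hk_work);

	m->handshake_ongoing = true;    /* models core_md.handshake_ongoing */
	puts("reconfigure CLDMA, then run the MD core handshake");
}

int main(void)
{
	struct modem_model m = { .md_hk_work = { .func = md_hk_worker } };

	m.md_hk_work.func(&m.md_hk_work);   /* a workqueue would make this call */
	return 0;
}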
593 void t7xx_md_event_notify(struct t7xx_modem *md, enum md_event_id evt_id) in t7xx_md_event_notify() argument
595 struct t7xx_fsm_ctl *ctl = md->fsm_ctl; in t7xx_md_event_notify()
601 t7xx_mhccif_mask_clr(md->t7xx_dev, D2H_INT_PORT_ENUM | D2H_INT_ASYNC_MD_HK | in t7xx_md_event_notify()
606 t7xx_mhccif_mask_set(md->t7xx_dev, D2H_INT_PORT_ENUM); in t7xx_md_event_notify()
608 spin_lock_irqsave(&md->exp_lock, flags); in t7xx_md_event_notify()
609 int_sta = t7xx_get_interrupt_status(md->t7xx_dev); in t7xx_md_event_notify()
610 md->exp_id |= int_sta; in t7xx_md_event_notify()
611 if (md->exp_id & D2H_INT_EXCEPTION_INIT) { in t7xx_md_event_notify()
613 md->exp_id &= ~D2H_INT_EXCEPTION_INIT; in t7xx_md_event_notify()
614 md->exp_id &= ~D2H_INT_ASYNC_MD_HK; in t7xx_md_event_notify()
615 md->exp_id &= ~D2H_INT_ASYNC_AP_HK; in t7xx_md_event_notify()
617 md->exp_id &= ~D2H_INT_ASYNC_MD_HK; in t7xx_md_event_notify()
618 md->exp_id &= ~D2H_INT_ASYNC_AP_HK; in t7xx_md_event_notify()
620 void __iomem *mhccif_base = md->t7xx_dev->base_addr.mhccif_rc_base; in t7xx_md_event_notify()
622 if (md->exp_id & D2H_INT_ASYNC_MD_HK) { in t7xx_md_event_notify()
623 queue_work(md->handshake_wq, &md->handshake_work); in t7xx_md_event_notify()
624 md->exp_id &= ~D2H_INT_ASYNC_MD_HK; in t7xx_md_event_notify()
626 t7xx_mhccif_mask_set(md->t7xx_dev, D2H_INT_ASYNC_MD_HK); in t7xx_md_event_notify()
629 if (md->exp_id & D2H_INT_ASYNC_AP_HK) { in t7xx_md_event_notify()
630 queue_work(md->handshake_wq, &md->ap_handshake_work); in t7xx_md_event_notify()
631 md->exp_id &= ~D2H_INT_ASYNC_AP_HK; in t7xx_md_event_notify()
633 t7xx_mhccif_mask_set(md->t7xx_dev, D2H_INT_ASYNC_AP_HK); in t7xx_md_event_notify()
636 spin_unlock_irqrestore(&md->exp_lock, flags); in t7xx_md_event_notify()
638 t7xx_mhccif_mask_clr(md->t7xx_dev, in t7xx_md_event_notify()
646 t7xx_mhccif_mask_set(md->t7xx_dev, D2H_INT_ASYNC_MD_HK); in t7xx_md_event_notify()
647 t7xx_mhccif_mask_set(md->t7xx_dev, D2H_INT_ASYNC_AP_HK); in t7xx_md_event_notify()
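
The t7xx_md_event_notify() hits show a consistent mask discipline around the async-handshake interrupts: when the event is pending, queue the work, consume the bit, and mask the source so it cannot refire mid-handshake; on an exception, the handshake bits are simply dropped. A small standalone model of that consume-then-mask step follows, with illustrative names and values.

#include <stdint.h>
#include <stdio.h>

/* Consume a pending event bit, queue its handler, and mask the source so
 * it stays quiet until the handshake completes. Illustrative model only. */
static void dispatch_async_hk(uint32_t *pending, uint32_t *mask, uint32_t bit)
{
	if (*pending & bit) {
		puts("queue handshake work");   /* queue_work() in the driver */
		*pending &= ~bit;               /* consume the event */
		*mask |= bit;                   /* re-mask until the handshake is done */
	}
}

int main(void)
{
	uint32_t pending = 0x4, mask = 0;

	dispatch_async_hk(&pending, &mask, 0x4);
	printf("pending=0x%x mask=0x%x\n", pending, mask); /* pending=0x0 mask=0x4 */
	return 0;
}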
655 void t7xx_md_exception_handshake(struct t7xx_modem *md) in t7xx_md_exception_handshake() argument
657 struct device *dev = &md->t7xx_dev->pdev->dev; in t7xx_md_exception_handshake()
660 t7xx_md_exception(md, HIF_EX_INIT); in t7xx_md_exception_handshake()
661 ret = t7xx_wait_hif_ex_hk_event(md, D2H_INT_EXCEPTION_INIT_DONE); in t7xx_md_exception_handshake()
665 t7xx_md_exception(md, HIF_EX_INIT_DONE); in t7xx_md_exception_handshake()
666 ret = t7xx_wait_hif_ex_hk_event(md, D2H_INT_EXCEPTION_CLEARQ_DONE); in t7xx_md_exception_handshake()
670 t7xx_md_exception(md, HIF_EX_CLEARQ_DONE); in t7xx_md_exception_handshake()
671 ret = t7xx_wait_hif_ex_hk_event(md, D2H_INT_EXCEPTION_ALLQ_RESET); in t7xx_md_exception_handshake()
675 t7xx_md_exception(md, HIF_EX_ALLQ_RESET); in t7xx_md_exception_handshake()
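
t7xx_md_exception_handshake() walks the exception stages in a fixed order, acknowledging each stage and then waiting for the device's matching "done" event before advancing (INIT, INIT_DONE, CLEARQ_DONE, ALLQ_RESET). A standalone sketch of that staged ack-then-wait loop is below; the table layout is illustrative, though the stage and event names mirror the hits above.

#include <stdio.h>

/* Each stage is acknowledged, then the loop waits for the device event
 * that confirms it, before moving to the next stage. */
enum stage { ST_INIT, ST_INIT_DONE, ST_CLEARQ_DONE, ST_ALLQ_RESET, ST_COUNT };

static const char *const wait_event[ST_COUNT] = {
	[ST_INIT]        = "EXCEPTION_INIT_DONE",
	[ST_INIT_DONE]   = "EXCEPTION_CLEARQ_DONE",
	[ST_CLEARQ_DONE] = "EXCEPTION_ALLQ_RESET",
	[ST_ALLQ_RESET]  = NULL,   /* final stage: nothing left to wait for */
};

int main(void)
{
	for (enum stage s = ST_INIT; s < ST_COUNT; s++) {
		printf("ack stage %d\n", (int)s);
		if (wait_event[s])
			printf("  wait for %s\n", wait_event[s]);
	}
	return 0;
}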
681 struct t7xx_modem *md; in t7xx_md_alloc() local
683 md = devm_kzalloc(dev, sizeof(*md), GFP_KERNEL); in t7xx_md_alloc()
684 if (!md) in t7xx_md_alloc()
687 md->t7xx_dev = t7xx_dev; in t7xx_md_alloc()
688 t7xx_dev->md = md; in t7xx_md_alloc()
689 spin_lock_init(&md->exp_lock); in t7xx_md_alloc()
690 md->handshake_wq = alloc_workqueue("%s", WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_HIGHPRI, in t7xx_md_alloc()
692 if (!md->handshake_wq) in t7xx_md_alloc()
695 INIT_WORK(&md->handshake_work, t7xx_md_hk_wq); in t7xx_md_alloc()
696 md->core_md.feature_set[RT_ID_MD_PORT_ENUM] &= ~FEATURE_MSK; in t7xx_md_alloc()
697 md->core_md.feature_set[RT_ID_MD_PORT_ENUM] |= in t7xx_md_alloc()
700 INIT_WORK(&md->ap_handshake_work, t7xx_ap_hk_wq); in t7xx_md_alloc()
701 md->core_ap.feature_set[RT_ID_AP_PORT_ENUM] &= ~FEATURE_MSK; in t7xx_md_alloc()
702 md->core_ap.feature_set[RT_ID_AP_PORT_ENUM] |= in t7xx_md_alloc()
705 return md; in t7xx_md_alloc()
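
In t7xx_md_alloc(), each feature_set entry is updated with a clear-then-set sequence: drop the old field with &= ~FEATURE_MSK, then OR in the new value. A standalone model of that read-modify-write field update follows; the mask, shift, and values here are illustrative, not the driver's.

#include <stdint.h>
#include <stdio.h>

/* Clear-then-set update of a packed field, as in
 * "feature_set[...] &= ~FEATURE_MSK; feature_set[...] |= ...;". */
#define FEATURE_MSK   0x0fu
#define FEATURE_SHIFT 0

static uint8_t set_feature(uint8_t word, uint8_t val)
{
	word &= ~FEATURE_MSK;                          /* drop the old field */
	word |= (val << FEATURE_SHIFT) & FEATURE_MSK;  /* write the new one */
	return word;
}

int main(void)
{
	printf("0x%02x\n", set_feature(0xf3, 0x2));    /* prints 0xf2 */
	return 0;
}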
710 struct t7xx_modem *md = t7xx_dev->md; in t7xx_md_reset() local
712 md->md_init_finish = false; in t7xx_md_reset()
713 md->exp_id = 0; in t7xx_md_reset()
714 t7xx_fsm_reset(md); in t7xx_md_reset()
715 t7xx_cldma_reset(md->md_ctrl[CLDMA_ID_MD]); in t7xx_md_reset()
716 t7xx_cldma_reset(md->md_ctrl[CLDMA_ID_AP]); in t7xx_md_reset()
717 t7xx_port_proxy_reset(md->port_prox); in t7xx_md_reset()
718 md->md_init_finish = true; in t7xx_md_reset()
719 return t7xx_core_reset(md); in t7xx_md_reset()
726 * Allocate and initialize MD control block, and initialize data path.
735 struct t7xx_modem *md; in t7xx_md_init() local
738 md = t7xx_md_alloc(t7xx_dev); in t7xx_md_init()
739 if (!md) in t7xx_md_init()
750 ret = t7xx_fsm_init(md); in t7xx_md_init()
758 ret = t7xx_cldma_init(md->md_ctrl[CLDMA_ID_MD]); in t7xx_md_init()
762 ret = t7xx_cldma_init(md->md_ctrl[CLDMA_ID_AP]); in t7xx_md_init()
766 ret = t7xx_port_proxy_init(md); in t7xx_md_init()
770 ret = t7xx_fsm_append_cmd(md->fsm_ctl, FSM_CMD_START, 0); in t7xx_md_init()
775 md->md_init_finish = true; in t7xx_md_init()
779 t7xx_port_proxy_uninit(md->port_prox); in t7xx_md_init()
782 t7xx_cldma_exit(md->md_ctrl[CLDMA_ID_AP]); in t7xx_md_init()
785 t7xx_cldma_exit(md->md_ctrl[CLDMA_ID_MD]); in t7xx_md_init()
791 t7xx_fsm_uninit(md); in t7xx_md_init()
794 destroy_workqueue(md->handshake_wq); in t7xx_md_init()
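
The t7xx_md_init() hits trace a goto-unwind error ladder: each successful step (FSM init, both CLDMA controllers, the port proxy, the start command) gains a matching teardown that runs in reverse order if a later step fails. A compact standalone model of that idiom follows; the step names are placeholders, not the driver's labels.

#include <stdio.h>

/* Goto-unwind error handling: tear down completed steps in reverse order
 * when a later step fails. */
static int step(const char *name, int fail)
{
	printf("init %s\n", name);
	return fail ? -1 : 0;
}

static void undo(const char *name)
{
	printf("undo %s\n", name);
}

static int init_all(int fail_at)
{
	if (step("fsm", fail_at == 1))
		goto err_out;
	if (step("cldma", fail_at == 2))
		goto err_fsm;
	if (step("port_proxy", fail_at == 3))
		goto err_cldma;
	return 0;

err_cldma:
	undo("cldma");
err_fsm:
	undo("fsm");
err_out:
	return -1;
}

int main(void)
{
	init_all(3);    /* fails at port_proxy: undoes cldma, then fsm */
	return 0;
}

t7xx_md_exit() below mirrors the same teardown, running the full unwind unconditionally once md_init_finish is set.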
802 struct t7xx_modem *md = t7xx_dev->md; in t7xx_md_exit() local
806 if (!md->md_init_finish) in t7xx_md_exit()
810 t7xx_fsm_append_cmd(md->fsm_ctl, FSM_CMD_PRE_STOP, FSM_CMD_FLAG_WAIT_FOR_COMPLETION); in t7xx_md_exit()
811 t7xx_port_proxy_uninit(md->port_prox); in t7xx_md_exit()
812 t7xx_cldma_exit(md->md_ctrl[CLDMA_ID_AP]); in t7xx_md_exit()
813 t7xx_cldma_exit(md->md_ctrl[CLDMA_ID_MD]); in t7xx_md_exit()
815 t7xx_fsm_uninit(md); in t7xx_md_exit()
816 destroy_workqueue(md->handshake_wq); in t7xx_md_exit()