Lines Matching full:rx_chn

671 static int k3_udma_glue_cfg_rx_chn(struct k3_udma_glue_rx_channel *rx_chn)  in k3_udma_glue_cfg_rx_chn()  argument
673 const struct udma_tisci_rm *tisci_rm = rx_chn->common.tisci_rm; in k3_udma_glue_cfg_rx_chn()
685 req.index = rx_chn->udma_rchan_id; in k3_udma_glue_cfg_rx_chn()
686 req.rx_fetch_size = rx_chn->common.hdesc_size >> 2; in k3_udma_glue_cfg_rx_chn()
690 * req.rxcq_qnum = k3_ringacc_get_ring_id(rx_chn->flows[0].ringrx); in k3_udma_glue_cfg_rx_chn()
693 if (!xudma_is_pktdma(rx_chn->common.udmax) && rx_chn->flow_num && in k3_udma_glue_cfg_rx_chn()
694 rx_chn->flow_id_base != rx_chn->udma_rchan_id) { in k3_udma_glue_cfg_rx_chn()
698 req.flowid_start = rx_chn->flow_id_base; in k3_udma_glue_cfg_rx_chn()
699 req.flowid_cnt = rx_chn->flow_num; in k3_udma_glue_cfg_rx_chn()
702 req.rx_atype = rx_chn->common.atype_asel; in k3_udma_glue_cfg_rx_chn()
706 dev_err(rx_chn->common.dev, "rchan%d cfg failed %d\n", in k3_udma_glue_cfg_rx_chn()
707 rx_chn->udma_rchan_id, ret); in k3_udma_glue_cfg_rx_chn()
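
The rx_fetch_size programmed above is the CPPI5 host-descriptor size in 32-bit words. A minimal sketch of that derivation, assuming the standard cppi5_hdesc_calc_size() helper and caller-provided sizes (the helper name example_rx_fetch_words is illustrative):

    #include <linux/dma/ti-cppi5.h>

    static u32 example_rx_fetch_words(bool epib, u32 psdata_size, u32 swdata_size)
    {
            /* bytes for one CPPI5 host descriptor incl. EPIB/psdata/swdata */
            u32 hdesc_size = cppi5_hdesc_calc_size(epib, psdata_size, swdata_size);

            /* the TISCI rx_fetch_size field counts 32-bit words, hence >> 2 */
            return hdesc_size >> 2;
    }
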
712 static void k3_udma_glue_release_rx_flow(struct k3_udma_glue_rx_channel *rx_chn, in k3_udma_glue_release_rx_flow() argument
715 struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_num]; in k3_udma_glue_release_rx_flow()
726 xudma_rflow_put(rx_chn->common.udmax, flow->udma_rflow); in k3_udma_glue_release_rx_flow()
728 rx_chn->flows_ready--; in k3_udma_glue_release_rx_flow()
731 static int k3_udma_glue_cfg_rx_flow(struct k3_udma_glue_rx_channel *rx_chn, in k3_udma_glue_cfg_rx_flow() argument
735 struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_idx]; in k3_udma_glue_cfg_rx_flow()
736 const struct udma_tisci_rm *tisci_rm = rx_chn->common.tisci_rm; in k3_udma_glue_cfg_rx_flow()
737 struct device *dev = rx_chn->common.dev; in k3_udma_glue_cfg_rx_flow()
743 flow->udma_rflow = xudma_rflow_get(rx_chn->common.udmax, in k3_udma_glue_cfg_rx_flow()
756 if (xudma_is_pktdma(rx_chn->common.udmax)) { in k3_udma_glue_cfg_rx_flow()
758 xudma_get_rflow_ring_offset(rx_chn->common.udmax); in k3_udma_glue_cfg_rx_flow()
766 ret = k3_ringacc_request_rings_pair(rx_chn->common.ringacc, in k3_udma_glue_cfg_rx_flow()
776 flow_cfg->rx_cfg.dma_dev = k3_udma_glue_rx_get_dma_device(rx_chn); in k3_udma_glue_cfg_rx_flow()
780 if (xudma_is_pktdma(rx_chn->common.udmax)) { in k3_udma_glue_cfg_rx_flow()
781 flow_cfg->rx_cfg.asel = rx_chn->common.atype_asel; in k3_udma_glue_cfg_rx_flow()
782 flow_cfg->rxfdq_cfg.asel = rx_chn->common.atype_asel; in k3_udma_glue_cfg_rx_flow()
797 if (rx_chn->remote) { in k3_udma_glue_cfg_rx_flow()
823 if (rx_chn->common.epib) in k3_udma_glue_cfg_rx_flow()
825 if (rx_chn->common.psdata_size) in k3_udma_glue_cfg_rx_flow()
847 rx_chn->flows_ready++; in k3_udma_glue_cfg_rx_flow()
849 flow->udma_rflow_id, rx_chn->flows_ready); in k3_udma_glue_cfg_rx_flow()
858 xudma_rflow_put(rx_chn->common.udmax, flow->udma_rflow); in k3_udma_glue_cfg_rx_flow()
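
For reference, a hedged consumer-side sketch of the k3_udma_glue_rx_flow_cfg that k3_udma_glue_cfg_rx_flow() consumes; the ring sizes, modes and source-tag selector here are illustrative, not taken from this file:

    #include <linux/soc/ti/k3-ringacc.h>
    #include <linux/dma/k3-udma-glue.h>

    static void example_fill_flow_cfg(struct k3_udma_glue_rx_flow_cfg *cfg,
                                      u32 num_desc)
    {
            cfg->rx_cfg.size = num_desc;            /* completion ring depth */
            cfg->rx_cfg.elm_size = K3_RINGACC_RING_ELSIZE_8;
            cfg->rx_cfg.mode = K3_RINGACC_RING_MODE_RING;

            cfg->rxfdq_cfg.size = num_desc;         /* free-descriptor queue depth */
            cfg->rxfdq_cfg.elm_size = K3_RINGACC_RING_ELSIZE_8;
            cfg->rxfdq_cfg.mode = K3_RINGACC_RING_MODE_RING;

            cfg->ring_rxq_id = K3_RINGACC_RING_ID_ANY;
            cfg->ring_rxfdq0_id = K3_RINGACC_RING_ID_ANY;
            cfg->rx_error_handling = false;
            cfg->src_tag_lo_sel = K3_UDMA_GLUE_SRC_TAG_LO_USE_REMOTE_SRC_TAG;
    }

Note that for PKTDMA the function above overrides the requested ring ids with the mapped rflow's ring offset, so the ANY values are only a starting point.
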
910 k3_udma_glue_allocate_rx_flows(struct k3_udma_glue_rx_channel *rx_chn, in k3_udma_glue_allocate_rx_flows() argument
920 if (rx_chn->flow_id_base != -1 && in k3_udma_glue_allocate_rx_flows()
921 !xudma_rflow_is_gp(rx_chn->common.udmax, rx_chn->flow_id_base)) in k3_udma_glue_allocate_rx_flows()
925 ret = xudma_alloc_gp_rflow_range(rx_chn->common.udmax, in k3_udma_glue_allocate_rx_flows()
926 rx_chn->flow_id_base, in k3_udma_glue_allocate_rx_flows()
927 rx_chn->flow_num); in k3_udma_glue_allocate_rx_flows()
929 dev_err(rx_chn->common.dev, "UDMAX reserve_rflow %d cnt:%d err: %d\n", in k3_udma_glue_allocate_rx_flows()
930 rx_chn->flow_id_base, rx_chn->flow_num, ret); in k3_udma_glue_allocate_rx_flows()
933 rx_chn->flow_id_base = ret; in k3_udma_glue_allocate_rx_flows()
942 struct k3_udma_glue_rx_channel *rx_chn; in k3_udma_glue_request_rx_chn_priv() local
953 rx_chn = devm_kzalloc(dev, sizeof(*rx_chn), GFP_KERNEL); in k3_udma_glue_request_rx_chn_priv()
954 if (!rx_chn) in k3_udma_glue_request_rx_chn_priv()
957 rx_chn->common.dev = dev; in k3_udma_glue_request_rx_chn_priv()
958 rx_chn->common.swdata_size = cfg->swdata_size; in k3_udma_glue_request_rx_chn_priv()
959 rx_chn->remote = false; in k3_udma_glue_request_rx_chn_priv()
963 &rx_chn->common, false); in k3_udma_glue_request_rx_chn_priv()
967 rx_chn->common.hdesc_size = cppi5_hdesc_calc_size(rx_chn->common.epib, in k3_udma_glue_request_rx_chn_priv()
968 rx_chn->common.psdata_size, in k3_udma_glue_request_rx_chn_priv()
969 rx_chn->common.swdata_size); in k3_udma_glue_request_rx_chn_priv()
971 ep_cfg = rx_chn->common.ep_config; in k3_udma_glue_request_rx_chn_priv()
973 if (xudma_is_pktdma(rx_chn->common.udmax)) in k3_udma_glue_request_rx_chn_priv()
974 rx_chn->udma_rchan_id = ep_cfg->mapped_channel_id; in k3_udma_glue_request_rx_chn_priv()
976 rx_chn->udma_rchan_id = -1; in k3_udma_glue_request_rx_chn_priv()
979 rx_chn->udma_rchanx = xudma_rchan_get(rx_chn->common.udmax, in k3_udma_glue_request_rx_chn_priv()
980 rx_chn->udma_rchan_id); in k3_udma_glue_request_rx_chn_priv()
981 if (IS_ERR(rx_chn->udma_rchanx)) { in k3_udma_glue_request_rx_chn_priv()
982 ret = PTR_ERR(rx_chn->udma_rchanx); in k3_udma_glue_request_rx_chn_priv()
986 rx_chn->udma_rchan_id = xudma_rchan_get_id(rx_chn->udma_rchanx); in k3_udma_glue_request_rx_chn_priv()
988 rx_chn->common.chan_dev.class = &k3_udma_glue_devclass; in k3_udma_glue_request_rx_chn_priv()
989 rx_chn->common.chan_dev.parent = xudma_get_device(rx_chn->common.udmax); in k3_udma_glue_request_rx_chn_priv()
990 dev_set_name(&rx_chn->common.chan_dev, "rchan%d-0x%04x", in k3_udma_glue_request_rx_chn_priv()
991 rx_chn->udma_rchan_id, rx_chn->common.src_thread); in k3_udma_glue_request_rx_chn_priv()
992 ret = device_register(&rx_chn->common.chan_dev); in k3_udma_glue_request_rx_chn_priv()
995 put_device(&rx_chn->common.chan_dev); in k3_udma_glue_request_rx_chn_priv()
996 rx_chn->common.chan_dev.parent = NULL; in k3_udma_glue_request_rx_chn_priv()
1000 if (xudma_is_pktdma(rx_chn->common.udmax)) { in k3_udma_glue_request_rx_chn_priv()
1002 rx_chn->common.chan_dev.dma_coherent = true; in k3_udma_glue_request_rx_chn_priv()
1003 dma_coerce_mask_and_coherent(&rx_chn->common.chan_dev, in k3_udma_glue_request_rx_chn_priv()
1007 if (xudma_is_pktdma(rx_chn->common.udmax)) { in k3_udma_glue_request_rx_chn_priv()
1021 rx_chn->flow_id_base = flow_start; in k3_udma_glue_request_rx_chn_priv()
1023 rx_chn->flow_id_base = cfg->flow_id_base; in k3_udma_glue_request_rx_chn_priv()
1027 rx_chn->flow_id_base = rx_chn->udma_rchan_id; in k3_udma_glue_request_rx_chn_priv()
1030 rx_chn->flow_num = cfg->flow_id_num; in k3_udma_glue_request_rx_chn_priv()
1032 rx_chn->flows = devm_kcalloc(dev, rx_chn->flow_num, in k3_udma_glue_request_rx_chn_priv()
1033 sizeof(*rx_chn->flows), GFP_KERNEL); in k3_udma_glue_request_rx_chn_priv()
1034 if (!rx_chn->flows) { in k3_udma_glue_request_rx_chn_priv()
1039 ret = k3_udma_glue_allocate_rx_flows(rx_chn, cfg); in k3_udma_glue_request_rx_chn_priv()
1043 for (i = 0; i < rx_chn->flow_num; i++) in k3_udma_glue_request_rx_chn_priv()
1044 rx_chn->flows[i].udma_rflow_id = rx_chn->flow_id_base + i; in k3_udma_glue_request_rx_chn_priv()
1047 rx_chn->common.dst_thread = in k3_udma_glue_request_rx_chn_priv()
1048 xudma_dev_get_psil_base(rx_chn->common.udmax) + in k3_udma_glue_request_rx_chn_priv()
1049 rx_chn->udma_rchan_id; in k3_udma_glue_request_rx_chn_priv()
1051 ret = k3_udma_glue_cfg_rx_chn(rx_chn); in k3_udma_glue_request_rx_chn_priv()
1059 ret = k3_udma_glue_cfg_rx_flow(rx_chn, 0, cfg->def_flow_cfg); in k3_udma_glue_request_rx_chn_priv()
1064 k3_udma_glue_dump_rx_chn(rx_chn); in k3_udma_glue_request_rx_chn_priv()
1066 return rx_chn; in k3_udma_glue_request_rx_chn_priv()
1069 k3_udma_glue_release_rx_chn(rx_chn); in k3_udma_glue_request_rx_chn_priv()
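
k3_udma_glue_request_rx_chn_priv() backs the public k3_udma_glue_request_rx_chn() entry point. A hedged sketch of a typical call site; the channel name "rx", the swdata size and the single-flow setup are illustrative and must match the consumer's DT dma-names and descriptor layout:

    #include <linux/dma/k3-udma-glue.h>

    static struct k3_udma_glue_rx_channel *
    example_request_rx(struct device *dev, struct k3_udma_glue_rx_flow_cfg *def_flow)
    {
            struct k3_udma_glue_rx_channel_cfg cfg = {
                    .swdata_size  = 16,             /* per-descriptor software data, illustrative */
                    .flow_id_base = -1,             /* let the glue allocate a GP flow range */
                    .flow_id_num  = 1,
                    .def_flow_cfg = def_flow,       /* flow 0 configured at request time */
            };

            /* returns an ERR_PTR() on failure */
            return k3_udma_glue_request_rx_chn(dev, "rx", &cfg);
    }
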
1074 k3_udma_glue_request_remote_rx_chn_common(struct k3_udma_glue_rx_channel *rx_chn, in k3_udma_glue_request_remote_rx_chn_common() argument
1080 rx_chn->common.hdesc_size = cppi5_hdesc_calc_size(rx_chn->common.epib, in k3_udma_glue_request_remote_rx_chn_common()
1081 rx_chn->common.psdata_size, in k3_udma_glue_request_remote_rx_chn_common()
1082 rx_chn->common.swdata_size); in k3_udma_glue_request_remote_rx_chn_common()
1084 rx_chn->flows = devm_kcalloc(dev, rx_chn->flow_num, in k3_udma_glue_request_remote_rx_chn_common()
1085 sizeof(*rx_chn->flows), GFP_KERNEL); in k3_udma_glue_request_remote_rx_chn_common()
1086 if (!rx_chn->flows) in k3_udma_glue_request_remote_rx_chn_common()
1089 rx_chn->common.chan_dev.class = &k3_udma_glue_devclass; in k3_udma_glue_request_remote_rx_chn_common()
1090 rx_chn->common.chan_dev.parent = xudma_get_device(rx_chn->common.udmax); in k3_udma_glue_request_remote_rx_chn_common()
1091 dev_set_name(&rx_chn->common.chan_dev, "rchan_remote-0x%04x-0x%02x", in k3_udma_glue_request_remote_rx_chn_common()
1092 rx_chn->common.src_thread, rx_chn->flow_id_base); in k3_udma_glue_request_remote_rx_chn_common()
1093 ret = device_register(&rx_chn->common.chan_dev); in k3_udma_glue_request_remote_rx_chn_common()
1096 put_device(&rx_chn->common.chan_dev); in k3_udma_glue_request_remote_rx_chn_common()
1097 rx_chn->common.chan_dev.parent = NULL; in k3_udma_glue_request_remote_rx_chn_common()
1101 if (xudma_is_pktdma(rx_chn->common.udmax)) { in k3_udma_glue_request_remote_rx_chn_common()
1103 rx_chn->common.chan_dev.dma_coherent = true; in k3_udma_glue_request_remote_rx_chn_common()
1104 dma_coerce_mask_and_coherent(&rx_chn->common.chan_dev, in k3_udma_glue_request_remote_rx_chn_common()
1108 ret = k3_udma_glue_allocate_rx_flows(rx_chn, cfg); in k3_udma_glue_request_remote_rx_chn_common()
1112 for (i = 0; i < rx_chn->flow_num; i++) in k3_udma_glue_request_remote_rx_chn_common()
1113 rx_chn->flows[i].udma_rflow_id = rx_chn->flow_id_base + i; in k3_udma_glue_request_remote_rx_chn_common()
1115 k3_udma_glue_dump_rx_chn(rx_chn); in k3_udma_glue_request_remote_rx_chn_common()
1124 struct k3_udma_glue_rx_channel *rx_chn; in k3_udma_glue_request_remote_rx_chn() local
1138 rx_chn = devm_kzalloc(dev, sizeof(*rx_chn), GFP_KERNEL); in k3_udma_glue_request_remote_rx_chn()
1139 if (!rx_chn) in k3_udma_glue_request_remote_rx_chn()
1142 rx_chn->common.dev = dev; in k3_udma_glue_request_remote_rx_chn()
1143 rx_chn->common.swdata_size = cfg->swdata_size; in k3_udma_glue_request_remote_rx_chn()
1144 rx_chn->remote = true; in k3_udma_glue_request_remote_rx_chn()
1145 rx_chn->udma_rchan_id = -1; in k3_udma_glue_request_remote_rx_chn()
1146 rx_chn->flow_num = cfg->flow_id_num; in k3_udma_glue_request_remote_rx_chn()
1147 rx_chn->flow_id_base = cfg->flow_id_base; in k3_udma_glue_request_remote_rx_chn()
1148 rx_chn->psil_paired = false; in k3_udma_glue_request_remote_rx_chn()
1152 &rx_chn->common, false); in k3_udma_glue_request_remote_rx_chn()
1156 ret = k3_udma_glue_request_remote_rx_chn_common(rx_chn, cfg, dev); in k3_udma_glue_request_remote_rx_chn()
1160 return rx_chn; in k3_udma_glue_request_remote_rx_chn()
1163 k3_udma_glue_release_rx_chn(rx_chn); in k3_udma_glue_request_remote_rx_chn()
1172 struct k3_udma_glue_rx_channel *rx_chn; in k3_udma_glue_request_remote_rx_chn_for_thread_id() local
1186 rx_chn = devm_kzalloc(dev, sizeof(*rx_chn), GFP_KERNEL); in k3_udma_glue_request_remote_rx_chn_for_thread_id()
1187 if (!rx_chn) in k3_udma_glue_request_remote_rx_chn_for_thread_id()
1190 rx_chn->common.dev = dev; in k3_udma_glue_request_remote_rx_chn_for_thread_id()
1191 rx_chn->common.swdata_size = cfg->swdata_size; in k3_udma_glue_request_remote_rx_chn_for_thread_id()
1192 rx_chn->remote = true; in k3_udma_glue_request_remote_rx_chn_for_thread_id()
1193 rx_chn->udma_rchan_id = -1; in k3_udma_glue_request_remote_rx_chn_for_thread_id()
1194 rx_chn->flow_num = cfg->flow_id_num; in k3_udma_glue_request_remote_rx_chn_for_thread_id()
1195 rx_chn->flow_id_base = cfg->flow_id_base; in k3_udma_glue_request_remote_rx_chn_for_thread_id()
1196 rx_chn->psil_paired = false; in k3_udma_glue_request_remote_rx_chn_for_thread_id()
1198 ret = of_k3_udma_glue_parse_chn_by_id(udmax_np, &rx_chn->common, false, thread_id); in k3_udma_glue_request_remote_rx_chn_for_thread_id()
1202 ret = k3_udma_glue_request_remote_rx_chn_common(rx_chn, cfg, dev); in k3_udma_glue_request_remote_rx_chn_for_thread_id()
1206 return rx_chn; in k3_udma_glue_request_remote_rx_chn_for_thread_id()
1209 k3_udma_glue_release_rx_chn(rx_chn); in k3_udma_glue_request_remote_rx_chn_for_thread_id()
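
Both remote request variants funnel into k3_udma_glue_request_remote_rx_chn_common(). A hedged consumer-side sketch for the case where the RX channel itself belongs to a remote entity and the host only owns flows and rings; the dispatch logic is illustrative, and the parameter order shown for the thread-id variant is an assumption inferred from the body above (the generic k3_udma_glue_request_rx_chn() also routes to the remote path when cfg->remote is set):

    #include <linux/dma/k3-udma-glue.h>

    static struct k3_udma_glue_rx_channel *
    example_request_remote_rx(struct device *dev, struct device_node *dma_np,
                              u32 thread_id, struct k3_udma_glue_rx_channel_cfg *cfg)
    {
            if (dma_np)
                    /* explicit DMA controller node + PSI-L source thread id */
                    return k3_udma_glue_request_remote_rx_chn_for_thread_id(dev, cfg,
                                                                            dma_np, thread_id);

            /* otherwise resolve the endpoint from this device's dmas/dma-names */
            return k3_udma_glue_request_remote_rx_chn(dev, "rx", cfg);
    }
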
1225 void k3_udma_glue_release_rx_chn(struct k3_udma_glue_rx_channel *rx_chn) in k3_udma_glue_release_rx_chn() argument
1229 if (IS_ERR_OR_NULL(rx_chn->common.udmax)) in k3_udma_glue_release_rx_chn()
1232 if (rx_chn->psil_paired) { in k3_udma_glue_release_rx_chn()
1233 xudma_navss_psil_unpair(rx_chn->common.udmax, in k3_udma_glue_release_rx_chn()
1234 rx_chn->common.src_thread, in k3_udma_glue_release_rx_chn()
1235 rx_chn->common.dst_thread); in k3_udma_glue_release_rx_chn()
1236 rx_chn->psil_paired = false; in k3_udma_glue_release_rx_chn()
1239 for (i = 0; i < rx_chn->flow_num; i++) in k3_udma_glue_release_rx_chn()
1240 k3_udma_glue_release_rx_flow(rx_chn, i); in k3_udma_glue_release_rx_chn()
1242 if (xudma_rflow_is_gp(rx_chn->common.udmax, rx_chn->flow_id_base)) in k3_udma_glue_release_rx_chn()
1243 xudma_free_gp_rflow_range(rx_chn->common.udmax, in k3_udma_glue_release_rx_chn()
1244 rx_chn->flow_id_base, in k3_udma_glue_release_rx_chn()
1245 rx_chn->flow_num); in k3_udma_glue_release_rx_chn()
1247 if (!IS_ERR_OR_NULL(rx_chn->udma_rchanx)) in k3_udma_glue_release_rx_chn()
1248 xudma_rchan_put(rx_chn->common.udmax, in k3_udma_glue_release_rx_chn()
1249 rx_chn->udma_rchanx); in k3_udma_glue_release_rx_chn()
1251 if (rx_chn->common.chan_dev.parent) { in k3_udma_glue_release_rx_chn()
1252 device_unregister(&rx_chn->common.chan_dev); in k3_udma_glue_release_rx_chn()
1253 rx_chn->common.chan_dev.parent = NULL; in k3_udma_glue_release_rx_chn()
1258 int k3_udma_glue_rx_flow_init(struct k3_udma_glue_rx_channel *rx_chn, in k3_udma_glue_rx_flow_init() argument
1262 if (flow_idx >= rx_chn->flow_num) in k3_udma_glue_rx_flow_init()
1265 return k3_udma_glue_cfg_rx_flow(rx_chn, flow_idx, flow_cfg); in k3_udma_glue_rx_flow_init()
1269 u32 k3_udma_glue_rx_flow_get_fdq_id(struct k3_udma_glue_rx_channel *rx_chn, in k3_udma_glue_rx_flow_get_fdq_id() argument
1274 if (flow_idx >= rx_chn->flow_num) in k3_udma_glue_rx_flow_get_fdq_id()
1277 flow = &rx_chn->flows[flow_idx]; in k3_udma_glue_rx_flow_get_fdq_id()
1283 u32 k3_udma_glue_rx_get_flow_id_base(struct k3_udma_glue_rx_channel *rx_chn) in k3_udma_glue_rx_get_flow_id_base() argument
1285 return rx_chn->flow_id_base; in k3_udma_glue_rx_get_flow_id_base()
1289 int k3_udma_glue_rx_flow_enable(struct k3_udma_glue_rx_channel *rx_chn, in k3_udma_glue_rx_flow_enable() argument
1292 struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_idx]; in k3_udma_glue_rx_flow_enable()
1293 const struct udma_tisci_rm *tisci_rm = rx_chn->common.tisci_rm; in k3_udma_glue_rx_flow_enable()
1294 struct device *dev = rx_chn->common.dev; in k3_udma_glue_rx_flow_enable()
1300 if (!rx_chn->remote) in k3_udma_glue_rx_flow_enable()
1332 int k3_udma_glue_rx_flow_disable(struct k3_udma_glue_rx_channel *rx_chn, in k3_udma_glue_rx_flow_disable() argument
1335 struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_idx]; in k3_udma_glue_rx_flow_disable()
1336 const struct udma_tisci_rm *tisci_rm = rx_chn->common.tisci_rm; in k3_udma_glue_rx_flow_disable()
1337 struct device *dev = rx_chn->common.dev; in k3_udma_glue_rx_flow_disable()
1341 if (!rx_chn->remote) in k3_udma_glue_rx_flow_disable()
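
A hedged sketch of per-flow runtime control: the checks above suggest k3_udma_glue_rx_flow_enable()/_disable() are only accepted for remote channels, while the flow-id and FDQ-ring queries work for either kind. The debug print and helper name are illustrative:

    #include <linux/dma/k3-udma-glue.h>

    static int example_flow_runtime(struct k3_udma_glue_rx_channel *rx_chn,
                                    u32 flow_idx, bool up)
    {
            u32 fdq_id = k3_udma_glue_rx_flow_get_fdq_id(rx_chn, flow_idx);
            u32 base = k3_udma_glue_rx_get_flow_id_base(rx_chn);

            pr_debug("flow %u (abs %u), fdq ring %u\n", flow_idx, base + flow_idx, fdq_id);

            /* only remote channels accept per-flow enable/disable */
            if (up)
                    return k3_udma_glue_rx_flow_enable(rx_chn, flow_idx);

            return k3_udma_glue_rx_flow_disable(rx_chn, flow_idx);
    }
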
1369 int k3_udma_glue_enable_rx_chn(struct k3_udma_glue_rx_channel *rx_chn) in k3_udma_glue_enable_rx_chn() argument
1373 if (rx_chn->remote) in k3_udma_glue_enable_rx_chn()
1376 if (rx_chn->flows_ready < rx_chn->flow_num) in k3_udma_glue_enable_rx_chn()
1379 ret = xudma_navss_psil_pair(rx_chn->common.udmax, in k3_udma_glue_enable_rx_chn()
1380 rx_chn->common.src_thread, in k3_udma_glue_enable_rx_chn()
1381 rx_chn->common.dst_thread); in k3_udma_glue_enable_rx_chn()
1383 dev_err(rx_chn->common.dev, "PSI-L request err %d\n", ret); in k3_udma_glue_enable_rx_chn()
1387 rx_chn->psil_paired = true; in k3_udma_glue_enable_rx_chn()
1389 xudma_rchanrt_write(rx_chn->udma_rchanx, UDMA_CHAN_RT_CTL_REG, in k3_udma_glue_enable_rx_chn()
1392 xudma_rchanrt_write(rx_chn->udma_rchanx, UDMA_CHAN_RT_PEER_RT_EN_REG, in k3_udma_glue_enable_rx_chn()
1395 k3_udma_glue_dump_rx_rt_chn(rx_chn, "rxrt en"); in k3_udma_glue_enable_rx_chn()
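
The flows_ready check above implies a bring-up order: every flow must be configured before the channel can be enabled, and remote channels are rejected here. A hedged sketch, assuming no default flow was already configured at request time:

    #include <linux/dma/k3-udma-glue.h>

    static int example_rx_bringup(struct k3_udma_glue_rx_channel *rx_chn,
                                  u32 flow_num,
                                  struct k3_udma_glue_rx_flow_cfg *flow_cfg)
    {
            u32 i;
            int ret;

            for (i = 0; i < flow_num; i++) {
                    ret = k3_udma_glue_rx_flow_init(rx_chn, i, flow_cfg);
                    if (ret)
                            return ret;
            }

            /* fails if any flow is still unconfigured */
            return k3_udma_glue_enable_rx_chn(rx_chn);
    }
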
1400 void k3_udma_glue_disable_rx_chn(struct k3_udma_glue_rx_channel *rx_chn) in k3_udma_glue_disable_rx_chn() argument
1402 k3_udma_glue_dump_rx_rt_chn(rx_chn, "rxrt dis1"); in k3_udma_glue_disable_rx_chn()
1404 xudma_rchanrt_write(rx_chn->udma_rchanx, in k3_udma_glue_disable_rx_chn()
1406 xudma_rchanrt_write(rx_chn->udma_rchanx, UDMA_CHAN_RT_CTL_REG, 0); in k3_udma_glue_disable_rx_chn()
1408 k3_udma_glue_dump_rx_rt_chn(rx_chn, "rxrt dis2"); in k3_udma_glue_disable_rx_chn()
1410 if (rx_chn->psil_paired) { in k3_udma_glue_disable_rx_chn()
1411 xudma_navss_psil_unpair(rx_chn->common.udmax, in k3_udma_glue_disable_rx_chn()
1412 rx_chn->common.src_thread, in k3_udma_glue_disable_rx_chn()
1413 rx_chn->common.dst_thread); in k3_udma_glue_disable_rx_chn()
1414 rx_chn->psil_paired = false; in k3_udma_glue_disable_rx_chn()
1419 void k3_udma_glue_tdown_rx_chn(struct k3_udma_glue_rx_channel *rx_chn, in k3_udma_glue_tdown_rx_chn() argument
1425 if (rx_chn->remote) in k3_udma_glue_tdown_rx_chn()
1428 k3_udma_glue_dump_rx_rt_chn(rx_chn, "rxrt tdown1"); in k3_udma_glue_tdown_rx_chn()
1430 xudma_rchanrt_write(rx_chn->udma_rchanx, UDMA_CHAN_RT_PEER_RT_EN_REG, in k3_udma_glue_tdown_rx_chn()
1433 val = xudma_rchanrt_read(rx_chn->udma_rchanx, UDMA_CHAN_RT_CTL_REG); in k3_udma_glue_tdown_rx_chn()
1436 val = xudma_rchanrt_read(rx_chn->udma_rchanx, in k3_udma_glue_tdown_rx_chn()
1440 dev_err(rx_chn->common.dev, "RX tdown timeout\n"); in k3_udma_glue_tdown_rx_chn()
1446 val = xudma_rchanrt_read(rx_chn->udma_rchanx, in k3_udma_glue_tdown_rx_chn()
1449 dev_err(rx_chn->common.dev, "TX tdown peer not stopped\n"); in k3_udma_glue_tdown_rx_chn()
1450 k3_udma_glue_dump_rx_rt_chn(rx_chn, "rxrt tdown2"); in k3_udma_glue_tdown_rx_chn()
1454 void k3_udma_glue_reset_rx_chn(struct k3_udma_glue_rx_channel *rx_chn, in k3_udma_glue_reset_rx_chn() argument
1458 struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_num]; in k3_udma_glue_reset_rx_chn()
1459 struct device *dev = rx_chn->common.dev; in k3_udma_glue_reset_rx_chn()
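
A hedged sketch of a shutdown sequence along the lines of existing users: synchronous teardown, per-flow reset with a cleanup callback for in-flight descriptors, then channel disable. The callback body and the skip_fdq handling are illustrative:

    #include <linux/dma/k3-udma-glue.h>

    static void example_rx_desc_cleanup(void *data, dma_addr_t desc_dma)
    {
            /* illustrative: unmap and free the buffer behind desc_dma here */
    }

    static void example_rx_shutdown(struct k3_udma_glue_rx_channel *rx_chn,
                                    u32 flow_num, void *data)
    {
            u32 i;

            k3_udma_glue_tdown_rx_chn(rx_chn, true);        /* synchronous teardown */

            for (i = 0; i < flow_num; i++)
                    /* drain flow i; skip the shared FDQ after the first flow (illustrative) */
                    k3_udma_glue_reset_rx_chn(rx_chn, i, data,
                                              example_rx_desc_cleanup, !!i);

            k3_udma_glue_disable_rx_chn(rx_chn);
    }
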
1498 int k3_udma_glue_push_rx_chn(struct k3_udma_glue_rx_channel *rx_chn, in k3_udma_glue_push_rx_chn() argument
1502 struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_num]; in k3_udma_glue_push_rx_chn()
1508 int k3_udma_glue_pop_rx_chn(struct k3_udma_glue_rx_channel *rx_chn, in k3_udma_glue_pop_rx_chn() argument
1511 struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_num]; in k3_udma_glue_pop_rx_chn()
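
A hedged sketch of the per-flow descriptor traffic these two helpers carry: push a prepared host descriptor onto the flow's free-descriptor queue, pop a completed one from the completion ring. Descriptor allocation and parsing are elided, the helper names are illustrative, and the descriptor is assumed to be already initialized with cppi5_hdesc_init():

    #include <linux/dma/ti-cppi5.h>
    #include <linux/dma/k3-udma-glue.h>

    static int example_rx_refill(struct k3_udma_glue_rx_channel *rx_chn, u32 flow,
                                 struct cppi5_host_desc_t *desc, dma_addr_t desc_dma,
                                 dma_addr_t buf_dma, u32 buf_len)
    {
            /* fold ASEL bits into the buffer address for PKTDMA (no-op otherwise) */
            k3_udma_glue_rx_dma_to_cppi5_addr(rx_chn, &buf_dma);
            cppi5_hdesc_attach_buf(desc, buf_dma, buf_len, buf_dma, buf_len);

            return k3_udma_glue_push_rx_chn(rx_chn, flow, desc, desc_dma);
    }

    static int example_rx_poll_one(struct k3_udma_glue_rx_channel *rx_chn, u32 flow,
                                   dma_addr_t *desc_dma)
    {
            /* typically returns -ENODATA once the completion ring is empty;
             * buffer addresses read back from the descriptor may need
             * k3_udma_glue_rx_cppi5_to_dma_addr() before unmapping */
            return k3_udma_glue_pop_rx_chn(rx_chn, flow, desc_dma);
    }
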
1517 int k3_udma_glue_rx_get_irq(struct k3_udma_glue_rx_channel *rx_chn, in k3_udma_glue_rx_get_irq() argument
1522 flow = &rx_chn->flows[flow_num]; in k3_udma_glue_rx_get_irq()
1524 if (xudma_is_pktdma(rx_chn->common.udmax)) { in k3_udma_glue_rx_get_irq()
1525 flow->virq = xudma_pktdma_rflow_get_irq(rx_chn->common.udmax, in k3_udma_glue_rx_get_irq()
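
A hedged sketch of wiring the per-flow completion interrupt to a handler; the trigger type and handler plumbing are illustrative:

    #include <linux/interrupt.h>
    #include <linux/dma/k3-udma-glue.h>

    static int example_rx_irq_setup(struct device *dev,
                                    struct k3_udma_glue_rx_channel *rx_chn,
                                    u32 flow, irq_handler_t handler, void *ctx)
    {
            int irq = k3_udma_glue_rx_get_irq(rx_chn, flow);

            if (irq < 0)
                    return irq;

            return devm_request_irq(dev, irq, handler, IRQF_TRIGGER_HIGH,
                                    dev_name(dev), ctx);
    }
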
1539 k3_udma_glue_rx_get_dma_device(struct k3_udma_glue_rx_channel *rx_chn) in k3_udma_glue_rx_get_dma_device() argument
1541 if (xudma_is_pktdma(rx_chn->common.udmax) && in k3_udma_glue_rx_get_dma_device()
1542 (rx_chn->common.atype_asel == 14 || rx_chn->common.atype_asel == 15)) in k3_udma_glue_rx_get_dma_device()
1543 return &rx_chn->common.chan_dev; in k3_udma_glue_rx_get_dma_device()
1545 return xudma_get_device(rx_chn->common.udmax); in k3_udma_glue_rx_get_dma_device()
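
A hedged sketch of why this accessor matters: streaming DMA mappings for RX buffers and descriptors should be made against the device it returns, since for PKTDMA with an ASEL value the glue substitutes its own channel device. The mapping itself is illustrative (callers still need dma_mapping_error() checks):

    #include <linux/dma-mapping.h>
    #include <linux/dma/k3-udma-glue.h>

    static dma_addr_t example_map_rx_buf(struct k3_udma_glue_rx_channel *rx_chn,
                                         void *buf, size_t len)
    {
            struct device *dma_dev = k3_udma_glue_rx_get_dma_device(rx_chn);

            return dma_map_single(dma_dev, buf, len, DMA_FROM_DEVICE);
    }
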
1549 void k3_udma_glue_rx_dma_to_cppi5_addr(struct k3_udma_glue_rx_channel *rx_chn, in k3_udma_glue_rx_dma_to_cppi5_addr() argument
1552 if (!xudma_is_pktdma(rx_chn->common.udmax) || in k3_udma_glue_rx_dma_to_cppi5_addr()
1553 !rx_chn->common.atype_asel) in k3_udma_glue_rx_dma_to_cppi5_addr()
1556 *addr |= (u64)rx_chn->common.atype_asel << K3_ADDRESS_ASEL_SHIFT; in k3_udma_glue_rx_dma_to_cppi5_addr()
1560 void k3_udma_glue_rx_cppi5_to_dma_addr(struct k3_udma_glue_rx_channel *rx_chn, in k3_udma_glue_rx_cppi5_to_dma_addr() argument
1563 if (!xudma_is_pktdma(rx_chn->common.udmax) || in k3_udma_glue_rx_cppi5_to_dma_addr()
1564 !rx_chn->common.atype_asel) in k3_udma_glue_rx_cppi5_to_dma_addr()
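
A hedged sketch of the paired address translations: fold the channel's ASEL value into a buffer address before writing it into a CPPI5 descriptor, and strip it again after reading an address back so it can be used with the DMA API. Per the guards above, both calls are no-ops for plain UDMA or when no ASEL is configured; the call sites shown are illustrative:

    #include <linux/dma/k3-udma-glue.h>

    static void example_asel_roundtrip(struct k3_udma_glue_rx_channel *rx_chn,
                                       dma_addr_t *buf_dma)
    {
            /* before cppi5_hdesc_attach_buf() */
            k3_udma_glue_rx_dma_to_cppi5_addr(rx_chn, buf_dma);

            /* ... descriptor handed to / received back from hardware ... */

            /* after cppi5_hdesc_get_obuf(), before dma_unmap_single() */
            k3_udma_glue_rx_cppi5_to_dma_addr(rx_chn, buf_dma);
    }
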