Lines matching full:tunnel in the Thunderbolt connection manager (drivers/thunderbolt/tb.c)

17 #include "tunnel.h"
24 * retried if the DP tunnel is still activating.
175 struct tb_tunnel *tunnel; in tb_discover_dp_resources() local
177 list_for_each_entry(tunnel, &tcm->tunnel_list, list) { in tb_discover_dp_resources()
178 if (tb_tunnel_is_dp(tunnel)) in tb_discover_dp_resources()
179 tb_discover_dp_resource(tb, tunnel->dst_port); in tb_discover_dp_resources()
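The three hits above (source lines 175-179) form one small helper; a minimal reconstructed sketch, assuming the tb_priv() accessor this file uses to reach the connection-manager state:

	static void tb_discover_dp_resources(struct tb *tb)
	{
		struct tb_cm *tcm = tb_priv(tb);
		struct tb_tunnel *tunnel;

		/* Walk the active tunnels, recording the DP OUT adapter of each DP tunnel */
		list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
			if (tb_tunnel_is_dp(tunnel))
				tb_discover_dp_resource(tb, tunnel->dst_port);
		}
	}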
188 const struct tb_tunnel *tunnel; in tb_enable_clx() local
209 * tunnel and in that case bail out. in tb_enable_clx()
211 list_for_each_entry(tunnel, &tcm->tunnel_list, list) { in tb_enable_clx()
212 if (tb_tunnel_is_dma(tunnel)) { in tb_enable_clx()
213 if (tb_tunnel_port_on_path(tunnel, tb_upstream_port(sw))) in tb_enable_clx()
283 static void tb_increase_tmu_accuracy(struct tb_tunnel *tunnel) in tb_increase_tmu_accuracy() argument
287 if (!tunnel) in tb_increase_tmu_accuracy()
291 * Once the first DP tunnel is established we change the TMU in tb_increase_tmu_accuracy()
299 sw = tunnel->tb->root_switch; in tb_increase_tmu_accuracy()
386 struct tb_tunnel *tunnel = NULL; in tb_switch_discover_tunnels() local
390 tunnel = tb_tunnel_discover_dp(tb, port, alloc_hopids); in tb_switch_discover_tunnels()
391 tb_increase_tmu_accuracy(tunnel); in tb_switch_discover_tunnels()
395 tunnel = tb_tunnel_discover_pci(tb, port, alloc_hopids); in tb_switch_discover_tunnels()
399 tunnel = tb_tunnel_discover_usb3(tb, port, alloc_hopids); in tb_switch_discover_tunnels()
406 if (tunnel) in tb_switch_discover_tunnels()
407 list_add_tail(&tunnel->list, list); in tb_switch_discover_tunnels()
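The discovery hits (source lines 386-407) suggest a per-port dispatch on adapter type; a sketch of that dispatch, where the TB_TYPE_* case labels are assumptions inferred from which discover helper is called:

	struct tb_tunnel *tunnel = NULL;

	switch (port->config.type) {
	case TB_TYPE_DP_HDMI_IN:
		tunnel = tb_tunnel_discover_dp(tb, port, alloc_hopids);
		/* a discovered DP tunnel also bumps TMU accuracy */
		tb_increase_tmu_accuracy(tunnel);
		break;
	case TB_TYPE_PCIE_DOWN:
		tunnel = tb_tunnel_discover_pci(tb, port, alloc_hopids);
		break;
	case TB_TYPE_USB3_DOWN:
		tunnel = tb_tunnel_discover_usb3(tb, port, alloc_hopids);
		break;
	default:
		break;
	}

	/* Anything discovered is tracked on the caller-supplied list */
	if (tunnel)
		list_add_tail(&tunnel->list, list);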
499 struct tb_tunnel *tunnel; in tb_find_tunnel() local
501 list_for_each_entry(tunnel, &tcm->tunnel_list, list) { in tb_find_tunnel()
502 if (tunnel->type == type && in tb_find_tunnel()
503 ((src_port && src_port == tunnel->src_port) || in tb_find_tunnel()
504 (dst_port && dst_port == tunnel->dst_port))) { in tb_find_tunnel()
505 return tunnel; in tb_find_tunnel()
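Source lines 499-505 show almost all of tb_find_tunnel(); filled out as a sketch (the enclosing declaration and the fall-through return are assumptions):

	static struct tb_tunnel *tb_find_tunnel(struct tb *tb, enum tb_tunnel_type type,
						struct tb_port *src_port,
						struct tb_port *dst_port)
	{
		struct tb_cm *tcm = tb_priv(tb);
		struct tb_tunnel *tunnel;

		list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
			if (tunnel->type == type &&
			    ((src_port && src_port == tunnel->src_port) ||
			     (dst_port && dst_port == tunnel->dst_port))) {
				return tunnel;
			}
		}

		return NULL;
	}

Either endpoint may be passed as NULL, which lets callers such as tb_handle_dp_bandwidth_request() below match a DP tunnel by its IN adapter alone.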
549 * from @src_port to @dst_port. Does not take the USB3 tunnel starting from
551 * already included as part of the "first hop" USB3 tunnel.
561 struct tb_tunnel *tunnel; in tb_consumed_usb3_pcie_bandwidth() local
565 tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port); in tb_consumed_usb3_pcie_bandwidth()
566 if (tunnel && !tb_port_is_usb3_down(src_port) && in tb_consumed_usb3_pcie_bandwidth()
570 ret = tb_tunnel_consumed_bandwidth(tunnel, consumed_up, in tb_consumed_usb3_pcie_bandwidth()
598 * to @dst_port. Does not take the tunnel starting from @src_port and ending
614 struct tb_tunnel *tunnel; in tb_consumed_dp_bandwidth() local
624 list_for_each_entry(tunnel, &tcm->tunnel_list, list) { in tb_consumed_dp_bandwidth()
628 if (tb_tunnel_is_invalid(tunnel)) in tb_consumed_dp_bandwidth()
631 if (!tb_tunnel_is_dp(tunnel)) in tb_consumed_dp_bandwidth()
634 if (!tb_tunnel_port_on_path(tunnel, port)) in tb_consumed_dp_bandwidth()
642 group = tunnel->src_port->group; in tb_consumed_dp_bandwidth()
647 * Ignore the DP tunnel between src_port and dst_port in tb_consumed_dp_bandwidth()
648 * because it is the same tunnel and we may be in tb_consumed_dp_bandwidth()
651 if (tunnel->src_port == src_port && in tb_consumed_dp_bandwidth()
652 tunnel->dst_port == dst_port) in tb_consumed_dp_bandwidth()
655 ret = tb_tunnel_consumed_bandwidth(tunnel, &dp_consumed_up, in tb_consumed_dp_bandwidth()
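Source lines 624-655 are the filter chain of the DP accounting loop; a sketch of just that chain, with the group bookkeeping and the final accumulation elided because the hits do not show them:

	list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
		int dp_consumed_up, dp_consumed_down;

		if (tb_tunnel_is_invalid(tunnel))
			continue;
		if (!tb_tunnel_is_dp(tunnel))
			continue;
		if (!tb_tunnel_port_on_path(tunnel, port))
			continue;

		/* per-group accounting via tunnel->src_port->group elided */

		/*
		 * Ignore the DP tunnel between src_port and dst_port
		 * because it is the same tunnel whose bandwidth is being
		 * recomputed.
		 */
		if (tunnel->src_port == src_port &&
		    tunnel->dst_port == dst_port)
			continue;

		ret = tb_tunnel_consumed_bandwidth(tunnel, &dp_consumed_up,
						   &dp_consumed_down);
		if (ret)
			return ret;

		/* accumulation into *consumed_up / *consumed_down elided */
	}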
866 struct tb_tunnel *tunnel; in tb_release_unused_usb3_bandwidth() local
868 tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port); in tb_release_unused_usb3_bandwidth()
869 return tunnel ? tb_tunnel_release_unused_bandwidth(tunnel) : 0; in tb_release_unused_usb3_bandwidth()
876 struct tb_tunnel *tunnel; in tb_reclaim_usb3_bandwidth() local
878 tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port); in tb_reclaim_usb3_bandwidth()
879 if (!tunnel) in tb_reclaim_usb3_bandwidth()
882 tb_tunnel_dbg(tunnel, "reclaiming unused bandwidth\n"); in tb_reclaim_usb3_bandwidth()
885 * Calculate available bandwidth for the first hop USB3 tunnel. in tb_reclaim_usb3_bandwidth()
888 ret = tb_available_bandwidth(tb, tunnel->src_port, tunnel->dst_port, in tb_reclaim_usb3_bandwidth()
891 tb_tunnel_warn(tunnel, "failed to calculate available bandwidth\n"); in tb_reclaim_usb3_bandwidth()
895 tb_tunnel_dbg(tunnel, "available bandwidth %d/%d Mb/s\n", available_up, in tb_reclaim_usb3_bandwidth()
898 tb_tunnel_reclaim_available_bandwidth(tunnel, &available_up, &available_down); in tb_reclaim_usb3_bandwidth()
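Source lines 876-898 reconstruct nearly the whole reclaim helper; a sketch, where the trailing include_asym flag to tb_available_bandwidth() is an assumption:

	static void tb_reclaim_usb3_bandwidth(struct tb *tb, struct tb_port *src_port,
					      struct tb_port *dst_port)
	{
		int ret, available_up, available_down;
		struct tb_tunnel *tunnel;

		tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port);
		if (!tunnel)
			return;

		tb_tunnel_dbg(tunnel, "reclaiming unused bandwidth\n");

		/* Calculate available bandwidth for the first hop USB3 tunnel */
		ret = tb_available_bandwidth(tb, tunnel->src_port, tunnel->dst_port,
					     &available_up, &available_down, false);
		if (ret) {
			tb_tunnel_warn(tunnel, "failed to calculate available bandwidth\n");
			return;
		}

		tb_tunnel_dbg(tunnel, "available bandwidth %d/%d Mb/s\n",
			      available_up, available_down);

		tb_tunnel_reclaim_available_bandwidth(tunnel, &available_up,
						      &available_down);
	}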
907 struct tb_tunnel *tunnel; in tb_tunnel_usb3() local
910 tb_dbg(tb, "USB3 tunneling disabled, not creating tunnel\n"); in tb_tunnel_usb3()
935 * there is no point setting up a new tunnel. in tb_tunnel_usb3()
941 /* Make all unused bandwidth available for the new tunnel */ in tb_tunnel_usb3()
952 tb_port_dbg(up, "available bandwidth for new USB3 tunnel %d/%d Mb/s\n", in tb_tunnel_usb3()
955 tunnel = tb_tunnel_alloc_usb3(tb, up, down, available_up, in tb_tunnel_usb3()
957 if (!tunnel) { in tb_tunnel_usb3()
962 if (tb_tunnel_activate(tunnel)) { in tb_tunnel_usb3()
964 "USB3 tunnel activation failed, aborting\n"); in tb_tunnel_usb3()
969 list_add_tail(&tunnel->list, &tcm->tunnel_list); in tb_tunnel_usb3()
976 tb_tunnel_put(tunnel); in tb_tunnel_usb3()
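Source lines 907-976 trace the create/activate/track pattern that every tunnel type in this file follows; a skeleton with hypothetical label names and assumed errno values, since the hits only show the calls:

	/* Make all unused bandwidth available for the new tunnel, then size it */
	tunnel = tb_tunnel_alloc_usb3(tb, up, down, available_up, available_down);
	if (!tunnel) {
		ret = -ENOMEM;			/* assumed errno */
		goto err_reclaim;		/* hypothetical label */
	}

	if (tb_tunnel_activate(tunnel)) {
		tb_port_info(up, "USB3 tunnel activation failed, aborting\n");
		ret = -EIO;			/* assumed errno */
		goto err_free;
	}

	list_add_tail(&tunnel->list, &tcm->tunnel_list);
	return 0;

err_free:
	tb_tunnel_put(tunnel);
err_reclaim:
	/* hand the reclaimed USB3 bandwidth back (elided in the hits) */
	return ret;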
1390 tb_sw_warn(sw, "USB3 tunnel creation failed\n"); in tb_scan_port()
1416 struct tb_tunnel *tunnel; in tb_recalc_estimated_bandwidth_for_group() local
1422 tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, NULL); in tb_recalc_estimated_bandwidth_for_group()
1423 if (WARN_ON(!tunnel)) in tb_recalc_estimated_bandwidth_for_group()
1432 * for each tunnel separately. in tb_recalc_estimated_bandwidth_for_group()
1434 first_tunnel = tunnel; in tb_recalc_estimated_bandwidth_for_group()
1438 tb_tunnel_warn(tunnel, in tb_recalc_estimated_bandwidth_for_group()
1444 out = tunnel->dst_port; in tb_recalc_estimated_bandwidth_for_group()
1448 tb_tunnel_warn(tunnel, in tb_recalc_estimated_bandwidth_for_group()
1455 * - already allocated bandwidth for the DP tunnel in tb_recalc_estimated_bandwidth_for_group()
1459 if (tb_tunnel_direction_downstream(tunnel)) in tb_recalc_estimated_bandwidth_for_group()
1468 tb_tunnel_dbg(tunnel, in tb_recalc_estimated_bandwidth_for_group()
1475 tb_tunnel_warn(tunnel, in tb_recalc_estimated_bandwidth_for_group()
1516 struct tb_tunnel *tunnel; in __configure_group_sym() local
1529 * if the graphics released bandwidth for another tunnel in the in __configure_group_sym()
1533 tunnel = tb_find_tunnel(group->tb, TB_TUNNEL_DP, in, NULL); in __configure_group_sym()
1534 if (tunnel) in __configure_group_sym()
1535 tb_configure_sym(group->tb, in, tunnel->dst_port, true); in __configure_group_sym()
1597 struct tb_tunnel *tunnel; in tb_attach_bandwidth_group() local
1605 list_for_each_entry(tunnel, &tcm->tunnel_list, list) { in tb_attach_bandwidth_group()
1606 if (!tb_tunnel_is_dp(tunnel)) in tb_attach_bandwidth_group()
1609 if (tunnel->src_port->sw == in->sw && in tb_attach_bandwidth_group()
1610 tunnel->dst_port->sw == out->sw) { in tb_attach_bandwidth_group()
1611 group = tunnel->src_port->group; in tb_attach_bandwidth_group()
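Source lines 1605-1611 give the grouping rule: a DP IN adapter joins the bandwidth group of an existing DP tunnel running between the same pair of routers. A sketch of that match (the break on first hit is an assumption):

	list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
		if (!tb_tunnel_is_dp(tunnel))
			continue;

		/* same source and destination routers -> share the group */
		if (tunnel->src_port->sw == in->sw &&
		    tunnel->dst_port->sw == out->sw) {
			group = tunnel->src_port->group;
			break;
		}
	}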
1668 struct tb_tunnel *tunnel; in tb_discover_tunnels() local
1672 list_for_each_entry(tunnel, &tcm->tunnel_list, list) { in tb_discover_tunnels()
1673 if (tb_tunnel_is_pci(tunnel)) { in tb_discover_tunnels()
1674 struct tb_switch *parent = tunnel->dst_port->sw; in tb_discover_tunnels()
1676 while (parent != tunnel->src_port->sw) { in tb_discover_tunnels()
1680 } else if (tb_tunnel_is_dp(tunnel)) { in tb_discover_tunnels()
1681 struct tb_port *in = tunnel->src_port; in tb_discover_tunnels()
1682 struct tb_port *out = tunnel->dst_port; in tb_discover_tunnels()
1693 static void tb_deactivate_and_free_tunnel(struct tb_tunnel *tunnel) in tb_deactivate_and_free_tunnel() argument
1698 if (!tunnel) in tb_deactivate_and_free_tunnel()
1701 tb_tunnel_deactivate(tunnel); in tb_deactivate_and_free_tunnel()
1702 list_del(&tunnel->list); in tb_deactivate_and_free_tunnel()
1704 tb = tunnel->tb; in tb_deactivate_and_free_tunnel()
1705 src_port = tunnel->src_port; in tb_deactivate_and_free_tunnel()
1706 dst_port = tunnel->dst_port; in tb_deactivate_and_free_tunnel()
1708 switch (tunnel->type) { in tb_deactivate_and_free_tunnel()
1712 * In case of a DP tunnel, make sure the DP IN resource is in tb_deactivate_and_free_tunnel()
1740 tb_tunnel_put(tunnel); in tb_deactivate_and_free_tunnel()
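Source lines 1693-1740 outline the common teardown path; a sketch, with the per-type cleanup reduced to a comment because the hits only show the DP case heading:

	static void tb_deactivate_and_free_tunnel(struct tb_tunnel *tunnel)
	{
		struct tb_port *src_port, *dst_port;
		struct tb *tb;

		if (!tunnel)
			return;

		tb_tunnel_deactivate(tunnel);
		list_del(&tunnel->list);

		tb = tunnel->tb;
		src_port = tunnel->src_port;
		dst_port = tunnel->dst_port;

		switch (tunnel->type) {
		case TB_TUNNEL_DP:
			/*
			 * In case of a DP tunnel, make sure the DP IN resource
			 * is deallocated properly (details elided in the hits).
			 */
			break;
		default:
			/* other tunnel types have their own cleanup (elided) */
			break;
		}

		tb_tunnel_put(tunnel);
	}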
1749 struct tb_tunnel *tunnel; in tb_free_invalid_tunnels() local
1752 list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) { in tb_free_invalid_tunnels()
1753 if (tb_tunnel_is_invalid(tunnel)) in tb_free_invalid_tunnels()
1754 tb_deactivate_and_free_tunnel(tunnel); in tb_free_invalid_tunnels()
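Source lines 1749-1754 are effectively the whole helper; reconstructed:

	static void tb_free_invalid_tunnels(struct tb *tb)
	{
		struct tb_cm *tcm = tb_priv(tb);
		struct tb_tunnel *tunnel;
		struct tb_tunnel *n;

		list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
			if (tb_tunnel_is_invalid(tunnel))
				tb_deactivate_and_free_tunnel(tunnel);
		}
	}

The _safe iterator matters here: tb_deactivate_and_free_tunnel() does list_del() on the entry, so a plain list_for_each_entry() would walk freed memory.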
1860 * Keep the DP tunnel under the topology starting from in tb_find_dp_out()
1877 static void tb_dp_tunnel_active(struct tb_tunnel *tunnel, void *data) in tb_dp_tunnel_active() argument
1879 struct tb_port *in = tunnel->src_port; in tb_dp_tunnel_active()
1880 struct tb_port *out = tunnel->dst_port; in tb_dp_tunnel_active()
1884 if (tb_tunnel_is_active(tunnel)) { in tb_dp_tunnel_active()
1887 tb_tunnel_dbg(tunnel, "DPRX capabilities read completed\n"); in tb_dp_tunnel_active()
1889 /* If reading the tunnel's consumed bandwidth fails, tear it down */ in tb_dp_tunnel_active()
1890 ret = tb_tunnel_consumed_bandwidth(tunnel, &consumed_up, in tb_dp_tunnel_active()
1893 tb_tunnel_warn(tunnel, in tb_dp_tunnel_active()
1895 tb_deactivate_and_free_tunnel(tunnel); in tb_dp_tunnel_active()
1910 * If a DP tunnel exists, change host in tb_dp_tunnel_active()
1914 tb_increase_tmu_accuracy(tunnel); in tb_dp_tunnel_active()
1917 struct tb_port *in = tunnel->src_port; in tb_dp_tunnel_active()
1920 * This tunnel failed to establish. This means DPRX in tb_dp_tunnel_active()
1928 * also tear down the tunnel and try to re-use the in tb_dp_tunnel_active()
1934 tb_tunnel_warn(tunnel, "not active, tearing down\n"); in tb_dp_tunnel_active()
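Source lines 1877-1934 describe the two outcomes of the activation callback; a heavily hedged sketch (the use of the data parameter and the inactive-branch recovery are not visible in the hits):

	static void tb_dp_tunnel_active(struct tb_tunnel *tunnel, void *data)
	{
		if (tb_tunnel_is_active(tunnel)) {
			int consumed_up, consumed_down, ret;

			tb_tunnel_dbg(tunnel, "DPRX capabilities read completed\n");

			/* If reading the tunnel's consumed bandwidth fails, tear it down */
			ret = tb_tunnel_consumed_bandwidth(tunnel, &consumed_up,
							   &consumed_down);
			if (ret) {
				tb_tunnel_warn(tunnel,
					       "failed to read consumed bandwidth, tearing down\n");
				tb_deactivate_and_free_tunnel(tunnel);
				return;
			}

			/* the first established DP tunnel also bumps host TMU accuracy */
			tb_increase_tmu_accuracy(tunnel);
		} else {
			/*
			 * DPRX negotiation did not complete; tear the tunnel
			 * down and let the DP IN resource be re-used
			 * (recovery elided in the hits).
			 */
			tb_tunnel_warn(tunnel, "not active, tearing down\n");
		}
	}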
1947 struct tb_tunnel *tunnel; in tb_tunnel_one_dp() local
1956 list_for_each_entry(tunnel, &tcm->tunnel_list, list) { in tb_tunnel_one_dp()
1957 if (tb_tunnel_is_dp(tunnel)) { in tb_tunnel_one_dp()
1965 * both ends of the tunnel. in tb_tunnel_one_dp()
1969 * tunnel is active. in tb_tunnel_one_dp()
1982 /* Make all unused USB3 bandwidth available for the new DP tunnel */ in tb_tunnel_one_dp()
1994 tb_dbg(tb, "available bandwidth for new DP tunnel %u/%u Mb/s\n", in tb_tunnel_one_dp()
1997 tunnel = tb_tunnel_alloc_dp(tb, in, out, link_nr, available_up, in tb_tunnel_one_dp()
2000 if (!tunnel) { in tb_tunnel_one_dp()
2001 tb_port_dbg(out, "could not allocate DP tunnel\n"); in tb_tunnel_one_dp()
2005 list_add_tail(&tunnel->list, &tcm->tunnel_list); in tb_tunnel_one_dp()
2007 ret = tb_tunnel_activate(tunnel); in tb_tunnel_one_dp()
2009 tb_port_info(out, "DP tunnel activation failed, aborting\n"); in tb_tunnel_one_dp()
2010 list_del(&tunnel->list); in tb_tunnel_one_dp()
2017 tb_tunnel_put(tunnel); in tb_tunnel_one_dp()
2038 tb_dbg(tb, "DP tunneling disabled, not creating tunnel\n"); in tb_tunnel_dp()
2044 * establish a DP tunnel between them. in tb_tunnel_dp()
2151 struct tb_tunnel *tunnel; in tb_dp_resource_unavailable() local
2163 tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, out); in tb_dp_resource_unavailable()
2164 if (tunnel) in tb_dp_resource_unavailable()
2165 tb_deactivate_and_free_tunnel(tunnel); in tb_dp_resource_unavailable()
2172 * to create another tunnel. in tb_dp_resource_unavailable()
2203 struct tb_tunnel *tunnel, *n; in tb_disconnect_and_release_dp() local
2209 list_for_each_entry_safe_reverse(tunnel, n, &tcm->tunnel_list, list) { in tb_disconnect_and_release_dp()
2210 if (tb_tunnel_is_dp(tunnel)) in tb_disconnect_and_release_dp()
2211 tb_deactivate_and_free_tunnel(tunnel); in tb_disconnect_and_release_dp()
2225 struct tb_tunnel *tunnel; in tb_disconnect_pci() local
2232 tunnel = tb_find_tunnel(tb, TB_TUNNEL_PCI, NULL, up); in tb_disconnect_pci()
2233 if (WARN_ON(!tunnel)) in tb_disconnect_pci()
2238 tb_tunnel_deactivate(tunnel); in tb_disconnect_pci()
2239 list_del(&tunnel->list); in tb_disconnect_pci()
2240 tb_tunnel_put(tunnel); in tb_disconnect_pci()
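Source lines 2225-2240 show the PCIe disconnect nearly in full; a sketch with an assumed errno:

	tunnel = tb_find_tunnel(tb, TB_TUNNEL_PCI, NULL, up);
	if (WARN_ON(!tunnel))
		return -ENODEV;		/* assumed errno, not shown in the hits */

	tb_tunnel_deactivate(tunnel);
	list_del(&tunnel->list);
	tb_tunnel_put(tunnel);
	return 0;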
2248 struct tb_tunnel *tunnel; in tb_tunnel_pci() local
2263 tunnel = tb_tunnel_alloc_pci(tb, up, down); in tb_tunnel_pci()
2264 if (!tunnel) in tb_tunnel_pci()
2267 if (tb_tunnel_activate(tunnel)) { in tb_tunnel_pci()
2269 "PCIe tunnel activation failed, aborting\n"); in tb_tunnel_pci()
2270 tb_tunnel_put(tunnel); in tb_tunnel_pci()
2284 list_add_tail(&tunnel->list, &tcm->tunnel_list); in tb_tunnel_pci()
2294 struct tb_tunnel *tunnel; in tb_approve_xdomain_paths() local
2310 tunnel = tb_tunnel_alloc_dma(tb, nhi_port, dst_port, transmit_path, in tb_approve_xdomain_paths()
2312 if (!tunnel) { in tb_approve_xdomain_paths()
2317 if (tb_tunnel_activate(tunnel)) { in tb_approve_xdomain_paths()
2319 "DMA tunnel activation failed, aborting\n"); in tb_approve_xdomain_paths()
2324 list_add_tail(&tunnel->list, &tcm->tunnel_list); in tb_approve_xdomain_paths()
2329 tb_tunnel_put(tunnel); in tb_approve_xdomain_paths()
2343 struct tb_tunnel *tunnel, *n; in __tb_disconnect_xdomain_paths() local
2350 list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) { in __tb_disconnect_xdomain_paths()
2351 if (!tb_tunnel_is_dma(tunnel)) in __tb_disconnect_xdomain_paths()
2353 if (tunnel->src_port != nhi_port || tunnel->dst_port != dst_port) in __tb_disconnect_xdomain_paths()
2356 if (tb_tunnel_match_dma(tunnel, transmit_path, transmit_ring, in __tb_disconnect_xdomain_paths()
2358 tb_deactivate_and_free_tunnel(tunnel); in __tb_disconnect_xdomain_paths()
2363 * because we may still have another DMA tunnel active through in __tb_disconnect_xdomain_paths()
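Source lines 2343-2363 show why DMA teardown is selective: several XDomain services can tunnel through the same NHI port, so only the rings/paths that match are torn down. A sketch of the matching loop (the tb_tunnel_match_dma() argument order beyond what the hits show is an assumption):

	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
		if (!tb_tunnel_is_dma(tunnel))
			continue;
		if (tunnel->src_port != nhi_port || tunnel->dst_port != dst_port)
			continue;

		if (tb_tunnel_match_dma(tunnel, transmit_path, transmit_ring,
					receive_path, receive_ring))
			tb_deactivate_and_free_tunnel(tunnel);
	}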
2443 /* Maybe we can create another DP tunnel */ in tb_handle_hotplug()
2502 static int tb_alloc_dp_bandwidth(struct tb_tunnel *tunnel, int *requested_up, in tb_alloc_dp_bandwidth() argument
2509 struct tb *tb = tunnel->tb; in tb_alloc_dp_bandwidth()
2513 ret = tb_tunnel_allocated_bandwidth(tunnel, &allocated_up, &allocated_down); in tb_alloc_dp_bandwidth()
2517 in = tunnel->src_port; in tb_alloc_dp_bandwidth()
2518 out = tunnel->dst_port; in tb_alloc_dp_bandwidth()
2520 tb_tunnel_dbg(tunnel, "bandwidth allocated currently %d/%d Mb/s\n", in tb_alloc_dp_bandwidth()
2535 ret = tb_tunnel_maximum_bandwidth(tunnel, &max_up, &max_down); in tb_alloc_dp_bandwidth()
2562 tb_tunnel_dbg(tunnel, "corrected bandwidth request %d/%d Mb/s\n", in tb_alloc_dp_bandwidth()
2567 tb_tunnel_dbg(tunnel, in tb_alloc_dp_bandwidth()
2575 downstream = tb_tunnel_direction_downstream(tunnel); in tb_alloc_dp_bandwidth()
2580 if (tunnel->bw_mode) { in tb_alloc_dp_bandwidth()
2584 * what is currently allocated to that tunnel we in tb_alloc_dp_bandwidth()
2585 * simply change the reservation of the tunnel in tb_alloc_dp_bandwidth()
2613 return tb_tunnel_alloc_bandwidth(tunnel, requested_up, in tb_alloc_dp_bandwidth()
2635 tb_tunnel_dbg(tunnel, "bandwidth available for allocation %d/%d (+ %u reserved) Mb/s\n", in tb_alloc_dp_bandwidth()
2655 ret = tb_tunnel_alloc_bandwidth(tunnel, requested_up, in tb_alloc_dp_bandwidth()
2658 tb_tunnel_warn(tunnel, "failed to allocate bandwidth\n"); in tb_alloc_dp_bandwidth()
2686 tb_tunnel_dbg(tunnel, in tb_alloc_dp_bandwidth()
2689 tb_tunnel_alloc_bandwidth(tunnel, &allocated_up, &allocated_down); in tb_alloc_dp_bandwidth()
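Source lines 2502-2689 sketch a query/clamp/grant/rollback sequence; a control-flow skeleton only, with the clamping and availability math elided because the hits do not show them:

	int allocated_up, allocated_down, max_up, max_down, ret;

	/* 1. What does the tunnel hold right now? */
	ret = tb_tunnel_allocated_bandwidth(tunnel, &allocated_up, &allocated_down);
	if (ret)
		return ret;

	/* 2. Clamp the request against the tunnel maximum */
	ret = tb_tunnel_maximum_bandwidth(tunnel, &max_up, &max_down);
	if (ret)
		return ret;
	/* ... correct *requested_up / *requested_down here (elided) ... */

	/* 3. In bandwidth allocation mode a non-growing request is granted directly */
	if (tunnel->bw_mode) {
		/* ... growth check elided ... */
		return tb_tunnel_alloc_bandwidth(tunnel, requested_up,
						 requested_down);
	}

	/* 4. Otherwise grant against what is available (plus any reserved share) */
	ret = tb_tunnel_alloc_bandwidth(tunnel, requested_up, requested_down);
	if (ret) {
		tb_tunnel_warn(tunnel, "failed to allocate bandwidth\n");
		/* 5. Roll back to the previous reservation on failure */
		tb_tunnel_alloc_bandwidth(tunnel, &allocated_up, &allocated_down);
	}
	return ret;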
2699 struct tb_tunnel *tunnel; in tb_handle_dp_bandwidth_request() local
2726 tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, NULL); in tb_handle_dp_bandwidth_request()
2727 if (!tunnel) { in tb_handle_dp_bandwidth_request()
2728 tb_port_warn(in, "failed to find tunnel\n"); in tb_handle_dp_bandwidth_request()
2733 if (tunnel->bw_mode) { in tb_handle_dp_bandwidth_request()
2735 * Reset the tunnel back to use the legacy in tb_handle_dp_bandwidth_request()
2738 tunnel->bw_mode = false; in tb_handle_dp_bandwidth_request()
2767 if (tb_tunnel_direction_downstream(tunnel)) { in tb_handle_dp_bandwidth_request()
2775 ret = tb_alloc_dp_bandwidth(tunnel, &requested_up, &requested_down); in tb_handle_dp_bandwidth_request()
2778 tb_tunnel_warn(tunnel, in tb_handle_dp_bandwidth_request()
2781 tb_tunnel_dbg(tunnel, "not active yet\n"); in tb_handle_dp_bandwidth_request()
2784 * tunnel is not yet active. This means that in tb_handle_dp_bandwidth_request()
2786 * this tunnel. Allow it some time and retry in tb_handle_dp_bandwidth_request()
2790 tb_tunnel_dbg(tunnel, in tb_handle_dp_bandwidth_request()
2797 tb_tunnel_dbg(tunnel, in tb_handle_dp_bandwidth_request()
2801 tb_tunnel_warn(tunnel, in tb_handle_dp_bandwidth_request()
2805 tb_tunnel_dbg(tunnel, in tb_handle_dp_bandwidth_request()
2900 struct tb_tunnel *tunnel; in tb_stop() local
2905 list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) { in tb_stop()
2911 if (tb_tunnel_is_dma(tunnel)) in tb_stop()
2912 tb_tunnel_deactivate(tunnel); in tb_stop()
2913 tb_tunnel_put(tunnel); in tb_stop()
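Source lines 2900-2913 show that on driver stop only DMA tunnels are deactivated; the inference (hedged) is that protocol tunnels can stay up without the driver, while DMA tunnels cannot:

	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
		/*
		 * DMA tunnels require a functional driver, so deactivate
		 * them; other protocol tunnels are left in place and only
		 * the bookkeeping reference is dropped.
		 */
		if (tb_tunnel_is_dma(tunnel))
			tb_tunnel_deactivate(tunnel);
		tb_tunnel_put(tunnel);
	}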
3080 struct tb_tunnel *tunnel, *n; in tb_resume_noirq() local
3105 list_for_each_entry_safe_reverse(tunnel, n, &tunnels, list) { in tb_resume_noirq()
3106 if (tb_tunnel_is_usb3(tunnel)) in tb_resume_noirq()
3108 tb_tunnel_deactivate(tunnel); in tb_resume_noirq()
3109 tb_tunnel_put(tunnel); in tb_resume_noirq()
3113 list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) { in tb_resume_noirq()
3115 if (tb_tunnel_is_usb3(tunnel)) { in tb_resume_noirq()
3120 tb_tunnel_activate(tunnel); in tb_resume_noirq()
3223 struct tb_tunnel *tunnel, *n; in tb_runtime_resume() local
3229 list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) in tb_runtime_resume()
3230 tb_tunnel_activate(tunnel); in tb_runtime_resume()