Lines matching "tunnel" in drivers/thunderbolt/tunnel.c
15 #include "tunnel.h"
75 * seconds after the tunnel is established. Since at least i915 can runtime
129 /* Add some credits for potential second DMA tunnel */ in tb_available_credits()
171 struct tb_tunnel *tunnel; in tb_tunnel_alloc() local
173 tunnel = kzalloc(sizeof(*tunnel), GFP_KERNEL); in tb_tunnel_alloc()
174 if (!tunnel) in tb_tunnel_alloc()
177 tunnel->paths = kcalloc(npaths, sizeof(tunnel->paths[0]), GFP_KERNEL); in tb_tunnel_alloc()
178 if (!tunnel->paths) { in tb_tunnel_alloc()
179 kfree(tunnel); in tb_tunnel_alloc()
183 INIT_LIST_HEAD(&tunnel->list); in tb_tunnel_alloc()
184 tunnel->tb = tb; in tb_tunnel_alloc()
185 tunnel->npaths = npaths; in tb_tunnel_alloc()
186 tunnel->type = type; in tb_tunnel_alloc()
187 kref_init(&tunnel->kref); in tb_tunnel_alloc()
189 return tunnel; in tb_tunnel_alloc()
192 static void tb_tunnel_get(struct tb_tunnel *tunnel) in tb_tunnel_get() argument
195 kref_get(&tunnel->kref); in tb_tunnel_get()
201 struct tb_tunnel *tunnel = container_of(kref, typeof(*tunnel), kref); in tb_tunnel_destroy() local
204 if (tunnel->destroy) in tb_tunnel_destroy()
205 tunnel->destroy(tunnel); in tb_tunnel_destroy()
207 for (i = 0; i < tunnel->npaths; i++) { in tb_tunnel_destroy()
208 if (tunnel->paths[i]) in tb_tunnel_destroy()
209 tb_path_free(tunnel->paths[i]); in tb_tunnel_destroy()
212 kfree(tunnel->paths); in tb_tunnel_destroy()
213 kfree(tunnel); in tb_tunnel_destroy()
216 void tb_tunnel_put(struct tb_tunnel *tunnel) in tb_tunnel_put() argument
219 kref_put(&tunnel->kref, tb_tunnel_destroy); in tb_tunnel_put()
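The trio above is a textbook kref lifetime: tb_tunnel_alloc() starts the count at one, tb_tunnel_get() takes an extra reference, and tb_tunnel_put() drops one, invoking tb_tunnel_destroy() when the count reaches zero. A minimal sketch of the borrow pattern inside tunnel.c (tb_tunnel_get() is static, so this only applies there; the helper names are hypothetical):

	/* Hypothetical async completion: releases the reference taken below */
	static void my_poll_done(struct tb_tunnel *tunnel)
	{
		/* ... finish using the tunnel ... */
		tb_tunnel_put(tunnel);
	}

	static void my_poll_start(struct tb_tunnel *tunnel)
	{
		/* Keep the tunnel alive until my_poll_done() runs */
		tb_tunnel_get(tunnel);
		/* ... queue work that eventually calls my_poll_done() ... */
	}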
223 static int tb_pci_set_ext_encapsulation(struct tb_tunnel *tunnel, bool enable) in tb_pci_set_ext_encapsulation() argument
225 struct tb_port *port = tb_upstream_port(tunnel->dst_port->sw); in tb_pci_set_ext_encapsulation()
229 if ((usb4_switch_version(tunnel->src_port->sw) < 2) || in tb_pci_set_ext_encapsulation()
230 (usb4_switch_version(tunnel->dst_port->sw) < 2)) in tb_pci_set_ext_encapsulation()
236 ret = usb4_pci_port_set_ext_encapsulation(tunnel->src_port, enable); in tb_pci_set_ext_encapsulation()
244 ret = usb4_pci_port_set_ext_encapsulation(tunnel->dst_port, enable); in tb_pci_set_ext_encapsulation()
252 tb_tunnel_dbg(tunnel, "extended encapsulation %s\n", in tb_pci_set_ext_encapsulation()
257 static int tb_pci_activate(struct tb_tunnel *tunnel, bool activate) in tb_pci_activate() argument
262 res = tb_pci_set_ext_encapsulation(tunnel, activate); in tb_pci_activate()
268 res = tb_pci_port_enable(tunnel->dst_port, activate); in tb_pci_activate()
270 res = tb_pci_port_enable(tunnel->src_port, activate); in tb_pci_activate()
276 res = tb_pci_port_enable(tunnel->src_port, activate); in tb_pci_activate()
281 tb_pci_port_enable(tunnel->dst_port, activate); in tb_pci_activate()
284 return activate ? 0 : tb_pci_set_ext_encapsulation(tunnel, activate); in tb_pci_activate()
343 * If the @down adapter is active, follows the tunnel to the PCIe upstream
344 * adapter and back. Returns the discovered tunnel or %NULL if there was
345 * no tunnel.
350 struct tb_tunnel *tunnel; in tb_tunnel_discover_pci() local
356 tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_PCI); in tb_tunnel_discover_pci()
357 if (!tunnel) in tb_tunnel_discover_pci()
360 tunnel->activate = tb_pci_activate; in tb_tunnel_discover_pci()
361 tunnel->src_port = down; in tb_tunnel_discover_pci()
369 &tunnel->dst_port, "PCIe Up", alloc_hopid); in tb_tunnel_discover_pci()
375 tunnel->paths[TB_PCI_PATH_UP] = path; in tb_tunnel_discover_pci()
376 if (tb_pci_init_path(tunnel->paths[TB_PCI_PATH_UP])) in tb_tunnel_discover_pci()
379 path = tb_path_discover(tunnel->dst_port, -1, down, TB_PCI_HOPID, NULL, in tb_tunnel_discover_pci()
383 tunnel->paths[TB_PCI_PATH_DOWN] = path; in tb_tunnel_discover_pci()
384 if (tb_pci_init_path(tunnel->paths[TB_PCI_PATH_DOWN])) in tb_tunnel_discover_pci()
387 /* Validate that the tunnel is complete */ in tb_tunnel_discover_pci()
388 if (!tb_port_is_pcie_up(tunnel->dst_port)) { in tb_tunnel_discover_pci()
389 tb_port_warn(tunnel->dst_port, in tb_tunnel_discover_pci()
394 if (down != tunnel->src_port) { in tb_tunnel_discover_pci()
395 tb_tunnel_warn(tunnel, "path is not complete, cleaning up\n"); in tb_tunnel_discover_pci()
399 if (!tb_pci_port_is_enabled(tunnel->dst_port)) { in tb_tunnel_discover_pci()
400 tb_tunnel_warn(tunnel, in tb_tunnel_discover_pci()
401 "tunnel is not fully activated, cleaning up\n"); in tb_tunnel_discover_pci()
405 tb_tunnel_dbg(tunnel, "discovered\n"); in tb_tunnel_discover_pci()
406 return tunnel; in tb_tunnel_discover_pci()
409 tb_tunnel_deactivate(tunnel); in tb_tunnel_discover_pci()
411 tb_tunnel_put(tunnel); in tb_tunnel_discover_pci()
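A hedged usage sketch for the discovery path above; the parameter list (domain, PCIe downstream adapter, HopID allocation flag) is inferred from the calls visible in the function and is an assumption:

	struct tb_tunnel *tunnel;

	tunnel = tb_tunnel_discover_pci(tb, down, true);
	if (!tunnel)
		return;	/* nothing active behind @down */
	/* ... record the discovered tunnel in the domain's bookkeeping ... */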
417 * tb_tunnel_alloc_pci() - allocate a PCI tunnel
422 * Allocate a PCI tunnel. The ports must be of type TB_TYPE_PCIE_UP and TB_TYPE_PCIE_DOWN.
430 struct tb_tunnel *tunnel; in tb_tunnel_alloc_pci() local
433 tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_PCI); in tb_tunnel_alloc_pci()
434 if (!tunnel) in tb_tunnel_alloc_pci()
437 tunnel->activate = tb_pci_activate; in tb_tunnel_alloc_pci()
438 tunnel->src_port = down; in tb_tunnel_alloc_pci()
439 tunnel->dst_port = up; in tb_tunnel_alloc_pci()
445 tunnel->paths[TB_PCI_PATH_DOWN] = path; in tb_tunnel_alloc_pci()
453 tunnel->paths[TB_PCI_PATH_UP] = path; in tb_tunnel_alloc_pci()
457 return tunnel; in tb_tunnel_alloc_pci()
460 tb_tunnel_put(tunnel); in tb_tunnel_alloc_pci()
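Combining allocation with the activation and reference helpers that appear later in this file, a caller creating a new PCIe tunnel might look like the following sketch (error handling abbreviated; @up/@down are the PCIe adapter pair):

	struct tb_tunnel *tunnel;
	int ret;

	tunnel = tb_tunnel_alloc_pci(tb, up, down);
	if (!tunnel)
		return -ENOMEM;

	ret = tb_tunnel_activate(tunnel);
	if (ret) {
		tb_tunnel_put(tunnel);	/* drops the initial kref */
		return ret;
	}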
706 static int tb_dp_xchg_caps(struct tb_tunnel *tunnel) in tb_dp_xchg_caps() argument
709 struct tb_port *out = tunnel->dst_port; in tb_dp_xchg_caps()
710 struct tb_port *in = tunnel->src_port; in tb_dp_xchg_caps()
747 tb_tunnel_dbg(tunnel, in tb_dp_xchg_caps()
752 * If the tunnel bandwidth is limited (max_bw is set) then see in tb_dp_xchg_caps()
758 tb_tunnel_dbg(tunnel, in tb_dp_xchg_caps()
762 if (tb_tunnel_direction_downstream(tunnel)) in tb_dp_xchg_caps()
763 max_bw = tunnel->max_down; in tb_dp_xchg_caps()
765 max_bw = tunnel->max_up; in tb_dp_xchg_caps()
774 tb_tunnel_info(tunnel, "not enough bandwidth\n"); in tb_dp_xchg_caps()
779 tb_tunnel_dbg(tunnel, in tb_dp_xchg_caps()
798 tb_tunnel_dbg(tunnel, "disabling LTTPR\n"); in tb_dp_xchg_caps()
805 static int tb_dp_bandwidth_alloc_mode_enable(struct tb_tunnel *tunnel) in tb_dp_bandwidth_alloc_mode_enable() argument
808 struct tb_port *out = tunnel->dst_port; in tb_dp_bandwidth_alloc_mode_enable()
809 struct tb_port *in = tunnel->src_port; in tb_dp_bandwidth_alloc_mode_enable()
848 tb_tunnel_dbg(tunnel, "non-reduced bandwidth %u Mb/s x%u = %u Mb/s\n", in tb_dp_bandwidth_alloc_mode_enable()
864 tb_tunnel_dbg(tunnel, in tb_dp_bandwidth_alloc_mode_enable()
872 tb_tunnel_dbg(tunnel, "granularity %d Mb/s\n", granularity); in tb_dp_bandwidth_alloc_mode_enable()
887 if (tb_tunnel_direction_downstream(tunnel)) in tb_dp_bandwidth_alloc_mode_enable()
888 estimated_bw = tunnel->max_down; in tb_dp_bandwidth_alloc_mode_enable()
890 estimated_bw = tunnel->max_up; in tb_dp_bandwidth_alloc_mode_enable()
892 tb_tunnel_dbg(tunnel, "estimated bandwidth %d Mb/s\n", estimated_bw); in tb_dp_bandwidth_alloc_mode_enable()
903 tb_tunnel_dbg(tunnel, "bandwidth allocation mode enabled\n"); in tb_dp_bandwidth_alloc_mode_enable()
907 static int tb_dp_pre_activate(struct tb_tunnel *tunnel) in tb_dp_pre_activate() argument
909 struct tb_port *in = tunnel->src_port; in tb_dp_pre_activate()
914 ret = tb_dp_xchg_caps(tunnel); in tb_dp_pre_activate()
924 tb_tunnel_dbg(tunnel, "bandwidth allocation mode supported\n"); in tb_dp_pre_activate()
930 return tb_dp_bandwidth_alloc_mode_enable(tunnel); in tb_dp_pre_activate()
933 static void tb_dp_post_deactivate(struct tb_tunnel *tunnel) in tb_dp_post_deactivate() argument
935 struct tb_port *in = tunnel->src_port; in tb_dp_post_deactivate()
941 tb_tunnel_dbg(tunnel, "bandwidth allocation mode disabled\n"); in tb_dp_post_deactivate()
951 static int tb_dp_wait_dprx(struct tb_tunnel *tunnel, int timeout_msec) in tb_dp_wait_dprx() argument
954 struct tb_port *in = tunnel->src_port; in tb_dp_wait_dprx()
958 * active tunnel. in tb_dp_wait_dprx()
975 tb_tunnel_dbg(tunnel, "DPRX read timeout\n"); in tb_dp_wait_dprx()
981 struct tb_tunnel *tunnel = container_of(work, typeof(*tunnel), dprx_work.work); in tb_dp_dprx_work() local
982 struct tb *tb = tunnel->tb; in tb_dp_dprx_work()
984 if (!tunnel->dprx_canceled) { in tb_dp_dprx_work()
986 if (tb_dp_is_usb4(tunnel->src_port->sw) && in tb_dp_dprx_work()
987 tb_dp_wait_dprx(tunnel, TB_DPRX_WAIT_TIMEOUT)) { in tb_dp_dprx_work()
988 if (ktime_before(ktime_get(), tunnel->dprx_timeout)) { in tb_dp_dprx_work()
989 queue_delayed_work(tb->wq, &tunnel->dprx_work, in tb_dp_dprx_work()
995 tunnel->state = TB_TUNNEL_ACTIVE; in tb_dp_dprx_work()
1000 if (tunnel->callback) in tb_dp_dprx_work()
1001 tunnel->callback(tunnel, tunnel->callback_data); in tb_dp_dprx_work()
1004 static int tb_dp_dprx_start(struct tb_tunnel *tunnel) in tb_dp_dprx_start() argument
1007 * Bump up the reference to keep the tunnel around. It will be in tb_dp_dprx_start()
1008 * dropped in tb_dp_dprx_stop() once the tunnel is deactivated. in tb_dp_dprx_start()
1010 tb_tunnel_get(tunnel); in tb_dp_dprx_start()
1012 tunnel->dprx_started = true; in tb_dp_dprx_start()
1014 if (tunnel->callback) { in tb_dp_dprx_start()
1015 tunnel->dprx_timeout = dprx_timeout_to_ktime(dprx_timeout); in tb_dp_dprx_start()
1016 queue_delayed_work(tunnel->tb->wq, &tunnel->dprx_work, 0); in tb_dp_dprx_start()
1020 return tb_dp_is_usb4(tunnel->src_port->sw) ? in tb_dp_dprx_start()
1021 tb_dp_wait_dprx(tunnel, dprx_timeout) : 0; in tb_dp_dprx_start()
1024 static void tb_dp_dprx_stop(struct tb_tunnel *tunnel) in tb_dp_dprx_stop() argument
1026 if (tunnel->dprx_started) { in tb_dp_dprx_stop()
1027 tunnel->dprx_started = false; in tb_dp_dprx_stop()
1028 tunnel->dprx_canceled = true; in tb_dp_dprx_stop()
1029 cancel_delayed_work(&tunnel->dprx_work); in tb_dp_dprx_stop()
1030 tb_tunnel_put(tunnel); in tb_dp_dprx_stop()
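Stripped of the DP specifics, the DPRX machinery above is a standard self-requeuing delayed-work pattern: hold a reference while the work is queued, requeue until a deadline, then complete and notify. Roughly like the sketch below; the done_yet() predicate and the 50 ms period are illustrative only:

	static void poll_work_fn(struct work_struct *work)
	{
		struct tb_tunnel *tunnel = container_of(work, typeof(*tunnel),
							dprx_work.work);

		if (!done_yet(tunnel) &&
		    ktime_before(ktime_get(), tunnel->dprx_timeout)) {
			/* Not ready yet: try again later */
			queue_delayed_work(tunnel->tb->wq, &tunnel->dprx_work,
					   msecs_to_jiffies(50));
			return;
		}
		/* Ready (or deadline passed): settle state and notify */
		tunnel->state = TB_TUNNEL_ACTIVE;
		if (tunnel->callback)
			tunnel->callback(tunnel, tunnel->callback_data);
	}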
1034 static int tb_dp_activate(struct tb_tunnel *tunnel, bool active) in tb_dp_activate() argument
1042 paths = tunnel->paths; in tb_dp_activate()
1045 tb_dp_port_set_hops(tunnel->src_port, in tb_dp_activate()
1050 tb_dp_port_set_hops(tunnel->dst_port, in tb_dp_activate()
1055 tb_dp_dprx_stop(tunnel); in tb_dp_activate()
1056 tb_dp_port_hpd_clear(tunnel->src_port); in tb_dp_activate()
1057 tb_dp_port_set_hops(tunnel->src_port, 0, 0, 0); in tb_dp_activate()
1058 if (tb_port_is_dpout(tunnel->dst_port)) in tb_dp_activate()
1059 tb_dp_port_set_hops(tunnel->dst_port, 0, 0, 0); in tb_dp_activate()
1062 ret = tb_dp_port_enable(tunnel->src_port, active); in tb_dp_activate()
1066 if (tb_port_is_dpout(tunnel->dst_port)) { in tb_dp_activate()
1067 ret = tb_dp_port_enable(tunnel->dst_port, active); in tb_dp_activate()
1072 return active ? tb_dp_dprx_start(tunnel) : 0; in tb_dp_activate()
1077 * @tunnel: DP tunnel to check
1080 * Returns the maximum possible bandwidth for this tunnel in Mb/s.
1082 static int tb_dp_bandwidth_mode_maximum_bandwidth(struct tb_tunnel *tunnel, in tb_dp_bandwidth_mode_maximum_bandwidth() argument
1085 struct tb_port *in = tunnel->src_port; in tb_dp_bandwidth_mode_maximum_bandwidth()
1116 static int tb_dp_bandwidth_mode_consumed_bandwidth(struct tb_tunnel *tunnel, in tb_dp_bandwidth_mode_consumed_bandwidth() argument
1120 struct tb_port *in = tunnel->src_port; in tb_dp_bandwidth_mode_consumed_bandwidth()
1126 if (!tunnel->bw_mode) in tb_dp_bandwidth_mode_consumed_bandwidth()
1135 ret = tb_dp_bandwidth_mode_maximum_bandwidth(tunnel, &max_bw_rounded); in tb_dp_bandwidth_mode_consumed_bandwidth()
1141 if (tb_tunnel_direction_downstream(tunnel)) { in tb_dp_bandwidth_mode_consumed_bandwidth()
1152 static int tb_dp_allocated_bandwidth(struct tb_tunnel *tunnel, int *allocated_up, in tb_dp_allocated_bandwidth() argument
1155 struct tb_port *in = tunnel->src_port; in tb_dp_allocated_bandwidth()
1161 if (usb4_dp_port_bandwidth_mode_enabled(in) && tunnel->bw_mode) { in tb_dp_allocated_bandwidth()
1169 ret = tb_dp_bandwidth_mode_maximum_bandwidth(tunnel, in tb_dp_allocated_bandwidth()
1176 if (tb_tunnel_direction_downstream(tunnel)) { in tb_dp_allocated_bandwidth()
1186 return tunnel->consumed_bandwidth(tunnel, allocated_up, in tb_dp_allocated_bandwidth()
1190 static int tb_dp_alloc_bandwidth(struct tb_tunnel *tunnel, int *alloc_up, in tb_dp_alloc_bandwidth() argument
1193 struct tb_port *in = tunnel->src_port; in tb_dp_alloc_bandwidth()
1199 ret = tb_dp_bandwidth_mode_maximum_bandwidth(tunnel, &max_bw_rounded); in tb_dp_alloc_bandwidth()
1203 if (tb_tunnel_direction_downstream(tunnel)) { in tb_dp_alloc_bandwidth()
1221 tunnel->bw_mode = true; in tb_dp_alloc_bandwidth()
1225 /* Read cap from tunnel DP IN */
1226 static int tb_dp_read_cap(struct tb_tunnel *tunnel, unsigned int cap, u32 *rate, in tb_dp_read_cap() argument
1229 struct tb_port *in = tunnel->src_port; in tb_dp_read_cap()
1240 tb_tunnel_WARN(tunnel, "invalid capability index %#x\n", cap); in tb_dp_read_cap()
1257 static int tb_dp_maximum_bandwidth(struct tb_tunnel *tunnel, int *max_up, in tb_dp_maximum_bandwidth() argument
1262 if (!usb4_dp_port_bandwidth_mode_enabled(tunnel->src_port)) in tb_dp_maximum_bandwidth()
1265 ret = tb_dp_bandwidth_mode_maximum_bandwidth(tunnel, NULL); in tb_dp_maximum_bandwidth()
1269 if (tb_tunnel_direction_downstream(tunnel)) { in tb_dp_maximum_bandwidth()
1280 static int tb_dp_consumed_bandwidth(struct tb_tunnel *tunnel, int *consumed_up, in tb_dp_consumed_bandwidth() argument
1283 const struct tb_switch *sw = tunnel->src_port->sw; in tb_dp_consumed_bandwidth()
1288 ret = tb_dp_wait_dprx(tunnel, 0); in tb_dp_consumed_bandwidth()
1293 * tunnel consumes as much as it had in tb_dp_consumed_bandwidth()
1296 ret = tb_dp_read_cap(tunnel, DP_REMOTE_CAP, in tb_dp_consumed_bandwidth()
1309 ret = tb_dp_bandwidth_mode_consumed_bandwidth(tunnel, consumed_up, in tb_dp_consumed_bandwidth()
1317 ret = tb_dp_read_cap(tunnel, DP_COMMON_CAP, &rate, &lanes); in tb_dp_consumed_bandwidth()
1322 ret = tb_dp_read_cap(tunnel, DP_REMOTE_CAP, &rate, &lanes); in tb_dp_consumed_bandwidth()
1332 if (tb_tunnel_direction_downstream(tunnel)) { in tb_dp_consumed_bandwidth()
1425 static void tb_dp_dump(struct tb_tunnel *tunnel) in tb_dp_dump() argument
1430 in = tunnel->src_port; in tb_dp_dump()
1431 out = tunnel->dst_port; in tb_dp_dump()
1440 tb_tunnel_dbg(tunnel, in tb_dp_dump()
1451 tb_tunnel_dbg(tunnel, in tb_dp_dump()
1462 tb_tunnel_dbg(tunnel, "reduced bandwidth %u Mb/s x%u = %u Mb/s\n", in tb_dp_dump()
1472 * If the @in adapter is active, follows the tunnel to the DP out adapter
1473 * and back.
1476 * Return: DP tunnel or %NULL if no tunnel found.
1481 struct tb_tunnel *tunnel; in tb_tunnel_discover_dp() local
1488 tunnel = tb_tunnel_alloc(tb, 3, TB_TUNNEL_DP); in tb_tunnel_discover_dp()
1489 if (!tunnel) in tb_tunnel_discover_dp()
1492 tunnel->pre_activate = tb_dp_pre_activate; in tb_tunnel_discover_dp()
1493 tunnel->activate = tb_dp_activate; in tb_tunnel_discover_dp()
1494 tunnel->post_deactivate = tb_dp_post_deactivate; in tb_tunnel_discover_dp()
1495 tunnel->maximum_bandwidth = tb_dp_maximum_bandwidth; in tb_tunnel_discover_dp()
1496 tunnel->allocated_bandwidth = tb_dp_allocated_bandwidth; in tb_tunnel_discover_dp()
1497 tunnel->alloc_bandwidth = tb_dp_alloc_bandwidth; in tb_tunnel_discover_dp()
1498 tunnel->consumed_bandwidth = tb_dp_consumed_bandwidth; in tb_tunnel_discover_dp()
1499 tunnel->src_port = in; in tb_tunnel_discover_dp()
1502 &tunnel->dst_port, "Video", alloc_hopid); in tb_tunnel_discover_dp()
1508 tunnel->paths[TB_DP_VIDEO_PATH_OUT] = path; in tb_tunnel_discover_dp()
1509 if (tb_dp_init_video_path(tunnel->paths[TB_DP_VIDEO_PATH_OUT], false)) in tb_tunnel_discover_dp()
1516 tunnel->paths[TB_DP_AUX_PATH_OUT] = path; in tb_tunnel_discover_dp()
1517 tb_dp_init_aux_path(tunnel->paths[TB_DP_AUX_PATH_OUT], false); in tb_tunnel_discover_dp()
1519 path = tb_path_discover(tunnel->dst_port, -1, in, TB_DP_AUX_RX_HOPID, in tb_tunnel_discover_dp()
1523 tunnel->paths[TB_DP_AUX_PATH_IN] = path; in tb_tunnel_discover_dp()
1524 tb_dp_init_aux_path(tunnel->paths[TB_DP_AUX_PATH_IN], false); in tb_tunnel_discover_dp()
1526 /* Validate that the tunnel is complete */ in tb_tunnel_discover_dp()
1527 if (!tb_port_is_dpout(tunnel->dst_port)) { in tb_tunnel_discover_dp()
1532 if (!tb_dp_port_is_enabled(tunnel->dst_port)) in tb_tunnel_discover_dp()
1535 if (!tb_dp_port_hpd_is_active(tunnel->dst_port)) in tb_tunnel_discover_dp()
1538 if (port != tunnel->src_port) { in tb_tunnel_discover_dp()
1539 tb_tunnel_warn(tunnel, "path is not complete, cleaning up\n"); in tb_tunnel_discover_dp()
1543 tb_dp_dump(tunnel); in tb_tunnel_discover_dp()
1545 tb_tunnel_dbg(tunnel, "discovered\n"); in tb_tunnel_discover_dp()
1546 return tunnel; in tb_tunnel_discover_dp()
1549 tb_tunnel_deactivate(tunnel); in tb_tunnel_discover_dp()
1551 tb_tunnel_put(tunnel); in tb_tunnel_discover_dp()
1557 * tb_tunnel_alloc_dp() - allocate a DisplayPort tunnel
1562 * @max_up: Maximum available upstream bandwidth for the DP tunnel.
1564 * @max_down: Maximum available downstream bandwidth for the DP tunnel.
1566 * @callback: Optional callback that is called when the DP tunnel is
1570 * Allocates a tunnel between @in and @out that is capable of tunneling
1572 * after tb_tunnel_activate() once the tunnel has been fully activated.
1585 struct tb_tunnel *tunnel; in tb_tunnel_alloc_dp() local
1593 tunnel = tb_tunnel_alloc(tb, 3, TB_TUNNEL_DP); in tb_tunnel_alloc_dp()
1594 if (!tunnel) in tb_tunnel_alloc_dp()
1597 tunnel->pre_activate = tb_dp_pre_activate; in tb_tunnel_alloc_dp()
1598 tunnel->activate = tb_dp_activate; in tb_tunnel_alloc_dp()
1599 tunnel->post_deactivate = tb_dp_post_deactivate; in tb_tunnel_alloc_dp()
1600 tunnel->maximum_bandwidth = tb_dp_maximum_bandwidth; in tb_tunnel_alloc_dp()
1601 tunnel->allocated_bandwidth = tb_dp_allocated_bandwidth; in tb_tunnel_alloc_dp()
1602 tunnel->alloc_bandwidth = tb_dp_alloc_bandwidth; in tb_tunnel_alloc_dp()
1603 tunnel->consumed_bandwidth = tb_dp_consumed_bandwidth; in tb_tunnel_alloc_dp()
1604 tunnel->src_port = in; in tb_tunnel_alloc_dp()
1605 tunnel->dst_port = out; in tb_tunnel_alloc_dp()
1606 tunnel->max_up = max_up; in tb_tunnel_alloc_dp()
1607 tunnel->max_down = max_down; in tb_tunnel_alloc_dp()
1608 tunnel->callback = callback; in tb_tunnel_alloc_dp()
1609 tunnel->callback_data = callback_data; in tb_tunnel_alloc_dp()
1610 INIT_DELAYED_WORK(&tunnel->dprx_work, tb_dp_dprx_work); in tb_tunnel_alloc_dp()
1612 paths = tunnel->paths; in tb_tunnel_alloc_dp()
1636 return tunnel; in tb_tunnel_alloc_dp()
1639 tb_tunnel_put(tunnel); in tb_tunnel_alloc_dp()
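Because DP activation may complete asynchronously (the DPRX capabilities read), a caller that passes @callback is notified once the tunnel settles. A sketch, assuming the full parameter list is (tb, in, out, link_nr, max_up, max_down, callback, callback_data):

	/* Hypothetical completion handler */
	static void my_dp_done(struct tb_tunnel *tunnel, void *data)
	{
		/* tunnel->state is final here; update bandwidth bookkeeping */
	}

	tunnel = tb_tunnel_alloc_dp(tb, in, out, 0, max_up, max_down,
				    my_dp_done, NULL);
	if (!tunnel)
		return -ENOMEM;
	ret = tb_tunnel_activate(tunnel);
	if (ret && ret != -EINPROGRESS) {
		tb_tunnel_put(tunnel);
		return ret;
	}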
1704 * We don't tunnel other traffic over this link so we can use all in tb_dma_init_rx_path()
1767 static void tb_dma_destroy(struct tb_tunnel *tunnel) in tb_dma_destroy() argument
1771 for (i = 0; i < tunnel->npaths; i++) { in tb_dma_destroy()
1772 if (!tunnel->paths[i]) in tb_dma_destroy()
1774 tb_dma_destroy_path(tunnel->paths[i]); in tb_dma_destroy()
1779 * tb_tunnel_alloc_dma() - allocate a DMA tunnel
1797 struct tb_tunnel *tunnel; in tb_tunnel_alloc_dma() local
1814 tunnel = tb_tunnel_alloc(tb, npaths, TB_TUNNEL_DMA); in tb_tunnel_alloc_dma()
1815 if (!tunnel) in tb_tunnel_alloc_dma()
1818 tunnel->src_port = nhi; in tb_tunnel_alloc_dma()
1819 tunnel->dst_port = dst; in tb_tunnel_alloc_dma()
1820 tunnel->destroy = tb_dma_destroy; in tb_tunnel_alloc_dma()
1829 tunnel->paths[i++] = path; in tb_tunnel_alloc_dma()
1831 tb_tunnel_dbg(tunnel, "not enough buffers for RX path\n"); in tb_tunnel_alloc_dma()
1841 tunnel->paths[i++] = path; in tb_tunnel_alloc_dma()
1843 tb_tunnel_dbg(tunnel, "not enough buffers for TX path\n"); in tb_tunnel_alloc_dma()
1848 return tunnel; in tb_tunnel_alloc_dma()
1851 tb_tunnel_put(tunnel); in tb_tunnel_alloc_dma()
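A sketch of DMA (XDomain) tunnel setup; the ordering of the four path/ring arguments mirrors the @transmit_path/@transmit_ring/@receive_path/@receive_ring convention of tb_tunnel_match_dma() below and is an assumption:

	tunnel = tb_tunnel_alloc_dma(tb, nhi, dst, transmit_path,
				     transmit_ring, receive_path,
				     receive_ring);
	if (!tunnel)
		return -ENOMEM;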
1856 * tb_tunnel_match_dma() - Match DMA tunnel
1857 * @tunnel: Tunnel to match
1865 * This function can be used to match a specific DMA tunnel when there are multiple DMA tunnels going through the same XDomain connection.
1869 bool tb_tunnel_match_dma(const struct tb_tunnel *tunnel, int transmit_path, in tb_tunnel_match_dma() argument
1878 for (i = 0; i < tunnel->npaths; i++) { in tb_tunnel_match_dma()
1879 const struct tb_path *path = tunnel->paths[i]; in tb_tunnel_match_dma()
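Matching is wildcard-friendly: by this helper's kernel-doc convention a negative value ignores that field (an assumption here). For example, to match on the receive side only:

	if (tb_tunnel_match_dma(tunnel, -1, -1, receive_path, receive_ring)) {
		/* found the tunnel carrying this receive path/ring */
	}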
1932 static int tb_usb3_pre_activate(struct tb_tunnel *tunnel) in tb_usb3_pre_activate() argument
1934 tb_tunnel_dbg(tunnel, "allocating initial bandwidth %d/%d Mb/s\n", in tb_usb3_pre_activate()
1935 tunnel->allocated_up, tunnel->allocated_down); in tb_usb3_pre_activate()
1937 return usb4_usb3_port_allocate_bandwidth(tunnel->src_port, in tb_usb3_pre_activate()
1938 &tunnel->allocated_up, in tb_usb3_pre_activate()
1939 &tunnel->allocated_down); in tb_usb3_pre_activate()
1942 static int tb_usb3_activate(struct tb_tunnel *tunnel, bool activate) in tb_usb3_activate() argument
1946 res = tb_usb3_port_enable(tunnel->src_port, activate); in tb_usb3_activate()
1950 if (tb_port_is_usb3_up(tunnel->dst_port)) in tb_usb3_activate()
1951 return tb_usb3_port_enable(tunnel->dst_port, activate); in tb_usb3_activate()
1956 static int tb_usb3_consumed_bandwidth(struct tb_tunnel *tunnel, in tb_usb3_consumed_bandwidth() argument
1959 struct tb_port *port = tb_upstream_port(tunnel->dst_port->sw); in tb_usb3_consumed_bandwidth()
1966 *consumed_up = tunnel->allocated_up * in tb_usb3_consumed_bandwidth()
1968 *consumed_down = tunnel->allocated_down * in tb_usb3_consumed_bandwidth()
1979 static int tb_usb3_release_unused_bandwidth(struct tb_tunnel *tunnel) in tb_usb3_release_unused_bandwidth() argument
1983 ret = usb4_usb3_port_release_bandwidth(tunnel->src_port, in tb_usb3_release_unused_bandwidth()
1984 &tunnel->allocated_up, in tb_usb3_release_unused_bandwidth()
1985 &tunnel->allocated_down); in tb_usb3_release_unused_bandwidth()
1989 tb_tunnel_dbg(tunnel, "decreased bandwidth allocation to %d/%d Mb/s\n", in tb_usb3_release_unused_bandwidth()
1990 tunnel->allocated_up, tunnel->allocated_down); in tb_usb3_release_unused_bandwidth()
1994 static void tb_usb3_reclaim_available_bandwidth(struct tb_tunnel *tunnel, in tb_usb3_reclaim_available_bandwidth() argument
2000 ret = tb_usb3_max_link_rate(tunnel->dst_port, tunnel->src_port); in tb_usb3_reclaim_available_bandwidth()
2002 tb_tunnel_warn(tunnel, "failed to read maximum link rate\n"); in tb_usb3_reclaim_available_bandwidth()
2013 if (tunnel->allocated_up >= max_rate && in tb_usb3_reclaim_available_bandwidth()
2014 tunnel->allocated_down >= max_rate) in tb_usb3_reclaim_available_bandwidth()
2019 if (allocate_up < tunnel->allocated_up) in tb_usb3_reclaim_available_bandwidth()
2020 allocate_up = tunnel->allocated_up; in tb_usb3_reclaim_available_bandwidth()
2023 if (allocate_down < tunnel->allocated_down) in tb_usb3_reclaim_available_bandwidth()
2024 allocate_down = tunnel->allocated_down; in tb_usb3_reclaim_available_bandwidth()
2027 if (allocate_up == tunnel->allocated_up && in tb_usb3_reclaim_available_bandwidth()
2028 allocate_down == tunnel->allocated_down) in tb_usb3_reclaim_available_bandwidth()
2031 ret = usb4_usb3_port_allocate_bandwidth(tunnel->src_port, &allocate_up, in tb_usb3_reclaim_available_bandwidth()
2034 tb_tunnel_info(tunnel, "failed to allocate bandwidth\n"); in tb_usb3_reclaim_available_bandwidth()
2038 tunnel->allocated_up = allocate_up; in tb_usb3_reclaim_available_bandwidth()
2039 *available_up -= tunnel->allocated_up; in tb_usb3_reclaim_available_bandwidth()
2041 tunnel->allocated_down = allocate_down; in tb_usb3_reclaim_available_bandwidth()
2042 *available_down -= tunnel->allocated_down; in tb_usb3_reclaim_available_bandwidth()
2044 tb_tunnel_dbg(tunnel, "increased bandwidth allocation to %d/%d Mb/s\n", in tb_usb3_reclaim_available_bandwidth()
2045 tunnel->allocated_up, tunnel->allocated_down); in tb_usb3_reclaim_available_bandwidth()
2088 * If the @down adapter is active, follows the tunnel to the USB3 upstream
2089 * adapter and back. Returns the discovered tunnel or %NULL if there was
2090 * no tunnel.
2095 struct tb_tunnel *tunnel; in tb_tunnel_discover_usb3() local
2101 tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_USB3); in tb_tunnel_discover_usb3()
2102 if (!tunnel) in tb_tunnel_discover_usb3()
2105 tunnel->activate = tb_usb3_activate; in tb_tunnel_discover_usb3()
2106 tunnel->src_port = down; in tb_tunnel_discover_usb3()
2114 &tunnel->dst_port, "USB3 Down", alloc_hopid); in tb_tunnel_discover_usb3()
2120 tunnel->paths[TB_USB3_PATH_DOWN] = path; in tb_tunnel_discover_usb3()
2121 tb_usb3_init_path(tunnel->paths[TB_USB3_PATH_DOWN]); in tb_tunnel_discover_usb3()
2123 path = tb_path_discover(tunnel->dst_port, -1, down, TB_USB3_HOPID, NULL, in tb_tunnel_discover_usb3()
2127 tunnel->paths[TB_USB3_PATH_UP] = path; in tb_tunnel_discover_usb3()
2128 tb_usb3_init_path(tunnel->paths[TB_USB3_PATH_UP]); in tb_tunnel_discover_usb3()
2130 /* Validate that the tunnel is complete */ in tb_tunnel_discover_usb3()
2131 if (!tb_port_is_usb3_up(tunnel->dst_port)) { in tb_tunnel_discover_usb3()
2132 tb_port_warn(tunnel->dst_port, in tb_tunnel_discover_usb3()
2137 if (down != tunnel->src_port) { in tb_tunnel_discover_usb3()
2138 tb_tunnel_warn(tunnel, "path is not complete, cleaning up\n"); in tb_tunnel_discover_usb3()
2142 if (!tb_usb3_port_is_enabled(tunnel->dst_port)) { in tb_tunnel_discover_usb3()
2143 tb_tunnel_warn(tunnel, in tb_tunnel_discover_usb3()
2144 "tunnel is not fully activated, cleaning up\n"); in tb_tunnel_discover_usb3()
2153 * hop tunnel. in tb_tunnel_discover_usb3()
2156 &tunnel->allocated_up, &tunnel->allocated_down); in tb_tunnel_discover_usb3()
2160 tb_tunnel_dbg(tunnel, "currently allocated bandwidth %d/%d Mb/s\n", in tb_tunnel_discover_usb3()
2161 tunnel->allocated_up, tunnel->allocated_down); in tb_tunnel_discover_usb3()
2163 tunnel->pre_activate = tb_usb3_pre_activate; in tb_tunnel_discover_usb3()
2164 tunnel->consumed_bandwidth = tb_usb3_consumed_bandwidth; in tb_tunnel_discover_usb3()
2165 tunnel->release_unused_bandwidth = in tb_tunnel_discover_usb3()
2167 tunnel->reclaim_available_bandwidth = in tb_tunnel_discover_usb3()
2171 tb_tunnel_dbg(tunnel, "discovered\n"); in tb_tunnel_discover_usb3()
2172 return tunnel; in tb_tunnel_discover_usb3()
2175 tb_tunnel_deactivate(tunnel); in tb_tunnel_discover_usb3()
2177 tb_tunnel_put(tunnel); in tb_tunnel_discover_usb3()
2183 * tb_tunnel_alloc_usb3() - allocate a USB3 tunnel
2187 * @max_up: Maximum available upstream bandwidth for the USB3 tunnel.
2189 * @max_down: Maximum available downstream bandwidth for the USB3 tunnel.
2192 * Allocate a USB3 tunnel. The ports must be of type @TB_TYPE_USB3_UP and @TB_TYPE_USB3_DOWN.
2201 struct tb_tunnel *tunnel; in tb_tunnel_alloc_usb3() local
2216 tb_port_dbg(up, "maximum required bandwidth for USB3 tunnel %d Mb/s\n", in tb_tunnel_alloc_usb3()
2220 tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_USB3); in tb_tunnel_alloc_usb3()
2221 if (!tunnel) in tb_tunnel_alloc_usb3()
2224 tunnel->activate = tb_usb3_activate; in tb_tunnel_alloc_usb3()
2225 tunnel->src_port = down; in tb_tunnel_alloc_usb3()
2226 tunnel->dst_port = up; in tb_tunnel_alloc_usb3()
2227 tunnel->max_up = max_up; in tb_tunnel_alloc_usb3()
2228 tunnel->max_down = max_down; in tb_tunnel_alloc_usb3()
2233 tb_tunnel_put(tunnel); in tb_tunnel_alloc_usb3()
2237 tunnel->paths[TB_USB3_PATH_DOWN] = path; in tb_tunnel_alloc_usb3()
2242 tb_tunnel_put(tunnel); in tb_tunnel_alloc_usb3()
2246 tunnel->paths[TB_USB3_PATH_UP] = path; in tb_tunnel_alloc_usb3()
2249 tunnel->allocated_up = min(max_rate, max_up); in tb_tunnel_alloc_usb3()
2250 tunnel->allocated_down = min(max_rate, max_down); in tb_tunnel_alloc_usb3()
2252 tunnel->pre_activate = tb_usb3_pre_activate; in tb_tunnel_alloc_usb3()
2253 tunnel->consumed_bandwidth = tb_usb3_consumed_bandwidth; in tb_tunnel_alloc_usb3()
2254 tunnel->release_unused_bandwidth = in tb_tunnel_alloc_usb3()
2256 tunnel->reclaim_available_bandwidth = in tb_tunnel_alloc_usb3()
2260 return tunnel; in tb_tunnel_alloc_usb3()
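A usage sketch for USB3 tunnel creation; note that allocated_up/allocated_down are pre-seeded above from the link rate (clamped to @max_up/@max_down) so that tb_usb3_pre_activate() can request that bandwidth during activation:

	tunnel = tb_tunnel_alloc_usb3(tb, up, down, max_up, max_down);
	if (!tunnel)
		return -ENOMEM;
	ret = tb_tunnel_activate(tunnel);
	if (ret) {
		tb_tunnel_put(tunnel);
		return ret;
	}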
2265 * @tunnel: Tunnel to check
2267 bool tb_tunnel_is_invalid(struct tb_tunnel *tunnel) in tb_tunnel_is_invalid() argument
2271 for (i = 0; i < tunnel->npaths; i++) { in tb_tunnel_is_invalid()
2272 WARN_ON(!tunnel->paths[i]->activated); in tb_tunnel_is_invalid()
2273 if (tb_path_is_invalid(tunnel->paths[i])) in tb_tunnel_is_invalid()
2281 * tb_tunnel_activate() - activate a tunnel
2282 * @tunnel: Tunnel to activate
2285 * Specifically returns %-EINPROGRESS if the tunnel activation is still in progress.
2289 int tb_tunnel_activate(struct tb_tunnel *tunnel) in tb_tunnel_activate() argument
2293 tb_tunnel_dbg(tunnel, "activating\n"); in tb_tunnel_activate()
2299 for (i = 0; i < tunnel->npaths; i++) { in tb_tunnel_activate()
2300 if (tunnel->paths[i]->activated) { in tb_tunnel_activate()
2301 tb_path_deactivate(tunnel->paths[i]); in tb_tunnel_activate()
2302 tunnel->paths[i]->activated = false; in tb_tunnel_activate()
2306 tunnel->state = TB_TUNNEL_ACTIVATING; in tb_tunnel_activate()
2308 if (tunnel->pre_activate) { in tb_tunnel_activate()
2309 res = tunnel->pre_activate(tunnel); in tb_tunnel_activate()
2314 for (i = 0; i < tunnel->npaths; i++) { in tb_tunnel_activate()
2315 res = tb_path_activate(tunnel->paths[i]); in tb_tunnel_activate()
2320 if (tunnel->activate) { in tb_tunnel_activate()
2321 res = tunnel->activate(tunnel, true); in tb_tunnel_activate()
2329 tunnel->state = TB_TUNNEL_ACTIVE; in tb_tunnel_activate()
2333 tb_tunnel_warn(tunnel, "activation failed\n"); in tb_tunnel_activate()
2334 tb_tunnel_deactivate(tunnel); in tb_tunnel_activate()
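Callers should treat %-EINPROGRESS as "activation pending" rather than failure; completion is then signaled through the tunnel's callback (DP only). A sketch:

	ret = tb_tunnel_activate(tunnel);
	switch (ret) {
	case 0:
		/* fully active */
		break;
	case -EINPROGRESS:
		/* DP: tunnel->callback fires once DPRX is ready */
		break;
	default:
		tb_tunnel_put(tunnel);	/* activation already cleaned up paths */
		return ret;
	}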
2339 * tb_tunnel_deactivate() - deactivate a tunnel
2340 * @tunnel: Tunnel to deactivate
2342 void tb_tunnel_deactivate(struct tb_tunnel *tunnel) in tb_tunnel_deactivate() argument
2346 tb_tunnel_dbg(tunnel, "deactivating\n"); in tb_tunnel_deactivate()
2348 if (tunnel->activate) in tb_tunnel_deactivate()
2349 tunnel->activate(tunnel, false); in tb_tunnel_deactivate()
2351 for (i = 0; i < tunnel->npaths; i++) { in tb_tunnel_deactivate()
2352 if (tunnel->paths[i] && tunnel->paths[i]->activated) in tb_tunnel_deactivate()
2353 tb_path_deactivate(tunnel->paths[i]); in tb_tunnel_deactivate()
2356 if (tunnel->post_deactivate) in tb_tunnel_deactivate()
2357 tunnel->post_deactivate(tunnel); in tb_tunnel_deactivate()
2359 tunnel->state = TB_TUNNEL_INACTIVE; in tb_tunnel_deactivate()
2363 * tb_tunnel_port_on_path() - Does the tunnel go through port
2364 * @tunnel: Tunnel to check
2367 * Returns true if @tunnel goes through @port (direction does not matter), false otherwise.
2370 bool tb_tunnel_port_on_path(const struct tb_tunnel *tunnel, in tb_tunnel_port_on_path() argument
2375 for (i = 0; i < tunnel->npaths; i++) { in tb_tunnel_port_on_path()
2376 if (!tunnel->paths[i]) in tb_tunnel_port_on_path()
2379 if (tb_path_port_on_path(tunnel->paths[i], port)) in tb_tunnel_port_on_path()
2386 /* Is tb_tunnel_activate() called for the tunnel */
2387 static bool tb_tunnel_is_activated(const struct tb_tunnel *tunnel) in tb_tunnel_is_activated() argument
2389 return tunnel->state == TB_TUNNEL_ACTIVATING || tb_tunnel_is_active(tunnel); in tb_tunnel_is_activated()
2394 * @tunnel: Tunnel to check
2398 * Returns the maximum possible bandwidth this tunnel can carry if not limited
2399 * by other bandwidth clients. If the tunnel does not support this, returns %-EOPNOTSUPP.
2402 int tb_tunnel_maximum_bandwidth(struct tb_tunnel *tunnel, int *max_up, in tb_tunnel_maximum_bandwidth() argument
2405 if (!tb_tunnel_is_active(tunnel)) in tb_tunnel_maximum_bandwidth()
2408 if (tunnel->maximum_bandwidth) in tb_tunnel_maximum_bandwidth()
2409 return tunnel->maximum_bandwidth(tunnel, max_up, max_down); in tb_tunnel_maximum_bandwidth()
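Since not every tunnel type implements the hook, the unshown fallthrough presumably returns %-EOPNOTSUPP (an assumption consistent with the kernel-doc above); callers can use that to skip tunnels without adjustable bandwidth, e.g. inside a loop over tunnels:

	int max_up, max_down, ret;

	ret = tb_tunnel_maximum_bandwidth(tunnel, &max_up, &max_down);
	if (ret == -EOPNOTSUPP)
		continue;	/* this tunnel type has no bandwidth hooks */
	if (ret)
		return ret;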
2414 * tb_tunnel_allocated_bandwidth() - Return bandwidth allocated for the tunnel
2415 * @tunnel: Tunnel to check
2420 * Returns the bandwidth allocated for the tunnel. This may be higher
2421 * than what the tunnel actually consumes.
2423 int tb_tunnel_allocated_bandwidth(struct tb_tunnel *tunnel, int *allocated_up, in tb_tunnel_allocated_bandwidth() argument
2426 if (!tb_tunnel_is_active(tunnel)) in tb_tunnel_allocated_bandwidth()
2429 if (tunnel->allocated_bandwidth) in tb_tunnel_allocated_bandwidth()
2430 return tunnel->allocated_bandwidth(tunnel, allocated_up, in tb_tunnel_allocated_bandwidth()
2436 * tb_tunnel_alloc_bandwidth() - Change tunnel bandwidth allocation
2437 * @tunnel: Tunnel whose bandwidth allocation to change
2441 * Tries to change tunnel bandwidth allocation. If it succeeds, returns %0
2446 int tb_tunnel_alloc_bandwidth(struct tb_tunnel *tunnel, int *alloc_up, in tb_tunnel_alloc_bandwidth() argument
2449 if (!tb_tunnel_is_active(tunnel)) in tb_tunnel_alloc_bandwidth()
2452 if (tunnel->alloc_bandwidth) in tb_tunnel_alloc_bandwidth()
2453 return tunnel->alloc_bandwidth(tunnel, alloc_up, alloc_down); in tb_tunnel_alloc_bandwidth()
2459 * tb_tunnel_consumed_bandwidth() - Return bandwidth consumed by the tunnel
2460 * @tunnel: Tunnel to check
2466 * Stores the amount of isochronous bandwidth @tunnel consumes in @consumed_up and @consumed_down.
2470 int tb_tunnel_consumed_bandwidth(struct tb_tunnel *tunnel, int *consumed_up, in tb_tunnel_consumed_bandwidth() argument
2476 * Here we need to distinguish a tunnel that is not active from in tb_tunnel_consumed_bandwidth()
2482 if (tb_tunnel_is_activated(tunnel) && tunnel->consumed_bandwidth) { in tb_tunnel_consumed_bandwidth()
2485 ret = tunnel->consumed_bandwidth(tunnel, &up_bw, &down_bw); in tb_tunnel_consumed_bandwidth()
2495 tb_tunnel_dbg(tunnel, "consumed bandwidth %d/%d Mb/s\n", up_bw, down_bw); in tb_tunnel_consumed_bandwidth()
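A consumer-side sketch: subtract what each tunnel uses from a running availability estimate (the available_* variables are illustrative):

	int up_bw = 0, down_bw = 0;

	ret = tb_tunnel_consumed_bandwidth(tunnel, &up_bw, &down_bw);
	if (ret)
		return ret;
	available_up -= up_bw;
	available_down -= down_bw;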
2501 * @tunnel: Tunnel whose unused bandwidth to release
2503 * If the tunnel supports dynamic bandwidth management (USB3 tunnels at the
2508 int tb_tunnel_release_unused_bandwidth(struct tb_tunnel *tunnel) in tb_tunnel_release_unused_bandwidth() argument
2510 if (!tb_tunnel_is_active(tunnel)) in tb_tunnel_release_unused_bandwidth()
2513 if (tunnel->release_unused_bandwidth) { in tb_tunnel_release_unused_bandwidth()
2516 ret = tunnel->release_unused_bandwidth(tunnel); in tb_tunnel_release_unused_bandwidth()
2526 * @tunnel: Tunnel reclaiming available bandwidth
2532 * reclaimed by the tunnel). If nothing was reclaimed, the values are kept as is.
2535 void tb_tunnel_reclaim_available_bandwidth(struct tb_tunnel *tunnel, in tb_tunnel_reclaim_available_bandwidth() argument
2539 if (!tb_tunnel_is_active(tunnel)) in tb_tunnel_reclaim_available_bandwidth()
2542 if (tunnel->reclaim_available_bandwidth) in tb_tunnel_reclaim_available_bandwidth()
2543 tunnel->reclaim_available_bandwidth(tunnel, available_up, in tb_tunnel_reclaim_available_bandwidth()
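These last two hooks pair up during rebalancing: release what USB3 does not currently need, let other consumers allocate from the freed headroom, then hand whatever remains back to the USB3 tunnel. A sketch of that ordering:

	ret = tb_tunnel_release_unused_bandwidth(tunnel);
	if (ret)
		return ret;

	/* ... distribute bandwidth to other tunnels, updating available_* ... */

	tb_tunnel_reclaim_available_bandwidth(tunnel, &available_up,
					      &available_down);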
2547 const char *tb_tunnel_type_name(const struct tb_tunnel *tunnel) in tb_tunnel_type_name() argument
2549 return tb_tunnel_names[tunnel->type]; in tb_tunnel_type_name()