Lines Matching full:tc
37 enum intel_display_power_domain (*cold_off_domain)(struct intel_tc_port *tc);
38 u32 (*hpd_live_status)(struct intel_tc_port *tc);
39 bool (*is_ready)(struct intel_tc_port *tc);
40 bool (*is_owned)(struct intel_tc_port *tc);
41 void (*get_hw_state)(struct intel_tc_port *tc);
42 bool (*connect)(struct intel_tc_port *tc, int required_lanes);
43 void (*disconnect)(struct intel_tc_port *tc);
44 void (*init)(struct intel_tc_port *tc);
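These eight callbacks form the per-platform PHY vtable. A minimal sketch of how such a table is populated and dispatched, assuming the struct tag intel_tc_phy_ops (the tag itself is elided by this search; the members are the icl_* handlers matched further down, and calls go through tc->phy_ops, e.g. tc->phy_ops->connect(tc, required_lanes) as matched at line 1412):

    static const struct intel_tc_phy_ops icl_tc_phy_ops = {
            .cold_off_domain = icl_tc_phy_cold_off_domain,
            .hpd_live_status = icl_tc_phy_hpd_live_status,
            .is_ready = icl_tc_phy_is_ready,
            .is_owned = icl_tc_phy_is_owned,
            .get_hw_state = icl_tc_phy_get_hw_state,
            .connect = icl_tc_phy_connect,
            .disconnect = icl_tc_phy_disconnect,
            .init = icl_tc_phy_init,
    };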
70 static u32 tc_phy_hpd_live_status(struct intel_tc_port *tc);
71 static bool tc_phy_is_ready(struct intel_tc_port *tc);
72 static bool tc_phy_wait_for_ready(struct intel_tc_port *tc);
73 static enum tc_port_mode tc_phy_get_current_mode(struct intel_tc_port *tc);
92 return dig_port->tc; in to_tc_port()
95 static struct drm_i915_private *tc_to_i915(struct intel_tc_port *tc) in tc_to_i915() argument
97 return to_i915(tc->dig_port->base.base.dev); in tc_to_i915()
103 struct intel_tc_port *tc = to_tc_port(dig_port); in intel_tc_port_in_mode() local
105 return intel_encoder_is_tc(&dig_port->base) && tc->mode == mode; in intel_tc_port_in_mode()
125 struct intel_tc_port *tc = to_tc_port(dig_port); in intel_tc_port_handles_hpd_glitches() local
127 return intel_encoder_is_tc(&dig_port->base) && !tc->legacy_port; in intel_tc_port_handles_hpd_glitches()
131 * The display power domains used for TC ports depend on the
132 * platform and TC mode (legacy, DP-alt, TBT):
148 * POWER_DOMAIN_AUX_USBC<TC port index>:
152 * - TCSS/PHY: block TC-cold power state for using the PHY AUX and
155 * - TCSS/PHY: block TC-cold power state for using the PHY AUX and
161 * - TCSS/TBT: block TC-cold power state for using the (direct or
166 * - TCSS/PHY: block TC-cold power state for using the (direct or
170 * - TCSS/TBT: block TC-cold power state for using the (TBT DP-IN)
175 * - TCSS/PHY: block TC-cold power state for using the (direct or
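In short: each mode blocks TC-cold either through the port's AUX power domain or through the dedicated TC-cold-off domain. A hedged reconstruction of the ICL chooser matched at lines 478-487 below; the two return statements are elided by the search and are assumptions here:

    static enum intel_display_power_domain
    icl_tc_phy_cold_off_domain(struct intel_tc_port *tc)
    {
            struct drm_i915_private *i915 = tc_to_i915(tc);
            struct intel_digital_port *dig_port = tc->dig_port;

            /* Legacy ports block TC-cold via their AUX power domain (assumed). */
            if (tc->legacy_port)
                    return intel_display_power_legacy_aux_domain(i915, dig_port->aux_ch);

            /* Other modes use the dedicated TC-cold-off domain (assumed). */
            return POWER_DOMAIN_TC_COLD_OFF;
    }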
181 struct intel_tc_port *tc = to_tc_port(dig_port); in intel_tc_cold_requires_aux_pw() local
183 return tc_phy_cold_off_domain(tc) == in intel_tc_cold_requires_aux_pw()
188 __tc_cold_block(struct intel_tc_port *tc, enum intel_display_power_domain *domain) in __tc_cold_block() argument
190 struct drm_i915_private *i915 = tc_to_i915(tc); in __tc_cold_block()
192 *domain = tc_phy_cold_off_domain(tc); in __tc_cold_block()
198 tc_cold_block(struct intel_tc_port *tc) in tc_cold_block() argument
203 wakeref = __tc_cold_block(tc, &domain); in tc_cold_block()
205 tc->lock_power_domain = domain; in tc_cold_block()
211 __tc_cold_unblock(struct intel_tc_port *tc, enum intel_display_power_domain domain, in __tc_cold_unblock() argument
214 struct drm_i915_private *i915 = tc_to_i915(tc); in __tc_cold_unblock()
220 tc_cold_unblock(struct intel_tc_port *tc, intel_wakeref_t wakeref) in tc_cold_unblock() argument
222 enum intel_display_power_domain domain = tc_phy_cold_off_domain(tc); in tc_cold_unblock()
225 drm_WARN_ON(&tc_to_i915(tc)->drm, tc->lock_power_domain != domain); in tc_cold_unblock()
227 __tc_cold_unblock(tc, domain, wakeref); in tc_cold_unblock()
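tc_cold_block() stashes the blocking domain so tc_cold_unblock() can WARN if the domain changed in between; callers pair the two around PHY access, as the connect/disconnect paths matched below do:

    tc->lock_wakeref = tc_cold_block(tc);   /* hold off TC-cold while the PHY is in use */
    /* ... FIA/PHY register access ... */
    tc_cold_unblock(tc, fetch_and_zero(&tc->lock_wakeref));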
231 assert_display_core_power_enabled(struct intel_tc_port *tc) in assert_display_core_power_enabled() argument
233 struct drm_i915_private *i915 = tc_to_i915(tc); in assert_display_core_power_enabled()
240 assert_tc_cold_blocked(struct intel_tc_port *tc) in assert_tc_cold_blocked() argument
242 struct drm_i915_private *i915 = tc_to_i915(tc); in assert_tc_cold_blocked()
246 tc_phy_cold_off_domain(tc)); in assert_tc_cold_blocked()
251 tc_port_power_domain(struct intel_tc_port *tc) in tc_port_power_domain() argument
253 enum tc_port tc_port = intel_encoder_to_tc(&tc->dig_port->base); in tc_port_power_domain()
259 assert_tc_port_power_enabled(struct intel_tc_port *tc) in assert_tc_port_power_enabled() argument
261 struct drm_i915_private *i915 = tc_to_i915(tc); in assert_tc_port_power_enabled()
264 !intel_display_power_is_enabled(i915, tc_port_power_domain(tc))); in assert_tc_port_power_enabled()
270 struct intel_tc_port *tc = to_tc_port(dig_port); in intel_tc_port_get_lane_mask() local
273 lane_mask = intel_de_read(i915, PORT_TX_DFLEXDPSP(tc->phy_fia)); in intel_tc_port_get_lane_mask()
276 assert_tc_cold_blocked(tc); in intel_tc_port_get_lane_mask()
278 lane_mask &= DP_LANE_ASSIGNMENT_MASK(tc->phy_fia_idx); in intel_tc_port_get_lane_mask()
279 return lane_mask >> DP_LANE_ASSIGNMENT_SHIFT(tc->phy_fia_idx); in intel_tc_port_get_lane_mask()
285 struct intel_tc_port *tc = to_tc_port(dig_port); in intel_tc_port_get_pin_assignment_mask() local
288 pin_mask = intel_de_read(i915, PORT_TX_DFLEXPA1(tc->phy_fia)); in intel_tc_port_get_pin_assignment_mask()
291 assert_tc_cold_blocked(tc); in intel_tc_port_get_pin_assignment_mask()
293 return (pin_mask & DP_PIN_ASSIGNMENT_MASK(tc->phy_fia_idx)) >> in intel_tc_port_get_pin_assignment_mask()
294 DP_PIN_ASSIGNMENT_SHIFT(tc->phy_fia_idx); in intel_tc_port_get_pin_assignment_mask()
372 struct intel_tc_port *tc = to_tc_port(dig_port); in intel_tc_port_max_lane_count() local
374 if (!intel_encoder_is_tc(&dig_port->base) || tc->mode != TC_PORT_DP_ALT) in intel_tc_port_max_lane_count()
377 assert_tc_cold_blocked(tc); in intel_tc_port_max_lane_count()
392 struct intel_tc_port *tc = to_tc_port(dig_port); in intel_tc_port_set_fia_lane_count() local
400 lane_reversal && tc->mode != TC_PORT_LEGACY); in intel_tc_port_set_fia_lane_count()
402 assert_tc_cold_blocked(tc); in intel_tc_port_set_fia_lane_count()
404 val = intel_de_read(i915, PORT_TX_DFLEXDPMLE1(tc->phy_fia)); in intel_tc_port_set_fia_lane_count()
405 val &= ~DFLEXDPMLE1_DPMLETC_MASK(tc->phy_fia_idx); in intel_tc_port_set_fia_lane_count()
410 DFLEXDPMLE1_DPMLETC_ML3(tc->phy_fia_idx) : in intel_tc_port_set_fia_lane_count()
411 DFLEXDPMLE1_DPMLETC_ML0(tc->phy_fia_idx); in intel_tc_port_set_fia_lane_count()
415 DFLEXDPMLE1_DPMLETC_ML3_2(tc->phy_fia_idx) : in intel_tc_port_set_fia_lane_count()
416 DFLEXDPMLE1_DPMLETC_ML1_0(tc->phy_fia_idx); in intel_tc_port_set_fia_lane_count()
419 val |= DFLEXDPMLE1_DPMLETC_ML3_0(tc->phy_fia_idx); in intel_tc_port_set_fia_lane_count()
425 intel_de_write(i915, PORT_TX_DFLEXDPMLE1(tc->phy_fia), val); in intel_tc_port_set_fia_lane_count()
428 static void tc_port_fixup_legacy_flag(struct intel_tc_port *tc, in tc_port_fixup_legacy_flag() argument
431 struct drm_i915_private *i915 = tc_to_i915(tc); in tc_port_fixup_legacy_flag()
434 drm_WARN_ON(&i915->drm, tc->mode != TC_PORT_DISCONNECTED); in tc_port_fixup_legacy_flag()
439 if (tc->legacy_port) in tc_port_fixup_legacy_flag()
451 tc->port_name, live_status_mask, valid_hpd_mask); in tc_port_fixup_legacy_flag()
453 tc->legacy_port = !tc->legacy_port; in tc_port_fixup_legacy_flag()
456 static void tc_phy_load_fia_params(struct intel_tc_port *tc, bool modular_fia) in tc_phy_load_fia_params() argument
458 enum tc_port tc_port = intel_encoder_to_tc(&tc->dig_port->base); in tc_phy_load_fia_params()
461 * Each Modular FIA instance houses 2 TC ports. In SoCs with more in tc_phy_load_fia_params()
462 * than two TC ports, there are multiple instances of Modular FIA. in tc_phy_load_fia_params()
465 tc->phy_fia = tc_port / 2; in tc_phy_load_fia_params()
466 tc->phy_fia_idx = tc_port % 2; in tc_phy_load_fia_params()
468 tc->phy_fia = FIA1; in tc_phy_load_fia_params()
469 tc->phy_fia_idx = tc_port; in tc_phy_load_fia_params()
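In the modular case, the divide/modulo pair maps ports pairwise onto FIA instances (assuming FIA1 is the first enumerator, so phy_fia 0 == FIA1):

    tc_port 0 -> phy_fia 0, phy_fia_idx 0
    tc_port 1 -> phy_fia 0, phy_fia_idx 1
    tc_port 2 -> phy_fia 1, phy_fia_idx 0
    tc_port 3 -> phy_fia 1, phy_fia_idx 1

Without a modular FIA, every port collapses onto FIA1 and indexes it directly by tc_port.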
474 * ICL TC PHY handlers
478 icl_tc_phy_cold_off_domain(struct intel_tc_port *tc) in icl_tc_phy_cold_off_domain() argument
480 struct drm_i915_private *i915 = tc_to_i915(tc); in icl_tc_phy_cold_off_domain()
481 struct intel_digital_port *dig_port = tc->dig_port; in icl_tc_phy_cold_off_domain()
483 if (tc->legacy_port) in icl_tc_phy_cold_off_domain()
489 static u32 icl_tc_phy_hpd_live_status(struct intel_tc_port *tc) in icl_tc_phy_hpd_live_status() argument
491 struct drm_i915_private *i915 = tc_to_i915(tc); in icl_tc_phy_hpd_live_status()
492 struct intel_digital_port *dig_port = tc->dig_port; in icl_tc_phy_hpd_live_status()
499 with_intel_display_power(i915, tc_phy_cold_off_domain(tc), wakeref) { in icl_tc_phy_hpd_live_status()
500 fia_isr = intel_de_read(i915, PORT_TX_DFLEXDPSP(tc->phy_fia)); in icl_tc_phy_hpd_live_status()
507 tc->port_name); in icl_tc_phy_hpd_live_status()
511 if (fia_isr & TC_LIVE_STATE_TBT(tc->phy_fia_idx)) in icl_tc_phy_hpd_live_status()
513 if (fia_isr & TC_LIVE_STATE_TC(tc->phy_fia_idx)) in icl_tc_phy_hpd_live_status()
530 static bool icl_tc_phy_is_ready(struct intel_tc_port *tc) in icl_tc_phy_is_ready() argument
532 struct drm_i915_private *i915 = tc_to_i915(tc); in icl_tc_phy_is_ready()
535 assert_tc_cold_blocked(tc); in icl_tc_phy_is_ready()
537 val = intel_de_read(i915, PORT_TX_DFLEXDPPMS(tc->phy_fia)); in icl_tc_phy_is_ready()
541 tc->port_name); in icl_tc_phy_is_ready()
545 return val & DP_PHY_MODE_STATUS_COMPLETED(tc->phy_fia_idx); in icl_tc_phy_is_ready()
548 static bool icl_tc_phy_take_ownership(struct intel_tc_port *tc, in icl_tc_phy_take_ownership() argument
551 struct drm_i915_private *i915 = tc_to_i915(tc); in icl_tc_phy_take_ownership()
554 assert_tc_cold_blocked(tc); in icl_tc_phy_take_ownership()
556 val = intel_de_read(i915, PORT_TX_DFLEXDPCSSS(tc->phy_fia)); in icl_tc_phy_take_ownership()
560 tc->port_name, take ? "take" : "release"); in icl_tc_phy_take_ownership()
565 val &= ~DP_PHY_MODE_STATUS_NOT_SAFE(tc->phy_fia_idx); in icl_tc_phy_take_ownership()
567 val |= DP_PHY_MODE_STATUS_NOT_SAFE(tc->phy_fia_idx); in icl_tc_phy_take_ownership()
569 intel_de_write(i915, PORT_TX_DFLEXDPCSSS(tc->phy_fia), val); in icl_tc_phy_take_ownership()
574 static bool icl_tc_phy_is_owned(struct intel_tc_port *tc) in icl_tc_phy_is_owned() argument
576 struct drm_i915_private *i915 = tc_to_i915(tc); in icl_tc_phy_is_owned()
579 assert_tc_cold_blocked(tc); in icl_tc_phy_is_owned()
581 val = intel_de_read(i915, PORT_TX_DFLEXDPCSSS(tc->phy_fia)); in icl_tc_phy_is_owned()
585 tc->port_name); in icl_tc_phy_is_owned()
589 return val & DP_PHY_MODE_STATUS_NOT_SAFE(tc->phy_fia_idx); in icl_tc_phy_is_owned()
592 static void icl_tc_phy_get_hw_state(struct intel_tc_port *tc) in icl_tc_phy_get_hw_state() argument
597 tc_cold_wref = __tc_cold_block(tc, &domain); in icl_tc_phy_get_hw_state()
599 tc->mode = tc_phy_get_current_mode(tc); in icl_tc_phy_get_hw_state()
600 if (tc->mode != TC_PORT_DISCONNECTED) in icl_tc_phy_get_hw_state()
601 tc->lock_wakeref = tc_cold_block(tc); in icl_tc_phy_get_hw_state()
603 __tc_cold_unblock(tc, domain, tc_cold_wref); in icl_tc_phy_get_hw_state()
617 static bool tc_phy_verify_legacy_or_dp_alt_mode(struct intel_tc_port *tc, in tc_phy_verify_legacy_or_dp_alt_mode() argument
620 struct drm_i915_private *i915 = tc_to_i915(tc); in tc_phy_verify_legacy_or_dp_alt_mode()
621 struct intel_digital_port *dig_port = tc->dig_port; in tc_phy_verify_legacy_or_dp_alt_mode()
625 if (tc->mode == TC_PORT_LEGACY) { in tc_phy_verify_legacy_or_dp_alt_mode()
630 drm_WARN_ON(&i915->drm, tc->mode != TC_PORT_DP_ALT); in tc_phy_verify_legacy_or_dp_alt_mode()
636 if (!(tc_phy_hpd_live_status(tc) & BIT(TC_PORT_DP_ALT))) { in tc_phy_verify_legacy_or_dp_alt_mode()
638 tc->port_name); in tc_phy_verify_legacy_or_dp_alt_mode()
645 tc->port_name, in tc_phy_verify_legacy_or_dp_alt_mode()
653 static bool icl_tc_phy_connect(struct intel_tc_port *tc, in icl_tc_phy_connect() argument
656 struct drm_i915_private *i915 = tc_to_i915(tc); in icl_tc_phy_connect()
658 tc->lock_wakeref = tc_cold_block(tc); in icl_tc_phy_connect()
660 if (tc->mode == TC_PORT_TBT_ALT) in icl_tc_phy_connect()
663 if ((!tc_phy_is_ready(tc) || in icl_tc_phy_connect()
664 !icl_tc_phy_take_ownership(tc, true)) && in icl_tc_phy_connect()
665 !drm_WARN_ON(&i915->drm, tc->mode == TC_PORT_LEGACY)) { in icl_tc_phy_connect()
667 tc->port_name, in icl_tc_phy_connect()
668 str_yes_no(tc_phy_is_ready(tc))); in icl_tc_phy_connect()
673 if (!tc_phy_verify_legacy_or_dp_alt_mode(tc, required_lanes)) in icl_tc_phy_connect()
679 icl_tc_phy_take_ownership(tc, false); in icl_tc_phy_connect()
681 tc_cold_unblock(tc, fetch_and_zero(&tc->lock_wakeref)); in icl_tc_phy_connect()
690 static void icl_tc_phy_disconnect(struct intel_tc_port *tc) in icl_tc_phy_disconnect() argument
692 switch (tc->mode) { in icl_tc_phy_disconnect()
695 icl_tc_phy_take_ownership(tc, false); in icl_tc_phy_disconnect()
698 tc_cold_unblock(tc, fetch_and_zero(&tc->lock_wakeref)); in icl_tc_phy_disconnect()
701 MISSING_CASE(tc->mode); in icl_tc_phy_disconnect()
705 static void icl_tc_phy_init(struct intel_tc_port *tc) in icl_tc_phy_init() argument
707 tc_phy_load_fia_params(tc, false); in icl_tc_phy_init()
722 * TGL TC PHY handlers
726 tgl_tc_phy_cold_off_domain(struct intel_tc_port *tc) in tgl_tc_phy_cold_off_domain() argument
731 static void tgl_tc_phy_init(struct intel_tc_port *tc) in tgl_tc_phy_init() argument
733 struct drm_i915_private *i915 = tc_to_i915(tc); in tgl_tc_phy_init()
737 with_intel_display_power(i915, tc_phy_cold_off_domain(tc), wakeref) in tgl_tc_phy_init()
742 tc_phy_load_fia_params(tc, val & MODULAR_FIA_MASK); in tgl_tc_phy_init()
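The read feeding MODULAR_FIA_MASK is elided by the search; a plausible reconstruction of the body of tgl_tc_phy_init(), where the register choice and the WARN are assumptions consistent with the FIA code above:

    with_intel_display_power(i915, tc_phy_cold_off_domain(tc), wakeref)
            val = intel_de_read(i915, PORT_TX_DFLEXDPSP(FIA1));

    drm_WARN_ON(&i915->drm, val == 0xffffffff);

    tc_phy_load_fia_params(tc, val & MODULAR_FIA_MASK);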
757 * ADLP TC PHY handlers
761 adlp_tc_phy_cold_off_domain(struct intel_tc_port *tc) in adlp_tc_phy_cold_off_domain() argument
763 struct drm_i915_private *i915 = tc_to_i915(tc); in adlp_tc_phy_cold_off_domain()
764 struct intel_digital_port *dig_port = tc->dig_port; in adlp_tc_phy_cold_off_domain()
766 if (tc->mode != TC_PORT_TBT_ALT) in adlp_tc_phy_cold_off_domain()
772 static u32 adlp_tc_phy_hpd_live_status(struct intel_tc_port *tc) in adlp_tc_phy_hpd_live_status() argument
774 struct drm_i915_private *i915 = tc_to_i915(tc); in adlp_tc_phy_hpd_live_status()
775 struct intel_digital_port *dig_port = tc->dig_port; in adlp_tc_phy_hpd_live_status()
807 static bool adlp_tc_phy_is_ready(struct intel_tc_port *tc) in adlp_tc_phy_is_ready() argument
809 struct drm_i915_private *i915 = tc_to_i915(tc); in adlp_tc_phy_is_ready()
810 enum tc_port tc_port = intel_encoder_to_tc(&tc->dig_port->base); in adlp_tc_phy_is_ready()
813 assert_display_core_power_enabled(tc); in adlp_tc_phy_is_ready()
819 tc->port_name); in adlp_tc_phy_is_ready()
826 static bool adlp_tc_phy_take_ownership(struct intel_tc_port *tc, in adlp_tc_phy_take_ownership() argument
829 struct drm_i915_private *i915 = tc_to_i915(tc); in adlp_tc_phy_take_ownership()
830 enum port port = tc->dig_port->base.port; in adlp_tc_phy_take_ownership()
832 assert_tc_port_power_enabled(tc); in adlp_tc_phy_take_ownership()
840 static bool adlp_tc_phy_is_owned(struct intel_tc_port *tc) in adlp_tc_phy_is_owned() argument
842 struct drm_i915_private *i915 = tc_to_i915(tc); in adlp_tc_phy_is_owned()
843 enum port port = tc->dig_port->base.port; in adlp_tc_phy_is_owned()
846 assert_tc_port_power_enabled(tc); in adlp_tc_phy_is_owned()
852 static void adlp_tc_phy_get_hw_state(struct intel_tc_port *tc) in adlp_tc_phy_get_hw_state() argument
854 struct drm_i915_private *i915 = tc_to_i915(tc); in adlp_tc_phy_get_hw_state()
856 tc_port_power_domain(tc); in adlp_tc_phy_get_hw_state()
861 tc->mode = tc_phy_get_current_mode(tc); in adlp_tc_phy_get_hw_state()
862 if (tc->mode != TC_PORT_DISCONNECTED) in adlp_tc_phy_get_hw_state()
863 tc->lock_wakeref = tc_cold_block(tc); in adlp_tc_phy_get_hw_state()
868 static bool adlp_tc_phy_connect(struct intel_tc_port *tc, int required_lanes) in adlp_tc_phy_connect() argument
870 struct drm_i915_private *i915 = tc_to_i915(tc); in adlp_tc_phy_connect()
872 tc_port_power_domain(tc); in adlp_tc_phy_connect()
875 if (tc->mode == TC_PORT_TBT_ALT) { in adlp_tc_phy_connect()
876 tc->lock_wakeref = tc_cold_block(tc); in adlp_tc_phy_connect()
882 if (!adlp_tc_phy_take_ownership(tc, true) && in adlp_tc_phy_connect()
883 !drm_WARN_ON(&i915->drm, tc->mode == TC_PORT_LEGACY)) { in adlp_tc_phy_connect()
885 tc->port_name); in adlp_tc_phy_connect()
889 if (!tc_phy_is_ready(tc) && in adlp_tc_phy_connect()
890 !drm_WARN_ON(&i915->drm, tc->mode == TC_PORT_LEGACY)) { in adlp_tc_phy_connect()
892 tc->port_name); in adlp_tc_phy_connect()
896 tc->lock_wakeref = tc_cold_block(tc); in adlp_tc_phy_connect()
898 if (!tc_phy_verify_legacy_or_dp_alt_mode(tc, required_lanes)) in adlp_tc_phy_connect()
906 tc_cold_unblock(tc, fetch_and_zero(&tc->lock_wakeref)); in adlp_tc_phy_connect()
908 adlp_tc_phy_take_ownership(tc, false); in adlp_tc_phy_connect()
915 static void adlp_tc_phy_disconnect(struct intel_tc_port *tc) in adlp_tc_phy_disconnect() argument
917 struct drm_i915_private *i915 = tc_to_i915(tc); in adlp_tc_phy_disconnect()
919 tc_port_power_domain(tc); in adlp_tc_phy_disconnect()
924 tc_cold_unblock(tc, fetch_and_zero(&tc->lock_wakeref)); in adlp_tc_phy_disconnect()
926 switch (tc->mode) { in adlp_tc_phy_disconnect()
929 adlp_tc_phy_take_ownership(tc, false); in adlp_tc_phy_disconnect()
934 MISSING_CASE(tc->mode); in adlp_tc_phy_disconnect()
940 static void adlp_tc_phy_init(struct intel_tc_port *tc) in adlp_tc_phy_init() argument
942 tc_phy_load_fia_params(tc, true); in adlp_tc_phy_init()
957 * XELPDP TC PHY handlers
960 static u32 xelpdp_tc_phy_hpd_live_status(struct intel_tc_port *tc) in xelpdp_tc_phy_hpd_live_status() argument
962 struct drm_i915_private *i915 = tc_to_i915(tc); in xelpdp_tc_phy_hpd_live_status()
963 struct intel_digital_port *dig_port = tc->dig_port; in xelpdp_tc_phy_hpd_live_status()
982 if (tc->legacy_port && (pch_isr & pch_isr_bit)) in xelpdp_tc_phy_hpd_live_status()
989 xelpdp_tc_phy_tcss_power_is_enabled(struct intel_tc_port *tc) in xelpdp_tc_phy_tcss_power_is_enabled() argument
991 struct drm_i915_private *i915 = tc_to_i915(tc); in xelpdp_tc_phy_tcss_power_is_enabled()
992 enum port port = tc->dig_port->base.port; in xelpdp_tc_phy_tcss_power_is_enabled()
995 assert_tc_cold_blocked(tc); in xelpdp_tc_phy_tcss_power_is_enabled()
1001 xelpdp_tc_phy_wait_for_tcss_power(struct intel_tc_port *tc, bool enabled) in xelpdp_tc_phy_wait_for_tcss_power() argument
1003 struct drm_i915_private *i915 = tc_to_i915(tc); in xelpdp_tc_phy_wait_for_tcss_power()
1005 if (wait_for(xelpdp_tc_phy_tcss_power_is_enabled(tc) == enabled, 5)) { in xelpdp_tc_phy_wait_for_tcss_power()
1009 tc->port_name); in xelpdp_tc_phy_wait_for_tcss_power()
1044 static void __xelpdp_tc_phy_enable_tcss_power(struct intel_tc_port *tc, bool enable) in __xelpdp_tc_phy_enable_tcss_power() argument
1046 struct intel_display *display = to_intel_display(tc->dig_port); in __xelpdp_tc_phy_enable_tcss_power()
1047 enum port port = tc->dig_port->base.port; in __xelpdp_tc_phy_enable_tcss_power()
1051 assert_tc_cold_blocked(tc); in __xelpdp_tc_phy_enable_tcss_power()
1064 static bool xelpdp_tc_phy_enable_tcss_power(struct intel_tc_port *tc, bool enable) in xelpdp_tc_phy_enable_tcss_power() argument
1066 struct drm_i915_private *i915 = tc_to_i915(tc); in xelpdp_tc_phy_enable_tcss_power()
1068 __xelpdp_tc_phy_enable_tcss_power(tc, enable); in xelpdp_tc_phy_enable_tcss_power()
1070 if (enable && !tc_phy_wait_for_ready(tc)) in xelpdp_tc_phy_enable_tcss_power()
1073 if (!xelpdp_tc_phy_wait_for_tcss_power(tc, enable)) in xelpdp_tc_phy_enable_tcss_power()
1079 if (drm_WARN_ON(&i915->drm, tc->mode == TC_PORT_LEGACY)) in xelpdp_tc_phy_enable_tcss_power()
1085 __xelpdp_tc_phy_enable_tcss_power(tc, false); in xelpdp_tc_phy_enable_tcss_power()
1086 xelpdp_tc_phy_wait_for_tcss_power(tc, false); in xelpdp_tc_phy_enable_tcss_power()
1091 static void xelpdp_tc_phy_take_ownership(struct intel_tc_port *tc, bool take) in xelpdp_tc_phy_take_ownership() argument
1093 struct drm_i915_private *i915 = tc_to_i915(tc); in xelpdp_tc_phy_take_ownership()
1094 enum port port = tc->dig_port->base.port; in xelpdp_tc_phy_take_ownership()
1098 assert_tc_cold_blocked(tc); in xelpdp_tc_phy_take_ownership()
1108 static bool xelpdp_tc_phy_is_owned(struct intel_tc_port *tc) in xelpdp_tc_phy_is_owned() argument
1110 struct drm_i915_private *i915 = tc_to_i915(tc); in xelpdp_tc_phy_is_owned()
1111 enum port port = tc->dig_port->base.port; in xelpdp_tc_phy_is_owned()
1114 assert_tc_cold_blocked(tc); in xelpdp_tc_phy_is_owned()
1119 static void xelpdp_tc_phy_get_hw_state(struct intel_tc_port *tc) in xelpdp_tc_phy_get_hw_state() argument
1121 struct drm_i915_private *i915 = tc_to_i915(tc); in xelpdp_tc_phy_get_hw_state()
1125 tc_cold_wref = __tc_cold_block(tc, &domain); in xelpdp_tc_phy_get_hw_state()
1127 tc->mode = tc_phy_get_current_mode(tc); in xelpdp_tc_phy_get_hw_state()
1128 if (tc->mode != TC_PORT_DISCONNECTED) in xelpdp_tc_phy_get_hw_state()
1129 tc->lock_wakeref = tc_cold_block(tc); in xelpdp_tc_phy_get_hw_state()
1132 (tc->mode == TC_PORT_DP_ALT || tc->mode == TC_PORT_LEGACY) && in xelpdp_tc_phy_get_hw_state()
1133 !xelpdp_tc_phy_tcss_power_is_enabled(tc)); in xelpdp_tc_phy_get_hw_state()
1135 __tc_cold_unblock(tc, domain, tc_cold_wref); in xelpdp_tc_phy_get_hw_state()
1138 static bool xelpdp_tc_phy_connect(struct intel_tc_port *tc, int required_lanes) in xelpdp_tc_phy_connect() argument
1140 tc->lock_wakeref = tc_cold_block(tc); in xelpdp_tc_phy_connect()
1142 if (tc->mode == TC_PORT_TBT_ALT) in xelpdp_tc_phy_connect()
1145 if (!xelpdp_tc_phy_enable_tcss_power(tc, true)) in xelpdp_tc_phy_connect()
1148 xelpdp_tc_phy_take_ownership(tc, true); in xelpdp_tc_phy_connect()
1150 if (!tc_phy_verify_legacy_or_dp_alt_mode(tc, required_lanes)) in xelpdp_tc_phy_connect()
1156 xelpdp_tc_phy_take_ownership(tc, false); in xelpdp_tc_phy_connect()
1157 xelpdp_tc_phy_wait_for_tcss_power(tc, false); in xelpdp_tc_phy_connect()
1160 tc_cold_unblock(tc, fetch_and_zero(&tc->lock_wakeref)); in xelpdp_tc_phy_connect()
1165 static void xelpdp_tc_phy_disconnect(struct intel_tc_port *tc) in xelpdp_tc_phy_disconnect() argument
1167 switch (tc->mode) { in xelpdp_tc_phy_disconnect()
1170 xelpdp_tc_phy_take_ownership(tc, false); in xelpdp_tc_phy_disconnect()
1171 xelpdp_tc_phy_enable_tcss_power(tc, false); in xelpdp_tc_phy_disconnect()
1174 tc_cold_unblock(tc, fetch_and_zero(&tc->lock_wakeref)); in xelpdp_tc_phy_disconnect()
1177 MISSING_CASE(tc->mode); in xelpdp_tc_phy_disconnect()
1193 * Generic TC PHY handlers
1197 tc_phy_cold_off_domain(struct intel_tc_port *tc) in tc_phy_cold_off_domain() argument
1199 return tc->phy_ops->cold_off_domain(tc); in tc_phy_cold_off_domain()
1202 static u32 tc_phy_hpd_live_status(struct intel_tc_port *tc) in tc_phy_hpd_live_status() argument
1204 struct drm_i915_private *i915 = tc_to_i915(tc); in tc_phy_hpd_live_status()
1207 mask = tc->phy_ops->hpd_live_status(tc); in tc_phy_hpd_live_status()
1215 static bool tc_phy_is_ready(struct intel_tc_port *tc) in tc_phy_is_ready() argument
1217 return tc->phy_ops->is_ready(tc); in tc_phy_is_ready()
1220 static bool tc_phy_is_owned(struct intel_tc_port *tc) in tc_phy_is_owned() argument
1222 return tc->phy_ops->is_owned(tc); in tc_phy_is_owned()
1225 static void tc_phy_get_hw_state(struct intel_tc_port *tc) in tc_phy_get_hw_state() argument
1227 tc->phy_ops->get_hw_state(tc); in tc_phy_get_hw_state()
1230 static bool tc_phy_is_ready_and_owned(struct intel_tc_port *tc, in tc_phy_is_ready_and_owned() argument
1233 struct drm_i915_private *i915 = tc_to_i915(tc); in tc_phy_is_ready_and_owned()
1240 static bool tc_phy_is_connected(struct intel_tc_port *tc, in tc_phy_is_connected() argument
1243 struct intel_encoder *encoder = &tc->dig_port->base; in tc_phy_is_connected()
1245 bool phy_is_ready = tc_phy_is_ready(tc); in tc_phy_is_connected()
1246 bool phy_is_owned = tc_phy_is_owned(tc); in tc_phy_is_connected()
1249 if (tc_phy_is_ready_and_owned(tc, phy_is_ready, phy_is_owned)) in tc_phy_is_connected()
1256 tc->port_name, in tc_phy_is_connected()
1265 static bool tc_phy_wait_for_ready(struct intel_tc_port *tc) in tc_phy_wait_for_ready() argument
1267 struct drm_i915_private *i915 = tc_to_i915(tc); in tc_phy_wait_for_ready()
1269 if (wait_for(tc_phy_is_ready(tc), 500)) { in tc_phy_wait_for_ready()
1271 tc->port_name); in tc_phy_wait_for_ready()
1289 tc_phy_hpd_live_mode(struct intel_tc_port *tc) in tc_phy_hpd_live_mode() argument
1291 u32 live_status_mask = tc_phy_hpd_live_status(tc); in tc_phy_hpd_live_mode()
1297 get_tc_mode_in_phy_owned_state(struct intel_tc_port *tc, in get_tc_mode_in_phy_owned_state() argument
1309 if (tc->legacy_port) in get_tc_mode_in_phy_owned_state()
1317 get_tc_mode_in_phy_not_owned_state(struct intel_tc_port *tc, in get_tc_mode_in_phy_not_owned_state() argument
1330 if (tc->legacy_port) in get_tc_mode_in_phy_not_owned_state()
1338 tc_phy_get_current_mode(struct intel_tc_port *tc) in tc_phy_get_current_mode() argument
1340 struct drm_i915_private *i915 = tc_to_i915(tc); in tc_phy_get_current_mode()
1341 enum tc_port_mode live_mode = tc_phy_hpd_live_mode(tc); in tc_phy_get_current_mode()
1351 if (tc->legacy_port) in tc_phy_get_current_mode()
1352 tc_phy_wait_for_ready(tc); in tc_phy_get_current_mode()
1354 phy_is_ready = tc_phy_is_ready(tc); in tc_phy_get_current_mode()
1355 phy_is_owned = tc_phy_is_owned(tc); in tc_phy_get_current_mode()
1357 if (!tc_phy_is_ready_and_owned(tc, phy_is_ready, phy_is_owned)) { in tc_phy_get_current_mode()
1358 mode = get_tc_mode_in_phy_not_owned_state(tc, live_mode); in tc_phy_get_current_mode()
1361 mode = get_tc_mode_in_phy_owned_state(tc, live_mode); in tc_phy_get_current_mode()
1366 tc->port_name, in tc_phy_get_current_mode()
1375 static enum tc_port_mode default_tc_mode(struct intel_tc_port *tc) in default_tc_mode() argument
1377 if (tc->legacy_port) in default_tc_mode()
1384 hpd_mask_to_target_mode(struct intel_tc_port *tc, u32 live_status_mask) in hpd_mask_to_target_mode() argument
1391 return default_tc_mode(tc); in hpd_mask_to_target_mode()
1395 tc_phy_get_target_mode(struct intel_tc_port *tc) in tc_phy_get_target_mode() argument
1397 u32 live_status_mask = tc_phy_hpd_live_status(tc); in tc_phy_get_target_mode()
1399 return hpd_mask_to_target_mode(tc, live_status_mask); in tc_phy_get_target_mode()
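Putting the fragments together: the highest-priority mode with a live HPD wins, else the port default. A sketch assuming tc_port_mode values double as bit positions in the live-status mask (consistent with BIT(tc->mode) at line 1642):

    static enum tc_port_mode
    hpd_mask_to_target_mode(struct intel_tc_port *tc, u32 live_status_mask)
    {
            if (live_status_mask)
                    return fls(live_status_mask) - 1;       /* highest set bit -> mode */

            return default_tc_mode(tc);     /* TC_PORT_LEGACY or TC_PORT_TBT_ALT */
    }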
1402 static void tc_phy_connect(struct intel_tc_port *tc, int required_lanes) in tc_phy_connect() argument
1404 struct drm_i915_private *i915 = tc_to_i915(tc); in tc_phy_connect()
1405 u32 live_status_mask = tc_phy_hpd_live_status(tc); in tc_phy_connect()
1408 tc_port_fixup_legacy_flag(tc, live_status_mask); in tc_phy_connect()
1410 tc->mode = hpd_mask_to_target_mode(tc, live_status_mask); in tc_phy_connect()
1412 connected = tc->phy_ops->connect(tc, required_lanes); in tc_phy_connect()
1413 if (!connected && tc->mode != default_tc_mode(tc)) { in tc_phy_connect()
1414 tc->mode = default_tc_mode(tc); in tc_phy_connect()
1415 connected = tc->phy_ops->connect(tc, required_lanes); in tc_phy_connect()
1421 static void tc_phy_disconnect(struct intel_tc_port *tc) in tc_phy_disconnect() argument
1423 if (tc->mode != TC_PORT_DISCONNECTED) { in tc_phy_disconnect()
1424 tc->phy_ops->disconnect(tc); in tc_phy_disconnect()
1425 tc->mode = TC_PORT_DISCONNECTED; in tc_phy_disconnect()
1429 static void tc_phy_init(struct intel_tc_port *tc) in tc_phy_init() argument
1431 mutex_lock(&tc->lock); in tc_phy_init()
1432 tc->phy_ops->init(tc); in tc_phy_init()
1433 mutex_unlock(&tc->lock); in tc_phy_init()
1436 static void intel_tc_port_reset_mode(struct intel_tc_port *tc, in intel_tc_port_reset_mode() argument
1439 struct drm_i915_private *i915 = tc_to_i915(tc); in intel_tc_port_reset_mode()
1440 struct intel_digital_port *dig_port = tc->dig_port; in intel_tc_port_reset_mode()
1441 enum tc_port_mode old_tc_mode = tc->mode; in intel_tc_port_reset_mode()
1453 tc_phy_disconnect(tc); in intel_tc_port_reset_mode()
1455 tc_phy_connect(tc, required_lanes); in intel_tc_port_reset_mode()
1457 drm_dbg_kms(&i915->drm, "Port %s: TC port mode reset (%s -> %s)\n", in intel_tc_port_reset_mode()
1458 tc->port_name, in intel_tc_port_reset_mode()
1460 tc_port_mode_name(tc->mode)); in intel_tc_port_reset_mode()
1463 static bool intel_tc_port_needs_reset(struct intel_tc_port *tc) in intel_tc_port_needs_reset() argument
1465 return tc_phy_get_target_mode(tc) != tc->mode; in intel_tc_port_needs_reset()
1468 static void intel_tc_port_update_mode(struct intel_tc_port *tc, in intel_tc_port_update_mode() argument
1472 intel_tc_port_needs_reset(tc)) in intel_tc_port_update_mode()
1473 intel_tc_port_reset_mode(tc, required_lanes, force_disconnect); in intel_tc_port_update_mode()
1476 static void __intel_tc_port_get_link(struct intel_tc_port *tc) in __intel_tc_port_get_link() argument
1478 tc->link_refcount++; in __intel_tc_port_get_link()
1481 static void __intel_tc_port_put_link(struct intel_tc_port *tc) in __intel_tc_port_put_link() argument
1483 tc->link_refcount--; in __intel_tc_port_put_link()
1486 static bool tc_port_is_enabled(struct intel_tc_port *tc) in tc_port_is_enabled() argument
1488 struct drm_i915_private *i915 = tc_to_i915(tc); in tc_port_is_enabled()
1489 struct intel_digital_port *dig_port = tc->dig_port; in tc_port_is_enabled()
1491 assert_tc_port_power_enabled(tc); in tc_port_is_enabled()
1507 struct intel_tc_port *tc = to_tc_port(dig_port); in intel_tc_port_init_mode() local
1510 mutex_lock(&tc->lock); in intel_tc_port_init_mode()
1512 drm_WARN_ON(&i915->drm, tc->mode != TC_PORT_DISCONNECTED); in intel_tc_port_init_mode()
1513 drm_WARN_ON(&i915->drm, tc->lock_wakeref); in intel_tc_port_init_mode()
1514 drm_WARN_ON(&i915->drm, tc->link_refcount); in intel_tc_port_init_mode()
1516 tc_phy_get_hw_state(tc); in intel_tc_port_init_mode()
1521 tc->init_mode = tc->mode; in intel_tc_port_init_mode()
1534 if (!tc_port_is_enabled(tc)) { in intel_tc_port_init_mode()
1536 } else if (tc->mode == TC_PORT_DISCONNECTED) { in intel_tc_port_init_mode()
1537 drm_WARN_ON(&i915->drm, !tc->legacy_port); in intel_tc_port_init_mode()
1540 tc->port_name); in intel_tc_port_init_mode()
1545 intel_tc_port_update_mode(tc, 1, false); in intel_tc_port_init_mode()
1547 /* Prevent changing tc->mode until intel_tc_port_sanitize_mode() is called. */ in intel_tc_port_init_mode()
1548 __intel_tc_port_get_link(tc); in intel_tc_port_init_mode()
1550 mutex_unlock(&tc->lock); in intel_tc_port_init_mode()
1553 static bool tc_port_has_active_links(struct intel_tc_port *tc, in tc_port_has_active_links() argument
1556 struct drm_i915_private *i915 = tc_to_i915(tc); in tc_port_has_active_links()
1557 struct intel_digital_port *dig_port = tc->dig_port; in tc_port_has_active_links()
1569 if (active_links && !tc_phy_is_connected(tc, pll_type)) in tc_port_has_active_links()
1572 tc->port_name, active_links); in tc_port_has_active_links()
1593 struct intel_tc_port *tc = to_tc_port(dig_port); in intel_tc_port_sanitize_mode() local
1595 mutex_lock(&tc->lock); in intel_tc_port_sanitize_mode()
1597 drm_WARN_ON(&i915->drm, tc->link_refcount != 1); in intel_tc_port_sanitize_mode()
1598 if (!tc_port_has_active_links(tc, crtc_state)) { in intel_tc_port_sanitize_mode()
1605 if (tc->init_mode != TC_PORT_TBT_ALT && in intel_tc_port_sanitize_mode()
1606 tc->init_mode != TC_PORT_DISCONNECTED) in intel_tc_port_sanitize_mode()
1609 tc->port_name, in intel_tc_port_sanitize_mode()
1610 tc_port_mode_name(tc->init_mode)); in intel_tc_port_sanitize_mode()
1611 tc_phy_disconnect(tc); in intel_tc_port_sanitize_mode()
1612 __intel_tc_port_put_link(tc); in intel_tc_port_sanitize_mode()
1616 tc->port_name, in intel_tc_port_sanitize_mode()
1617 tc_port_mode_name(tc->mode)); in intel_tc_port_sanitize_mode()
1619 mutex_unlock(&tc->lock); in intel_tc_port_sanitize_mode()
1636 struct intel_tc_port *tc = to_tc_port(dig_port); in intel_tc_port_connected() local
1641 if (tc->mode != TC_PORT_DISCONNECTED) in intel_tc_port_connected()
1642 mask = BIT(tc->mode); in intel_tc_port_connected()
1644 return tc_phy_hpd_live_status(tc) & mask; in intel_tc_port_connected()
1647 static bool __intel_tc_port_link_needs_reset(struct intel_tc_port *tc) in __intel_tc_port_link_needs_reset() argument
1651 mutex_lock(&tc->lock); in __intel_tc_port_link_needs_reset()
1653 ret = tc->link_refcount && in __intel_tc_port_link_needs_reset()
1654 tc->mode == TC_PORT_DP_ALT && in __intel_tc_port_link_needs_reset()
1655 intel_tc_port_needs_reset(tc); in __intel_tc_port_link_needs_reset()
1657 mutex_unlock(&tc->lock); in __intel_tc_port_link_needs_reset()
1670 static int reset_link_commit(struct intel_tc_port *tc, in reset_link_commit() argument
1674 struct drm_i915_private *i915 = tc_to_i915(tc); in reset_link_commit()
1675 struct intel_digital_port *dig_port = tc->dig_port; in reset_link_commit()
1702 if (!__intel_tc_port_link_needs_reset(tc)) in reset_link_commit()
1708 static int reset_link(struct intel_tc_port *tc) in reset_link() argument
1710 struct drm_i915_private *i915 = tc_to_i915(tc); in reset_link()
1724 ret = reset_link_commit(tc, state, &ctx); in reset_link()
1733 struct intel_tc_port *tc = in intel_tc_port_link_reset_work() local
1735 struct drm_i915_private *i915 = tc_to_i915(tc); in intel_tc_port_link_reset_work()
1738 if (!__intel_tc_port_link_needs_reset(tc)) in intel_tc_port_link_reset_work()
1745 tc->port_name); in intel_tc_port_link_reset_work()
1746 ret = reset_link(tc); in intel_tc_port_link_reset_work()
1766 struct intel_tc_port *tc = to_tc_port(dig_port); in intel_tc_port_link_cancel_reset_work() local
1771 cancel_delayed_work(&tc->link_reset_work); in intel_tc_port_link_cancel_reset_work()
1774 static void __intel_tc_port_lock(struct intel_tc_port *tc, in __intel_tc_port_lock() argument
1777 struct drm_i915_private *i915 = tc_to_i915(tc); in __intel_tc_port_lock()
1779 mutex_lock(&tc->lock); in __intel_tc_port_lock()
1781 cancel_delayed_work(&tc->disconnect_phy_work); in __intel_tc_port_lock()
1783 if (!tc->link_refcount) in __intel_tc_port_lock()
1784 intel_tc_port_update_mode(tc, required_lanes, in __intel_tc_port_lock()
1787 drm_WARN_ON(&i915->drm, tc->mode == TC_PORT_DISCONNECTED); in __intel_tc_port_lock()
1788 drm_WARN_ON(&i915->drm, tc->mode != TC_PORT_TBT_ALT && in __intel_tc_port_lock()
1789 !tc_phy_is_owned(tc)); in __intel_tc_port_lock()
1804 struct intel_tc_port *tc = in intel_tc_port_disconnect_phy_work() local
1807 mutex_lock(&tc->lock); in intel_tc_port_disconnect_phy_work()
1809 if (!tc->link_refcount) in intel_tc_port_disconnect_phy_work()
1810 intel_tc_port_update_mode(tc, 1, true); in intel_tc_port_disconnect_phy_work()
1812 mutex_unlock(&tc->lock); in intel_tc_port_disconnect_phy_work()
1828 struct intel_tc_port *tc = to_tc_port(dig_port); in intel_tc_port_suspend() local
1830 cancel_delayed_work_sync(&tc->link_reset_work); in intel_tc_port_suspend()
1836 struct intel_tc_port *tc = to_tc_port(dig_port); in intel_tc_port_unlock() local
1838 if (!tc->link_refcount && tc->mode != TC_PORT_DISCONNECTED) in intel_tc_port_unlock()
1839 queue_delayed_work(system_unbound_wq, &tc->disconnect_phy_work, in intel_tc_port_unlock()
1842 mutex_unlock(&tc->lock); in intel_tc_port_unlock()
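Lock and unlock implement lazy PHY disconnect: __intel_tc_port_lock() cancels any pending disconnect work, and unlock re-queues it once the last user is gone. A usage sketch, assuming intel_tc_port_lock() is the public wrapper around __intel_tc_port_lock(); the queueing delay is elided by the search:

    intel_tc_port_lock(dig_port);   /* cancels disconnect_phy_work, connects PHY if needed */
    /* ... query or program the port ... */
    intel_tc_port_unlock(dig_port); /* re-arms disconnect_phy_work when unreferenced */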
1847 struct intel_tc_port *tc = to_tc_port(dig_port); in intel_tc_port_ref_held() local
1849 return mutex_is_locked(&tc->lock) || in intel_tc_port_ref_held()
1850 tc->link_refcount; in intel_tc_port_ref_held()
1856 struct intel_tc_port *tc = to_tc_port(dig_port); in intel_tc_port_get_link() local
1858 __intel_tc_port_lock(tc, required_lanes); in intel_tc_port_get_link()
1859 __intel_tc_port_get_link(tc); in intel_tc_port_get_link()
1865 struct intel_tc_port *tc = to_tc_port(dig_port); in intel_tc_port_put_link() local
1868 __intel_tc_port_put_link(tc); in intel_tc_port_put_link()
1883 struct intel_tc_port *tc; in intel_tc_port_init() local
1890 tc = kzalloc(sizeof(*tc), GFP_KERNEL); in intel_tc_port_init()
1891 if (!tc) in intel_tc_port_init()
1894 dig_port->tc = tc; in intel_tc_port_init()
1895 tc->dig_port = dig_port; in intel_tc_port_init()
1898 tc->phy_ops = &xelpdp_tc_phy_ops; in intel_tc_port_init()
1900 tc->phy_ops = &adlp_tc_phy_ops; in intel_tc_port_init()
1902 tc->phy_ops = &tgl_tc_phy_ops; in intel_tc_port_init()
1904 tc->phy_ops = &icl_tc_phy_ops; in intel_tc_port_init()
1906 tc->port_name = kasprintf(GFP_KERNEL, "%c/TC#%d", port_name(port), in intel_tc_port_init()
1908 if (!tc->port_name) { in intel_tc_port_init()
1909 kfree(tc); in intel_tc_port_init()
1913 mutex_init(&tc->lock); in intel_tc_port_init()
1915 INIT_DELAYED_WORK(&tc->disconnect_phy_work, intel_tc_port_disconnect_phy_work); in intel_tc_port_init()
1916 INIT_DELAYED_WORK(&tc->link_reset_work, intel_tc_port_link_reset_work); in intel_tc_port_init()
1917 tc->legacy_port = is_legacy; in intel_tc_port_init()
1918 tc->mode = TC_PORT_DISCONNECTED; in intel_tc_port_init()
1919 tc->link_refcount = 0; in intel_tc_port_init()
1921 tc_phy_init(tc); in intel_tc_port_init()
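The four phy_ops assignments above are the arms of a platform ladder whose conditions the search elides; a plausible reconstruction, with the version checks being assumptions:

    if (DISPLAY_VER(i915) >= 14)
            tc->phy_ops = &xelpdp_tc_phy_ops;
    else if (IS_ALDERLAKE_P(i915))
            tc->phy_ops = &adlp_tc_phy_ops;
    else if (DISPLAY_VER(i915) >= 12)
            tc->phy_ops = &tgl_tc_phy_ops;
    else
            tc->phy_ops = &icl_tc_phy_ops;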
1932 kfree(dig_port->tc->port_name); in intel_tc_port_cleanup()
1933 kfree(dig_port->tc); in intel_tc_port_cleanup()
1934 dig_port->tc = NULL; in intel_tc_port_cleanup()