1 // SPDX-License-Identifier: MIT
2 //
3 // Copyright 2024 Advanced Micro Devices, Inc.
4
5 #include "dm_services.h"
6 #include "basics/dc_common.h"
7 #include "dm_helpers.h"
8 #include "core_types.h"
9 #include "resource.h"
10 #include "dccg.h"
11 #include "dce/dce_hwseq.h"
12 #include "reg_helper.h"
13 #include "abm.h"
14 #include "hubp.h"
15 #include "dchubbub.h"
16 #include "timing_generator.h"
17 #include "opp.h"
18 #include "ipp.h"
19 #include "mpc.h"
20 #include "mcif_wb.h"
21 #include "dc_dmub_srv.h"
22 #include "link_hwss.h"
23 #include "dpcd_defs.h"
24 #include "clk_mgr.h"
25 #include "dsc.h"
26 #include "link.h"
27
28 #include "dce/dmub_hw_lock_mgr.h"
29 #include "dcn10/dcn10_cm_common.h"
30 #include "dcn20/dcn20_optc.h"
31 #include "dcn30/dcn30_cm_common.h"
32 #include "dcn32/dcn32_hwseq.h"
33 #include "dcn401_hwseq.h"
34 #include "dcn401/dcn401_resource.h"
35 #include "dc_state_priv.h"
36 #include "link_enc_cfg.h"
37
38 #define DC_LOGGER_INIT(logger)
39
40 #define CTX \
41 hws->ctx
42 #define REG(reg)\
43 hws->regs->reg
44 #define DC_LOGGER \
45 dc->ctx->logger
46
47
48 #undef FN
49 #define FN(reg_name, field_name) \
50 hws->shifts->field_name, hws->masks->field_name
51
dcn401_initialize_min_clocks(struct dc * dc)52 static void dcn401_initialize_min_clocks(struct dc *dc)
53 {
54 struct dc_clocks *clocks = &dc->current_state->bw_ctx.bw.dcn.clk;
55
56 clocks->dcfclk_deep_sleep_khz = DCN3_2_DCFCLK_DS_INIT_KHZ;
57 clocks->dcfclk_khz = dc->clk_mgr->bw_params->clk_table.entries[0].dcfclk_mhz * 1000;
58 clocks->socclk_khz = dc->clk_mgr->bw_params->clk_table.entries[0].socclk_mhz * 1000;
59 clocks->dramclk_khz = dc->clk_mgr->bw_params->clk_table.entries[0].memclk_mhz * 1000;
60 clocks->dppclk_khz = dc->clk_mgr->bw_params->clk_table.entries[0].dppclk_mhz * 1000;
61 if (dc->debug.disable_boot_optimizations) {
62 clocks->dispclk_khz = dc->clk_mgr->bw_params->clk_table.entries[0].dispclk_mhz * 1000;
63 } else {
64 /* Even though DPG_EN = 1 for the connected display, it still requires the
65 * correct timing so we cannot set DISPCLK to min freq or it could cause
66 * audio corruption. Read current DISPCLK from DENTIST and request the same
67 * freq to ensure that the timing is valid and unchanged.
68 */
69 clocks->dispclk_khz = dc->clk_mgr->funcs->get_dispclk_from_dentist(dc->clk_mgr);
70 }
71 clocks->ref_dtbclk_khz = dc->clk_mgr->bw_params->clk_table.entries[0].dtbclk_mhz * 1000;
72 clocks->fclk_p_state_change_support = true;
73 clocks->p_state_change_support = true;
74
75 dc->clk_mgr->funcs->update_clocks(
76 dc->clk_mgr,
77 dc->current_state,
78 true);
79 }
80
dcn401_program_gamut_remap(struct pipe_ctx * pipe_ctx)81 void dcn401_program_gamut_remap(struct pipe_ctx *pipe_ctx)
82 {
83 unsigned int i = 0;
84 struct mpc_grph_gamut_adjustment mpc_adjust;
85 unsigned int mpcc_id = pipe_ctx->plane_res.mpcc_inst;
86 struct mpc *mpc = pipe_ctx->stream_res.opp->ctx->dc->res_pool->mpc;
87
88 //For now assert if location is not pre-blend
89 if (pipe_ctx->plane_state)
90 ASSERT(pipe_ctx->plane_state->mcm_location == MPCC_MOVABLE_CM_LOCATION_BEFORE);
91
92 // program MPCC_MCM_FIRST_GAMUT_REMAP
93 memset(&mpc_adjust, 0, sizeof(mpc_adjust));
94 mpc_adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS;
95 mpc_adjust.mpcc_gamut_remap_block_id = MPCC_MCM_FIRST_GAMUT_REMAP;
96
97 if (pipe_ctx->plane_state &&
98 pipe_ctx->plane_state->gamut_remap_matrix.enable_remap == true) {
99 mpc_adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW;
100 for (i = 0; i < CSC_TEMPERATURE_MATRIX_SIZE; i++)
101 mpc_adjust.temperature_matrix[i] =
102 pipe_ctx->plane_state->gamut_remap_matrix.matrix[i];
103 }
104
105 mpc->funcs->set_gamut_remap(mpc, mpcc_id, &mpc_adjust);
106
107 // program MPCC_MCM_SECOND_GAMUT_REMAP for Bypass / Disable for now
108 mpc_adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS;
109 mpc_adjust.mpcc_gamut_remap_block_id = MPCC_MCM_SECOND_GAMUT_REMAP;
110
111 mpc->funcs->set_gamut_remap(mpc, mpcc_id, &mpc_adjust);
112
113 // program MPCC_OGAM_GAMUT_REMAP same as is currently used on DCN3x
114 memset(&mpc_adjust, 0, sizeof(mpc_adjust));
115 mpc_adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS;
116 mpc_adjust.mpcc_gamut_remap_block_id = MPCC_OGAM_GAMUT_REMAP;
117
118 if (pipe_ctx->top_pipe == NULL) {
119 if (pipe_ctx->stream->gamut_remap_matrix.enable_remap == true) {
120 mpc_adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW;
121 for (i = 0; i < CSC_TEMPERATURE_MATRIX_SIZE; i++)
122 mpc_adjust.temperature_matrix[i] =
123 pipe_ctx->stream->gamut_remap_matrix.matrix[i];
124 }
125 }
126
127 mpc->funcs->set_gamut_remap(mpc, mpcc_id, &mpc_adjust);
128 }
129
/*
 * dcn401_init_hw - one-time hardware bring-up for DCN4.01.
 *
 * Performs the boot/resume init sequence: clock manager init, DCCG init,
 * OPTC/VGA memory power defaults, reference-clock discovery, link encoder
 * init, pipe power-down (unless seamless boot), audio/backlight/ABM init,
 * clock gating, watermarks, and DMCUB capability query. The statement order
 * is part of the hardware programming contract — do not reorder.
 */
void dcn401_init_hw(struct dc *dc)
{
	struct abm **abms = dc->res_pool->multiple_abms;
	struct dce_hwseq *hws = dc->hwseq;
	struct dc_bios *dcb = dc->ctx->dc_bios;
	struct resource_pool *res_pool = dc->res_pool;
	int i;
	int edp_num;
	uint32_t backlight = MAX_BACKLIGHT_LEVEL;
	uint32_t user_level = MAX_BACKLIGHT_LEVEL;
	int current_dchub_ref_freq = 0;

	if (dc->clk_mgr && dc->clk_mgr->funcs && dc->clk_mgr->funcs->init_clocks) {
		dc->clk_mgr->funcs->init_clocks(dc->clk_mgr);

		// mark dcmode limits present if any clock has distinct AC and DC values from SMU
		dc->caps.dcmode_power_limits_present =
			(dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_dcfclk_levels && dc->clk_mgr->bw_params->dc_mode_limit.dcfclk_mhz) ||
			(dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_dispclk_levels && dc->clk_mgr->bw_params->dc_mode_limit.dispclk_mhz) ||
			(dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_dtbclk_levels && dc->clk_mgr->bw_params->dc_mode_limit.dtbclk_mhz) ||
			(dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_fclk_levels && dc->clk_mgr->bw_params->dc_mode_limit.fclk_mhz) ||
			(dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_memclk_levels && dc->clk_mgr->bw_params->dc_mode_limit.memclk_mhz) ||
			(dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_socclk_levels && dc->clk_mgr->bw_params->dc_mode_limit.socclk_mhz);
	}

	// Initialize the dccg
	if (res_pool->dccg->funcs->dccg_init)
		res_pool->dccg->funcs->dccg_init(res_pool->dccg);

	// Disable DMUB Initialization until IPS state programming is finalized
	//if (!dcb->funcs->is_accelerated_mode(dcb)) {
	//	hws->funcs.bios_golden_init(dc);
	//}

	// Set default OPTC memory power states
	if (dc->debug.enable_mem_low_power.bits.optc) {
		// Shutdown when unassigned and light sleep in VBLANK
		REG_SET_2(ODM_MEM_PWR_CTRL3, 0, ODM_MEM_UNASSIGNED_PWR_MODE, 3, ODM_MEM_VBLANK_PWR_MODE, 1);
	}

	if (dc->debug.enable_mem_low_power.bits.vga) {
		// Power down VGA memory
		REG_UPDATE(MMHUBBUB_MEM_PWR_CNTL, VGA_MEM_PWR_FORCE, 1);
	}

	/* Derive DCCG/DCHUB reference clocks from the VBIOS crystal frequency;
	 * without valid fw_info there is no reference clock source at all.
	 */
	if (dc->ctx->dc_bios->fw_info_valid) {
		res_pool->ref_clocks.xtalin_clock_inKhz =
			dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency;

		if (res_pool->hubbub) {
			(res_pool->dccg->funcs->get_dccg_ref_freq)(res_pool->dccg,
				dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency,
				&res_pool->ref_clocks.dccg_ref_clock_inKhz);

			/* remember the pre-init DCHUB ref freq (MHz) so we can
			 * detect a change later and refresh the bounding box
			 */
			current_dchub_ref_freq = res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000;

			(res_pool->hubbub->funcs->get_dchub_ref_freq)(res_pool->hubbub,
				res_pool->ref_clocks.dccg_ref_clock_inKhz,
				&res_pool->ref_clocks.dchub_ref_clock_inKhz);
		} else {
			// Not all ASICs have DCCG sw component
			res_pool->ref_clocks.dccg_ref_clock_inKhz =
				res_pool->ref_clocks.xtalin_clock_inKhz;
			res_pool->ref_clocks.dchub_ref_clock_inKhz =
				res_pool->ref_clocks.xtalin_clock_inKhz;
		}
	} else
		ASSERT_CRITICAL(false);

	for (i = 0; i < dc->link_count; i++) {
		/* Power up AND update implementation according to the
		 * required signal (which may be different from the
		 * default signal on connector).
		 */
		struct dc_link *link = dc->links[i];

		link->link_enc->funcs->hw_init(link->link_enc);

		/* Check for enabled DIG to identify enabled display */
		if (link->link_enc->funcs->is_dig_enabled &&
			link->link_enc->funcs->is_dig_enabled(link->link_enc)) {
			link->link_status.link_active = true;
			link->phy_state.symclk_state = SYMCLK_ON_TX_ON;
			if (link->link_enc->funcs->fec_is_active &&
					link->link_enc->funcs->fec_is_active(link->link_enc))
				link->fec_state = dc_link_fec_enabled;
		}
	}

	/* enable_power_gating_plane before dsc_pg_control because
	 * FORCEON = 1 with hw default value on bootup, resume from s3
	 */
	if (hws->funcs.enable_power_gating_plane)
		hws->funcs.enable_power_gating_plane(dc->hwseq, true);

	/* we want to turn off all dp displays before doing detection */
	dc->link_srv->blank_all_dp_displays(dc);

	/* If taking control over from VBIOS, we may want to optimize our first
	 * mode set, so we need to skip powering down pipes until we know which
	 * pipes we want to use.
	 * Otherwise, if taking control is not possible, we need to power
	 * everything down.
	 */
	if (dcb->funcs->is_accelerated_mode(dcb) || !dc->config.seamless_boot_edp_requested) {
		/* Disable boot optimizations means power down everything including PHY, DIG,
		 * and OTG (i.e. the boot is not optimized because we do a full power down).
		 */
		if (dc->hwss.enable_accelerated_mode && dc->debug.disable_boot_optimizations)
			dc->hwss.enable_accelerated_mode(dc, dc->current_state);
		else
			hws->funcs.init_pipes(dc, dc->current_state);

		if (dc->res_pool->hubbub->funcs->allow_self_refresh_control)
			dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub,
					!dc->res_pool->hubbub->ctx->dc->debug.disable_stutter);

		dcn401_initialize_min_clocks(dc);

		/* On HW init, allow idle optimizations after pipes have been turned off.
		 *
		 * In certain D3 cases (i.e. BOCO / BOMACO) it's possible that hardware state
		 * is reset (i.e. not in idle at the time hw init is called), but software state
		 * still has idle_optimizations = true, so we must disable idle optimizations first
		 * (i.e. set false), then re-enable (set true).
		 */
		dc_allow_idle_optimizations(dc, false);
		dc_allow_idle_optimizations(dc, true);
	}

	/* In headless boot cases, DIG may be turned
	 * on which causes HW/SW discrepancies.
	 * To avoid this, power down hardware on boot
	 * if DIG is turned on and seamless boot not enabled
	 */
	if (!dc->config.seamless_boot_edp_requested) {
		struct dc_link *edp_links[MAX_NUM_EDP];
		struct dc_link *edp_link;

		dc_get_edp_links(dc, edp_links, &edp_num);
		if (edp_num) {
			/* eDP present: blank backlight before the full power down */
			for (i = 0; i < edp_num; i++) {
				edp_link = edp_links[i];
				if (edp_link->link_enc->funcs->is_dig_enabled &&
						edp_link->link_enc->funcs->is_dig_enabled(edp_link->link_enc) &&
						dc->hwss.edp_backlight_control &&
						hws->funcs.power_down &&
						dc->hwss.edp_power_control) {
					dc->hwss.edp_backlight_control(edp_link, false);
					hws->funcs.power_down(dc);
					dc->hwss.edp_power_control(edp_link, false);
				}
			}
		} else {
			/* No eDP: one power_down is enough if any DIG is active */
			for (i = 0; i < dc->link_count; i++) {
				struct dc_link *link = dc->links[i];

				if (link->link_enc->funcs->is_dig_enabled &&
						link->link_enc->funcs->is_dig_enabled(link->link_enc) &&
						hws->funcs.power_down) {
					hws->funcs.power_down(dc);
					break;
				}

			}
		}
	}

	for (i = 0; i < res_pool->audio_count; i++) {
		struct audio *audio = res_pool->audios[i];

		audio->funcs->hw_init(audio);
	}

	/* Restore backlight state from the panel control HW where present */
	for (i = 0; i < dc->link_count; i++) {
		struct dc_link *link = dc->links[i];

		if (link->panel_cntl) {
			backlight = link->panel_cntl->funcs->hw_init(link->panel_cntl);
			user_level = link->panel_cntl->stored_backlight_registers.USER_LEVEL;
		}
	}

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		if (abms[i] != NULL && abms[i]->funcs != NULL)
			abms[i]->funcs->abm_init(abms[i], backlight, user_level);
	}

	/* power AFMT HDMI memory TODO: may move to dis/en output save power*/
	REG_WRITE(DIO_MEM_PWR_CTRL, 0);

	if (!dc->debug.disable_clock_gate) {
		/* enable all DCN clock gating */
		REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0);

		REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0);

		REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);
	}

	dcn401_setup_hpo_hw_control(hws, true);

	if (!dcb->funcs->is_accelerated_mode(dcb) && dc->res_pool->hubbub->funcs->init_watermarks)
		dc->res_pool->hubbub->funcs->init_watermarks(dc->res_pool->hubbub);

	if (dc->clk_mgr && dc->clk_mgr->funcs && dc->clk_mgr->funcs->notify_wm_ranges)
		dc->clk_mgr->funcs->notify_wm_ranges(dc->clk_mgr);

	if (dc->res_pool->hubbub->funcs->force_pstate_change_control)
		dc->res_pool->hubbub->funcs->force_pstate_change_control(
				dc->res_pool->hubbub, false, false);

	if (dc->res_pool->hubbub->funcs->init_crb)
		dc->res_pool->hubbub->funcs->init_crb(dc->res_pool->hubbub);

	if (dc->res_pool->hubbub->funcs->set_request_limit && dc->config.sdpif_request_limit_words_per_umc > 0)
		dc->res_pool->hubbub->funcs->set_request_limit(dc->res_pool->hubbub, dc->ctx->dc_bios->vram_info.num_chans, dc->config.sdpif_request_limit_words_per_umc);

	// Get DMCUB capabilities
	if (dc->ctx->dmub_srv) {
		dc_dmub_srv_query_caps_cmd(dc->ctx->dmub_srv);
		dc->caps.dmub_caps.psr = dc->ctx->dmub_srv->dmub->feature_caps.psr;
		dc->caps.dmub_caps.mclk_sw = dc->ctx->dmub_srv->dmub->feature_caps.fw_assisted_mclk_switch_ver > 0;
		dc->caps.dmub_caps.fams_ver = dc->ctx->dmub_srv->dmub->feature_caps.fw_assisted_mclk_switch_ver;
		dc->debug.fams2_config.bits.enable &=
				dc->caps.dmub_caps.fams_ver == dc->debug.fams_version.ver; // sw & fw fams versions must match for support
		if ((!dc->debug.fams2_config.bits.enable && dc->res_pool->funcs->update_bw_bounding_box)
				|| res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000 != current_dchub_ref_freq) {
			/* update bounding box if FAMS2 disabled, or if dchub clk has changed */
			if (dc->clk_mgr)
				dc->res_pool->funcs->update_bw_bounding_box(dc,
									    dc->clk_mgr->bw_params);
		}
	}
}
365
dcn401_get_mcm_lut_xable_from_pipe_ctx(struct dc * dc,struct pipe_ctx * pipe_ctx,enum MCM_LUT_XABLE * shaper_xable,enum MCM_LUT_XABLE * lut3d_xable,enum MCM_LUT_XABLE * lut1d_xable)366 static void dcn401_get_mcm_lut_xable_from_pipe_ctx(struct dc *dc, struct pipe_ctx *pipe_ctx,
367 enum MCM_LUT_XABLE *shaper_xable,
368 enum MCM_LUT_XABLE *lut3d_xable,
369 enum MCM_LUT_XABLE *lut1d_xable)
370 {
371 enum dc_cm2_shaper_3dlut_setting shaper_3dlut_setting = DC_CM2_SHAPER_3DLUT_SETTING_BYPASS_ALL;
372 bool lut1d_enable = false;
373 struct mpc *mpc = dc->res_pool->mpc;
374 int mpcc_id = pipe_ctx->plane_res.hubp->inst;
375
376 if (!pipe_ctx->plane_state)
377 return;
378 shaper_3dlut_setting = pipe_ctx->plane_state->mcm_shaper_3dlut_setting;
379 lut1d_enable = pipe_ctx->plane_state->mcm_lut1d_enable;
380 mpc->funcs->set_movable_cm_location(mpc, MPCC_MOVABLE_CM_LOCATION_BEFORE, mpcc_id);
381 pipe_ctx->plane_state->mcm_location = MPCC_MOVABLE_CM_LOCATION_BEFORE;
382
383 *lut1d_xable = lut1d_enable ? MCM_LUT_ENABLE : MCM_LUT_DISABLE;
384
385 switch (shaper_3dlut_setting) {
386 case DC_CM2_SHAPER_3DLUT_SETTING_BYPASS_ALL:
387 *lut3d_xable = *shaper_xable = MCM_LUT_DISABLE;
388 break;
389 case DC_CM2_SHAPER_3DLUT_SETTING_ENABLE_SHAPER:
390 *lut3d_xable = MCM_LUT_DISABLE;
391 *shaper_xable = MCM_LUT_ENABLE;
392 break;
393 case DC_CM2_SHAPER_3DLUT_SETTING_ENABLE_SHAPER_3DLUT:
394 *lut3d_xable = *shaper_xable = MCM_LUT_ENABLE;
395 break;
396 }
397 }
398
/*
 * dcn401_populate_mcm_luts - program the MPCC MCM 1D LUT, shaper and 3D LUT
 * for a pipe from a dc_cm2_func_luts description.
 *
 * The 3D LUT can come from system memory (written via MPC registers) or from
 * video memory (fast-loaded by HUBP DMA); lut3d_src selects the path.
 * lut_bank_a selects which LUT RAM bank to write so the other bank can stay
 * live. Programming order within each path is hardware-mandated — keep it.
 */
void dcn401_populate_mcm_luts(struct dc *dc,
		struct pipe_ctx *pipe_ctx,
		struct dc_cm2_func_luts mcm_luts,
		bool lut_bank_a)
{
	struct dpp *dpp_base = pipe_ctx->plane_res.dpp;
	struct hubp *hubp = pipe_ctx->plane_res.hubp;
	int mpcc_id = hubp->inst;
	struct mpc *mpc = dc->res_pool->mpc;
	union mcm_lut_params m_lut_params;
	enum dc_cm2_transfer_func_source lut3d_src = mcm_luts.lut3d_data.lut3d_src;
	enum hubp_3dlut_fl_format format;
	enum hubp_3dlut_fl_mode mode;
	enum hubp_3dlut_fl_width width;
	enum hubp_3dlut_fl_addressing_mode addr_mode;
	enum hubp_3dlut_fl_crossbar_bit_slice crossbar_bit_slice_y_g;
	enum hubp_3dlut_fl_crossbar_bit_slice crossbar_bit_slice_cb_b;
	enum hubp_3dlut_fl_crossbar_bit_slice crossbar_bit_slice_cr_r;
	enum MCM_LUT_XABLE shaper_xable = MCM_LUT_DISABLE;
	enum MCM_LUT_XABLE lut3d_xable = MCM_LUT_DISABLE;
	enum MCM_LUT_XABLE lut1d_xable = MCM_LUT_DISABLE;
	bool is_17x17x17 = true;
	bool rval;

	dcn401_get_mcm_lut_xable_from_pipe_ctx(dc, pipe_ctx, &shaper_xable, &lut3d_xable, &lut1d_xable);

	/* 1D LUT */
	if (mcm_luts.lut1d_func) {
		memset(&m_lut_params, 0, sizeof(m_lut_params));
		if (mcm_luts.lut1d_func->type == TF_TYPE_HWPWL)
			m_lut_params.pwl = &mcm_luts.lut1d_func->pwl;
		else if (mcm_luts.lut1d_func->type == TF_TYPE_DISTRIBUTED_POINTS) {
			/* convert distributed points to HW PWL; on failure leave
			 * pwl NULL so the LUT is not populated
			 */
			rval = cm3_helper_translate_curve_to_hw_format(
					mcm_luts.lut1d_func,
					&dpp_base->regamma_params, false);
			m_lut_params.pwl = rval ? &dpp_base->regamma_params : NULL;
		}
		if (m_lut_params.pwl) {
			if (mpc->funcs->populate_lut)
				mpc->funcs->populate_lut(mpc, MCM_LUT_1DLUT, m_lut_params, lut_bank_a, mpcc_id);
		}
		/* enable only if both requested and a valid PWL exists */
		if (mpc->funcs->program_lut_mode)
			mpc->funcs->program_lut_mode(mpc, MCM_LUT_1DLUT, lut1d_xable && m_lut_params.pwl, lut_bank_a, mpcc_id);
	}

	/* Shaper */
	if (mcm_luts.shaper) {
		memset(&m_lut_params, 0, sizeof(m_lut_params));
		if (mcm_luts.shaper->type == TF_TYPE_HWPWL)
			m_lut_params.pwl = &mcm_luts.shaper->pwl;
		else if (mcm_luts.shaper->type == TF_TYPE_DISTRIBUTED_POINTS) {
			/* distributed-points shaper is not expected here */
			ASSERT(false);
			rval = cm3_helper_translate_curve_to_hw_format(
					mcm_luts.shaper,
					&dpp_base->regamma_params, true);
			m_lut_params.pwl = rval ? &dpp_base->regamma_params : NULL;
		}
		if (m_lut_params.pwl) {
			if (mpc->funcs->populate_lut)
				mpc->funcs->populate_lut(mpc, MCM_LUT_SHAPER, m_lut_params, lut_bank_a, mpcc_id);
		}
		if (mpc->funcs->program_lut_mode)
			mpc->funcs->program_lut_mode(mpc, MCM_LUT_SHAPER, shaper_xable, lut_bank_a, mpcc_id);
	}

	/* 3DLUT */
	switch (lut3d_src) {
	case DC_CM2_TRANSFER_FUNC_SOURCE_SYSMEM:
		/* sysmem path: disable the fast-load DMA and write via MPC */
		memset(&m_lut_params, 0, sizeof(m_lut_params));
		if (hubp->funcs->hubp_enable_3dlut_fl)
			hubp->funcs->hubp_enable_3dlut_fl(hubp, false);
		if (mcm_luts.lut3d_data.lut3d_func && mcm_luts.lut3d_data.lut3d_func->state.bits.initialized) {
			m_lut_params.lut3d = &mcm_luts.lut3d_data.lut3d_func->lut_3d;
			if (mpc->funcs->populate_lut)
				mpc->funcs->populate_lut(mpc, MCM_LUT_3DLUT, m_lut_params, lut_bank_a, mpcc_id);
			if (mpc->funcs->program_lut_mode)
				mpc->funcs->program_lut_mode(mpc, MCM_LUT_3DLUT, lut3d_xable, lut_bank_a,
						mpcc_id);
		}
		break;
	case DC_CM2_TRANSFER_FUNC_SOURCE_VIDMEM:
		/* vidmem path: configure HUBP fast-load (address, layout,
		 * format, crossbar, width) then enable the DMA
		 */
		if (mpc->funcs->program_lut_read_write_control)
			mpc->funcs->program_lut_read_write_control(mpc, MCM_LUT_3DLUT, lut_bank_a, mpcc_id);
		if (mpc->funcs->program_lut_mode)
			mpc->funcs->program_lut_mode(mpc, MCM_LUT_3DLUT, lut3d_xable, lut_bank_a, mpcc_id);
		if (mpc->funcs->program_3dlut_size)
			mpc->funcs->program_3dlut_size(mpc, is_17x17x17, mpcc_id);
		if (hubp->funcs->hubp_program_3dlut_fl_addr)
			hubp->funcs->hubp_program_3dlut_fl_addr(hubp, mcm_luts.lut3d_data.gpu_mem_params.addr);
		switch (mcm_luts.lut3d_data.gpu_mem_params.layout) {
		case DC_CM2_GPU_MEM_LAYOUT_3D_SWIZZLE_LINEAR_RGB:
			mode = hubp_3dlut_fl_mode_native_1;
			addr_mode = hubp_3dlut_fl_addressing_mode_sw_linear;
			break;
		case DC_CM2_GPU_MEM_LAYOUT_3D_SWIZZLE_LINEAR_BGR:
			mode = hubp_3dlut_fl_mode_native_2;
			addr_mode = hubp_3dlut_fl_addressing_mode_sw_linear;
			break;
		case DC_CM2_GPU_MEM_LAYOUT_1D_PACKED_LINEAR:
			mode = hubp_3dlut_fl_mode_transform;
			addr_mode = hubp_3dlut_fl_addressing_mode_simple_linear;
			break;
		default:
			mode = hubp_3dlut_fl_mode_disable;
			addr_mode = hubp_3dlut_fl_addressing_mode_sw_linear;
			break;
		}
		if (hubp->funcs->hubp_program_3dlut_fl_mode)
			hubp->funcs->hubp_program_3dlut_fl_mode(hubp, mode);

		if (hubp->funcs->hubp_program_3dlut_fl_addressing_mode)
			hubp->funcs->hubp_program_3dlut_fl_addressing_mode(hubp, addr_mode);

		switch (mcm_luts.lut3d_data.gpu_mem_params.format_params.format) {
		case DC_CM2_GPU_MEM_FORMAT_16161616_UNORM_12MSB:
		default:
			format = hubp_3dlut_fl_format_unorm_12msb_bitslice;
			break;
		case DC_CM2_GPU_MEM_FORMAT_16161616_UNORM_12LSB:
			format = hubp_3dlut_fl_format_unorm_12lsb_bitslice;
			break;
		case DC_CM2_GPU_MEM_FORMAT_16161616_FLOAT_FP1_5_10:
			format = hubp_3dlut_fl_format_float_fp1_5_10;
			break;
		}
		if (hubp->funcs->hubp_program_3dlut_fl_format)
			hubp->funcs->hubp_program_3dlut_fl_format(hubp, format);
		if (hubp->funcs->hubp_update_3dlut_fl_bias_scale)
			hubp->funcs->hubp_update_3dlut_fl_bias_scale(hubp,
					mcm_luts.lut3d_data.gpu_mem_params.format_params.float_params.bias,
					mcm_luts.lut3d_data.gpu_mem_params.format_params.float_params.scale);

		/* map component order to the fixed crossbar bit slices;
		 * only RGBA is handled today
		 */
		switch (mcm_luts.lut3d_data.gpu_mem_params.component_order) {
		case DC_CM2_GPU_MEM_PIXEL_COMPONENT_ORDER_RGBA:
		default:
			crossbar_bit_slice_cr_r = hubp_3dlut_fl_crossbar_bit_slice_0_15;
			crossbar_bit_slice_y_g = hubp_3dlut_fl_crossbar_bit_slice_16_31;
			crossbar_bit_slice_cb_b = hubp_3dlut_fl_crossbar_bit_slice_32_47;
			break;
		}

		if (hubp->funcs->hubp_program_3dlut_fl_crossbar)
			hubp->funcs->hubp_program_3dlut_fl_crossbar(hubp,
					crossbar_bit_slice_y_g,
					crossbar_bit_slice_cb_b,
					crossbar_bit_slice_cr_r);

		switch (mcm_luts.lut3d_data.gpu_mem_params.size) {
		case DC_CM2_GPU_MEM_SIZE_171717:
		default:
			width = hubp_3dlut_fl_width_17;
			break;
		case DC_CM2_GPU_MEM_SIZE_TRANSFORMED:
			width = hubp_3dlut_fl_width_transformed;
			break;
		}
		if (hubp->funcs->hubp_program_3dlut_fl_width)
			hubp->funcs->hubp_program_3dlut_fl_width(hubp, width);
		if (mpc->funcs->update_3dlut_fast_load_select)
			mpc->funcs->update_3dlut_fast_load_select(mpc, mpcc_id, hubp->inst);

		/* if the HUBP cannot fast-load, disable all MCM LUT stages
		 * rather than leave them partially programmed
		 */
		if (hubp->funcs->hubp_enable_3dlut_fl)
			hubp->funcs->hubp_enable_3dlut_fl(hubp, true);
		else {
			if (mpc->funcs->program_lut_mode) {
				mpc->funcs->program_lut_mode(mpc, MCM_LUT_SHAPER, MCM_LUT_DISABLE, lut_bank_a, mpcc_id);
				mpc->funcs->program_lut_mode(mpc, MCM_LUT_3DLUT, MCM_LUT_DISABLE, lut_bank_a, mpcc_id);
				mpc->funcs->program_lut_mode(mpc, MCM_LUT_1DLUT, MCM_LUT_DISABLE, lut_bank_a, mpcc_id);
			}
		}
		break;

	}
}
574
dcn401_trigger_3dlut_dma_load(struct dc * dc,struct pipe_ctx * pipe_ctx)575 void dcn401_trigger_3dlut_dma_load(struct dc *dc, struct pipe_ctx *pipe_ctx)
576 {
577 struct hubp *hubp = pipe_ctx->plane_res.hubp;
578
579 if (hubp->funcs->hubp_enable_3dlut_fl) {
580 hubp->funcs->hubp_enable_3dlut_fl(hubp, true);
581 }
582 }
583
dcn401_set_mcm_luts(struct pipe_ctx * pipe_ctx,const struct dc_plane_state * plane_state)584 bool dcn401_set_mcm_luts(struct pipe_ctx *pipe_ctx,
585 const struct dc_plane_state *plane_state)
586 {
587 struct dpp *dpp_base = pipe_ctx->plane_res.dpp;
588 int mpcc_id = pipe_ctx->plane_res.hubp->inst;
589 struct dc *dc = pipe_ctx->stream_res.opp->ctx->dc;
590 struct mpc *mpc = dc->res_pool->mpc;
591 bool result;
592 const struct pwl_params *lut_params = NULL;
593 bool rval;
594
595 if (plane_state->mcm_luts.lut3d_data.lut3d_src == DC_CM2_TRANSFER_FUNC_SOURCE_VIDMEM) {
596 dcn401_populate_mcm_luts(dc, pipe_ctx, plane_state->mcm_luts, plane_state->lut_bank_a);
597 return true;
598 }
599
600 mpc->funcs->set_movable_cm_location(mpc, MPCC_MOVABLE_CM_LOCATION_BEFORE, mpcc_id);
601 pipe_ctx->plane_state->mcm_location = MPCC_MOVABLE_CM_LOCATION_BEFORE;
602 // 1D LUT
603 if (plane_state->blend_tf.type == TF_TYPE_HWPWL)
604 lut_params = &plane_state->blend_tf.pwl;
605 else if (plane_state->blend_tf.type == TF_TYPE_DISTRIBUTED_POINTS) {
606 rval = cm3_helper_translate_curve_to_hw_format(&plane_state->blend_tf,
607 &dpp_base->regamma_params, false);
608 lut_params = rval ? &dpp_base->regamma_params : NULL;
609 }
610 result = mpc->funcs->program_1dlut(mpc, lut_params, mpcc_id);
611 lut_params = NULL;
612
613 // Shaper
614 if (plane_state->in_shaper_func.type == TF_TYPE_HWPWL)
615 lut_params = &plane_state->in_shaper_func.pwl;
616 else if (plane_state->in_shaper_func.type == TF_TYPE_DISTRIBUTED_POINTS) {
617 // TODO: dpp_base replace
618 rval = cm3_helper_translate_curve_to_hw_format(&plane_state->in_shaper_func,
619 &dpp_base->shaper_params, true);
620 lut_params = rval ? &dpp_base->shaper_params : NULL;
621 }
622 result &= mpc->funcs->program_shaper(mpc, lut_params, mpcc_id);
623
624 // 3D
625 if (mpc->funcs->program_3dlut) {
626 if (plane_state->lut3d_func.state.bits.initialized == 1)
627 result &= mpc->funcs->program_3dlut(mpc, &plane_state->lut3d_func.lut_3d, mpcc_id);
628 else
629 result &= mpc->funcs->program_3dlut(mpc, NULL, mpcc_id);
630 }
631
632 return result;
633 }
634
dcn401_set_output_transfer_func(struct dc * dc,struct pipe_ctx * pipe_ctx,const struct dc_stream_state * stream)635 bool dcn401_set_output_transfer_func(struct dc *dc,
636 struct pipe_ctx *pipe_ctx,
637 const struct dc_stream_state *stream)
638 {
639 int mpcc_id = pipe_ctx->plane_res.hubp->inst;
640 struct mpc *mpc = pipe_ctx->stream_res.opp->ctx->dc->res_pool->mpc;
641 const struct pwl_params *params = NULL;
642 bool ret = false;
643
644 /* program OGAM or 3DLUT only for the top pipe*/
645 if (resource_is_pipe_type(pipe_ctx, OPP_HEAD)) {
646 /*program shaper and 3dlut in MPC*/
647 ret = dcn32_set_mpc_shaper_3dlut(pipe_ctx, stream);
648 if (ret == false && mpc->funcs->set_output_gamma) {
649 if (stream->out_transfer_func.type == TF_TYPE_HWPWL)
650 params = &stream->out_transfer_func.pwl;
651 else if (pipe_ctx->stream->out_transfer_func.type ==
652 TF_TYPE_DISTRIBUTED_POINTS &&
653 cm3_helper_translate_curve_to_hw_format(
654 &stream->out_transfer_func,
655 &mpc->blender_params, false))
656 params = &mpc->blender_params;
657 /* there are no ROM LUTs in OUTGAM */
658 if (stream->out_transfer_func.type == TF_TYPE_PREDEFINED)
659 BREAK_TO_DEBUGGER();
660 }
661 }
662
663 if (mpc->funcs->set_output_gamma)
664 mpc->funcs->set_output_gamma(mpc, mpcc_id, params);
665
666 return ret;
667 }
668
dcn401_calculate_dccg_tmds_div_value(struct pipe_ctx * pipe_ctx,unsigned int * tmds_div)669 void dcn401_calculate_dccg_tmds_div_value(struct pipe_ctx *pipe_ctx,
670 unsigned int *tmds_div)
671 {
672 struct dc_stream_state *stream = pipe_ctx->stream;
673
674 if (dc_is_tmds_signal(stream->signal) || dc_is_virtual_signal(stream->signal)) {
675 if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
676 *tmds_div = PIXEL_RATE_DIV_BY_2;
677 else
678 *tmds_div = PIXEL_RATE_DIV_BY_4;
679 } else {
680 *tmds_div = PIXEL_RATE_DIV_BY_1;
681 }
682
683 if (*tmds_div == PIXEL_RATE_DIV_NA)
684 ASSERT(false);
685
686 }
687
/*
 * Compute the parameters needed by dcn401_enable_stream_timing:
 * TMDS divider, ODM OPP-head list and instance array, SYMCLK phy state,
 * DRR vertical-total parameters and the static-screen event trigger mask.
 * All results are returned through the out-pointers; manual_mode is
 * currently left untouched (kept for interface compatibility).
 */
static void enable_stream_timing_calc(
		struct pipe_ctx *pipe_ctx,
		struct dc_state *context,
		struct dc *dc,
		unsigned int *tmds_div,
		int *opp_inst,
		int *opp_cnt,
		struct pipe_ctx *opp_heads[MAX_PIPES],
		bool *manual_mode,
		struct drr_params *params,
		unsigned int *event_triggers)
{
	struct dc_stream_state *stream = pipe_ctx->stream;
	int i;

	/* divider only applies to TMDS-like signals; otherwise left as-is */
	if (dc_is_tmds_signal(stream->signal) || dc_is_virtual_signal(stream->signal))
		dcn401_calculate_dccg_tmds_div_value(pipe_ctx, tmds_div);

	/* collect all OPP heads driven by this OTG master (ODM combine) */
	*opp_cnt = resource_get_opp_heads_for_otg_master(pipe_ctx, &context->res_ctx, opp_heads);
	for (i = 0; i < *opp_cnt; i++)
		opp_inst[i] = opp_heads[i]->stream_res.opp->inst;

	/* track SYMCLK reference and bring it at least to ON_TX_OFF */
	if (dc_is_tmds_signal(stream->signal)) {
		stream->link->phy_state.symclk_ref_cnts.otg = 1;
		if (stream->link->phy_state.symclk_state == SYMCLK_OFF_TX_OFF)
			stream->link->phy_state.symclk_state = SYMCLK_ON_TX_OFF;
		else
			stream->link->phy_state.symclk_state = SYMCLK_ON_TX_ON;
	}

	params->vertical_total_min = stream->adjust.v_total_min;
	params->vertical_total_max = stream->adjust.v_total_max;
	params->vertical_total_mid = stream->adjust.v_total_mid;
	params->vertical_total_mid_frame_num = stream->adjust.v_total_mid_frame_num;

	// DRR should set trigger event to monitor surface update event
	if (stream->adjust.v_total_min != 0 && stream->adjust.v_total_max != 0)
		*event_triggers = 0x80;
}
727
dcn401_enable_stream_timing(struct pipe_ctx * pipe_ctx,struct dc_state * context,struct dc * dc)728 enum dc_status dcn401_enable_stream_timing(
729 struct pipe_ctx *pipe_ctx,
730 struct dc_state *context,
731 struct dc *dc)
732 {
733 struct dce_hwseq *hws = dc->hwseq;
734 struct dc_stream_state *stream = pipe_ctx->stream;
735 struct drr_params params = {0};
736 unsigned int event_triggers = 0;
737 int opp_cnt = 1;
738 int opp_inst[MAX_PIPES] = {0};
739 struct pipe_ctx *opp_heads[MAX_PIPES] = {0};
740 struct dc_crtc_timing patched_crtc_timing = stream->timing;
741 bool manual_mode = false;
742 unsigned int tmds_div = PIXEL_RATE_DIV_NA;
743 unsigned int unused_div = PIXEL_RATE_DIV_NA;
744 int odm_slice_width;
745 int last_odm_slice_width;
746 int i;
747
748 if (!resource_is_pipe_type(pipe_ctx, OTG_MASTER))
749 return DC_OK;
750
751 enable_stream_timing_calc(pipe_ctx, context, dc, &tmds_div, opp_inst,
752 &opp_cnt, opp_heads, &manual_mode, ¶ms, &event_triggers);
753
754 if (dc->res_pool->dccg->funcs->set_pixel_rate_div) {
755 dc->res_pool->dccg->funcs->set_pixel_rate_div(
756 dc->res_pool->dccg, pipe_ctx->stream_res.tg->inst,
757 tmds_div, unused_div);
758 }
759
760 /* TODO check if timing_changed, disable stream if timing changed */
761
762 if (opp_cnt > 1) {
763 odm_slice_width = resource_get_odm_slice_dst_width(pipe_ctx, false);
764 last_odm_slice_width = resource_get_odm_slice_dst_width(pipe_ctx, true);
765 pipe_ctx->stream_res.tg->funcs->set_odm_combine(
766 pipe_ctx->stream_res.tg,
767 opp_inst, opp_cnt,
768 odm_slice_width, last_odm_slice_width);
769 }
770
771 /* set DTBCLK_P */
772 if (dc->res_pool->dccg->funcs->set_dtbclk_p_src) {
773 if (dc_is_dp_signal(stream->signal) || dc_is_virtual_signal(stream->signal)) {
774 dc->res_pool->dccg->funcs->set_dtbclk_p_src(dc->res_pool->dccg, DPREFCLK, pipe_ctx->stream_res.tg->inst);
775 }
776 }
777
778 /* HW program guide assume display already disable
779 * by unplug sequence. OTG assume stop.
780 */
781 pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, true);
782
783 if (false == pipe_ctx->clock_source->funcs->program_pix_clk(
784 pipe_ctx->clock_source,
785 &pipe_ctx->stream_res.pix_clk_params,
786 dc->link_srv->dp_get_encoding_format(&pipe_ctx->link_config.dp_link_settings),
787 &pipe_ctx->pll_settings)) {
788 BREAK_TO_DEBUGGER();
789 return DC_ERROR_UNEXPECTED;
790 }
791
792 if (dc->hwseq->funcs.PLAT_58856_wa && (!dc_is_dp_signal(stream->signal)))
793 dc->hwseq->funcs.PLAT_58856_wa(context, pipe_ctx);
794
795 /* if we are borrowing from hblank, h_addressable needs to be adjusted */
796 if (dc->debug.enable_hblank_borrow)
797 patched_crtc_timing.h_addressable = patched_crtc_timing.h_addressable + pipe_ctx->hblank_borrow;
798
799 pipe_ctx->stream_res.tg->funcs->program_timing(
800 pipe_ctx->stream_res.tg,
801 &patched_crtc_timing,
802 (unsigned int)pipe_ctx->global_sync.dcn4x.vready_offset_pixels,
803 (unsigned int)pipe_ctx->global_sync.dcn4x.vstartup_lines,
804 (unsigned int)pipe_ctx->global_sync.dcn4x.vupdate_offset_pixels,
805 (unsigned int)pipe_ctx->global_sync.dcn4x.vupdate_vupdate_width_pixels,
806 (unsigned int)pipe_ctx->global_sync.dcn4x.pstate_keepout_start_lines,
807 pipe_ctx->stream->signal,
808 true);
809
810 for (i = 0; i < opp_cnt; i++) {
811 opp_heads[i]->stream_res.opp->funcs->opp_pipe_clock_control(
812 opp_heads[i]->stream_res.opp,
813 true);
814 opp_heads[i]->stream_res.opp->funcs->opp_program_left_edge_extra_pixel(
815 opp_heads[i]->stream_res.opp,
816 stream->timing.pixel_encoding,
817 resource_is_pipe_type(opp_heads[i], OTG_MASTER));
818 }
819
820 pipe_ctx->stream_res.opp->funcs->opp_pipe_clock_control(
821 pipe_ctx->stream_res.opp,
822 true);
823
824 hws->funcs.blank_pixel_data(dc, pipe_ctx, true);
825
826 /* VTG is within DCHUB command block. DCFCLK is always on */
827 if (false == pipe_ctx->stream_res.tg->funcs->enable_crtc(pipe_ctx->stream_res.tg)) {
828 BREAK_TO_DEBUGGER();
829 return DC_ERROR_UNEXPECTED;
830 }
831
832 hws->funcs.wait_for_blank_complete(pipe_ctx->stream_res.opp);
833
834 if (pipe_ctx->stream_res.tg->funcs->set_drr)
835 pipe_ctx->stream_res.tg->funcs->set_drr(
836 pipe_ctx->stream_res.tg, ¶ms);
837
838 /* Event triggers and num frames initialized for DRR, but can be
839 * later updated for PSR use. Note DRR trigger events are generated
840 * regardless of whether num frames met.
841 */
842 if (pipe_ctx->stream_res.tg->funcs->set_static_screen_control)
843 pipe_ctx->stream_res.tg->funcs->set_static_screen_control(
844 pipe_ctx->stream_res.tg, event_triggers, 2);
845
846 /* TODO program crtc source select for non-virtual signal*/
847 /* TODO program FMT */
848 /* TODO setup link_enc */
849 /* TODO set stream attributes */
850 /* TODO program audio */
851 /* TODO enable stream if timing changed */
852 /* TODO unblank stream if DP */
853
854 if (dc_state_get_pipe_subvp_type(context, pipe_ctx) == SUBVP_PHANTOM) {
855 if (pipe_ctx->stream_res.tg->funcs->phantom_crtc_post_enable)
856 pipe_ctx->stream_res.tg->funcs->phantom_crtc_post_enable(pipe_ctx->stream_res.tg);
857 }
858
859 return DC_OK;
860 }
861
get_phyd32clk_src(struct dc_link * link)862 static enum phyd32clk_clock_source get_phyd32clk_src(struct dc_link *link)
863 {
864 switch (link->link_enc->transmitter) {
865 case TRANSMITTER_UNIPHY_A:
866 return PHYD32CLKA;
867 case TRANSMITTER_UNIPHY_B:
868 return PHYD32CLKB;
869 case TRANSMITTER_UNIPHY_C:
870 return PHYD32CLKC;
871 case TRANSMITTER_UNIPHY_D:
872 return PHYD32CLKD;
873 case TRANSMITTER_UNIPHY_E:
874 return PHYD32CLKE;
875 default:
876 return PHYD32CLKA;
877 }
878 }
879
/* Derive per-stream enable parameters for dcn401_enable_stream():
 * HPO stream encoder instance (128b/132b only), PHYD32 clock source,
 * TMDS pixel-rate divider, and the DP early-control value.
 *
 * Note: *early_control is only written when lane_count != 0; the final
 * check reads the caller-initialized value otherwise.
 */
static void dcn401_enable_stream_calc(
		struct pipe_ctx *pipe_ctx,
		int *dp_hpo_inst,
		enum phyd32clk_clock_source *phyd32clk,
		unsigned int *tmds_div,
		uint32_t *early_control)
{

	struct dc *dc = pipe_ctx->stream->ctx->dc;
	struct dc_crtc_timing *timing = &pipe_ctx->stream->timing;
	enum dc_lane_count lanes =
			pipe_ctx->stream->link->cur_link_settings.lane_count;
	uint32_t h_active_with_borders;

	/* HPO encoder instance only applies to 128b/132b (DP2.x) streams */
	if (dc->link_srv->dp_is_128b_132b_signal(pipe_ctx))
		*dp_hpo_inst = pipe_ctx->stream_res.hpo_dp_stream_enc->inst;

	*phyd32clk = get_phyd32clk_src(pipe_ctx->stream->link);

	/* TMDS needs a computed divider; all other signals run undivided */
	if (dc_is_tmds_signal(pipe_ctx->stream->signal))
		dcn401_calculate_dccg_tmds_div_value(pipe_ctx, tmds_div);
	else
		*tmds_div = PIXEL_RATE_DIV_BY_1;

	/* enable early control to avoid corruption on DP monitor */
	h_active_with_borders = timing->h_addressable
			+ timing->h_border_left
			+ timing->h_border_right;

	if (lanes != 0)
		*early_control = h_active_with_borders % lanes;

	if (*early_control == 0)
		*early_control = lanes;

}
917
/* Enable a stream on DCN4.01: route stream/symbol clocks in DCCG, program
 * the pixel-rate divider, set up the stream encoder, program info frames,
 * and finally apply early control on the timing generator.
 */
void dcn401_enable_stream(struct pipe_ctx *pipe_ctx)
{
	uint32_t early_control = 0;
	struct timing_generator *tg = pipe_ctx->stream_res.tg;
	struct dc_link *link = pipe_ctx->stream->link;
	const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res);
	struct dc *dc = pipe_ctx->stream->ctx->dc;
	struct dccg *dccg = dc->res_pool->dccg;
	enum phyd32clk_clock_source phyd32clk;
	int dp_hpo_inst = 0;
	unsigned int tmds_div = PIXEL_RATE_DIV_NA;
	unsigned int unused_div = PIXEL_RATE_DIV_NA;
	struct link_encoder *link_enc = link_enc_cfg_get_link_enc(pipe_ctx->stream->link);
	struct stream_encoder *stream_enc = pipe_ctx->stream_res.stream_enc;

	/* Compute HPO instance, phyd32 source, TMDS divider and early control */
	dcn401_enable_stream_calc(pipe_ctx, &dp_hpo_inst, &phyd32clk,
			&tmds_div, &early_control);

	if (dc_is_dp_signal(pipe_ctx->stream->signal) || dc_is_virtual_signal(pipe_ctx->stream->signal)) {
		if (dc->link_srv->dp_is_128b_132b_signal(pipe_ctx)) {
			/* 128b/132b (DP2.x): stream clock from DPREFCLK; symclk32
			 * stays gated until the link rate is known.
			 */
			dccg->funcs->set_dpstreamclk(dccg, DPREFCLK, tg->inst, dp_hpo_inst);
			if (link->cur_link_settings.link_rate == LINK_RATE_UNKNOWN) {
				dccg->funcs->disable_symclk32_se(dccg, dp_hpo_inst);
			} else {
				dccg->funcs->enable_symclk32_se(dccg, dp_hpo_inst, phyd32clk);
			}
		} else {
			/* 8b/10b DIO path: symclk follows the link encoder's PHY */
			dccg->funcs->enable_symclk_se(dccg, stream_enc->stream_enc_inst,
					link_enc->transmitter - TRANSMITTER_UNIPHY_A);
		}
	}

	if (dc->res_pool->dccg->funcs->set_pixel_rate_div) {
		dc->res_pool->dccg->funcs->set_pixel_rate_div(
			dc->res_pool->dccg,
			pipe_ctx->stream_res.tg->inst,
			tmds_div,
			unused_div);
	}

	link_hwss->setup_stream_encoder(pipe_ctx);

	/* Dynamic metadata engine is skipped for immediate flips */
	if (pipe_ctx->plane_state && pipe_ctx->plane_state->flip_immediate != 1) {
		if (dc->hwss.program_dmdata_engine)
			dc->hwss.program_dmdata_engine(pipe_ctx);
	}

	dc->hwss.update_info_frame(pipe_ctx);

	if (dc_is_dp_signal(pipe_ctx->stream->signal))
		dc->link_srv->dp_trace_source_sequence(link, DPCD_SOURCE_SEQ_AFTER_UPDATE_INFO_FRAME);

	/* enable early control to avoid corruption on DP monitor */
	tg->funcs->set_early_control(tg, early_control);
}
972
/* Enable or disable the HPO top-level IO via the HPO_TOP_HW_CONTROL register. */
void dcn401_setup_hpo_hw_control(const struct dce_hwseq *hws, bool enable)
{
	REG_UPDATE(HPO_TOP_HW_CONTROL, HPO_IO_EN, enable);
}
977
dcn401_can_pipe_disable_cursor(struct pipe_ctx * pipe_ctx)978 static bool dcn401_can_pipe_disable_cursor(struct pipe_ctx *pipe_ctx)
979 {
980 struct pipe_ctx *test_pipe, *split_pipe;
981 const struct scaler_data *scl_data = &pipe_ctx->plane_res.scl_data;
982 struct rect r1 = scl_data->recout, r2, r2_half;
983 int r1_r = r1.x + r1.width, r1_b = r1.y + r1.height, r2_r, r2_b;
984 int cur_layer = pipe_ctx->plane_state->layer_index;
985
986 /**
987 * Disable the cursor if there's another pipe above this with a
988 * plane that contains this pipe's viewport to prevent double cursor
989 * and incorrect scaling artifacts.
990 */
991 for (test_pipe = pipe_ctx->top_pipe; test_pipe;
992 test_pipe = test_pipe->top_pipe) {
993 // Skip invisible layer and pipe-split plane on same layer
994 if (!test_pipe->plane_state ||
995 !test_pipe->plane_state->visible ||
996 test_pipe->plane_state->layer_index == cur_layer)
997 continue;
998
999 r2 = test_pipe->plane_res.scl_data.recout;
1000 r2_r = r2.x + r2.width;
1001 r2_b = r2.y + r2.height;
1002
1003 /**
1004 * There is another half plane on same layer because of
1005 * pipe-split, merge together per same height.
1006 */
1007 for (split_pipe = pipe_ctx->top_pipe; split_pipe;
1008 split_pipe = split_pipe->top_pipe)
1009 if (split_pipe->plane_state->layer_index == test_pipe->plane_state->layer_index) {
1010 r2_half = split_pipe->plane_res.scl_data.recout;
1011 r2.x = (r2_half.x < r2.x) ? r2_half.x : r2.x;
1012 r2.width = r2.width + r2_half.width;
1013 r2_r = r2.x + r2.width;
1014 break;
1015 }
1016
1017 if (r1.x >= r2.x && r1.y >= r2.y && r1_r <= r2_r && r1_b <= r2_b)
1018 return true;
1019 }
1020
1021 return false;
1022 }
1023
/* Correct the cursor X hotspot for 2x magnification across ODM slices:
 * halve the hotspot (magnification doubles cursor pixels) and nudge it
 * right — cursors wider than 128 pixels need one extra pixel of offset.
 */
void adjust_hotspot_between_slices_for_2x_magnify(uint32_t cursor_width, struct dc_cursor_position *pos_cpy)
{
	pos_cpy->x_hotspot = (pos_cpy->x_hotspot / 2) + ((cursor_width <= 128) ? 1 : 2);
}
1034
disable_link_output_symclk_on_tx_off(struct dc_link * link,enum dp_link_encoding link_encoding)1035 static void disable_link_output_symclk_on_tx_off(struct dc_link *link, enum dp_link_encoding link_encoding)
1036 {
1037 struct dc *dc = link->ctx->dc;
1038 struct pipe_ctx *pipe_ctx = NULL;
1039 uint8_t i;
1040
1041 for (i = 0; i < MAX_PIPES; i++) {
1042 pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
1043 if (pipe_ctx->stream && pipe_ctx->stream->link == link && pipe_ctx->top_pipe == NULL) {
1044 pipe_ctx->clock_source->funcs->program_pix_clk(
1045 pipe_ctx->clock_source,
1046 &pipe_ctx->stream_res.pix_clk_params,
1047 link_encoding,
1048 &pipe_ctx->pll_settings);
1049 break;
1050 }
1051 }
1052 }
1053
/* Disable the PHY output for @link.
 *
 * eDP backlight is switched off first (unless implicit power control is
 * skipped); for other signals the PHY is locked via DMCU around the
 * reconfiguration. TMDS links whose OTG still holds a symclk reference keep
 * symclk running with TX off instead of fully disabling the output.
 */
void dcn401_disable_link_output(struct dc_link *link,
		const struct link_resource *link_res,
		enum signal_type signal)
{
	struct dc *dc = link->ctx->dc;
	const struct link_hwss *link_hwss = get_link_hwss(link, link_res);
	struct dmcu *dmcu = dc->res_pool->dmcu;

	if (signal == SIGNAL_TYPE_EDP &&
			link->dc->hwss.edp_backlight_control &&
			!link->skip_implict_edp_power_control)
		link->dc->hwss.edp_backlight_control(link, false);
	else if (dmcu != NULL && dmcu->funcs->lock_phy)
		dmcu->funcs->lock_phy(dmcu);

	if (dc_is_tmds_signal(signal) && link->phy_state.symclk_ref_cnts.otg > 0) {
		/* keep symclk alive for the OTG while TX goes off */
		disable_link_output_symclk_on_tx_off(link, DP_UNKNOWN_ENCODING);
		link->phy_state.symclk_state = SYMCLK_ON_TX_OFF;
	} else {
		link_hwss->disable_link_output(link, link_res, signal);
		link->phy_state.symclk_state = SYMCLK_OFF_TX_OFF;
	}

	/* NOTE(review): this guard tests edp_backlight_control but calls
	 * edp_power_control — confirm edp_power_control is always populated
	 * whenever edp_backlight_control is.
	 */
	if (signal == SIGNAL_TYPE_EDP &&
			link->dc->hwss.edp_backlight_control &&
			!link->skip_implict_edp_power_control)
		link->dc->hwss.edp_power_control(link, false);
	else if (dmcu != NULL && dmcu->funcs->lock_phy)
		dmcu->funcs->unlock_phy(dmcu);

	dc->link_srv->dp_trace_source_sequence(link, DPCD_SOURCE_SEQ_AFTER_DISABLE_LINK_PHY);
}
1086
dcn401_set_cursor_position(struct pipe_ctx * pipe_ctx)1087 void dcn401_set_cursor_position(struct pipe_ctx *pipe_ctx)
1088 {
1089 struct dc_cursor_position pos_cpy = pipe_ctx->stream->cursor_position;
1090 struct hubp *hubp = pipe_ctx->plane_res.hubp;
1091 struct dpp *dpp = pipe_ctx->plane_res.dpp;
1092 struct dc_cursor_mi_param param = {
1093 .pixel_clk_khz = pipe_ctx->stream->timing.pix_clk_100hz / 10,
1094 .ref_clk_khz = pipe_ctx->stream->ctx->dc->res_pool->ref_clocks.dchub_ref_clock_inKhz,
1095 .viewport = pipe_ctx->plane_res.scl_data.viewport,
1096 .recout = pipe_ctx->plane_res.scl_data.recout,
1097 .h_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.horz,
1098 .v_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.vert,
1099 .rotation = pipe_ctx->plane_state->rotation,
1100 .mirror = pipe_ctx->plane_state->horizontal_mirror,
1101 .stream = pipe_ctx->stream
1102 };
1103 struct rect odm_slice_src = { 0 };
1104 bool odm_combine_on = (pipe_ctx->next_odm_pipe != NULL) ||
1105 (pipe_ctx->prev_odm_pipe != NULL);
1106 int prev_odm_width = 0;
1107 struct pipe_ctx *prev_odm_pipe = NULL;
1108 bool mpc_combine_on = false;
1109 int bottom_pipe_x_pos = 0;
1110
1111 int x_pos = pos_cpy.x;
1112 int y_pos = pos_cpy.y;
1113 int recout_x_pos = 0;
1114 int recout_y_pos = 0;
1115
1116 if ((pipe_ctx->top_pipe != NULL) || (pipe_ctx->bottom_pipe != NULL)) {
1117 if ((pipe_ctx->plane_state->src_rect.width != pipe_ctx->plane_res.scl_data.viewport.width) ||
1118 (pipe_ctx->plane_state->src_rect.height != pipe_ctx->plane_res.scl_data.viewport.height)) {
1119 mpc_combine_on = true;
1120 }
1121 }
1122
1123 /* DCN4 moved cursor composition after Scaler, so in HW it is in
1124 * recout space and for HW Cursor position programming need to
1125 * translate to recout space.
1126 *
1127 * Cursor X and Y position programmed into HW can't be negative,
1128 * in fact it is X, Y coordinate shifted for the HW Cursor Hot spot
1129 * position that goes into HW X and Y coordinates while HW Hot spot
1130 * X and Y coordinates are length relative to the cursor top left
1131 * corner, hotspot must be smaller than the cursor size.
1132 *
1133 * DMs/DC interface for Cursor position is in stream->src space, and
1134 * DMs supposed to transform Cursor coordinates to stream->src space,
1135 * then here we need to translate Cursor coordinates to stream->dst
1136 * space, as now in HW, Cursor coordinates are in per pipe recout
1137 * space, and for the given pipe valid coordinates are only in range
1138 * from 0,0 - recout width, recout height space.
1139 * If certain pipe combining is in place, need to further adjust per
1140 * pipe to make sure each pipe enabling cursor on its part of the
1141 * screen.
1142 */
1143 x_pos = pipe_ctx->stream->dst.x + x_pos * pipe_ctx->stream->dst.width /
1144 pipe_ctx->stream->src.width;
1145 y_pos = pipe_ctx->stream->dst.y + y_pos * pipe_ctx->stream->dst.height /
1146 pipe_ctx->stream->src.height;
1147
1148 /* If the cursor's source viewport is clipped then we need to
1149 * translate the cursor to appear in the correct position on
1150 * the screen.
1151 *
1152 * This translation isn't affected by scaling so it needs to be
1153 * done *after* we adjust the position for the scale factor.
1154 *
1155 * This is only done by opt-in for now since there are still
1156 * some usecases like tiled display that might enable the
1157 * cursor on both streams while expecting dc to clip it.
1158 */
1159 if (pos_cpy.translate_by_source) {
1160 x_pos += pipe_ctx->plane_state->src_rect.x;
1161 y_pos += pipe_ctx->plane_state->src_rect.y;
1162 }
1163
1164 /* Adjust for ODM Combine
1165 * next/prev_odm_offset is to account for scaled modes that have underscan
1166 */
1167 if (odm_combine_on) {
1168 prev_odm_pipe = pipe_ctx->prev_odm_pipe;
1169
1170 while (prev_odm_pipe != NULL) {
1171 odm_slice_src = resource_get_odm_slice_src_rect(prev_odm_pipe);
1172 prev_odm_width += odm_slice_src.width;
1173 prev_odm_pipe = prev_odm_pipe->prev_odm_pipe;
1174 }
1175
1176 x_pos -= (prev_odm_width);
1177 }
1178
1179 /* If the position is negative then we need to add to the hotspot
1180 * to fix cursor size between ODM slices
1181 */
1182
1183 if (x_pos < 0) {
1184 pos_cpy.x_hotspot -= x_pos;
1185 if (hubp->curs_attr.attribute_flags.bits.ENABLE_MAGNIFICATION)
1186 adjust_hotspot_between_slices_for_2x_magnify(hubp->curs_attr.width, &pos_cpy);
1187 x_pos = 0;
1188 }
1189
1190 if (y_pos < 0) {
1191 pos_cpy.y_hotspot -= y_pos;
1192 y_pos = 0;
1193 }
1194
1195 /* If the position on bottom MPC pipe is negative then we need to add to the hotspot and
1196 * adjust x_pos on bottom pipe to make cursor visible when crossing between MPC slices.
1197 */
1198 if (mpc_combine_on &&
1199 pipe_ctx->top_pipe &&
1200 (pipe_ctx == pipe_ctx->top_pipe->bottom_pipe)) {
1201
1202 bottom_pipe_x_pos = x_pos - pipe_ctx->plane_res.scl_data.recout.x;
1203 if (bottom_pipe_x_pos < 0) {
1204 x_pos = pipe_ctx->plane_res.scl_data.recout.x;
1205 pos_cpy.x_hotspot -= bottom_pipe_x_pos;
1206 if (hubp->curs_attr.attribute_flags.bits.ENABLE_MAGNIFICATION)
1207 adjust_hotspot_between_slices_for_2x_magnify(hubp->curs_attr.width, &pos_cpy);
1208 }
1209 }
1210
1211 pos_cpy.x = (uint32_t)x_pos;
1212 pos_cpy.y = (uint32_t)y_pos;
1213
1214 if (pos_cpy.enable && dcn401_can_pipe_disable_cursor(pipe_ctx))
1215 pos_cpy.enable = false;
1216
1217 x_pos = pos_cpy.x - param.recout.x;
1218 y_pos = pos_cpy.y - param.recout.y;
1219
1220 recout_x_pos = x_pos - pos_cpy.x_hotspot;
1221 recout_y_pos = y_pos - pos_cpy.y_hotspot;
1222
1223 if (recout_x_pos >= (int)param.recout.width)
1224 pos_cpy.enable = false; /* not visible beyond right edge*/
1225
1226 if (recout_y_pos >= (int)param.recout.height)
1227 pos_cpy.enable = false; /* not visible beyond bottom edge*/
1228
1229 if (recout_x_pos + (int)hubp->curs_attr.width <= 0)
1230 pos_cpy.enable = false; /* not visible beyond left edge*/
1231
1232 if (recout_y_pos + (int)hubp->curs_attr.height <= 0)
1233 pos_cpy.enable = false; /* not visible beyond top edge*/
1234
1235 hubp->funcs->set_cursor_position(hubp, &pos_cpy, ¶m);
1236 dpp->funcs->set_cursor_position(dpp, &pos_cpy, ¶m, hubp->curs_attr.width, hubp->curs_attr.height);
1237 }
1238
dcn401_check_no_memory_request_for_cab(struct dc * dc)1239 static bool dcn401_check_no_memory_request_for_cab(struct dc *dc)
1240 {
1241 int i;
1242
1243 /* First, check no-memory-request case */
1244 for (i = 0; i < dc->current_state->stream_count; i++) {
1245 if ((dc->current_state->stream_status[i].plane_count) &&
1246 (dc->current_state->streams[i]->link->psr_settings.psr_version == DC_PSR_VERSION_UNSUPPORTED))
1247 /* Fail eligibility on a visible stream */
1248 return false;
1249 }
1250
1251 return true;
1252 }
1253
dcn401_calculate_cab_allocation(struct dc * dc,struct dc_state * ctx)1254 static uint32_t dcn401_calculate_cab_allocation(struct dc *dc, struct dc_state *ctx)
1255 {
1256 int i;
1257 uint8_t num_ways = 0;
1258 uint32_t mall_ss_size_bytes = 0;
1259
1260 mall_ss_size_bytes = ctx->bw_ctx.bw.dcn.mall_ss_size_bytes;
1261 // TODO add additional logic for PSR active stream exclusion optimization
1262 // mall_ss_psr_active_size_bytes = ctx->bw_ctx.bw.dcn.mall_ss_psr_active_size_bytes;
1263
1264 // Include cursor size for CAB allocation
1265 for (i = 0; i < dc->res_pool->pipe_count; i++) {
1266 struct pipe_ctx *pipe = &ctx->res_ctx.pipe_ctx[i];
1267
1268 if (!pipe->stream || !pipe->plane_state)
1269 continue;
1270
1271 mall_ss_size_bytes += dcn32_helper_calculate_mall_bytes_for_cursor(dc, pipe, false);
1272 }
1273
1274 // Convert number of cache lines required to number of ways
1275 if (dc->debug.force_mall_ss_num_ways > 0)
1276 num_ways = dc->debug.force_mall_ss_num_ways;
1277 else if (dc->res_pool->funcs->calculate_mall_ways_from_bytes)
1278 num_ways = dc->res_pool->funcs->calculate_mall_ways_from_bytes(dc, mall_ss_size_bytes);
1279 else
1280 num_ways = 0;
1281
1282 return num_ways;
1283 }
1284
/* Enable or disable MALL static-screen (CAB) idle power optimization by
 * sending the appropriate DMUB CAB command.
 *
 * Returns false when a DMUB service is unavailable or any stream uses PSR
 * (mutually exclusive with MALL SS); true once the command has been sent.
 */
bool dcn401_apply_idle_power_optimizations(struct dc *dc, bool enable)
{
	union dmub_rb_cmd cmd;
	uint8_t ways, i;
	int j;
	bool mall_ss_unsupported = false;
	struct dc_plane_state *plane = NULL;

	if (!dc->ctx->dmub_srv || !dc->current_state)
		return false;

	for (i = 0; i < dc->current_state->stream_count; i++) {
		/* MALL SS messaging is not supported with PSR at this time */
		if (dc->current_state->streams[i] != NULL &&
				dc->current_state->streams[i]->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED) {
			DC_LOG_MALL("MALL SS not supported with PSR at this time\n");
			return false;
		}
	}

	memset(&cmd, 0, sizeof(cmd));
	cmd.cab.header.type = DMUB_CMD__CAB_FOR_SS;
	cmd.cab.header.payload_bytes = sizeof(cmd.cab) - sizeof(cmd.cab.header);

	if (enable) {
		if (dcn401_check_no_memory_request_for_cab(dc)) {
			/* 1. Check no memory request case for CAB.
			 * If no memory request case, send CAB_ACTION NO_DCN_REQ DMUB message
			 */
			DC_LOG_MALL("sending CAB action NO_DCN_REQ\n");
			cmd.cab.header.sub_type = DMUB_CMD__CAB_NO_DCN_REQ;
		} else {
			/* 2. Check if all surfaces can fit in CAB.
			 * If surfaces can fit into CAB, send CAB_ACTION_ALLOW DMUB message
			 * and configure HUBP's to fetch from MALL
			 */
			ways = dcn401_calculate_cab_allocation(dc, dc->current_state);

			/* MALL not supported with Stereo3D or TMZ surface. If any plane is using stereo,
			 * or TMZ surface, don't try to enter MALL.
			 */
			for (i = 0; i < dc->current_state->stream_count; i++) {
				for (j = 0; j < dc->current_state->stream_status[i].plane_count; j++) {
					plane = dc->current_state->stream_status[i].plane_states[j];

					if (plane->address.type == PLN_ADDR_TYPE_GRPH_STEREO ||
							plane->address.tmz_surface) {
						mall_ss_unsupported = true;
						break;
					}
				}
				if (mall_ss_unsupported)
					break;
			}
			if (ways <= dc->caps.cache_num_ways && !mall_ss_unsupported) {
				cmd.cab.header.sub_type = DMUB_CMD__CAB_DCN_SS_FIT_IN_CAB;
				cmd.cab.cab_alloc_ways = ways;
				DC_LOG_MALL("cab allocation: %d ways. CAB action: DCN_SS_FIT_IN_CAB\n", ways);
			} else {
				cmd.cab.header.sub_type = DMUB_CMD__CAB_DCN_SS_NOT_FIT_IN_CAB;
				DC_LOG_MALL("frame does not fit in CAB: %d ways required. CAB action: DCN_SS_NOT_FIT_IN_CAB\n", ways);
			}
		}
	} else {
		/* Disable CAB */
		cmd.cab.header.sub_type = DMUB_CMD__CAB_NO_IDLE_OPTIMIZATION;
		DC_LOG_MALL("idle optimization disabled\n");
	}

	dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);

	return true;
}
1358
dcn401_wait_for_dcc_meta_propagation(const struct dc * dc,const struct pipe_ctx * top_pipe)1359 void dcn401_wait_for_dcc_meta_propagation(const struct dc *dc,
1360 const struct pipe_ctx *top_pipe)
1361 {
1362 bool is_wait_needed = false;
1363 const struct pipe_ctx *pipe_ctx = top_pipe;
1364
1365 /* check if any surfaces are updating address while using flip immediate and dcc */
1366 while (pipe_ctx != NULL) {
1367 if (pipe_ctx->plane_state &&
1368 pipe_ctx->plane_state->dcc.enable &&
1369 pipe_ctx->plane_state->flip_immediate &&
1370 pipe_ctx->plane_state->update_flags.bits.addr_update) {
1371 is_wait_needed = true;
1372 break;
1373 }
1374
1375 /* check next pipe */
1376 pipe_ctx = pipe_ctx->bottom_pipe;
1377 }
1378
1379 if (is_wait_needed && dc->debug.dcc_meta_propagation_delay_us > 0) {
1380 udelay(dc->debug.dcc_meta_propagation_delay_us);
1381 }
1382 }
1383
/* Raise clocks and program safe (non-optimized) watermarks ahead of a state
 * commit. MCLK switching is temporarily forced off, clocks are raised,
 * watermarks/arbiter/compbuf are programmed for the pending state, and the
 * original P-State support flag is restored at the end so the following
 * optimize pass sees the true value.
 */
void dcn401_prepare_bandwidth(struct dc *dc,
	struct dc_state *context)
{
	struct hubbub *hubbub = dc->res_pool->hubbub;
	bool p_state_change_support = context->bw_ctx.bw.dcn.clk.p_state_change_support;
	unsigned int compbuf_size = 0;

	/* Any transition into P-State support should disable MCLK switching first to avoid hangs */
	if (p_state_change_support) {
		dc->optimized_required = true;
		context->bw_ctx.bw.dcn.clk.p_state_change_support = false;
	}

	/* lift the DC-mode memclk softmax if the new state needs more bandwidth */
	if (dc->clk_mgr->dc_mode_softmax_enabled)
		if (dc->clk_mgr->clks.dramclk_khz <= dc->clk_mgr->bw_params->dc_mode_softmax_memclk * 1000 &&
				context->bw_ctx.bw.dcn.clk.dramclk_khz > dc->clk_mgr->bw_params->dc_mode_softmax_memclk * 1000)
			dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, dc->clk_mgr->bw_params->clk_table.entries[dc->clk_mgr->bw_params->clk_table.num_entries - 1].memclk_mhz);

	/* Increase clocks */
	dc->clk_mgr->funcs->update_clocks(
			dc->clk_mgr,
			context,
			false);

	/* program dchubbub watermarks:
	 * For assigning wm_optimized_required, use |= operator since we don't want
	 * to clear the value if the optimize has not happened yet
	 */
	dc->wm_optimized_required |= hubbub->funcs->program_watermarks(hubbub,
			&context->bw_ctx.bw.dcn.watermarks,
			dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
			false);
	/* update timeout thresholds */
	if (hubbub->funcs->program_arbiter) {
		dc->wm_optimized_required |= hubbub->funcs->program_arbiter(hubbub, &context->bw_ctx.bw.dcn.arb_regs, false);
	}

	/* decrease compbuf size */
	if (hubbub->funcs->program_compbuf_segments) {
		compbuf_size = context->bw_ctx.bw.dcn.arb_regs.compbuf_size;
		dc->wm_optimized_required |= (compbuf_size != dc->current_state->bw_ctx.bw.dcn.arb_regs.compbuf_size);

		hubbub->funcs->program_compbuf_segments(hubbub, compbuf_size, false);
	}

	/* push the (pre-optimize) FAMS2 config to DMUB under the global lock */
	if (dc->debug.fams2_config.bits.enable) {
		dcn401_fams2_global_control_lock(dc, context, true);
		dcn401_fams2_update_config(dc, context, false);
		dcn401_fams2_global_control_lock(dc, context, false);
	}

	if (p_state_change_support != context->bw_ctx.bw.dcn.clk.p_state_change_support) {
		/* After disabling P-State, restore the original value to ensure we get the correct P-State
		 * on the next optimize. */
		context->bw_ctx.bw.dcn.clk.p_state_change_support = p_state_change_support;
	}
}
1441
/* Lower clocks and apply optimized settings after a state commit:
 * enable FAMS2 if needed, program final watermarks/arbiter, restore the
 * memclk softmax when bandwidth permits, grow the compbuf, then decrease
 * clocks and program extended blank where Z-state allows.
 */
void dcn401_optimize_bandwidth(
		struct dc *dc,
		struct dc_state *context)
{
	int i;
	struct hubbub *hubbub = dc->res_pool->hubbub;

	/* enable fams2 if needed */
	if (dc->debug.fams2_config.bits.enable) {
		dcn401_fams2_global_control_lock(dc, context, true);
		dcn401_fams2_update_config(dc, context, true);
		dcn401_fams2_global_control_lock(dc, context, false);
	}

	/* program dchubbub watermarks */
	hubbub->funcs->program_watermarks(hubbub,
			&context->bw_ctx.bw.dcn.watermarks,
			dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
			true);
	/* update timeout thresholds */
	if (hubbub->funcs->program_arbiter) {
		hubbub->funcs->program_arbiter(hubbub, &context->bw_ctx.bw.dcn.arb_regs, true);
	}

	/* re-apply the DC-mode memclk softmax once the new state fits under it */
	if (dc->clk_mgr->dc_mode_softmax_enabled)
		if (dc->clk_mgr->clks.dramclk_khz > dc->clk_mgr->bw_params->dc_mode_softmax_memclk * 1000 &&
				context->bw_ctx.bw.dcn.clk.dramclk_khz <= dc->clk_mgr->bw_params->dc_mode_softmax_memclk * 1000)
			dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, dc->clk_mgr->bw_params->dc_mode_softmax_memclk);

	/* increase compbuf size */
	if (hubbub->funcs->program_compbuf_segments)
		hubbub->funcs->program_compbuf_segments(hubbub, context->bw_ctx.bw.dcn.arb_regs.compbuf_size, true);

	dc->clk_mgr->funcs->update_clocks(
			dc->clk_mgr,
			context,
			true);
	if (context->bw_ctx.bw.dcn.clk.zstate_support == DCN_ZSTATE_SUPPORT_ALLOW) {
		/* extended blank only applies to fixed-v_total streams stretched
		 * beyond nominal v_total
		 */
		for (i = 0; i < dc->res_pool->pipe_count; ++i) {
			struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

			if (pipe_ctx->stream && pipe_ctx->plane_res.hubp->funcs->program_extended_blank
				&& pipe_ctx->stream->adjust.v_total_min == pipe_ctx->stream->adjust.v_total_max
				&& pipe_ctx->stream->adjust.v_total_max > pipe_ctx->stream->timing.v_total)
					pipe_ctx->plane_res.hubp->funcs->program_extended_blank(pipe_ctx->plane_res.hubp,
						pipe_ctx->dlg_regs.min_dst_y_next_start);
		}
	}
}
1491
/* Acquire or release the DMUB global HW lock used around FAMS2 config
 * updates. No-op unless a DMUB service exists and FAMS2 is enabled.
 */
void dcn401_fams2_global_control_lock(struct dc *dc,
	struct dc_state *context,
	bool lock)
{
	/* use always for now */
	union dmub_inbox0_cmd_lock_hw hw_lock_cmd = { 0 };

	if (!dc->ctx || !dc->ctx->dmub_srv || !dc->debug.fams2_config.bits.enable)
		return;

	hw_lock_cmd.bits.command_code = DMUB_INBOX0_CMD__HW_LOCK;
	hw_lock_cmd.bits.hw_lock_client = HW_LOCK_CLIENT_DRIVER;
	hw_lock_cmd.bits.lock = lock ? 1 : 0;
	hw_lock_cmd.bits.should_release = lock ? 0 : 1;

	dmub_hw_lock_mgr_inbox0_cmd(dc->ctx->dmub_srv, hw_lock_cmd);
}
1508
dcn401_fams2_global_control_lock_fast(union block_sequence_params * params)1509 void dcn401_fams2_global_control_lock_fast(union block_sequence_params *params)
1510 {
1511 struct dc *dc = params->fams2_global_control_lock_fast_params.dc;
1512 bool lock = params->fams2_global_control_lock_fast_params.lock;
1513
1514 if (params->fams2_global_control_lock_fast_params.is_required) {
1515 union dmub_inbox0_cmd_lock_hw hw_lock_cmd = { 0 };
1516
1517 hw_lock_cmd.bits.command_code = DMUB_INBOX0_CMD__HW_LOCK;
1518 hw_lock_cmd.bits.hw_lock_client = HW_LOCK_CLIENT_DRIVER;
1519 hw_lock_cmd.bits.lock = lock;
1520 hw_lock_cmd.bits.should_release = !lock;
1521 dmub_hw_lock_mgr_inbox0_cmd(dc->ctx->dmub_srv, hw_lock_cmd);
1522 }
1523 }
1524
/* Push the FAMS2 configuration for @context to DMUB. The feature is only
 * actually enabled when both the caller requests it and the computed global
 * config requires it; otherwise DMUB is told to disable it.
 */
void dcn401_fams2_update_config(struct dc *dc, struct dc_state *context, bool enable)
{
	bool fams2_required;

	if (!dc->ctx || !dc->ctx->dmub_srv)
		return;
	if (!dc->debug.fams2_config.bits.enable)
		return;

	fams2_required = context->bw_ctx.bw.dcn.fams2_global_config.features.bits.enable;
	dc_dmub_srv_fams2_update_config(dc, context, enable && fams2_required);
}
1536
/* Reprogram DSC when the ODM configuration under an OTG master changes:
 * update DSC on the new state's stream, and disconnect DSC from any old
 * OPP head that no longer has one in the new state.
 */
static void update_dsc_for_odm_change(struct dc *dc, struct dc_state *context,
		struct pipe_ctx *otg_master)
{
	int i;
	struct pipe_ctx *old_pipe;
	struct pipe_ctx *new_pipe;
	struct pipe_ctx *old_opp_heads[MAX_PIPES];
	struct pipe_ctx *old_otg_master;
	int old_opp_head_count = 0;

	/* same pipe index in the current (old) state */
	old_otg_master = &dc->current_state->res_ctx.pipe_ctx[otg_master->pipe_idx];

	if (resource_is_pipe_type(old_otg_master, OTG_MASTER)) {
		old_opp_head_count = resource_get_opp_heads_for_otg_master(old_otg_master,
				&dc->current_state->res_ctx,
				old_opp_heads);
	} else {
		// DC cannot assume that the current state and the new state
		// share the same OTG pipe since this is not true when called
		// in the context of a commit stream not checked. Hence, set
		// old_otg_master to NULL to skip the DSC configuration.
		old_otg_master = NULL;
	}


	if (otg_master->stream_res.dsc)
		dcn32_update_dsc_on_stream(otg_master,
				otg_master->stream->timing.flags.DSC);
	if (old_otg_master && old_otg_master->stream_res.dsc) {
		/* disconnect DSC from old OPP heads that lost it in the new state */
		for (i = 0; i < old_opp_head_count; i++) {
			old_pipe = old_opp_heads[i];
			new_pipe = &context->res_ctx.pipe_ctx[old_pipe->pipe_idx];
			if (old_pipe->stream_res.dsc && !new_pipe->stream_res.dsc)
				old_pipe->stream_res.dsc->funcs->dsc_disconnect(
						old_pipe->stream_res.dsc);
		}
	}
}
1575
/* Apply an ODM configuration change on an OTG master: program ODM combine
 * (or bypass for a single slice), enable OPP clocks and left-edge extra
 * pixel for each OPP head, update DSC, and reprogram the blank pattern when
 * no plane is attached.
 */
void dcn401_update_odm(struct dc *dc, struct dc_state *context,
		struct pipe_ctx *otg_master)
{
	struct pipe_ctx *opp_heads[MAX_PIPES];
	int opp_inst[MAX_PIPES] = {0};
	int opp_head_count;
	int odm_slice_width = resource_get_odm_slice_dst_width(otg_master, false);
	int last_odm_slice_width = resource_get_odm_slice_dst_width(otg_master, true);
	int i;

	opp_head_count = resource_get_opp_heads_for_otg_master(
			otg_master, &context->res_ctx, opp_heads);

	for (i = 0; i < opp_head_count; i++)
		opp_inst[i] = opp_heads[i]->stream_res.opp->inst;
	if (opp_head_count > 1)
		otg_master->stream_res.tg->funcs->set_odm_combine(
				otg_master->stream_res.tg,
				opp_inst, opp_head_count,
				odm_slice_width, last_odm_slice_width);
	else
		otg_master->stream_res.tg->funcs->set_odm_bypass(
				otg_master->stream_res.tg,
				&otg_master->stream->timing);

	for (i = 0; i < opp_head_count; i++) {
		opp_heads[i]->stream_res.opp->funcs->opp_pipe_clock_control(
				opp_heads[i]->stream_res.opp,
				true);
		opp_heads[i]->stream_res.opp->funcs->opp_program_left_edge_extra_pixel(
				opp_heads[i]->stream_res.opp,
				opp_heads[i]->stream->timing.pixel_encoding,
				resource_is_pipe_type(opp_heads[i], OTG_MASTER));
	}

	update_dsc_for_odm_change(dc, context, otg_master);

	if (!resource_is_pipe_type(otg_master, DPP_PIPE))
		/*
		 * blank pattern is generated by OPP, reprogram blank pattern
		 * due to OPP count change
		 */
		dc->hwseq->funcs.blank_pixel_data(dc, otg_master, true);
}
1620
dcn401_unblank_stream(struct pipe_ctx * pipe_ctx,struct dc_link_settings * link_settings)1621 void dcn401_unblank_stream(struct pipe_ctx *pipe_ctx,
1622 struct dc_link_settings *link_settings)
1623 {
1624 struct encoder_unblank_param params = {0};
1625 struct dc_stream_state *stream = pipe_ctx->stream;
1626 struct dc_link *link = stream->link;
1627 struct dce_hwseq *hws = link->dc->hwseq;
1628
1629 /* calculate parameters for unblank */
1630 params.opp_cnt = resource_get_odm_slice_count(pipe_ctx);
1631
1632 params.timing = pipe_ctx->stream->timing;
1633 params.link_settings.link_rate = link_settings->link_rate;
1634 params.pix_per_cycle = pipe_ctx->stream_res.pix_clk_params.dio_se_pix_per_cycle;
1635
1636 if (link->dc->link_srv->dp_is_128b_132b_signal(pipe_ctx)) {
1637 pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_unblank(
1638 pipe_ctx->stream_res.hpo_dp_stream_enc,
1639 pipe_ctx->stream_res.tg->inst);
1640 } else if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
1641 pipe_ctx->stream_res.stream_enc->funcs->dp_unblank(link, pipe_ctx->stream_res.stream_enc, ¶ms);
1642 }
1643
1644 if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP)
1645 hws->funcs.edp_backlight_control(link, true);
1646 }
1647
dcn401_hardware_release(struct dc * dc)1648 void dcn401_hardware_release(struct dc *dc)
1649 {
1650 dc_dmub_srv_fams2_update_config(dc, dc->current_state, false);
1651
1652 /* If pstate unsupported, or still supported
1653 * by firmware, force it supported by dcn
1654 */
1655 if (dc->current_state) {
1656 if ((!dc->clk_mgr->clks.p_state_change_support ||
1657 dc->current_state->bw_ctx.bw.dcn.fams2_global_config.features.bits.enable) &&
1658 dc->res_pool->hubbub->funcs->force_pstate_change_control)
1659 dc->res_pool->hubbub->funcs->force_pstate_change_control(
1660 dc->res_pool->hubbub, true, true);
1661
1662 dc->current_state->bw_ctx.bw.dcn.clk.p_state_change_support = true;
1663 dc->clk_mgr->funcs->update_clocks(dc->clk_mgr, dc->current_state, true);
1664 }
1665 }
1666
dcn401_wait_for_det_buffer_update_under_otg_master(struct dc * dc,struct dc_state * context,struct pipe_ctx * otg_master)1667 void dcn401_wait_for_det_buffer_update_under_otg_master(struct dc *dc, struct dc_state *context, struct pipe_ctx *otg_master)
1668 {
1669 struct pipe_ctx *opp_heads[MAX_PIPES];
1670 struct pipe_ctx *dpp_pipes[MAX_PIPES];
1671 struct hubbub *hubbub = dc->res_pool->hubbub;
1672 int dpp_count = 0;
1673
1674 if (!otg_master->stream)
1675 return;
1676
1677 int slice_count = resource_get_opp_heads_for_otg_master(otg_master,
1678 &context->res_ctx, opp_heads);
1679
1680 for (int slice_idx = 0; slice_idx < slice_count; slice_idx++) {
1681 if (opp_heads[slice_idx]->plane_state) {
1682 dpp_count = resource_get_dpp_pipes_for_opp_head(
1683 opp_heads[slice_idx],
1684 &context->res_ctx,
1685 dpp_pipes);
1686 for (int dpp_idx = 0; dpp_idx < dpp_count; dpp_idx++) {
1687 struct pipe_ctx *dpp_pipe = dpp_pipes[dpp_idx];
1688 if (dpp_pipe && hubbub &&
1689 dpp_pipe->plane_res.hubp &&
1690 hubbub->funcs->wait_for_det_update)
1691 hubbub->funcs->wait_for_det_update(hubbub, dpp_pipe->plane_res.hubp->inst);
1692 }
1693 } else {
1694 if (hubbub && opp_heads[slice_idx]->plane_res.hubp && hubbub->funcs->wait_for_det_update)
1695 hubbub->funcs->wait_for_det_update(hubbub, opp_heads[slice_idx]->plane_res.hubp->inst);
1696 }
1697 }
1698 }
1699
void dcn401_interdependent_update_lock(struct dc *dc,
		struct dc_state *context, bool lock)
{
	unsigned int i = 0;
	struct pipe_ctx *pipe = NULL;
	struct timing_generator *tg = NULL;

	if (lock) {
		/* Lock every enabled, non-phantom OTG master pipe. */
		for (i = 0; i < dc->res_pool->pipe_count; i++) {
			pipe = &context->res_ctx.pipe_ctx[i];
			tg = pipe->stream_res.tg;

			if (!resource_is_pipe_type(pipe, OTG_MASTER) ||
					!tg->funcs->is_tg_enabled(tg) ||
					dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM)
				continue;

			dc->hwss.pipe_control_lock(dc, pipe, true);
		}
		return;
	}

	/* Need to free DET being used first and have pipe update, then unlock the remaining pipes */
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		pipe = &context->res_ctx.pipe_ctx[i];
		tg = pipe->stream_res.tg;

		if (!resource_is_pipe_type(pipe, OTG_MASTER) ||
				!tg->funcs->is_tg_enabled(tg) ||
				dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM)
			continue;

		if (dc->scratch.pipes_to_unlock_first[i]) {
			struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];

			dc->hwss.pipe_control_lock(dc, pipe, false);
			/* Assumes pipe of the same index in current_state is also an OTG_MASTER pipe */
			dcn401_wait_for_det_buffer_update_under_otg_master(dc, dc->current_state, old_pipe);
		}
	}

	/* Unlocking the rest of the pipes */
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		if (dc->scratch.pipes_to_unlock_first[i])
			continue;

		pipe = &context->res_ctx.pipe_ctx[i];
		tg = pipe->stream_res.tg;

		if (!resource_is_pipe_type(pipe, OTG_MASTER) ||
				!tg->funcs->is_tg_enabled(tg) ||
				dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM)
			continue;

		dc->hwss.pipe_control_lock(dc, pipe, false);
	}
}
1755
void dcn401_perform_3dlut_wa_unlock(struct pipe_ctx *pipe_ctx)
{
	/* If 3DLUT FL is enabled and 3DLUT is in use, follow the workaround sequence for pipe unlock to make sure that
	 * HUBP will properly fetch 3DLUT contents after unlock.
	 *
	 * This is meant to work around a known HW issue where VREADY will cancel the pending 3DLUT_ENABLE signal regardless
	 * of whether OTG lock is currently being held or not.
	 */
	struct pipe_ctx *wa_pipes[MAX_PIPES] = { NULL };
	struct pipe_ctx *odm_pipe, *mpc_pipe;
	int i, wa_pipe_ct = 0;

	/* Walk the full ODM/MPC tree under this pipe and collect every pipe
	 * whose plane sources its 3DLUT from video memory with shaper+3DLUT
	 * enabled - only those need the workaround.
	 */
	for (odm_pipe = pipe_ctx; odm_pipe != NULL; odm_pipe = odm_pipe->next_odm_pipe) {
		for (mpc_pipe = odm_pipe; mpc_pipe != NULL; mpc_pipe = mpc_pipe->bottom_pipe) {
			if (mpc_pipe->plane_state && mpc_pipe->plane_state->mcm_luts.lut3d_data.lut3d_src
				== DC_CM2_TRANSFER_FUNC_SOURCE_VIDMEM
				&& mpc_pipe->plane_state->mcm_shaper_3dlut_setting
				== DC_CM2_SHAPER_3DLUT_SETTING_ENABLE_SHAPER_3DLUT) {
				wa_pipes[wa_pipe_ct++] = mpc_pipe;
			}
		}
	}

	if (wa_pipe_ct > 0) {
		/* Hold VUPDATE out of the unlock window while the sequence runs. */
		if (pipe_ctx->stream_res.tg->funcs->set_vupdate_keepout)
			pipe_ctx->stream_res.tg->funcs->set_vupdate_keepout(pipe_ctx->stream_res.tg, true);

		/* Arm 3DLUT fast load on the affected pipes before unlocking. */
		for (i = 0; i < wa_pipe_ct; ++i) {
			if (wa_pipes[i]->plane_res.hubp->funcs->hubp_enable_3dlut_fl)
				wa_pipes[i]->plane_res.hubp->funcs->hubp_enable_3dlut_fl(wa_pipes[i]->plane_res.hubp, true);
		}

		pipe_ctx->stream_res.tg->funcs->unlock(pipe_ctx->stream_res.tg);
		/* Wait for the unlock to actually take effect before re-arming. */
		if (pipe_ctx->stream_res.tg->funcs->wait_update_lock_status)
			pipe_ctx->stream_res.tg->funcs->wait_update_lock_status(pipe_ctx->stream_res.tg, false);

		/* Intentionally re-arm a second time after the unlock completes,
		 * in case the first enable was cancelled by VREADY (see the
		 * workaround description above) - this is not a copy/paste error.
		 */
		for (i = 0; i < wa_pipe_ct; ++i) {
			if (wa_pipes[i]->plane_res.hubp->funcs->hubp_enable_3dlut_fl)
				wa_pipes[i]->plane_res.hubp->funcs->hubp_enable_3dlut_fl(wa_pipes[i]->plane_res.hubp, true);
		}

		if (pipe_ctx->stream_res.tg->funcs->set_vupdate_keepout)
			pipe_ctx->stream_res.tg->funcs->set_vupdate_keepout(pipe_ctx->stream_res.tg, false);
	} else {
		/* No affected pipes: plain unlock. */
		pipe_ctx->stream_res.tg->funcs->unlock(pipe_ctx->stream_res.tg);
	}
}
1803
dcn401_program_outstanding_updates(struct dc * dc,struct dc_state * context)1804 void dcn401_program_outstanding_updates(struct dc *dc,
1805 struct dc_state *context)
1806 {
1807 struct hubbub *hubbub = dc->res_pool->hubbub;
1808
1809 /* update compbuf if required */
1810 if (hubbub->funcs->program_compbuf_segments)
1811 hubbub->funcs->program_compbuf_segments(hubbub, context->bw_ctx.bw.dcn.arb_regs.compbuf_size, true);
1812 }
1813
/* Tear down the back end (OTG, audio, PHY output) owned by an OTG master
 * pipe. Caller iterates pipes so that the parent (top) pipe is reset last;
 * the shared back end is only disabled when the parent pipe is passed in.
 * On exit the pipe_ctx stream/tree pointers are cleared.
 *
 * NOTE(review): the 'context' parameter is unused in this body - presumably
 * kept for hwseq function-pointer signature compatibility; confirm against
 * the hwseq funcs table.
 */
void dcn401_reset_back_end_for_pipe(
		struct dc *dc,
		struct pipe_ctx *pipe_ctx,
		struct dc_state *context)
{
	struct dc_link *link = pipe_ctx->stream->link;
	const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res);

	DC_LOGGER_INIT(dc->ctx->logger);
	/* No stream encoder means there is no back end to reset. */
	if (pipe_ctx->stream_res.stream_enc == NULL) {
		pipe_ctx->stream = NULL;
		return;
	}

	/* DPMS may already disable or */
	/* dpms_off status is incorrect due to fastboot
	 * feature. When system resume from S4 with second
	 * screen only, the dpms_off would be true but
	 * VBIOS lit up eDP, so check link status too.
	 */
	if (!pipe_ctx->stream->dpms_off || link->link_status.link_active)
		dc->link_srv->set_dpms_off(pipe_ctx);
	else if (pipe_ctx->stream_res.audio)
		dc->hwss.disable_audio_stream(pipe_ctx);

	/* free acquired resources */
	if (pipe_ctx->stream_res.audio) {
		/*disable az_endpoint*/
		pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio);

		/*free audio*/
		if (dc->caps.dynamic_audio == true) {
			/*we have to dynamic arbitrate the audio endpoints*/
			/*we free the resource, need reset is_audio_acquired*/
			update_audio_usage(&dc->current_state->res_ctx, dc->res_pool,
					pipe_ctx->stream_res.audio, false);
			pipe_ctx->stream_res.audio = NULL;
		}
	}

	/* by upper caller loop, parent pipe: pipe0, will be reset last.
	 * back end share by all pipes and will be disable only when disable
	 * parent pipe.
	 */
	if (pipe_ctx->top_pipe == NULL) {

		dc->hwss.set_abm_immediate_disable(pipe_ctx);

		pipe_ctx->stream_res.tg->funcs->disable_crtc(pipe_ctx->stream_res.tg);

		pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, false);
		if (pipe_ctx->stream_res.tg->funcs->set_odm_bypass)
			pipe_ctx->stream_res.tg->funcs->set_odm_bypass(
					pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing);

		/* Clear any DRR (variable refresh) configuration on the OTG. */
		if (pipe_ctx->stream_res.tg->funcs->set_drr)
			pipe_ctx->stream_res.tg->funcs->set_drr(
					pipe_ctx->stream_res.tg, NULL);
		/* TODO - convert symclk_ref_cnts for otg to a bit map to solve
		 * the case where the same symclk is shared across multiple otg
		 * instances
		 */
		if (dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal))
			link->phy_state.symclk_ref_cnts.otg = 0;
		if (link->phy_state.symclk_state == SYMCLK_ON_TX_OFF) {
			link_hwss->disable_link_output(link,
					&pipe_ctx->link_res, pipe_ctx->stream->signal);
			link->phy_state.symclk_state = SYMCLK_OFF_TX_OFF;
		}

		/* reset DTBCLK_P */
		if (dc->res_pool->dccg->funcs->set_dtbclk_p_src)
			dc->res_pool->dccg->funcs->set_dtbclk_p_src(dc->res_pool->dccg, REFCLK, pipe_ctx->stream_res.tg->inst);
	}

	/*
	 * In case of a dangling plane, setting this to NULL unconditionally
	 * causes failures during reset hw ctx where, if stream is NULL,
	 * it is expected that the pipe_ctx pointers to pipes and plane are NULL.
	 */
	pipe_ctx->stream = NULL;
	pipe_ctx->top_pipe = NULL;
	pipe_ctx->bottom_pipe = NULL;
	pipe_ctx->next_odm_pipe = NULL;
	pipe_ctx->prev_odm_pipe = NULL;
	DC_LOG_DEBUG("Reset back end for pipe %d, tg:%d\n",
			pipe_ctx->pipe_idx, pipe_ctx->stream_res.tg->inst);
}
1902
dcn401_reset_hw_ctx_wrap(struct dc * dc,struct dc_state * context)1903 void dcn401_reset_hw_ctx_wrap(
1904 struct dc *dc,
1905 struct dc_state *context)
1906 {
1907 int i;
1908 struct dce_hwseq *hws = dc->hwseq;
1909
1910 /* Reset Back End*/
1911 for (i = dc->res_pool->pipe_count - 1; i >= 0 ; i--) {
1912 struct pipe_ctx *pipe_ctx_old =
1913 &dc->current_state->res_ctx.pipe_ctx[i];
1914 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
1915
1916 if (!pipe_ctx_old->stream)
1917 continue;
1918
1919 if (pipe_ctx_old->top_pipe || pipe_ctx_old->prev_odm_pipe)
1920 continue;
1921
1922 if (!pipe_ctx->stream ||
1923 pipe_need_reprogram(pipe_ctx_old, pipe_ctx)) {
1924 struct clock_source *old_clk = pipe_ctx_old->clock_source;
1925
1926 if (hws->funcs.reset_back_end_for_pipe)
1927 hws->funcs.reset_back_end_for_pipe(dc, pipe_ctx_old, dc->current_state);
1928 if (hws->funcs.enable_stream_gating)
1929 hws->funcs.enable_stream_gating(dc, pipe_ctx_old);
1930 if (old_clk)
1931 old_clk->funcs->cs_power_down(old_clk);
1932 }
1933 }
1934 }
1935
dcn401_calculate_vready_offset_for_group(struct pipe_ctx * pipe)1936 static unsigned int dcn401_calculate_vready_offset_for_group(struct pipe_ctx *pipe)
1937 {
1938 struct pipe_ctx *other_pipe;
1939 unsigned int vready_offset = pipe->global_sync.dcn4x.vready_offset_pixels;
1940
1941 /* Always use the largest vready_offset of all connected pipes */
1942 for (other_pipe = pipe->bottom_pipe; other_pipe != NULL; other_pipe = other_pipe->bottom_pipe) {
1943 if (other_pipe->global_sync.dcn4x.vready_offset_pixels > vready_offset)
1944 vready_offset = other_pipe->global_sync.dcn4x.vready_offset_pixels;
1945 }
1946 for (other_pipe = pipe->top_pipe; other_pipe != NULL; other_pipe = other_pipe->top_pipe) {
1947 if (other_pipe->global_sync.dcn4x.vready_offset_pixels > vready_offset)
1948 vready_offset = other_pipe->global_sync.dcn4x.vready_offset_pixels;
1949 }
1950 for (other_pipe = pipe->next_odm_pipe; other_pipe != NULL; other_pipe = other_pipe->next_odm_pipe) {
1951 if (other_pipe->global_sync.dcn4x.vready_offset_pixels > vready_offset)
1952 vready_offset = other_pipe->global_sync.dcn4x.vready_offset_pixels;
1953 }
1954 for (other_pipe = pipe->prev_odm_pipe; other_pipe != NULL; other_pipe = other_pipe->prev_odm_pipe) {
1955 if (other_pipe->global_sync.dcn4x.vready_offset_pixels > vready_offset)
1956 vready_offset = other_pipe->global_sync.dcn4x.vready_offset_pixels;
1957 }
1958
1959 return vready_offset;
1960 }
1961
/* Program OTG global sync and VTG parameters for one pipe.
 *
 * Global sync uses the largest vready offset among all pipes connected to
 * this one (see dcn401_calculate_vready_offset_for_group) so the whole
 * blending/ODM group observes a consistent VREADY.
 */
static void dcn401_program_tg(
	struct dc *dc,
	struct pipe_ctx *pipe_ctx,
	struct dc_state *context,
	struct dce_hwseq *hws)
{
	pipe_ctx->stream_res.tg->funcs->program_global_sync(
			pipe_ctx->stream_res.tg,
			dcn401_calculate_vready_offset_for_group(pipe_ctx),
			(unsigned int)pipe_ctx->global_sync.dcn4x.vstartup_lines,
			(unsigned int)pipe_ctx->global_sync.dcn4x.vupdate_offset_pixels,
			(unsigned int)pipe_ctx->global_sync.dcn4x.vupdate_vupdate_width_pixels,
			(unsigned int)pipe_ctx->global_sync.dcn4x.pstate_keepout_start_lines);

	/* Do not wait for VACTIVE on phantom (SubVP) pipes. */
	if (dc_state_get_pipe_subvp_type(context, pipe_ctx) != SUBVP_PHANTOM)
		pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE);

	pipe_ctx->stream_res.tg->funcs->set_vtg_params(
			pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing, true);

	if (hws->funcs.setup_vupdate_interrupt)
		hws->funcs.setup_vupdate_interrupt(dc, pipe_ctx);
}
1985
/* Program one pipe for the new state: blanking, OTG sync, plane enable,
 * DET sizing, HUBP/DPP, color/gamma LUTs, FMT, ABM and test pattern.
 * Which stages run is driven by the pipe/plane/stream update_flags set by
 * the detect-pipe-changes pass; stage order matters and must be preserved.
 */
static void dcn401_program_pipe(
	struct dc *dc,
	struct pipe_ctx *pipe_ctx,
	struct dc_state *context)
{
	struct dce_hwseq *hws = dc->hwseq;

	/* Only need to unblank on top pipe */
	if (resource_is_pipe_type(pipe_ctx, OTG_MASTER)) {
		if (pipe_ctx->update_flags.bits.enable ||
				pipe_ctx->update_flags.bits.odm ||
				pipe_ctx->stream->update_flags.bits.abm_level)
			hws->funcs.blank_pixel_data(dc, pipe_ctx,
					!pipe_ctx->plane_state ||
					!pipe_ctx->plane_state->visible);
	}

	/* Only update TG on top pipe */
	if (pipe_ctx->update_flags.bits.global_sync && !pipe_ctx->top_pipe
			&& !pipe_ctx->prev_odm_pipe)
		dcn401_program_tg(dc, pipe_ctx, context, hws);

	if (pipe_ctx->update_flags.bits.odm)
		hws->funcs.update_odm(dc, context, pipe_ctx);

	if (pipe_ctx->update_flags.bits.enable) {
		/* Prefer the hwseq-specific enable_plane hook when present. */
		if (hws->funcs.enable_plane)
			hws->funcs.enable_plane(dc, pipe_ctx, context);
		else
			dc->hwss.enable_plane(dc, pipe_ctx, context);

		if (dc->res_pool->hubbub->funcs->force_wm_propagate_to_pipes)
			dc->res_pool->hubbub->funcs->force_wm_propagate_to_pipes(dc->res_pool->hubbub);
	}

	/* Reprogram DET allocation for this pipe's HUBP when it changed. */
	if (pipe_ctx->update_flags.bits.det_size) {
		if (dc->res_pool->hubbub->funcs->program_det_size)
			dc->res_pool->hubbub->funcs->program_det_size(
					dc->res_pool->hubbub, pipe_ctx->plane_res.hubp->inst, pipe_ctx->det_buffer_size_kb);
		if (dc->res_pool->hubbub->funcs->program_det_segments)
			dc->res_pool->hubbub->funcs->program_det_segments(
					dc->res_pool->hubbub, pipe_ctx->plane_res.hubp->inst, pipe_ctx->hubp_regs.det_size);
	}

	/* Any pipe, plane or stream level change requires a HUBP/DPP update. */
	if (pipe_ctx->update_flags.raw ||
			(pipe_ctx->plane_state && pipe_ctx->plane_state->update_flags.raw) ||
			pipe_ctx->stream->update_flags.raw)
		dc->hwss.update_dchubp_dpp(dc, pipe_ctx, context);

	if (pipe_ctx->plane_state && (pipe_ctx->update_flags.bits.enable ||
			pipe_ctx->plane_state->update_flags.bits.hdr_mult))
		hws->funcs.set_hdr_multiplier(pipe_ctx);

	/* MCM LUTs double-buffer between bank A/B; flip the bank after use. */
	if (hws->funcs.populate_mcm_luts) {
		if (pipe_ctx->plane_state) {
			hws->funcs.populate_mcm_luts(dc, pipe_ctx, pipe_ctx->plane_state->mcm_luts,
					pipe_ctx->plane_state->lut_bank_a);
			pipe_ctx->plane_state->lut_bank_a = !pipe_ctx->plane_state->lut_bank_a;
		}
	}

	if (pipe_ctx->plane_state &&
			(pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change ||
			pipe_ctx->plane_state->update_flags.bits.gamma_change ||
			pipe_ctx->plane_state->update_flags.bits.lut_3d ||
			pipe_ctx->update_flags.bits.enable))
		hws->funcs.set_input_transfer_func(dc, pipe_ctx, pipe_ctx->plane_state);

	/* dcn10_translate_regamma_to_hw_format takes 750us to finish
	 * only do gamma programming for powering on, internal memcmp to avoid
	 * updating on slave planes
	 */
	if (pipe_ctx->update_flags.bits.enable ||
			pipe_ctx->update_flags.bits.plane_changed ||
			pipe_ctx->stream->update_flags.bits.out_tf ||
			(pipe_ctx->plane_state &&
			pipe_ctx->plane_state->update_flags.bits.output_tf_change))
		hws->funcs.set_output_transfer_func(dc, pipe_ctx, pipe_ctx->stream);

	/* If the pipe has been enabled or has a different opp, we
	 * should reprogram the fmt. This deals with cases where
	 * interation between mpc and odm combine on different streams
	 * causes a different pipe to be chosen to odm combine with.
	 */
	if (pipe_ctx->update_flags.bits.enable
			|| pipe_ctx->update_flags.bits.opp_changed) {

		pipe_ctx->stream_res.opp->funcs->opp_set_dyn_expansion(
				pipe_ctx->stream_res.opp,
				COLOR_SPACE_YCBCR601,
				pipe_ctx->stream->timing.display_color_depth,
				pipe_ctx->stream->signal);

		pipe_ctx->stream_res.opp->funcs->opp_program_fmt(
				pipe_ctx->stream_res.opp,
				&pipe_ctx->stream->bit_depth_params,
				&pipe_ctx->stream->clamping);
	}

	/* Set ABM pipe after other pipe configurations done */
	if ((pipe_ctx->plane_state && pipe_ctx->plane_state->visible)) {
		if (pipe_ctx->stream_res.abm) {
			dc->hwss.set_pipe(pipe_ctx);
			pipe_ctx->stream_res.abm->funcs->set_abm_level(pipe_ctx->stream_res.abm,
					pipe_ctx->stream->abm_level);
		}
	}

	/* Re-emit the display pattern generator config with bit-depth
	 * reduction cleared when the test pattern changed.
	 */
	if (pipe_ctx->update_flags.bits.test_pattern_changed) {
		struct output_pixel_processor *odm_opp = pipe_ctx->stream_res.opp;
		struct bit_depth_reduction_params params;

		memset(&params, 0, sizeof(params));
		odm_opp->funcs->opp_program_bit_depth_reduction(odm_opp, &params);
		dc->hwss.set_disp_pattern_generator(dc,
				pipe_ctx,
				pipe_ctx->stream_res.test_pattern_params.test_pattern,
				pipe_ctx->stream_res.test_pattern_params.color_space,
				pipe_ctx->stream_res.test_pattern_params.color_depth,
				NULL,
				pipe_ctx->stream_res.test_pattern_params.width,
				pipe_ctx->stream_res.test_pattern_params.height,
				pipe_ctx->stream_res.test_pattern_params.offset);
	}
}
2111
/* Program all front-end (plane-side) state for the new context. Runs as a
 * sequence of passes over every pipe; pass order is the contract:
 * triple-buffer off -> detect changes -> re-enable phantom OTGs being
 * disabled -> OTG blank -> MPCC disconnect (with DET freeing) -> ODM update
 * for blanked masters -> per-pipe programming top-down through each MPC tree.
 */
void dcn401_program_front_end_for_ctx(
		struct dc *dc,
		struct dc_state *context)
{
	int i;
	unsigned int prev_hubp_count = 0;
	unsigned int hubp_count = 0;
	struct dce_hwseq *hws = dc->hwseq;
	struct pipe_ctx *pipe = NULL;

	DC_LOGGER_INIT(dc->ctx->logger);

	if (resource_is_pipe_topology_changed(dc->current_state, context))
		resource_log_pipe_topology_update(dc, context);

	if (dc->hwss.program_triplebuffer != NULL && dc->debug.enable_tri_buf) {
		for (i = 0; i < dc->res_pool->pipe_count; i++) {
			pipe = &context->res_ctx.pipe_ctx[i];

			if (!pipe->top_pipe && !pipe->prev_odm_pipe && pipe->plane_state) {
				/* Full updates must not run with triple buffering on. */
				if (pipe->plane_state->triplebuffer_flips)
					BREAK_TO_DEBUGGER();

				/*turn off triple buffer for full update*/
				dc->hwss.program_triplebuffer(
						dc, pipe, pipe->plane_state->triplebuffer_flips);
			}
		}
	}

	/* Count planes in the outgoing and incoming states. */
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		if (dc->current_state->res_ctx.pipe_ctx[i].plane_state)
			prev_hubp_count++;
		if (context->res_ctx.pipe_ctx[i].plane_state)
			hubp_count++;
	}

	/* Transitioning from zero planes to some: force P-state allow before
	 * enabling any HUBP. NOTE(review): the 500us settle delay is as
	 * written here - no further grounding visible in this file.
	 */
	if (prev_hubp_count == 0 && hubp_count > 0) {
		if (dc->res_pool->hubbub->funcs->force_pstate_change_control)
			dc->res_pool->hubbub->funcs->force_pstate_change_control(
					dc->res_pool->hubbub, true, false);
		udelay(500);
	}

	/* Set pipe update flags and lock pipes */
	for (i = 0; i < dc->res_pool->pipe_count; i++)
		dc->hwss.detect_pipe_changes(dc->current_state, context, &dc->current_state->res_ctx.pipe_ctx[i],
				&context->res_ctx.pipe_ctx[i]);

	/* When disabling phantom pipes, turn on phantom OTG first (so we can get double
	 * buffer updates properly)
	 */
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct dc_stream_state *stream = dc->current_state->res_ctx.pipe_ctx[i].stream;

		pipe = &dc->current_state->res_ctx.pipe_ctx[i];

		if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable && stream &&
				dc_state_get_pipe_subvp_type(dc->current_state, pipe) == SUBVP_PHANTOM) {
			struct timing_generator *tg = dc->current_state->res_ctx.pipe_ctx[i].stream_res.tg;

			if (tg->funcs->enable_crtc) {
				/* Blank before enabling the phantom CRTC. */
				if (dc->hwseq->funcs.blank_pixel_data)
					dc->hwseq->funcs.blank_pixel_data(dc, pipe, true);

				tg->funcs->enable_crtc(tg);
			}
		}
	}
	/* OTG blank before disabling all front ends */
	for (i = 0; i < dc->res_pool->pipe_count; i++)
		if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable
				&& !context->res_ctx.pipe_ctx[i].top_pipe
				&& !context->res_ctx.pipe_ctx[i].prev_odm_pipe
				&& context->res_ctx.pipe_ctx[i].stream)
			hws->funcs.blank_pixel_data(dc, &context->res_ctx.pipe_ctx[i], true);


	/* Disconnect mpcc */
	for (i = 0; i < dc->res_pool->pipe_count; i++)
		if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable
				|| context->res_ctx.pipe_ctx[i].update_flags.bits.opp_changed) {
			struct hubbub *hubbub = dc->res_pool->hubbub;

			/* Phantom pipe DET should be 0, but if a pipe in use is being transitioned to phantom
			 * then we want to do the programming here (effectively it's being disabled). If we do
			 * the programming later the DET won't be updated until the OTG for the phantom pipe is
			 * turned on (i.e. in an MCLK switch) which can come in too late and cause issues with
			 * DET allocation.
			 */
			if ((context->res_ctx.pipe_ctx[i].update_flags.bits.disable ||
					(context->res_ctx.pipe_ctx[i].plane_state &&
					dc_state_get_pipe_subvp_type(context, &context->res_ctx.pipe_ctx[i]) ==
					SUBVP_PHANTOM))) {
				if (hubbub->funcs->program_det_size)
					hubbub->funcs->program_det_size(hubbub,
							dc->current_state->res_ctx.pipe_ctx[i].plane_res.hubp->inst, 0);
				if (dc->res_pool->hubbub->funcs->program_det_segments)
					dc->res_pool->hubbub->funcs->program_det_segments(
							hubbub, dc->current_state->res_ctx.pipe_ctx[i].plane_res.hubp->inst, 0);
			}
			hws->funcs.plane_atomic_disconnect(dc, dc->current_state,
					&dc->current_state->res_ctx.pipe_ctx[i]);
			DC_LOG_DC("Reset mpcc for pipe %d\n", dc->current_state->res_ctx.pipe_ctx[i].pipe_idx);
		}

	/* update ODM for blanked OTG master pipes */
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		pipe = &context->res_ctx.pipe_ctx[i];
		if (resource_is_pipe_type(pipe, OTG_MASTER) &&
				!resource_is_pipe_type(pipe, DPP_PIPE) &&
				pipe->update_flags.bits.odm &&
				hws->funcs.update_odm)
			hws->funcs.update_odm(dc, context, pipe);
	}

	/*
	 * Program all updated pipes, order matters for mpcc setup. Start with
	 * top pipe and program all pipes that follow in order
	 */
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		pipe = &context->res_ctx.pipe_ctx[i];

		if (pipe->plane_state && !pipe->top_pipe) {
			while (pipe) {
				if (hws->funcs.program_pipe)
					hws->funcs.program_pipe(dc, pipe, context);
				else {
					/* Don't program phantom pipes in the regular front end programming sequence.
					 * There is an MPO transition case where a pipe being used by a video plane is
					 * transitioned directly to be a phantom pipe when closing the MPO video.
					 * However the phantom pipe will program a new HUBP_VTG_SEL (update takes place
					 * right away) but the MPO still exists until the double buffered update of the
					 * main pipe so we will get a frame of underflow if the phantom pipe is
					 * programmed here.
					 */
					if (pipe->stream &&
							dc_state_get_pipe_subvp_type(context, pipe) != SUBVP_PHANTOM)
						dcn401_program_pipe(dc, pipe, context);
				}

				pipe = pipe->bottom_pipe;
			}
		}

		/* Program secondary blending tree and writeback pipes */
		pipe = &context->res_ctx.pipe_ctx[i];
		if (!pipe->top_pipe && !pipe->prev_odm_pipe
				&& pipe->stream && pipe->stream->num_wb_info > 0
				&& (pipe->update_flags.raw || (pipe->plane_state && pipe->plane_state->update_flags.raw)
					|| pipe->stream->update_flags.raw)
				&& hws->funcs.program_all_writeback_pipes_in_tree)
			hws->funcs.program_all_writeback_pipes_in_tree(dc, pipe->stream, context);

		/* Avoid underflow by check of pipe line read when adding 2nd plane. */
		if (hws->wa.wait_hubpret_read_start_during_mpo_transition &&
				!pipe->top_pipe &&
				pipe->stream &&
				pipe->plane_res.hubp->funcs->hubp_wait_pipe_read_start &&
				dc->current_state->stream_status[0].plane_count == 1 &&
				context->stream_status[0].plane_count > 1) {
			pipe->plane_res.hubp->funcs->hubp_wait_pipe_read_start(pipe->plane_res.hubp);
		}
	}
}
2277
/* Front-end programming that must run after pipes are unlocked: OPP reset
 * for released heads, plane disables, waits for flip-pending and ODM
 * double-buffer completion, phantom pipe programming, P-state force
 * management, MALL config and two stutter/self-refresh workarounds.
 */
void dcn401_post_unlock_program_front_end(
		struct dc *dc,
		struct dc_state *context)
{
	// Timeout for pipe enable
	unsigned int timeout_us = 100000;
	unsigned int polling_interval_us = 1;
	struct dce_hwseq *hwseq = dc->hwseq;
	int i;

	DC_LOGGER_INIT(dc->ctx->logger);

	/* Reset OPPs that were heads in the old state but no longer are. */
	for (i = 0; i < dc->res_pool->pipe_count; i++)
		if (resource_is_pipe_type(&dc->current_state->res_ctx.pipe_ctx[i], OPP_HEAD) &&
				!resource_is_pipe_type(&context->res_ctx.pipe_ctx[i], OPP_HEAD))
			dc->hwss.post_unlock_reset_opp(dc,
					&dc->current_state->res_ctx.pipe_ctx[i]);

	for (i = 0; i < dc->res_pool->pipe_count; i++)
		if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable)
			dc->hwss.disable_plane(dc, dc->current_state, &dc->current_state->res_ctx.pipe_ctx[i]);

	/*
	 * If we are enabling a pipe, we need to wait for pending clear as this is a critical
	 * part of the enable operation otherwise, DM may request an immediate flip which
	 * will cause HW to perform an "immediate enable" (as opposed to "vsync enable") which
	 * is unsupported on DCN.
	 */
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
		// Don't check flip pending on phantom pipes
		if (pipe->plane_state && !pipe->top_pipe && pipe->update_flags.bits.enable &&
				dc_state_get_pipe_subvp_type(context, pipe) != SUBVP_PHANTOM) {
			struct hubp *hubp = pipe->plane_res.hubp;
			int j = 0;

			/* Poll up to timeout_us for the pending flip to clear. */
			for (j = 0; j < timeout_us / polling_interval_us
					&& hubp->funcs->hubp_is_flip_pending(hubp); j++)
				udelay(polling_interval_us);
		}
	}

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
		struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];

		/* When going from a smaller ODM slice count to larger, we must ensure double
		 * buffer update completes before we return to ensure we don't reduce DISPCLK
		 * before we've transitioned to 2:1 or 4:1
		 */
		if (resource_is_pipe_type(old_pipe, OTG_MASTER) && resource_is_pipe_type(pipe, OTG_MASTER) &&
				resource_get_odm_slice_count(old_pipe) < resource_get_odm_slice_count(pipe) &&
				dc_state_get_pipe_subvp_type(context, pipe) != SUBVP_PHANTOM) {
			int j = 0;
			struct timing_generator *tg = pipe->stream_res.tg;

			if (tg->funcs->get_optc_double_buffer_pending) {
				for (j = 0; j < timeout_us / polling_interval_us
						&& tg->funcs->get_optc_double_buffer_pending(tg); j++)
					udelay(polling_interval_us);
			}
		}
	}

	/* Release any P-state force applied earlier in the sequence. */
	if (dc->res_pool->hubbub->funcs->force_pstate_change_control)
		dc->res_pool->hubbub->funcs->force_pstate_change_control(
				dc->res_pool->hubbub, false, false);


	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];

		if (pipe->plane_state && !pipe->top_pipe) {
			/* Program phantom pipe here to prevent a frame of underflow in the MPO transition
			 * case (if a pipe being used for a video plane transitions to a phantom pipe, it
			 * can underflow due to HUBP_VTG_SEL programming if done in the regular front end
			 * programming sequence).
			 */
			while (pipe) {
				if (pipe->stream && dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM) {
					/* When turning on the phantom pipe we want to run through the
					 * entire enable sequence, so apply all the "enable" flags.
					 */
					if (dc->hwss.apply_update_flags_for_phantom)
						dc->hwss.apply_update_flags_for_phantom(pipe);
					if (dc->hwss.update_phantom_vp_position)
						dc->hwss.update_phantom_vp_position(dc, context, pipe);
					dcn401_program_pipe(dc, pipe, context);
				}
				pipe = pipe->bottom_pipe;
			}
		}
	}

	if (!hwseq)
		return;

	/* P-State support transitions:
	 * Natural -> FPO: P-State disabled in prepare, force disallow anytime is safe
	 * FPO -> Natural: Unforce anytime after FW disable is safe (P-State will assert naturally)
	 * Unsupported -> FPO: P-State enabled in optimize, force disallow anytime is safe
	 * FPO -> Unsupported: P-State disabled in prepare, unforce disallow anytime is safe
	 * FPO <-> SubVP: Force disallow is maintained on the FPO / SubVP pipes
	 */
	if (hwseq->funcs.update_force_pstate)
		dc->hwseq->funcs.update_force_pstate(dc, context);

	/* Only program the MALL registers after all the main and phantom pipes
	 * are done programming.
	 */
	if (hwseq->funcs.program_mall_pipe_config)
		hwseq->funcs.program_mall_pipe_config(dc, context);

	/* WA to apply WM setting*/
	if (hwseq->wa.DEGVIDCN21)
		dc->res_pool->hubbub->funcs->apply_DEDCN21_147_wa(dc->res_pool->hubbub);


	/* WA for stutter underflow during MPO transitions when adding 2nd plane */
	if (hwseq->wa.disallow_self_refresh_during_multi_plane_transition) {

		if (dc->current_state->stream_status[0].plane_count == 1 &&
				context->stream_status[0].plane_count > 1) {

			struct timing_generator *tg = dc->res_pool->timing_generators[0];

			dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub, false);

			/* Record when the WA was applied so it can be lifted later. */
			hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied = true;
			hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied_on_frame =
					tg->funcs->get_frame_count(tg);
		}
	}
}
2412
/*
 * dcn401_update_bandwidth - revalidate DML bandwidth for @context and
 * reprogram the affected per-pipe timing and HUBP state.
 *
 * Runs validate_bandwidth() in "fast" mode (third arg false) to recompute
 * DML parameters, raises clocks via prepare_bandwidth(), then walks every
 * pipe in the context and reprograms global sync, VTG parameters, blanking
 * and HUBP registers for pipes that have a plane attached.
 *
 * Return: true on success; false if bandwidth validation fails, in which
 * case no hardware programming is performed.
 */
bool dcn401_update_bandwidth(
		struct dc *dc,
		struct dc_state *context)
{
	int i;
	struct dce_hwseq *hws = dc->hwseq;

	/* recalculate DML parameters */
	if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false))
		return false;

	/* apply updated bandwidth parameters */
	dc->hwss.prepare_bandwidth(dc, context);

	/* update hubp configs for all pipes */
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

		if (pipe_ctx->plane_state == NULL)
			continue;

		/* Timing-generator-level programming is done once per pipe
		 * tree, on the top pipe only; secondary (MPO) pipes share the
		 * same OTG and must not reprogram it.
		 */
		if (pipe_ctx->top_pipe == NULL) {
			bool blank = !is_pipe_tree_visible(pipe_ctx);

			/* Reprogram OTG global sync from the DCN4.x global_sync
			 * values computed during validation.
			 */
			pipe_ctx->stream_res.tg->funcs->program_global_sync(
					pipe_ctx->stream_res.tg,
					dcn401_calculate_vready_offset_for_group(pipe_ctx),
					(unsigned int)pipe_ctx->global_sync.dcn4x.vstartup_lines,
					(unsigned int)pipe_ctx->global_sync.dcn4x.vupdate_offset_pixels,
					(unsigned int)pipe_ctx->global_sync.dcn4x.vupdate_vupdate_width_pixels,
					(unsigned int)pipe_ctx->global_sync.dcn4x.pstate_keepout_start_lines);

			pipe_ctx->stream_res.tg->funcs->set_vtg_params(
					pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing, false);

			/* Blank/unblank only on the first ODM segment; the
			 * remaining ODM pipes follow the master.
			 */
			if (pipe_ctx->prev_odm_pipe == NULL)
				hws->funcs.blank_pixel_data(dc, pipe_ctx, blank);

			if (hws->funcs.setup_vupdate_interrupt)
				hws->funcs.setup_vupdate_interrupt(dc, pipe_ctx);
		}

		/* Re-apply HUBP DLG/TTU/RQ registers for every pipe with a
		 * plane (hubp_setup2 is the DCN4.x DML2-register variant).
		 */
		if (pipe_ctx->plane_res.hubp->funcs->hubp_setup2)
			pipe_ctx->plane_res.hubp->funcs->hubp_setup2(
					pipe_ctx->plane_res.hubp,
					&pipe_ctx->hubp_regs,
					&pipe_ctx->global_sync,
					&pipe_ctx->stream->timing);
	}

	return true;
}
2465
/*
 * dcn401_detect_pipe_changes - compare @old_pipe (current hardware state)
 * against @new_pipe (desired state) and populate new_pipe->update_flags
 * with the minimal set of programming steps required by the transition.
 *
 * The flags drive dcn401_program_pipe(): enable/disable, mpcc, dppclk,
 * hubp_interdependent, hubp_rq_dlg_ttu, scaler, viewport, global_sync,
 * odm, det_size, etc. Several early returns are order-sensitive:
 * normal->phantom transitions must fully disable first, and an unused
 * pipe (no plane in either state) exits after only the ODM check.
 */
void dcn401_detect_pipe_changes(struct dc_state *old_state,
	struct dc_state *new_state,
	struct pipe_ctx *old_pipe,
	struct pipe_ctx *new_pipe)
{
	bool old_is_phantom = dc_state_get_pipe_subvp_type(old_state, old_pipe) == SUBVP_PHANTOM;
	bool new_is_phantom = dc_state_get_pipe_subvp_type(new_state, new_pipe) == SUBVP_PHANTOM;

	/* Snapshot DCN4.x global-sync fields up front so the comparison
	 * below stays readable.
	 */
	unsigned int old_pipe_vready_offset_pixels = old_pipe->global_sync.dcn4x.vready_offset_pixels;
	unsigned int new_pipe_vready_offset_pixels = new_pipe->global_sync.dcn4x.vready_offset_pixels;
	unsigned int old_pipe_vstartup_lines = old_pipe->global_sync.dcn4x.vstartup_lines;
	unsigned int new_pipe_vstartup_lines = new_pipe->global_sync.dcn4x.vstartup_lines;
	unsigned int old_pipe_vupdate_offset_pixels = old_pipe->global_sync.dcn4x.vupdate_offset_pixels;
	unsigned int new_pipe_vupdate_offset_pixels = new_pipe->global_sync.dcn4x.vupdate_offset_pixels;
	unsigned int old_pipe_vupdate_width_pixels = old_pipe->global_sync.dcn4x.vupdate_vupdate_width_pixels;
	unsigned int new_pipe_vupdate_width_pixels = new_pipe->global_sync.dcn4x.vupdate_vupdate_width_pixels;

	new_pipe->update_flags.raw = 0;

	/* If non-phantom pipe is being transitioned to a phantom pipe,
	 * set disable and return immediately. This is because the pipe
	 * that was previously in use must be fully disabled before we
	 * can "enable" it as a phantom pipe (since the OTG will certainly
	 * be different). The post_unlock sequence will set the correct
	 * update flags to enable the phantom pipe.
	 */
	if (old_pipe->plane_state && !old_is_phantom &&
			new_pipe->plane_state && new_is_phantom) {
		new_pipe->update_flags.bits.disable = 1;
		return;
	}

	if (resource_is_pipe_type(new_pipe, OTG_MASTER) &&
			resource_is_odm_topology_changed(new_pipe, old_pipe))
		/* Detect odm changes */
		new_pipe->update_flags.bits.odm = 1;

	/* Exit on unchanged, unused pipe */
	if (!old_pipe->plane_state && !new_pipe->plane_state)
		return;
	/* Detect pipe enable/disable */
	if (!old_pipe->plane_state && new_pipe->plane_state) {
		/* Newly enabled pipe: request the full "enable" programming
		 * sequence (clocks, MPCC, scaler, viewport, DLG/TTU/RQ, ...).
		 */
		new_pipe->update_flags.bits.enable = 1;
		new_pipe->update_flags.bits.mpcc = 1;
		new_pipe->update_flags.bits.dppclk = 1;
		new_pipe->update_flags.bits.hubp_interdependent = 1;
		new_pipe->update_flags.bits.hubp_rq_dlg_ttu = 1;
		new_pipe->update_flags.bits.unbounded_req = 1;
		new_pipe->update_flags.bits.gamut_remap = 1;
		new_pipe->update_flags.bits.scaler = 1;
		new_pipe->update_flags.bits.viewport = 1;
		new_pipe->update_flags.bits.det_size = 1;
		if (new_pipe->stream->test_pattern.type != DP_TEST_PATTERN_VIDEO_MODE &&
				new_pipe->stream_res.test_pattern_params.width != 0 &&
				new_pipe->stream_res.test_pattern_params.height != 0)
			new_pipe->update_flags.bits.test_pattern_changed = 1;
		/* ODM/global-sync is OTG-level state: only the tree root
		 * (neither an MPO secondary nor a secondary ODM segment)
		 * programs it.
		 */
		if (!new_pipe->top_pipe && !new_pipe->prev_odm_pipe) {
			new_pipe->update_flags.bits.odm = 1;
			new_pipe->update_flags.bits.global_sync = 1;
		}
		return;
	}

	/* For SubVP we need to unconditionally enable because any phantom pipes are
	 * always removed then newly added for every full updates whenever SubVP is in use.
	 * The remove-add sequence of the phantom pipe always results in the pipe
	 * being blanked in enable_stream_timing (DPG).
	 */
	if (new_pipe->stream && dc_state_get_pipe_subvp_type(new_state, new_pipe) == SUBVP_PHANTOM)
		new_pipe->update_flags.bits.enable = 1;

	/* Phantom pipes are effectively disabled, if the pipe was previously phantom
	 * we have to enable
	 */
	if (old_pipe->plane_state && old_is_phantom &&
			new_pipe->plane_state && !new_is_phantom)
		new_pipe->update_flags.bits.enable = 1;

	if (old_pipe->plane_state && !new_pipe->plane_state) {
		new_pipe->update_flags.bits.disable = 1;
		return;
	}

	/* Detect plane change */
	if (old_pipe->plane_state != new_pipe->plane_state)
		new_pipe->update_flags.bits.plane_changed = true;

	/* Detect top pipe only changes */
	if (resource_is_pipe_type(new_pipe, OTG_MASTER)) {
		/* Detect global sync changes */
		if ((old_pipe_vready_offset_pixels != new_pipe_vready_offset_pixels)
				|| (old_pipe_vstartup_lines != new_pipe_vstartup_lines)
				|| (old_pipe_vupdate_offset_pixels != new_pipe_vupdate_offset_pixels)
				|| (old_pipe_vupdate_width_pixels != new_pipe_vupdate_width_pixels))
			new_pipe->update_flags.bits.global_sync = 1;
	}

	if (old_pipe->det_buffer_size_kb != new_pipe->det_buffer_size_kb)
		new_pipe->update_flags.bits.det_size = 1;

	/*
	 * Detect opp / tg change, only set on change, not on enable
	 * Assume mpcc inst = pipe index, if not this code needs to be updated
	 * since mpcc is what is affected by these. In fact all of our sequence
	 * makes this assumption at the moment with how hubp reset is matched to
	 * same index mpcc reset.
	 */
	if (old_pipe->stream_res.opp != new_pipe->stream_res.opp)
		new_pipe->update_flags.bits.opp_changed = 1;
	if (old_pipe->stream_res.tg != new_pipe->stream_res.tg)
		new_pipe->update_flags.bits.tg_changed = 1;

	/*
	 * Detect mpcc blending changes, only dpp inst and opp matter here,
	 * mpccs getting removed/inserted update connected ones during their own
	 * programming
	 */
	if (old_pipe->plane_res.dpp != new_pipe->plane_res.dpp
			|| old_pipe->stream_res.opp != new_pipe->stream_res.opp)
		new_pipe->update_flags.bits.mpcc = 1;

	/* Detect dppclk change */
	if (old_pipe->plane_res.bw.dppclk_khz != new_pipe->plane_res.bw.dppclk_khz)
		new_pipe->update_flags.bits.dppclk = 1;

	/* Check for scl update */
	if (memcmp(&old_pipe->plane_res.scl_data, &new_pipe->plane_res.scl_data, sizeof(struct scaler_data)))
		new_pipe->update_flags.bits.scaler = 1;
	/* Check for vp update */
	if (memcmp(&old_pipe->plane_res.scl_data.viewport, &new_pipe->plane_res.scl_data.viewport, sizeof(struct rect))
			|| memcmp(&old_pipe->plane_res.scl_data.viewport_c,
				&new_pipe->plane_res.scl_data.viewport_c, sizeof(struct rect)))
		new_pipe->update_flags.bits.viewport = 1;

	/* Detect dlg/ttu/rq updates */
	{
		/* The old_* snapshots are local COPIES: the interdependent
		 * fields are overwritten with the new values below, so the
		 * final memcmp only detects differences in the remaining
		 * (non-interdependent) DLG/TTU/RQ fields.
		 */
		struct dml2_display_dlg_regs old_dlg_regs = old_pipe->hubp_regs.dlg_regs;
		struct dml2_display_ttu_regs old_ttu_regs = old_pipe->hubp_regs.ttu_regs;
		struct dml2_display_rq_regs old_rq_regs = old_pipe->hubp_regs.rq_regs;
		struct dml2_display_dlg_regs *new_dlg_regs = &new_pipe->hubp_regs.dlg_regs;
		struct dml2_display_ttu_regs *new_ttu_regs = &new_pipe->hubp_regs.ttu_regs;
		struct dml2_display_rq_regs *new_rq_regs = &new_pipe->hubp_regs.rq_regs;

		/* Detect pipe interdependent updates */
		if ((old_dlg_regs.dst_y_prefetch != new_dlg_regs->dst_y_prefetch)
				|| (old_dlg_regs.vratio_prefetch != new_dlg_regs->vratio_prefetch)
				|| (old_dlg_regs.vratio_prefetch_c != new_dlg_regs->vratio_prefetch_c)
				|| (old_dlg_regs.dst_y_per_vm_vblank != new_dlg_regs->dst_y_per_vm_vblank)
				|| (old_dlg_regs.dst_y_per_row_vblank != new_dlg_regs->dst_y_per_row_vblank)
				|| (old_dlg_regs.dst_y_per_vm_flip != new_dlg_regs->dst_y_per_vm_flip)
				|| (old_dlg_regs.dst_y_per_row_flip != new_dlg_regs->dst_y_per_row_flip)
				|| (old_dlg_regs.refcyc_per_meta_chunk_vblank_l != new_dlg_regs->refcyc_per_meta_chunk_vblank_l)
				|| (old_dlg_regs.refcyc_per_meta_chunk_vblank_c != new_dlg_regs->refcyc_per_meta_chunk_vblank_c)
				|| (old_dlg_regs.refcyc_per_meta_chunk_flip_l != new_dlg_regs->refcyc_per_meta_chunk_flip_l)
				|| (old_dlg_regs.refcyc_per_line_delivery_pre_l != new_dlg_regs->refcyc_per_line_delivery_pre_l)
				|| (old_dlg_regs.refcyc_per_line_delivery_pre_c != new_dlg_regs->refcyc_per_line_delivery_pre_c)
				|| (old_ttu_regs.refcyc_per_req_delivery_pre_l != new_ttu_regs->refcyc_per_req_delivery_pre_l)
				|| (old_ttu_regs.refcyc_per_req_delivery_pre_c != new_ttu_regs->refcyc_per_req_delivery_pre_c)
				|| (old_ttu_regs.refcyc_per_req_delivery_pre_cur0 !=
						new_ttu_regs->refcyc_per_req_delivery_pre_cur0)
				|| (old_ttu_regs.min_ttu_vblank != new_ttu_regs->min_ttu_vblank)
				|| (old_ttu_regs.qos_level_flip != new_ttu_regs->qos_level_flip)) {
			/* Sync the interdependent fields in the local snapshot
			 * so they don't also trigger the generic
			 * hubp_rq_dlg_ttu check below.
			 */
			old_dlg_regs.dst_y_prefetch = new_dlg_regs->dst_y_prefetch;
			old_dlg_regs.vratio_prefetch = new_dlg_regs->vratio_prefetch;
			old_dlg_regs.vratio_prefetch_c = new_dlg_regs->vratio_prefetch_c;
			old_dlg_regs.dst_y_per_vm_vblank = new_dlg_regs->dst_y_per_vm_vblank;
			old_dlg_regs.dst_y_per_row_vblank = new_dlg_regs->dst_y_per_row_vblank;
			old_dlg_regs.dst_y_per_vm_flip = new_dlg_regs->dst_y_per_vm_flip;
			old_dlg_regs.dst_y_per_row_flip = new_dlg_regs->dst_y_per_row_flip;
			old_dlg_regs.refcyc_per_meta_chunk_vblank_l = new_dlg_regs->refcyc_per_meta_chunk_vblank_l;
			old_dlg_regs.refcyc_per_meta_chunk_vblank_c = new_dlg_regs->refcyc_per_meta_chunk_vblank_c;
			old_dlg_regs.refcyc_per_meta_chunk_flip_l = new_dlg_regs->refcyc_per_meta_chunk_flip_l;
			old_dlg_regs.refcyc_per_line_delivery_pre_l = new_dlg_regs->refcyc_per_line_delivery_pre_l;
			old_dlg_regs.refcyc_per_line_delivery_pre_c = new_dlg_regs->refcyc_per_line_delivery_pre_c;
			old_ttu_regs.refcyc_per_req_delivery_pre_l = new_ttu_regs->refcyc_per_req_delivery_pre_l;
			old_ttu_regs.refcyc_per_req_delivery_pre_c = new_ttu_regs->refcyc_per_req_delivery_pre_c;
			old_ttu_regs.refcyc_per_req_delivery_pre_cur0 = new_ttu_regs->refcyc_per_req_delivery_pre_cur0;
			old_ttu_regs.min_ttu_vblank = new_ttu_regs->min_ttu_vblank;
			old_ttu_regs.qos_level_flip = new_ttu_regs->qos_level_flip;
			new_pipe->update_flags.bits.hubp_interdependent = 1;
		}
		/* Detect any other updates to ttu/rq/dlg */
		if (memcmp(&old_dlg_regs, new_dlg_regs, sizeof(old_dlg_regs)) ||
				memcmp(&old_ttu_regs, new_ttu_regs, sizeof(old_ttu_regs)) ||
				memcmp(&old_rq_regs, new_rq_regs, sizeof(old_rq_regs)))
			new_pipe->update_flags.bits.hubp_rq_dlg_ttu = 1;
	}

	if (old_pipe->unbounded_req != new_pipe->unbounded_req)
		new_pipe->update_flags.bits.unbounded_req = 1;

	if (memcmp(&old_pipe->stream_res.test_pattern_params,
			&new_pipe->stream_res.test_pattern_params, sizeof(struct test_pattern_params))) {
		new_pipe->update_flags.bits.test_pattern_changed = 1;
	}
}
2662