/* SPDX-License-Identifier: MIT */
/*
 * Copyright 2023 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#include <linux/vmalloc.h>

#include "display_mode_core.h"
#include "dml2_internal_types.h"
#include "dml2_utils.h"
#include "dml2_policy.h"
#include "dml2_translation_helper.h"
#include "dml2_mall_phantom.h"
#include "dml2_dc_resource_mgmt.h"
#include "dml21_wrapper.h"

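/*
 * The initialize_dml2_* helpers below pick between two ways of populating
 * the DML core parameters: native construction inside DML2, or translation
 * from the existing DC/SoC data, depending on
 * config.use_native_soc_bb_construction.
 */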
static void initialize_dml2_ip_params(struct dml2_context *dml2, const struct dc *in_dc, struct ip_params_st *out)
{
	if (dml2->config.use_native_soc_bb_construction)
		dml2_init_ip_params(dml2, in_dc, out);
	else
		dml2_translate_ip_params(in_dc, out);
}

static void initialize_dml2_soc_bbox(struct dml2_context *dml2, const struct dc *in_dc, struct soc_bounding_box_st *out)
{
	if (dml2->config.use_native_soc_bb_construction)
		dml2_init_socbb_params(dml2, in_dc, out);
	else
		dml2_translate_socbb_params(in_dc, out);
}

static void initialize_dml2_soc_states(struct dml2_context *dml2,
	const struct dc *in_dc, const struct soc_bounding_box_st *in_bbox, struct soc_states_st *out)
{
	if (dml2->config.use_native_soc_bb_construction)
		dml2_init_soc_states(dml2, in_dc, in_bbox, out);
	else
		dml2_translate_soc_states(in_dc, out, in_dc->dml.soc.num_states);
}

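/*
 * Copy the hardware-related mode support decisions (ODM mode, DPPs per
 * surface, DSC) into the display config, and record for each allocated DML
 * pipe which stream and plane it maps to.
 */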
static void map_hw_resources(struct dml2_context *dml2,
		struct dml_display_cfg_st *in_out_display_cfg, struct dml_mode_support_info_st *mode_support_info)
{
	unsigned int num_pipes = 0;
	int i, j;

	for (i = 0; i < __DML_NUM_PLANES__; i++) {
		in_out_display_cfg->hw.ODMMode[i] = mode_support_info->ODMMode[i];
		in_out_display_cfg->hw.DPPPerSurface[i] = mode_support_info->DPPPerSurface[i];
		in_out_display_cfg->hw.DSCEnabled[i] = mode_support_info->DSCEnabled[i];
		in_out_display_cfg->hw.NumberOfDSCSlices[i] = mode_support_info->NumberOfDSCSlices[i];
		in_out_display_cfg->hw.DLGRefClkFreqMHz = 24;
		if (dml2->v20.dml_core_ctx.project != dml_project_dcn35 &&
			dml2->v20.dml_core_ctx.project != dml_project_dcn351) {
			/* dGPU defaults to 50MHz */
			in_out_display_cfg->hw.DLGRefClkFreqMHz = 50;
		}
		for (j = 0; j < mode_support_info->DPPPerSurface[i]; j++) {
			if (i >= __DML2_WRAPPER_MAX_STREAMS_PLANES__) {
				dml_print("DML::%s: Index out of bounds: i=%d, __DML2_WRAPPER_MAX_STREAMS_PLANES__=%d\n",
					  __func__, i, __DML2_WRAPPER_MAX_STREAMS_PLANES__);
				break;
			}
			dml2->v20.scratch.dml_to_dc_pipe_mapping.dml_pipe_idx_to_stream_id[num_pipes] = dml2->v20.scratch.dml_to_dc_pipe_mapping.disp_cfg_to_stream_id[i];
			dml2->v20.scratch.dml_to_dc_pipe_mapping.dml_pipe_idx_to_stream_id_valid[num_pipes] = true;
			dml2->v20.scratch.dml_to_dc_pipe_mapping.dml_pipe_idx_to_plane_id[num_pipes] = dml2->v20.scratch.dml_to_dc_pipe_mapping.disp_cfg_to_plane_id[i];
			dml2->v20.scratch.dml_to_dc_pipe_mapping.dml_pipe_idx_to_plane_id_valid[num_pipes] = true;
			num_pipes++;
		}
	}
}

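/*
 * Thin wrapper that points the scratch mode_support_params at the DML core
 * context and the given display config, zeroes the evaluation output, and
 * invokes dml_mode_support_ex().
 */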
static unsigned int pack_and_call_dml_mode_support_ex(struct dml2_context *dml2,
	const struct dml_display_cfg_st *display_cfg,
	struct dml_mode_support_info_st *evaluation_info)
{
	struct dml2_wrapper_scratch *s = &dml2->v20.scratch;

	s->mode_support_params.mode_lib = &dml2->v20.dml_core_ctx;
	s->mode_support_params.in_display_cfg = display_cfg;
	s->mode_support_params.out_evaluation_info = evaluation_info;

	memset(evaluation_info, 0, sizeof(struct dml_mode_support_info_st));
	s->mode_support_params.out_lowest_state_idx = 0;

	return dml_mode_support_ex(&s->mode_support_params);
}

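/*
 * Attempt a single optimization pass on the display config/policy. Two
 * strategies are tried in order: converting an eligible timing to
 * subviewport (adding a phantom pipe) to gain p-state support, and raising
 * the ODM combine policy to minimize dispclk. Returns true if the new
 * config/policy differs from the current one and should be re-evaluated.
 */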
static bool optimize_configuration(struct dml2_context *dml2, struct dml2_wrapper_optimize_configuration_params *p)
{
	int unused_dpps = p->ip_params->max_num_dpp;
	int i, j;
	int odms_needed, refresh_rate_hz, dpps_needed, subvp_height, pstate_width_fw_delay_lines, surface_count;
	int subvp_timing_to_add, new_timing_index, subvp_surface_to_add, new_surface_index;
	float frame_time_sec, max_frame_time_sec;
	int largest_blend_and_timing = 0;
	bool optimization_done = false;

	for (i = 0; i < (int) p->cur_display_config->num_timings; i++) {
		if (p->cur_display_config->plane.BlendingAndTiming[i] > largest_blend_and_timing)
			largest_blend_and_timing = p->cur_display_config->plane.BlendingAndTiming[i];
	}

	if (p->new_policy != p->cur_policy)
		*p->new_policy = *p->cur_policy;

	if (p->new_display_config != p->cur_display_config)
		*p->new_display_config = *p->cur_display_config;

	// Optimize P-State Support
	if (dml2->config.use_native_pstate_optimization) {
		if (p->cur_mode_support_info->DRAMClockChangeSupport[0] == dml_dram_clock_change_unsupported) {
			// Find the display with the highest refresh rate below 120Hz that's not already using subvp
			subvp_timing_to_add = -1;
			subvp_surface_to_add = -1;
			max_frame_time_sec = 0;
			surface_count = 0;
			for (i = 0; i < (int) p->cur_display_config->num_timings; i++) {
				refresh_rate_hz = (int)div_u64((unsigned long long) p->cur_display_config->timing.PixelClock[i] * 1000 * 1000,
					(p->cur_display_config->timing.HTotal[i] * p->cur_display_config->timing.VTotal[i]));
				if (refresh_rate_hz < 120) {
					// Check its upstream surfaces to see if this one could be converted to subvp.
					dpps_needed = 0;
					for (j = 0; j < (int) p->cur_display_config->num_surfaces; j++) {
						if (p->cur_display_config->plane.BlendingAndTiming[j] == i &&
							p->cur_display_config->plane.UseMALLForPStateChange[j] == dml_use_mall_pstate_change_disable) {
							dpps_needed += p->cur_mode_support_info->DPPPerSurface[j];
							subvp_surface_to_add = j;
							surface_count++;
						}
					}

					if (surface_count == 1 && dpps_needed > 0 && dpps_needed <= unused_dpps) {
						frame_time_sec = (float)1 / refresh_rate_hz;
						if (frame_time_sec > max_frame_time_sec) {
							max_frame_time_sec = frame_time_sec;
							subvp_timing_to_add = i;
						}
					}
				}
			}
			if (subvp_timing_to_add >= 0) {
				new_timing_index = p->new_display_config->num_timings++;
				new_surface_index = p->new_display_config->num_surfaces++;
				// Add a phantom pipe reflecting the main pipe's timing
				dml2_util_copy_dml_timing(&p->new_display_config->timing, new_timing_index, subvp_timing_to_add);

				pstate_width_fw_delay_lines = (int)(((double)(p->config->svp_pstate.subvp_fw_processing_delay_us +
					p->config->svp_pstate.subvp_pstate_allow_width_us) / 1000000) *
					(p->new_display_config->timing.PixelClock[subvp_timing_to_add] * 1000 * 1000) /
					(double)p->new_display_config->timing.HTotal[subvp_timing_to_add]);

				subvp_height = p->cur_mode_support_info->SubViewportLinesNeededInMALL[subvp_timing_to_add] + pstate_width_fw_delay_lines;

				p->new_display_config->timing.VActive[new_timing_index] = subvp_height;
				p->new_display_config->timing.VTotal[new_timing_index] = subvp_height +
					p->new_display_config->timing.VTotal[subvp_timing_to_add] - p->new_display_config->timing.VActive[subvp_timing_to_add];

				p->new_display_config->output.OutputDisabled[new_timing_index] = true;

				p->new_display_config->plane.UseMALLForPStateChange[subvp_surface_to_add] = dml_use_mall_pstate_change_sub_viewport;

				dml2_util_copy_dml_plane(&p->new_display_config->plane, new_surface_index, subvp_surface_to_add);
				dml2_util_copy_dml_surface(&p->new_display_config->surface, new_surface_index, subvp_surface_to_add);

				p->new_display_config->plane.ViewportHeight[new_surface_index] = subvp_height;
				p->new_display_config->plane.ViewportHeightChroma[new_surface_index] = subvp_height;
				p->new_display_config->plane.ViewportStationary[new_surface_index] = false;

				p->new_display_config->plane.UseMALLForStaticScreen[new_surface_index] = dml_use_mall_static_screen_disable;
				p->new_display_config->plane.UseMALLForPStateChange[new_surface_index] = dml_use_mall_pstate_change_phantom_pipe;

				p->new_display_config->plane.NumberOfCursors[new_surface_index] = 0;

				p->new_policy->ImmediateFlipRequirement[new_surface_index] = dml_immediate_flip_not_required;

				p->new_display_config->plane.BlendingAndTiming[new_surface_index] = new_timing_index;

				optimization_done = true;
			}
		}
	}

	// Optimize Clocks
	if (!optimization_done) {
		if (largest_blend_and_timing == 0 && p->cur_policy->ODMUse[0] == dml_odm_use_policy_combine_as_needed && dml2->config.minimize_dispclk_using_odm) {
			odms_needed = dml2_util_get_maximum_odm_combine_for_output(dml2->config.optimize_odm_4to1,
				p->cur_display_config->output.OutputEncoder[0], p->cur_mode_support_info->DSCEnabled[0]) - 1;

			if (odms_needed <= unused_dpps) {
				if (odms_needed == 1) {
					p->new_policy->ODMUse[0] = dml_odm_use_policy_combine_2to1;
					optimization_done = true;
				} else if (odms_needed == 3) {
					p->new_policy->ODMUse[0] = dml_odm_use_policy_combine_4to1;
					optimization_done = true;
				} else
					optimization_done = false;
			}
		}
	}

	return optimization_done;
}

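/*
 * Find the lowest DML state that remains supported when the dummy p-state
 * latencies (used for the g6 temp read case) are substituted for the real
 * DRAM clock change latencies. Each of the four dummy table entries is
 * tried in turn; the first one for which vactive DRAM clock change is
 * supported wins, and the state index is then bumped until its DRAM speed
 * covers that entry. Returns -1 if no entry works.
 */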
static int calculate_lowest_supported_state_for_temp_read(struct dml2_context *dml2, struct dc_state *display_state)
{
	struct dml2_calculate_lowest_supported_state_for_temp_read_scratch *s = &dml2->v20.scratch.dml2_calculate_lowest_supported_state_for_temp_read_scratch;
	struct dml2_wrapper_scratch *s_global = &dml2->v20.scratch;

	unsigned int dml_result = 0;
	int result = -1, i, j;

	build_unoptimized_policy_settings(dml2->v20.dml_core_ctx.project, &dml2->v20.dml_core_ctx.policy);

	/* Zero out the scratch structures before proceeding */
	memset(s, 0, sizeof(struct dml2_calculate_lowest_supported_state_for_temp_read_scratch));
	memset(&s_global->mode_support_params, 0, sizeof(struct dml_mode_support_ex_params_st));
	memset(&s_global->dml_to_dc_pipe_mapping, 0, sizeof(struct dml2_dml_to_dc_pipe_mapping));

	for (i = 0; i < dml2->config.dcn_pipe_count; i++) {
		/* Calling resource_build_scaling_params will populate the pipe params
		 * with the information needed for correct DML calculations.
		 * This is also done in the DML1 driver code path, hence display_state
		 * cannot be const.
		 */
		struct pipe_ctx *pipe = &display_state->res_ctx.pipe_ctx[i];

		if (pipe->plane_state) {
			if (!dml2->config.callbacks.build_scaling_params(pipe)) {
				ASSERT(false);
				return -1;
			}
		}
	}

	map_dc_state_into_dml_display_cfg(dml2, display_state, &s->cur_display_config);

	for (i = 0; i < dml2->v20.dml_core_ctx.states.num_states; i++) {
		s->uclk_change_latencies[i] = dml2->v20.dml_core_ctx.states.state_array[i].dram_clock_change_latency_us;
	}

	for (i = 0; i < 4; i++) {
		for (j = 0; j < dml2->v20.dml_core_ctx.states.num_states; j++) {
			dml2->v20.dml_core_ctx.states.state_array[j].dram_clock_change_latency_us = s_global->dummy_pstate_table[i].dummy_pstate_latency_us;
		}

		dml_result = pack_and_call_dml_mode_support_ex(dml2, &s->cur_display_config, &s->evaluation_info);

		if (dml_result && s->evaluation_info.DRAMClockChangeSupport[0] == dml_dram_clock_change_vactive) {
			map_hw_resources(dml2, &s->cur_display_config, &s->evaluation_info);
			dml_result = dml_mode_programming(&dml2->v20.dml_core_ctx, s_global->mode_support_params.out_lowest_state_idx, &s->cur_display_config, true);

			ASSERT(dml_result);

			dml2_extract_watermark_set(&dml2->v20.g6_temp_read_watermark_set, &dml2->v20.dml_core_ctx);
			dml2->v20.g6_temp_read_watermark_set.cstate_pstate.fclk_pstate_change_ns = dml2->v20.g6_temp_read_watermark_set.cstate_pstate.pstate_change_ns;

			result = s_global->mode_support_params.out_lowest_state_idx;

			while (dml2->v20.dml_core_ctx.states.state_array[result].dram_speed_mts < s_global->dummy_pstate_table[i].dram_speed_mts)
				result++;

			break;
		}
	}

	for (i = 0; i < dml2->v20.dml_core_ctx.states.num_states; i++) {
		dml2->v20.dml_core_ctx.states.state_array[i].dram_clock_change_latency_us = s->uclk_change_latencies[i];
	}

	return result;
}

static void copy_dummy_pstate_table(struct dummy_pstate_entry *dest, struct dummy_pstate_entry *src, unsigned int num_entries)
{
	for (int i = 0; i < num_entries; i++) {
		dest[i] = src[i];
	}
}

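/*
 * Returns true if any timing shared by multiple planes (i.e. doing
 * blending) has been assigned a non-bypass ODM mode, which the SW policy
 * check below may disallow.
 */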
static bool are_timings_requiring_odm_doing_blending(const struct dml_display_cfg_st *display_cfg,
		const struct dml_mode_support_info_st *evaluation_info)
{
	unsigned int planes_per_timing[__DML_NUM_PLANES__] = {0};
	int i;

	for (i = 0; i < display_cfg->num_surfaces; i++)
		planes_per_timing[display_cfg->plane.BlendingAndTiming[i]]++;

	for (i = 0; i < __DML_NUM_PLANES__; i++) {
		if (planes_per_timing[i] > 1 && evaluation_info->ODMMode[i] != dml_odm_mode_bypass)
			return true;
	}

	return false;
}

static bool does_configuration_meet_sw_policies(struct dml2_context *ctx, const struct dml_display_cfg_st *display_cfg,
	const struct dml_mode_support_info_st *evaluation_info)
{
	bool pass = true;

	if (!ctx->config.enable_windowed_mpo_odm) {
		if (are_timings_requiring_odm_doing_blending(display_cfg, evaluation_info))
			pass = false;
	}

	return pass;
}

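/*
 * Core mode-support flow: build scaling params for every pipe, map the DC
 * state into a DML display config, run mode support, then loop applying
 * optimize_configuration() while each optimized config still passes. If the
 * final optimization attempt fails, mode support is re-run on the last
 * passing config so the DML core output matches the config that is kept.
 */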
static bool dml_mode_support_wrapper(struct dml2_context *dml2,
		struct dc_state *display_state)
{
	struct dml2_wrapper_scratch *s = &dml2->v20.scratch;
	unsigned int result = 0, i;
	bool optimized_result = true;

	build_unoptimized_policy_settings(dml2->v20.dml_core_ctx.project, &dml2->v20.dml_core_ctx.policy);

	/* Zero out the scratch structures before proceeding */
	memset(&s->cur_display_config, 0, sizeof(struct dml_display_cfg_st));
	memset(&s->mode_support_params, 0, sizeof(struct dml_mode_support_ex_params_st));
	memset(&s->dml_to_dc_pipe_mapping, 0, sizeof(struct dml2_dml_to_dc_pipe_mapping));
	memset(&s->optimize_configuration_params, 0, sizeof(struct dml2_wrapper_optimize_configuration_params));

	for (i = 0; i < dml2->config.dcn_pipe_count; i++) {
		/* Calling resource_build_scaling_params will populate the pipe params
		 * with the information needed for correct DML calculations.
		 * This is also done in the DML1 driver code path, hence display_state
		 * cannot be const.
		 */
		struct pipe_ctx *pipe = &display_state->res_ctx.pipe_ctx[i];

		if (pipe->plane_state) {
			if (!dml2->config.callbacks.build_scaling_params(pipe)) {
				ASSERT(false);
				return false;
			}
		}
	}

	map_dc_state_into_dml_display_cfg(dml2, display_state, &s->cur_display_config);
	if (!dml2->config.skip_hw_state_mapping)
		dml2_apply_det_buffer_allocation_policy(dml2, &s->cur_display_config);

	result = pack_and_call_dml_mode_support_ex(dml2,
		&s->cur_display_config,
		&s->mode_support_info);

	if (result)
		result = does_configuration_meet_sw_policies(dml2, &s->cur_display_config, &s->mode_support_info);

	// Try to optimize
	if (result) {
		s->cur_policy = dml2->v20.dml_core_ctx.policy;
		s->optimize_configuration_params.dml_core_ctx = &dml2->v20.dml_core_ctx;
		s->optimize_configuration_params.config = &dml2->config;
		s->optimize_configuration_params.ip_params = &dml2->v20.dml_core_ctx.ip;
		s->optimize_configuration_params.cur_display_config = &s->cur_display_config;
		s->optimize_configuration_params.cur_mode_support_info = &s->mode_support_info;
		s->optimize_configuration_params.cur_policy = &s->cur_policy;
		s->optimize_configuration_params.new_display_config = &s->new_display_config;
		s->optimize_configuration_params.new_policy = &s->new_policy;

		while (optimized_result && optimize_configuration(dml2, &s->optimize_configuration_params)) {
			dml2->v20.dml_core_ctx.policy = s->new_policy;
			optimized_result = pack_and_call_dml_mode_support_ex(dml2,
				&s->new_display_config,
				&s->mode_support_info);

			if (optimized_result)
				optimized_result = does_configuration_meet_sw_policies(dml2, &s->new_display_config, &s->mode_support_info);

			// If the new optimized state is supported, then set current = new
			if (optimized_result) {
				s->cur_display_config = s->new_display_config;
				s->cur_policy = s->new_policy;
			} else {
				// Else, restore policy to current
				dml2->v20.dml_core_ctx.policy = s->cur_policy;
			}
		}

		// Optimization ended with a failed config, so restore DML state to the last passing one
		if (!optimized_result) {
			result = pack_and_call_dml_mode_support_ex(dml2,
				&s->cur_display_config,
				&s->mode_support_info);
		}
	}

	if (result)
		map_hw_resources(dml2, &s->cur_display_config, &s->mode_support_info);

	return result;
}

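/*
 * Return the index of the first non-SubVP stream flagged as DRR (identified
 * via ignore_msa_timing_param), or -1 if there is none.
 */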
static int find_drr_eligible_stream(struct dc_state *display_state)
{
	int i;

	for (i = 0; i < display_state->stream_count; i++) {
		if (dc_state_get_stream_subvp_type(display_state, display_state->streams[i]) == SUBVP_NONE
			&& display_state->streams[i]->ignore_msa_timing_param) {
			// Use ignore_msa_timing_param flag to identify as DRR
			return i;
		}
	}

	return -1;
}

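/*
 * P-state optimization loop for the non-native path: first check whether
 * the config already supports DRAM clock change (or FW-based mclk switching
 * via vblank stretch for single-stream configs). Otherwise keep adding
 * SubVP phantom pipes, validating static schedulability after each one,
 * and fall back to SubVP + DRR when exactly one non-SubVP DRR stream
 * remains. On failure, all phantom pipes are removed and the base config
 * is re-validated.
 */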
static bool optimize_pstate_with_svp_and_drr(struct dml2_context *dml2, struct dc_state *display_state)
{
	struct dml2_wrapper_scratch *s = &dml2->v20.scratch;
	bool pstate_optimization_done = false;
	bool pstate_optimization_success = false;
	bool result = false;
	int drr_display_index = 0, non_svp_streams = 0;
	bool force_svp = dml2->config.svp_pstate.force_enable_subvp;

	display_state->bw_ctx.bw.dcn.clk.fw_based_mclk_switching = false;
	display_state->bw_ctx.bw.dcn.legacy_svp_drr_stream_index_valid = false;

	result = dml_mode_support_wrapper(dml2, display_state);

	if (!result) {
		pstate_optimization_done = true;
	} else if (s->mode_support_info.DRAMClockChangeSupport[0] != dml_dram_clock_change_unsupported && !force_svp) {
		pstate_optimization_success = true;
		pstate_optimization_done = true;
	}

	if (display_state->stream_count == 1 && dml2->config.callbacks.can_support_mclk_switch_using_fw_based_vblank_stretch(dml2->config.callbacks.dc, display_state)) {
		display_state->bw_ctx.bw.dcn.clk.fw_based_mclk_switching = true;

		result = dml_mode_support_wrapper(dml2, display_state);
	} else {
		non_svp_streams = display_state->stream_count;

		while (!pstate_optimization_done) {
			result = dml_mode_programming(&dml2->v20.dml_core_ctx, s->mode_support_params.out_lowest_state_idx, &s->cur_display_config, true);

			// Always try adding SVP first
			if (result)
				result = dml2_svp_add_phantom_pipe_to_dc_state(dml2, display_state, &s->mode_support_info);
			else
				pstate_optimization_done = true;

			if (result) {
				result = dml_mode_support_wrapper(dml2, display_state);
			} else {
				pstate_optimization_done = true;
			}

			if (result) {
				non_svp_streams--;

				if (s->mode_support_info.DRAMClockChangeSupport[0] != dml_dram_clock_change_unsupported) {
					if (dml2_svp_validate_static_schedulability(dml2, display_state, s->mode_support_info.DRAMClockChangeSupport[0])) {
						pstate_optimization_success = true;
						pstate_optimization_done = true;
					} else {
						pstate_optimization_success = false;
						pstate_optimization_done = false;
					}
				} else {
					drr_display_index = find_drr_eligible_stream(display_state);

					// If there is only 1 remaining non SubVP pipe that is DRR, check static
					// schedulability for SubVP + DRR.
					if (non_svp_streams == 1 && drr_display_index >= 0) {
						if (dml2_svp_drr_schedulable(dml2, display_state, &display_state->streams[drr_display_index]->timing)) {
							display_state->bw_ctx.bw.dcn.legacy_svp_drr_stream_index_valid = true;
							display_state->bw_ctx.bw.dcn.legacy_svp_drr_stream_index = drr_display_index;
							result = dml_mode_support_wrapper(dml2, display_state);
						}

						if (result && s->mode_support_info.DRAMClockChangeSupport[0] != dml_dram_clock_change_unsupported) {
							pstate_optimization_success = true;
							pstate_optimization_done = true;
						} else {
							pstate_optimization_success = false;
							pstate_optimization_done = false;
						}
					}

					pstate_optimization_done = pstate_optimization_success;
				}
			}
		}
	}

	if (!pstate_optimization_success) {
		dml2_svp_remove_all_phantom_pipes(dml2, display_state);
		display_state->bw_ctx.bw.dcn.clk.fw_based_mclk_switching = false;
		display_state->bw_ctx.bw.dcn.legacy_svp_drr_stream_index_valid = false;
		result = dml_mode_support_wrapper(dml2, display_state);
	}

	return result;
}

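/*
 * Run mode support (with the SVP/DRR optimization loop unless native
 * p-state optimization is enabled) followed by mode programming. On dGPU
 * (is_apu false), the minimum state is clamped against the lowest state
 * that survives the g6 temp read latency check from
 * calculate_lowest_supported_state_for_temp_read().
 */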
static bool call_dml_mode_support_and_programming(struct dc_state *context)
{
	unsigned int result = 0;
	unsigned int min_state = 0;
	int min_state_for_g6_temp_read = 0;

	if (!context)
		return false;

	struct dml2_context *dml2 = context->bw_ctx.dml2;
	struct dml2_wrapper_scratch *s = &dml2->v20.scratch;

	if (!context->streams[0]->sink->link->dc->caps.is_apu) {
		min_state_for_g6_temp_read = calculate_lowest_supported_state_for_temp_read(dml2, context);

		ASSERT(min_state_for_g6_temp_read >= 0);
	}

	if (!dml2->config.use_native_pstate_optimization) {
		result = optimize_pstate_with_svp_and_drr(dml2, context);
	} else {
		result = dml_mode_support_wrapper(dml2, context);
	}

	/* Upon trying to set certain frequencies in FRL, min_state_for_g6_temp_read is reported as -1.
	 * This leads to an invalid value of min_state causing crashes later on. Use the default logic
	 * for min_state only when min_state_for_g6_temp_read is a valid value. In other cases, use
	 * the value calculated by the DML directly.
	 */
	if (!context->streams[0]->sink->link->dc->caps.is_apu) {
		if (min_state_for_g6_temp_read >= 0)
			min_state = min_state_for_g6_temp_read > s->mode_support_params.out_lowest_state_idx ? min_state_for_g6_temp_read : s->mode_support_params.out_lowest_state_idx;
		else
			min_state = s->mode_support_params.out_lowest_state_idx;
	}

	if (result) {
		if (!context->streams[0]->sink->link->dc->caps.is_apu) {
			result = dml_mode_programming(&dml2->v20.dml_core_ctx, min_state, &s->cur_display_config, true);
		} else {
			result = dml_mode_programming(&dml2->v20.dml_core_ctx, s->mode_support_params.out_lowest_state_idx, &s->cur_display_config, true);
		}
	}

	return result;
}

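/*
 * Full validation path: run mode support and programming, map the resulting
 * DML pipes back onto DC pipes, re-run once if the DET buffer configuration
 * changed, then extract clocks, watermark sets and RQ/DLG parameters into
 * the DC state. A zero-stream context short-circuits to the lowest state's
 * clock values.
 */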
static bool dml2_validate_and_build_resource(const struct dc *in_dc, struct dc_state *context)
{
	struct dml2_context *dml2 = context->bw_ctx.dml2;
	struct dml2_wrapper_scratch *s = &dml2->v20.scratch;
	struct dml2_dcn_clocks out_clks;
	unsigned int result = 0;
	bool need_recalculation = false;
	uint32_t cstate_enter_plus_exit_z8_ns;

	if (context->stream_count == 0) {
		unsigned int lowest_state_idx = 0;

		out_clks.p_state_supported = true;
		out_clks.dispclk_khz = 0; /* No requirement, and lowest index will generally be maximum dispclk. */
		out_clks.dcfclk_khz = (unsigned int)dml2->v20.dml_core_ctx.states.state_array[lowest_state_idx].dcfclk_mhz * 1000;
		out_clks.fclk_khz = (unsigned int)dml2->v20.dml_core_ctx.states.state_array[lowest_state_idx].fabricclk_mhz * 1000;
		out_clks.uclk_mts = (unsigned int)dml2->v20.dml_core_ctx.states.state_array[lowest_state_idx].dram_speed_mts;
		out_clks.phyclk_khz = (unsigned int)dml2->v20.dml_core_ctx.states.state_array[lowest_state_idx].phyclk_mhz * 1000;
		out_clks.socclk_khz = (unsigned int)dml2->v20.dml_core_ctx.states.state_array[lowest_state_idx].socclk_mhz * 1000;
		out_clks.ref_dtbclk_khz = (unsigned int)dml2->v20.dml_core_ctx.states.state_array[lowest_state_idx].dtbclk_mhz * 1000;
		context->bw_ctx.bw.dcn.clk.dtbclk_en = false;
		dml2_copy_clocks_to_dc_state(&out_clks, context);
		return true;
	}

	/* Zero out the scratch and DML core state before proceeding */
	memset(&dml2->v20.scratch, 0, sizeof(struct dml2_wrapper_scratch));
	memset(&dml2->v20.dml_core_ctx.policy, 0, sizeof(struct dml_mode_eval_policy_st));
	memset(&dml2->v20.dml_core_ctx.ms, 0, sizeof(struct mode_support_st));
	memset(&dml2->v20.dml_core_ctx.mp, 0, sizeof(struct mode_program_st));

	/* Initialize DET scratch */
	dml2_initialize_det_scratch(dml2);

	copy_dummy_pstate_table(s->dummy_pstate_table, in_dc->clk_mgr->bw_params->dummy_pstate_table, 4);

	result = call_dml_mode_support_and_programming(context);
	/* Call dml2_map_dc_pipes to map the pipes based on the DML output. For correctly determining
	 * if recalculation is required or not, the resource context needs to correctly reflect the
	 * number of active pipes. We would only know the correct number of active pipes after
	 * dml2_map_dc_pipes is called.
	 */
	if (result && !dml2->config.skip_hw_state_mapping)
		dml2_map_dc_pipes(dml2, context, &s->cur_display_config, &s->dml_to_dc_pipe_mapping, in_dc->current_state);

	/* Verify and update the DET buffer configuration if needed. dml2_verify_det_buffer_configuration
	 * will check if the DET buffer size needs to be updated. If yes, it will update the DETOverride
	 * variable and set the need_recalculation flag to true. Based on that flag, run mode support
	 * again. Verification needs to run after dml_mode_programming because the getters return
	 * correct DET buffer values only after dml_mode_programming is called.
	 */
	if (result && !dml2->config.skip_hw_state_mapping) {
		need_recalculation = dml2_verify_det_buffer_configuration(dml2, context, &dml2->det_helper_scratch);
		if (need_recalculation) {
			/* Engage the DML again if recalculation is required. */
			call_dml_mode_support_and_programming(context);
			if (!dml2->config.skip_hw_state_mapping) {
				dml2_map_dc_pipes(dml2, context, &s->cur_display_config, &s->dml_to_dc_pipe_mapping, in_dc->current_state);
			}
			need_recalculation = dml2_verify_det_buffer_configuration(dml2, context, &dml2->det_helper_scratch);
			ASSERT(need_recalculation == false);
		}
	}

	if (result) {
		unsigned int lowest_state_idx = s->mode_support_params.out_lowest_state_idx;
		out_clks.dispclk_khz = (unsigned int)dml2->v20.dml_core_ctx.mp.Dispclk_calculated * 1000;
		out_clks.p_state_supported = s->mode_support_info.DRAMClockChangeSupport[0] != dml_dram_clock_change_unsupported;
		if (in_dc->config.use_default_clock_table &&
			(lowest_state_idx < dml2->v20.dml_core_ctx.states.num_states - 1)) {
			lowest_state_idx = dml2->v20.dml_core_ctx.states.num_states - 1;
			out_clks.dispclk_khz = (unsigned int)dml2->v20.dml_core_ctx.states.state_array[lowest_state_idx].dispclk_mhz * 1000;
		}

		out_clks.dcfclk_khz = (unsigned int)dml2->v20.dml_core_ctx.states.state_array[lowest_state_idx].dcfclk_mhz * 1000;
		out_clks.fclk_khz = (unsigned int)dml2->v20.dml_core_ctx.states.state_array[lowest_state_idx].fabricclk_mhz * 1000;
		out_clks.uclk_mts = (unsigned int)dml2->v20.dml_core_ctx.states.state_array[lowest_state_idx].dram_speed_mts;
		out_clks.phyclk_khz = (unsigned int)dml2->v20.dml_core_ctx.states.state_array[lowest_state_idx].phyclk_mhz * 1000;
		out_clks.socclk_khz = (unsigned int)dml2->v20.dml_core_ctx.states.state_array[lowest_state_idx].socclk_mhz * 1000;
		out_clks.ref_dtbclk_khz = (unsigned int)dml2->v20.dml_core_ctx.states.state_array[lowest_state_idx].dtbclk_mhz * 1000;
		context->bw_ctx.bw.dcn.clk.dtbclk_en = is_dtbclk_required(in_dc, context);

		if (!dml2->config.skip_hw_state_mapping) {
			/* Call dml2_calculate_rq_and_dlg_params */
			dml2_calculate_rq_and_dlg_params(in_dc, context, &context->res_ctx, dml2, in_dc->res_pool->pipe_count);
		}

		dml2_copy_clocks_to_dc_state(&out_clks, context);
		dml2_extract_watermark_set(&context->bw_ctx.bw.dcn.watermarks.a, &dml2->v20.dml_core_ctx);
		dml2_extract_watermark_set(&context->bw_ctx.bw.dcn.watermarks.b, &dml2->v20.dml_core_ctx);
		memcpy(&context->bw_ctx.bw.dcn.watermarks.c, &dml2->v20.g6_temp_read_watermark_set, sizeof(context->bw_ctx.bw.dcn.watermarks.c));
		dml2_extract_watermark_set(&context->bw_ctx.bw.dcn.watermarks.d, &dml2->v20.dml_core_ctx);
		dml2_extract_writeback_wm(context, &dml2->v20.dml_core_ctx);
		//copy for deciding zstate use
		context->bw_ctx.dml.vba.StutterPeriod = context->bw_ctx.dml2->v20.dml_core_ctx.mp.StutterPeriod;

		cstate_enter_plus_exit_z8_ns = context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_enter_plus_exit_z8_ns;

		if (context->bw_ctx.dml.vba.StutterPeriod < in_dc->debug.minimum_z8_residency_time &&
				cstate_enter_plus_exit_z8_ns < in_dc->debug.minimum_z8_residency_time * 1000)
			cstate_enter_plus_exit_z8_ns = in_dc->debug.minimum_z8_residency_time * 1000;

		context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_enter_plus_exit_z8_ns = cstate_enter_plus_exit_z8_ns;
	}

	return result;
}

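/*
 * Fast validation path: map the DC state into a DML display config and run
 * mode support plus SW policy checks only, with no mode programming and no
 * writeback of clocks or watermarks into the context.
 */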
static bool dml2_validate_only(struct dc_state *context)
{
	struct dml2_context *dml2;
	unsigned int result = 0;

	if (!context || context->stream_count == 0)
		return true;

	dml2 = context->bw_ctx.dml2;

	/* Zero out the scratch and DML core state before proceeding */
	memset(&dml2->v20.scratch, 0, sizeof(struct dml2_wrapper_scratch));
	memset(&dml2->v20.dml_core_ctx.policy, 0, sizeof(struct dml_mode_eval_policy_st));
	memset(&dml2->v20.dml_core_ctx.ms, 0, sizeof(struct mode_support_st));
	memset(&dml2->v20.dml_core_ctx.mp, 0, sizeof(struct mode_program_st));

	build_unoptimized_policy_settings(dml2->v20.dml_core_ctx.project, &dml2->v20.dml_core_ctx.policy);

	map_dc_state_into_dml_display_cfg(dml2, context, &dml2->v20.scratch.cur_display_config);
	if (!dml2->config.skip_hw_state_mapping)
		dml2_apply_det_buffer_allocation_policy(dml2, &dml2->v20.scratch.cur_display_config);

	result = pack_and_call_dml_mode_support_ex(dml2,
		&dml2->v20.scratch.cur_display_config,
		&dml2->v20.scratch.mode_support_info);

	if (result)
		result = does_configuration_meet_sw_policies(dml2, &dml2->v20.scratch.cur_display_config, &dml2->v20.scratch.mode_support_info);

	return result == 1;
}

static void dml2_apply_debug_options(const struct dc *dc, struct dml2_context *dml2)
{
	if (dc->debug.override_odm_optimization) {
		dml2->config.minimize_dispclk_using_odm = dc->debug.minimize_dispclk_using_odm;
	}
}

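/*
 * dml2_validate - Top-level entry point for DML2 validation of a DC state.
 *
 * Dispatches to the DML2.1 path when that architecture is in use. Otherwise
 * fast_validate selects the mode-support-only dml2_validate_only() path,
 * while full validation also programs the mode and builds the resource and
 * clock state via dml2_validate_and_build_resource().
 */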
bool dml2_validate(const struct dc *in_dc, struct dc_state *context, struct dml2_context *dml2, bool fast_validate)
{
	bool out = false;

	if (!dml2)
		return false;

	dml2_apply_debug_options(in_dc, dml2);

	/* DML2.1 validation path */
	if (dml2->architecture == dml2_architecture_21) {
		out = dml21_validate(in_dc, context, dml2, fast_validate);
		return out;
	}

	DC_FP_START();

	/* Use dml2_validate_only for the fast_validate path */
	if (fast_validate)
		out = dml2_validate_only(context);
	else
		out = dml2_validate_and_build_resource(in_dc, context);

	DC_FP_END();

	return out;
}

static inline struct dml2_context *dml2_allocate_memory(void)
{
	return (struct dml2_context *) vzalloc(sizeof(struct dml2_context));
}

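/*
 * Select the DML project based on the DCN version, store the configuration
 * options, and build the DML core IP parameters, SoC bounding box and SoC
 * states. DCN 4.01 with DML2.1 enabled is routed to dml21_reinit() instead.
 */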
static void dml2_init(const struct dc *in_dc, const struct dml2_configuration_options *config, struct dml2_context **dml2)
{
	if ((in_dc->debug.using_dml21) && (in_dc->ctx->dce_version == DCN_VERSION_4_01)) {
		dml21_reinit(in_dc, dml2, config);
		return;
	}

	// Store config options
	(*dml2)->config = *config;

	switch (in_dc->ctx->dce_version) {
	case DCN_VERSION_3_5:
		(*dml2)->v20.dml_core_ctx.project = dml_project_dcn35;
		break;
	case DCN_VERSION_3_51:
		(*dml2)->v20.dml_core_ctx.project = dml_project_dcn351;
		break;
	case DCN_VERSION_3_2:
		(*dml2)->v20.dml_core_ctx.project = dml_project_dcn32;
		break;
	case DCN_VERSION_3_21:
		(*dml2)->v20.dml_core_ctx.project = dml_project_dcn321;
		break;
	case DCN_VERSION_4_01:
		(*dml2)->v20.dml_core_ctx.project = dml_project_dcn401;
		break;
	default:
		(*dml2)->v20.dml_core_ctx.project = dml_project_default;
		break;
	}

	DC_FP_START();

	initialize_dml2_ip_params(*dml2, in_dc, &(*dml2)->v20.dml_core_ctx.ip);

	initialize_dml2_soc_bbox(*dml2, in_dc, &(*dml2)->v20.dml_core_ctx.soc);

	initialize_dml2_soc_states(*dml2, in_dc, &(*dml2)->v20.dml_core_ctx.soc, &(*dml2)->v20.dml_core_ctx.states);

	DC_FP_END();
}

bool dml2_create(const struct dc *in_dc, const struct dml2_configuration_options *config, struct dml2_context **dml2)
{
	if ((in_dc->debug.using_dml21) && (in_dc->ctx->dce_version == DCN_VERSION_4_01))
		return dml21_create(in_dc, dml2, config);

	// Allocate Mode Lib Ctx
	*dml2 = dml2_allocate_memory();

	if (!(*dml2))
		return false;

	dml2_init(in_dc, config, dml2);

	return true;
}

void dml2_destroy(struct dml2_context *dml2)
{
	if (!dml2)
		return;

	if (dml2->architecture == dml2_architecture_21)
		dml21_destroy(dml2);
	vfree(dml2);
}

void dml2_extract_dram_and_fclk_change_support(struct dml2_context *dml2,
	unsigned int *fclk_change_support, unsigned int *dram_clk_change_support)
{
	*fclk_change_support = (unsigned int) dml2->v20.dml_core_ctx.ms.support.FCLKChangeSupport[0];
	*dram_clk_change_support = (unsigned int) dml2->v20.dml_core_ctx.ms.support.DRAMClockChangeSupport[0];
}

void dml2_prepare_mcache_programming(struct dc *in_dc, struct dc_state *context, struct dml2_context *dml2)
{
	if (dml2->architecture == dml2_architecture_21)
		dml21_prepare_mcache_programming(in_dc, context, dml2);
}

void dml2_copy(struct dml2_context *dst_dml2,
	struct dml2_context *src_dml2)
{
	if (src_dml2->architecture == dml2_architecture_21) {
		dml21_copy(dst_dml2, src_dml2);
		return;
	}
	/* copy Mode Lib Ctx */
	memcpy(dst_dml2, src_dml2, sizeof(struct dml2_context));
}

bool dml2_create_copy(struct dml2_context **dst_dml2,
	struct dml2_context *src_dml2)
{
	if (src_dml2->architecture == dml2_architecture_21)
		return dml21_create_copy(dst_dml2, src_dml2);

	/* Allocate Mode Lib Ctx */
	*dst_dml2 = dml2_allocate_memory();

	if (!(*dst_dml2))
		return false;

	/* copy Mode Lib Ctx */
	dml2_copy(*dst_dml2, src_dml2);

	return true;
}

void dml2_reinit(const struct dc *in_dc,
				 const struct dml2_configuration_options *config,
				 struct dml2_context **dml2)
{
	if ((in_dc->debug.using_dml21) && (in_dc->ctx->dce_version == DCN_VERSION_4_01)) {
		dml21_reinit(in_dc, dml2, config);
		return;
	}

	dml2_init(in_dc, config, dml2);
}