1 /*
2 * Copyright © 2014 Red Hat.
3 *
4 * Permission to use, copy, modify, distribute, and sell this software and its
5 * documentation for any purpose is hereby granted without fee, provided that
6 * the above copyright notice appear in all copies and that both that copyright
7 * notice and this permission notice appear in supporting documentation, and
8 * that the name of the copyright holders not be used in advertising or
9 * publicity pertaining to distribution of the software without specific,
10 * written prior permission. The copyright holders make no representations
11 * about the suitability of this software for any purpose. It is provided "as
12 * is" without express or implied warranty.
13 *
14 * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
15 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
16 * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
17 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
18 * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
19 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
20 * OF THIS SOFTWARE.
21 */
22 #ifndef _DRM_DP_MST_HELPER_H_
23 #define _DRM_DP_MST_HELPER_H_
24
25 #include <linux/types.h>
26 #include <drm/display/drm_dp_helper.h>
27 #include <drm/drm_atomic.h>
28 #include <drm/drm_fixed.h>
29
30 #if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
31 #include <linux/stackdepot.h>
32 #include <linux/timekeeping.h>
33
34 enum drm_dp_mst_topology_ref_type {
35 DRM_DP_MST_TOPOLOGY_REF_GET,
36 DRM_DP_MST_TOPOLOGY_REF_PUT,
37 };
38
39 struct drm_dp_mst_topology_ref_history {
40 struct drm_dp_mst_topology_ref_entry {
41 enum drm_dp_mst_topology_ref_type type;
42 int count;
43 ktime_t ts_nsec;
44 depot_stack_handle_t backtrace;
45 } *entries;
46 int len;
47 };
48 #endif /* IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS) */
49
50 enum drm_dp_mst_payload_allocation {
51 DRM_DP_MST_PAYLOAD_ALLOCATION_NONE,
52 DRM_DP_MST_PAYLOAD_ALLOCATION_LOCAL,
53 DRM_DP_MST_PAYLOAD_ALLOCATION_DFP,
54 DRM_DP_MST_PAYLOAD_ALLOCATION_REMOTE,
55 };
56
57 struct drm_dp_mst_branch;
58
59 /**
60 * struct drm_dp_mst_port - MST port
61 * @port_num: port number
62 * @input: if this port is an input port. Protected by
63 * &drm_dp_mst_topology_mgr.base.lock.
64 * @mcs: message capability status - DP 1.2 spec. Protected by
65 * &drm_dp_mst_topology_mgr.base.lock.
66 * @ddps: DisplayPort Device Plug Status - DP 1.2. Protected by
67 * &drm_dp_mst_topology_mgr.base.lock.
68 * @pdt: Peer Device Type. Protected by
69 * &drm_dp_mst_topology_mgr.base.lock.
70 * @ldps: Legacy Device Plug Status. Protected by
71 * &drm_dp_mst_topology_mgr.base.lock.
72 * @dpcd_rev: DPCD revision of device on this port. Protected by
73 * &drm_dp_mst_topology_mgr.base.lock.
74 * @num_sdp_streams: Number of simultaneous streams. Protected by
75 * &drm_dp_mst_topology_mgr.base.lock.
76 * @num_sdp_stream_sinks: Number of stream sinks. Protected by
77 * &drm_dp_mst_topology_mgr.base.lock.
78 * @full_pbn: Max possible bandwidth for this port. Protected by
79 * &drm_dp_mst_topology_mgr.base.lock.
80 * @next: link to next port on this branch device
81 * @aux: i2c aux transport to talk to device connected to this port, protected
82 * by &drm_dp_mst_topology_mgr.base.lock.
83 * @passthrough_aux: parent aux to which DSC pass-through requests should be
84 * sent, only set if DSC pass-through is possible.
85 * @parent: branch device parent of this port
86 * @connector: DRM connector this port is connected to. Protected by
87 * &drm_dp_mst_topology_mgr.base.lock.
88 * @mgr: topology manager this port lives under.
89 *
90 * This structure represents an MST port endpoint on a device somewhere
91 * in the MST topology.
92 */
93 struct drm_dp_mst_port {
94 /**
95 * @topology_kref: refcount for this port's lifetime in the topology,
96 * only the DP MST helpers should need to touch this
97 */
98 struct kref topology_kref;
99
100 /**
101 * @malloc_kref: refcount for the memory allocation containing this
102 * structure. See drm_dp_mst_get_port_malloc() and
103 * drm_dp_mst_put_port_malloc().
104 */
105 struct kref malloc_kref;
106
107 #if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
108 /**
109 * @topology_ref_history: A history of each topology
110 * reference/dereference. See CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS.
111 */
112 struct drm_dp_mst_topology_ref_history topology_ref_history;
113 #endif
114
115 u8 port_num;
116 bool input;
117 bool mcs;
118 bool ddps;
119 u8 pdt;
120 bool ldps;
121 u8 dpcd_rev;
122 u8 num_sdp_streams;
123 u8 num_sdp_stream_sinks;
124 uint16_t full_pbn;
125 struct list_head next;
126 /**
127 * @mstb: the branch device connected to this port, if there is one.
128 * This should be considered protected for reading by
129 * &drm_dp_mst_topology_mgr.lock. There are two exceptions to this:
130 * &drm_dp_mst_topology_mgr.up_req_work and
131 * &drm_dp_mst_topology_mgr.work, which do not grab
132 * &drm_dp_mst_topology_mgr.lock during reads but are the only
133 * updaters of this list and are protected from writing concurrently
134 * by &drm_dp_mst_topology_mgr.probe_lock.
135 */
136 struct drm_dp_mst_branch *mstb;
137 struct drm_dp_aux aux; /* i2c bus for this port? */
138 struct drm_dp_aux *passthrough_aux;
139 struct drm_dp_mst_branch *parent;
140
141 struct drm_connector *connector;
142 struct drm_dp_mst_topology_mgr *mgr;
143
144 /**
145 * @cached_edid: for DP logical ports - make tiling work by ensuring
146 * that the EDID for all connectors is read immediately.
147 */
148 const struct drm_edid *cached_edid;
149
150 /**
151 * @fec_capable: bool indicating if FEC can be supported up to that
152 * point in the MST topology.
153 */
154 bool fec_capable;
155 };
156
157 /* sideband msg header - not bit struct */
158 struct drm_dp_sideband_msg_hdr {
159 u8 lct;
160 u8 lcr;
161 u8 rad[8];
162 bool broadcast;
163 bool path_msg;
164 u8 msg_len;
165 bool somt;
166 bool eomt;
167 bool seqno;
168 };
169
170 struct drm_dp_sideband_msg_rx {
171 u8 chunk[48];
172 u8 msg[256];
173 u8 curchunk_len;
174 u8 curchunk_idx; /* chunk we are parsing now */
175 u8 curchunk_hdrlen;
176 u8 curlen; /* total length of the msg */
177 bool have_somt;
178 bool have_eomt;
179 struct drm_dp_sideband_msg_hdr initial_hdr;
180 };
181
182 /**
183 * struct drm_dp_mst_branch - MST branch device.
184 * @rad: Relative Address to talk to this branch device.
185 * @lct: Link count total to talk to this branch device.
186 * @num_ports: number of ports on the branch.
187 * @port_parent: pointer to the port parent, NULL if toplevel.
188 * @mgr: topology manager for this branch device.
189 * @link_address_sent: if a link address message has been sent to this device yet.
190  * @guid: GUID of this DP 1.2 branch device. Ports under this branch can be
191  * identified by their port number.
192  *
193  * This structure represents an MST branch device. There is one
194  * primary branch device at the root, along with any other branch devices
195  * connected to the downstream ports of their parent branches.
196 */
197 struct drm_dp_mst_branch {
198 /**
199 * @topology_kref: refcount for this branch device's lifetime in the
200 * topology, only the DP MST helpers should need to touch this
201 */
202 struct kref topology_kref;
203
204 /**
205 * @malloc_kref: refcount for the memory allocation containing this
206 * structure. See drm_dp_mst_get_mstb_malloc() and
207 * drm_dp_mst_put_mstb_malloc().
208 */
209 struct kref malloc_kref;
210
211 #if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
212 /**
213 * @topology_ref_history: A history of each topology
214 * reference/dereference. See CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS.
215 */
216 struct drm_dp_mst_topology_ref_history topology_ref_history;
217 #endif
218
219 /**
220 * @destroy_next: linked-list entry used by
221 * drm_dp_delayed_destroy_work()
222 */
223 struct list_head destroy_next;
224
225 /**
226 * @rad: Relative Address of the MST branch.
227	 * For &drm_dp_mst_topology_mgr.mst_primary, all bytes of rad[] are 0,
228	 * unset and unused. For MST branches connected after mst_primary,
229	 * in each element of rad[] the nibbles are ordered with the most
230	 * significant 4 bits first and the least significant 4 bits second.
231 */
232 u8 rad[8];
233 u8 lct;
234 int num_ports;
235
236 /**
237 * @ports: the list of ports on this branch device. This should be
238 * considered protected for reading by &drm_dp_mst_topology_mgr.lock.
239 * There are two exceptions to this:
240 * &drm_dp_mst_topology_mgr.up_req_work and
241 * &drm_dp_mst_topology_mgr.work, which do not grab
242 * &drm_dp_mst_topology_mgr.lock during reads but are the only
243 * updaters of this list and are protected from updating the list
244	 * concurrently by &drm_dp_mst_topology_mgr.probe_lock.
245 */
246 struct list_head ports;
247
248 struct drm_dp_mst_port *port_parent;
249 struct drm_dp_mst_topology_mgr *mgr;
250
251 bool link_address_sent;
252
253	/* globally unique identifier of this branch device */
254 guid_t guid;
255 };
256
257
258 struct drm_dp_nak_reply {
259 guid_t guid;
260 u8 reason;
261 u8 nak_data;
262 };
263
264 struct drm_dp_link_address_ack_reply {
265 guid_t guid;
266 u8 nports;
267 struct drm_dp_link_addr_reply_port {
268 bool input_port;
269 u8 peer_device_type;
270 u8 port_number;
271 bool mcs;
272 bool ddps;
273 bool legacy_device_plug_status;
274 u8 dpcd_revision;
275 guid_t peer_guid;
276 u8 num_sdp_streams;
277 u8 num_sdp_stream_sinks;
278 } ports[16];
279 };
280
281 struct drm_dp_remote_dpcd_read_ack_reply {
282 u8 port_number;
283 u8 num_bytes;
284 u8 bytes[255];
285 };
286
287 struct drm_dp_remote_dpcd_write_ack_reply {
288 u8 port_number;
289 };
290
291 struct drm_dp_remote_dpcd_write_nak_reply {
292 u8 port_number;
293 u8 reason;
294 u8 bytes_written_before_failure;
295 };
296
297 struct drm_dp_remote_i2c_read_ack_reply {
298 u8 port_number;
299 u8 num_bytes;
300 u8 bytes[255];
301 };
302
303 struct drm_dp_remote_i2c_read_nak_reply {
304 u8 port_number;
305 u8 nak_reason;
306 u8 i2c_nak_transaction;
307 };
308
309 struct drm_dp_remote_i2c_write_ack_reply {
310 u8 port_number;
311 };
312
313 struct drm_dp_query_stream_enc_status_ack_reply {
314 /* Bit[23:16]- Stream Id */
315 u8 stream_id;
316
317 /* Bit[15]- Signed */
318 bool reply_signed;
319
320 /* Bit[10:8]- Stream Output Sink Type */
321 bool unauthorizable_device_present;
322 bool legacy_device_present;
323 bool query_capable_device_present;
324
325 /* Bit[12:11]- Stream Output CP Type */
326 bool hdcp_1x_device_present;
327 bool hdcp_2x_device_present;
328
329 /* Bit[4]- Stream Authentication */
330 bool auth_completed;
331
332 /* Bit[3]- Stream Encryption */
333 bool encryption_enabled;
334
335 /* Bit[2]- Stream Repeater Function Present */
336 bool repeater_present;
337
338 /* Bit[1:0]- Stream State */
339 u8 state;
340 };
341
342 #define DRM_DP_MAX_SDP_STREAMS 16
343 struct drm_dp_allocate_payload {
344 u8 port_number;
345 u8 number_sdp_streams;
346 u8 vcpi;
347 u16 pbn;
348 u8 sdp_stream_sink[DRM_DP_MAX_SDP_STREAMS];
349 };
350
351 struct drm_dp_allocate_payload_ack_reply {
352 u8 port_number;
353 u8 vcpi;
354 u16 allocated_pbn;
355 };
356
357 struct drm_dp_connection_status_notify {
358 guid_t guid;
359 u8 port_number;
360 bool legacy_device_plug_status;
361 bool displayport_device_plug_status;
362 bool message_capability_status;
363 bool input_port;
364 u8 peer_device_type;
365 };
366
367 struct drm_dp_remote_dpcd_read {
368 u8 port_number;
369 u32 dpcd_address;
370 u8 num_bytes;
371 };
372
373 struct drm_dp_remote_dpcd_write {
374 u8 port_number;
375 u32 dpcd_address;
376 u8 num_bytes;
377 u8 *bytes;
378 };
379
380 #define DP_REMOTE_I2C_READ_MAX_TRANSACTIONS 4
381 struct drm_dp_remote_i2c_read {
382 u8 num_transactions;
383 u8 port_number;
384 struct drm_dp_remote_i2c_read_tx {
385 u8 i2c_dev_id;
386 u8 num_bytes;
387 u8 *bytes;
388 u8 no_stop_bit;
389 u8 i2c_transaction_delay;
390 } transactions[DP_REMOTE_I2C_READ_MAX_TRANSACTIONS];
391 u8 read_i2c_device_id;
392 u8 num_bytes_read;
393 };
394
395 struct drm_dp_remote_i2c_write {
396 u8 port_number;
397 u8 write_i2c_device_id;
398 u8 num_bytes;
399 u8 *bytes;
400 };
401
402 struct drm_dp_query_stream_enc_status {
403 u8 stream_id;
404 u8 client_id[7]; /* 56-bit nonce */
405 u8 stream_event;
406 bool valid_stream_event;
407 u8 stream_behavior;
408 u8 valid_stream_behavior;
409 };
410
411 /* this covers ENUM_RESOURCES, POWER_DOWN_PHY, POWER_UP_PHY */
412 struct drm_dp_port_number_req {
413 u8 port_number;
414 };
415
416 struct drm_dp_enum_path_resources_ack_reply {
417 u8 port_number;
418 bool fec_capable;
419 u16 full_payload_bw_number;
420 u16 avail_payload_bw_number;
421 };
422
423 /* covers POWER_DOWN_PHY, POWER_UP_PHY */
424 struct drm_dp_port_number_rep {
425 u8 port_number;
426 };
427
428 struct drm_dp_query_payload {
429 u8 port_number;
430 u8 vcpi;
431 };
432
433 struct drm_dp_resource_status_notify {
434 u8 port_number;
435 guid_t guid;
436 u16 available_pbn;
437 };
438
439 struct drm_dp_query_payload_ack_reply {
440 u8 port_number;
441 u16 allocated_pbn;
442 };
443
444 struct drm_dp_sideband_msg_req_body {
445 u8 req_type;
446 union ack_req {
447 struct drm_dp_connection_status_notify conn_stat;
448 struct drm_dp_port_number_req port_num;
449 struct drm_dp_resource_status_notify resource_stat;
450
451 struct drm_dp_query_payload query_payload;
452 struct drm_dp_allocate_payload allocate_payload;
453
454 struct drm_dp_remote_dpcd_read dpcd_read;
455 struct drm_dp_remote_dpcd_write dpcd_write;
456
457 struct drm_dp_remote_i2c_read i2c_read;
458 struct drm_dp_remote_i2c_write i2c_write;
459
460 struct drm_dp_query_stream_enc_status enc_status;
461 } u;
462 };
463
464 struct drm_dp_sideband_msg_reply_body {
465 u8 reply_type;
466 u8 req_type;
467 union ack_replies {
468 struct drm_dp_nak_reply nak;
469 struct drm_dp_link_address_ack_reply link_addr;
470 struct drm_dp_port_number_rep port_number;
471
472 struct drm_dp_enum_path_resources_ack_reply path_resources;
473 struct drm_dp_allocate_payload_ack_reply allocate_payload;
474 struct drm_dp_query_payload_ack_reply query_payload;
475
476 struct drm_dp_remote_dpcd_read_ack_reply remote_dpcd_read_ack;
477 struct drm_dp_remote_dpcd_write_ack_reply remote_dpcd_write_ack;
478 struct drm_dp_remote_dpcd_write_nak_reply remote_dpcd_write_nack;
479
480 struct drm_dp_remote_i2c_read_ack_reply remote_i2c_read_ack;
481 struct drm_dp_remote_i2c_read_nak_reply remote_i2c_read_nack;
482 struct drm_dp_remote_i2c_write_ack_reply remote_i2c_write_ack;
483
484 struct drm_dp_query_stream_enc_status_ack_reply enc_status;
485 } u;
486 };
487
488 /* msg is queued to be put into a slot */
489 #define DRM_DP_SIDEBAND_TX_QUEUED 0
490 /* msg has started transmitting on a slot - still on msgq */
491 #define DRM_DP_SIDEBAND_TX_START_SEND 1
492 /* msg has finished transmitting on a slot - removed from msgq only in slot */
493 #define DRM_DP_SIDEBAND_TX_SENT 2
494 /* msg has received a response - removed from slot */
495 #define DRM_DP_SIDEBAND_TX_RX 3
496 #define DRM_DP_SIDEBAND_TX_TIMEOUT 4
497
498 struct drm_dp_sideband_msg_tx {
499 u8 msg[256];
500 u8 chunk[48];
501 u8 cur_offset;
502 u8 cur_len;
503 struct drm_dp_mst_branch *dst;
504 struct list_head next;
505 int seqno;
506 int state;
507 bool path_msg;
508 struct drm_dp_sideband_msg_reply_body reply;
509 };
510
511 /* sideband msg handler */
512 struct drm_dp_mst_topology_mgr;
513 struct drm_dp_mst_topology_cbs {
514 /* create a connector for a port */
515 struct drm_connector *(*add_connector)(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, const char *path);
516 /*
517 * Checks for any pending MST interrupts, passing them to MST core for
518 * processing, the same way an HPD IRQ pulse handler would do this.
519	 * If provided, MST core calls this callback from a poll-waiting loop
520 * when waiting for MST down message replies. The driver is expected
521 * to guard against a race between this callback and the driver's HPD
522 * IRQ pulse handler.
523 */
524 void (*poll_hpd_irq)(struct drm_dp_mst_topology_mgr *mgr);
525 };
526
527 #define to_dp_mst_topology_state(x) container_of(x, struct drm_dp_mst_topology_state, base)
528
529 /**
530 * struct drm_dp_mst_atomic_payload - Atomic state struct for an MST payload
531 *
532 * The primary atomic state structure for a given MST payload. Stores information like current
533 * bandwidth allocation, intended action for this payload, etc.
534 */
535 struct drm_dp_mst_atomic_payload {
536 /** @port: The MST port assigned to this payload */
537 struct drm_dp_mst_port *port;
538
539 /**
540 * @vc_start_slot: The time slot that this payload starts on. Because payload start slots
541 * can't be determined ahead of time, the contents of this value are UNDEFINED at atomic
542 * check time. This shouldn't usually matter, as the start slot should never be relevant for
543 * atomic state computations.
544 *
545 * Since this value is determined at commit time instead of check time, this value is
546 * protected by the MST helpers ensuring that async commits operating on the given topology
547 * never run in parallel. In the event that a driver does need to read this value (e.g. to
548 * inform hardware of the starting timeslot for a payload), the driver may either:
549 *
550 * * Read this field during the atomic commit after
551 * drm_dp_mst_atomic_wait_for_dependencies() has been called, which will ensure the
552	 *   previous MST state's payload start slots have been copied over to the new state. Note
553 * that a new start slot won't be assigned/removed from this payload until
554 * drm_dp_add_payload_part1()/drm_dp_remove_payload_part2() have been called.
555 * * Acquire the MST modesetting lock, and then wait for any pending MST-related commits to
556 * get committed to hardware by calling drm_crtc_commit_wait() on each of the
557 * &drm_crtc_commit structs in &drm_dp_mst_topology_state.commit_deps.
558 *
559 * If neither of the two above solutions suffice (e.g. the driver needs to read the start
560 * slot in the middle of an atomic commit without waiting for some reason), then drivers
561 * should cache this value themselves after changing payloads.
562 */
563 s8 vc_start_slot;
564
565 /** @vcpi: The Virtual Channel Payload Identifier */
566 u8 vcpi;
567 /**
568 * @time_slots:
569 * The number of timeslots allocated to this payload from the source DP Tx to
570 * the immediate downstream DP Rx
571 */
572 int time_slots;
573 /** @pbn: The payload bandwidth for this payload */
574 int pbn;
575
576 /** @delete: Whether or not we intend to delete this payload during this atomic commit */
577 bool delete : 1;
578 /** @dsc_enabled: Whether or not this payload has DSC enabled */
579 bool dsc_enabled : 1;
580
581 /** @payload_allocation_status: The allocation status of this payload */
582 enum drm_dp_mst_payload_allocation payload_allocation_status;
583
584 /** @next: The list node for this payload */
585 struct list_head next;
586 };
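/*
 * Example of reading &drm_dp_mst_atomic_payload.vc_start_slot safely from a
 * driver's atomic commit tail (a minimal sketch; @state, @mgr and @port are
 * assumed to come from the driver's own commit code, and
 * program_hw_start_slot() is a hypothetical driver helper):
 *
 *	struct drm_dp_mst_topology_state *mst_state;
 *	struct drm_dp_mst_atomic_payload *payload;
 *
 *	drm_dp_mst_atomic_wait_for_dependencies(state);
 *
 *	mst_state = drm_atomic_get_new_mst_topology_state(state, mgr);
 *	payload = drm_atomic_get_mst_payload_state(mst_state, port);
 *	if (payload && payload->vc_start_slot >= 0)
 *		program_hw_start_slot(payload->vc_start_slot);
 */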
587
588 /**
589 * struct drm_dp_mst_topology_state - DisplayPort MST topology atomic state
590 *
591 * This struct represents the atomic state of the toplevel DisplayPort MST manager
592 */
593 struct drm_dp_mst_topology_state {
594 /** @base: Base private state for atomic */
595 struct drm_private_state base;
596
597 /** @mgr: The topology manager */
598 struct drm_dp_mst_topology_mgr *mgr;
599
600 /**
601	 * @pending_crtc_mask: A bitmask of all CRTCs this topology state touches; drivers may
602 * modify this to add additional dependencies if needed.
603 */
604 u32 pending_crtc_mask;
605 /**
606	 * @commit_deps: A list of all CRTC commits affecting this topology; this field isn't
607 * populated until drm_dp_mst_atomic_wait_for_dependencies() is called.
608 */
609 struct drm_crtc_commit **commit_deps;
610 /** @num_commit_deps: The number of CRTC commits in @commit_deps */
611 size_t num_commit_deps;
612
613 /** @payload_mask: A bitmask of allocated VCPIs, used for VCPI assignments */
614 u32 payload_mask;
615 /** @payloads: The list of payloads being created/destroyed in this state */
616 struct list_head payloads;
617
618 /** @total_avail_slots: The total number of slots this topology can handle (63 or 64) */
619 u8 total_avail_slots;
620 /** @start_slot: The first usable time slot in this topology (1 or 0) */
621 u8 start_slot;
622
623 /**
624 * @pbn_div: The current PBN divisor for this topology. The driver is expected to fill this
625 * out itself.
626 */
627 fixed20_12 pbn_div;
628 };
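/*
 * Example of pulling the topology state into a driver's atomic check and
 * filling in the PBN divisor (a minimal sketch; link_rate and lane_count are
 * assumed to be tracked in hypothetical driver CRTC state):
 *
 *	struct drm_dp_mst_topology_state *mst_state;
 *
 *	mst_state = drm_atomic_get_mst_topology_state(state, mgr);
 *	if (IS_ERR(mst_state))
 *		return PTR_ERR(mst_state);
 *
 *	mst_state->pbn_div = drm_dp_get_vc_payload_bw(crtc_state->link_rate,
 *						      crtc_state->lane_count);
 */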
629
630 #define to_dp_mst_topology_mgr(x) container_of(x, struct drm_dp_mst_topology_mgr, base)
631
632 /**
633 * struct drm_dp_mst_topology_mgr - DisplayPort MST manager
634 *
635  * This struct represents the toplevel DisplayPort MST topology manager.
636 * There should be one instance of this for every MST capable DP connector
637 * on the GPU.
638 */
639 struct drm_dp_mst_topology_mgr {
640 /**
641 * @base: Base private object for atomic
642 */
643 struct drm_private_obj base;
644
645 /**
646 * @dev: device pointer for adding i2c devices etc.
647 */
648 struct drm_device *dev;
649 /**
650 * @cbs: callbacks for connector addition and destruction.
651 */
652 const struct drm_dp_mst_topology_cbs *cbs;
653 /**
654 * @max_dpcd_transaction_bytes: maximum number of bytes to read/write
655 * in one go.
656 */
657 int max_dpcd_transaction_bytes;
658 /**
659	 * @aux: AUX channel for the DP MST connector this topology mgr is
660 * controlling.
661 */
662 struct drm_dp_aux *aux;
663 /**
664 * @max_payloads: maximum number of payloads the GPU can generate.
665 */
666 int max_payloads;
667 /**
668 * @conn_base_id: DRM connector ID this mgr is connected to. Only used
669 * to build the MST connector path value.
670 */
671 int conn_base_id;
672
673 /**
674 * @up_req_recv: Message receiver state for up requests.
675 */
676 struct drm_dp_sideband_msg_rx up_req_recv;
677
678 /**
679 * @down_rep_recv: Message receiver state for replies to down
680 * requests.
681 */
682 struct drm_dp_sideband_msg_rx down_rep_recv;
683
684 /**
685 * @lock: protects @mst_state, @mst_primary, @dpcd, and
686 * @payload_id_table_cleared.
687 */
688 struct mutex lock;
689
690 /**
691 * @probe_lock: Prevents @work and @up_req_work, the only writers of
692 * &drm_dp_mst_port.mstb and &drm_dp_mst_branch.ports, from racing
693 * while they update the topology.
694 */
695 struct mutex probe_lock;
696
697 /**
698	 * @mst_state: Whether this manager is enabled for an MST capable port. False
699	 * if no MST sink/branch device is connected.
700 */
701 bool mst_state : 1;
702
703 /**
704 * @payload_id_table_cleared: Whether or not we've cleared the payload
705 * ID table for @mst_primary. Protected by @lock.
706 */
707 bool payload_id_table_cleared : 1;
708
709 /**
710	 * @reset_rx_state: The down request reply and up request message
711	 * receiver state must be reset after the topology manager has been
712	 * removed. Protected by @lock.
713 */
714 bool reset_rx_state : 1;
715
716 /**
717 * @payload_count: The number of currently active payloads in hardware. This value is only
718 * intended to be used internally by MST helpers for payload tracking, and is only safe to
719 * read/write from the atomic commit (not check) context.
720 */
721 u8 payload_count;
722
723 /**
724 * @next_start_slot: The starting timeslot to use for new VC payloads. This value is used
725 * internally by MST helpers for payload tracking, and is only safe to read/write from the
726 * atomic commit (not check) context.
727 */
728 u8 next_start_slot;
729
730 /**
731 * @mst_primary: Pointer to the primary/first branch device.
732 */
733 struct drm_dp_mst_branch *mst_primary;
734
735 /**
736 * @dpcd: Cache of DPCD for primary port.
737 */
738 u8 dpcd[DP_RECEIVER_CAP_SIZE];
739 /**
740 * @sink_count: Sink count from DEVICE_SERVICE_IRQ_VECTOR_ESI0.
741 */
742 u8 sink_count;
743
744 /**
745 * @funcs: Atomic helper callbacks
746 */
747 const struct drm_private_state_funcs *funcs;
748
749 /**
750 * @qlock: protects @tx_msg_downq and &drm_dp_sideband_msg_tx.state
751 */
752 struct mutex qlock;
753
754 /**
755 * @tx_msg_downq: List of pending down requests
756 */
757 struct list_head tx_msg_downq;
758
759 /**
760	 * @tx_waitq: Wait queue used by message senders to stall until a
	 * transmitted sideband message completes (reply received or timed out).
761 */
762 wait_queue_head_t tx_waitq;
763 /**
764 * @work: Probe work.
765 */
766 struct work_struct work;
767 /**
768 * @tx_work: Sideband transmit worker. This can nest within the main
769 * @work worker for each transaction @work launches.
770 */
771 struct work_struct tx_work;
772
773 /**
774	 * @destroy_port_list: List of ports waiting to be destroyed.
775 */
776 struct list_head destroy_port_list;
777 /**
778	 * @destroy_branch_device_list: List of branch devices waiting to be
779	 * destroyed.
780 */
781 struct list_head destroy_branch_device_list;
782 /**
783 * @delayed_destroy_lock: Protects @destroy_port_list and
784 * @destroy_branch_device_list.
785 */
786 struct mutex delayed_destroy_lock;
787
788 /**
789 * @delayed_destroy_wq: Workqueue used for delayed_destroy_work items.
790 * A dedicated WQ makes it possible to drain any requeued work items
791 * on it.
792 */
793 struct workqueue_struct *delayed_destroy_wq;
794
795 /**
796 * @delayed_destroy_work: Work item to destroy MST port and branch
797 * devices, needed to avoid locking inversion.
798 */
799 struct work_struct delayed_destroy_work;
800
801 /**
802 * @up_req_list: List of pending up requests from the topology that
803 * need to be processed, in chronological order.
804 */
805 struct list_head up_req_list;
806 /**
807 * @up_req_lock: Protects @up_req_list
808 */
809 struct mutex up_req_lock;
810 /**
811 * @up_req_work: Work item to process up requests received from the
812 * topology. Needed to avoid blocking hotplug handling and sideband
813 * transmissions.
814 */
815 struct work_struct up_req_work;
816
817 #if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
818 /**
819 * @topology_ref_history_lock: protects
820 * &drm_dp_mst_port.topology_ref_history and
821 * &drm_dp_mst_branch.topology_ref_history.
822 */
823 struct mutex topology_ref_history_lock;
824 #endif
825 };
826
827 int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
828 struct drm_device *dev, struct drm_dp_aux *aux,
829 int max_dpcd_transaction_bytes,
830 int max_payloads, int conn_base_id);
831
832 void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr);
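/*
 * Example manager setup from a driver's output init path (a minimal sketch;
 * my_output is hypothetical driver state, and the 16 byte DPCD transaction
 * limit and 4 payload maximum are purely illustrative values):
 *
 *	ret = drm_dp_mst_topology_mgr_init(&my_output->mst_mgr, dev,
 *					   &my_output->dp_aux, 16, 4,
 *					   my_output->connector.base.id);
 *	if (ret)
 *		return ret;
 *
 * The manager is torn down again with drm_dp_mst_topology_mgr_destroy() when
 * the output goes away.
 */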
833
834 /**
835 * enum drm_dp_mst_mode - sink's MST mode capability
836 */
837 enum drm_dp_mst_mode {
838 /**
839	 * @DRM_DP_SST: The sink supports neither MST nor single stream sideband
840	 * messaging.
841 */
842 DRM_DP_SST,
843 /**
844 * @DRM_DP_MST: Sink supports MST, more than one stream and single
845 * stream sideband messaging.
846 */
847 DRM_DP_MST,
848 /**
849 * @DRM_DP_SST_SIDEBAND_MSG: Sink supports only one stream and single
850 * stream sideband messaging.
851 */
852 DRM_DP_SST_SIDEBAND_MSG,
853 };
854
855 enum drm_dp_mst_mode drm_dp_read_mst_cap(struct drm_dp_aux *aux, const u8 dpcd[DP_RECEIVER_CAP_SIZE]);
856 int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool mst_state);
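/*
 * Example of probing the sink's MST capability and enabling the manager (a
 * minimal sketch; @dpcd is assumed to have already been read by the driver,
 * and a real driver would fall back to SST on failure):
 *
 *	if (drm_dp_read_mst_cap(aux, dpcd) == DRM_DP_MST) {
 *		ret = drm_dp_mst_topology_mgr_set_mst(mgr, true);
 *		if (ret)
 *			return ret;
 *	}
 */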
857
858 int drm_dp_mst_hpd_irq_handle_event(struct drm_dp_mst_topology_mgr *mgr,
859 const u8 *esi,
860 u8 *ack,
861 bool *handled);
862 void drm_dp_mst_hpd_irq_send_new_request(struct drm_dp_mst_topology_mgr *mgr);
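/*
 * Example ESI/HPD IRQ servicing loop (a minimal sketch; reading the ESI
 * registers and acking the serviced events over DPCD are driver specific and
 * reduced to the hypothetical helpers my_read_esi()/my_ack_esi() here):
 *
 *	for (;;) {
 *		u8 esi[4] = {}, ack[4] = {};
 *		bool handled = false;
 *
 *		if (my_read_esi(mgr->aux, esi) < 0)
 *			break;
 *
 *		drm_dp_mst_hpd_irq_handle_event(mgr, esi, ack, &handled);
 *		if (!handled)
 *			break;
 *
 *		my_ack_esi(mgr->aux, ack);
 *		drm_dp_mst_hpd_irq_send_new_request(mgr);
 *	}
 */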
863
864 int
865 drm_dp_mst_detect_port(struct drm_connector *connector,
866 struct drm_modeset_acquire_ctx *ctx,
867 struct drm_dp_mst_topology_mgr *mgr,
868 struct drm_dp_mst_port *port);
869
870 const struct drm_edid *drm_dp_mst_edid_read(struct drm_connector *connector,
871 struct drm_dp_mst_topology_mgr *mgr,
872 struct drm_dp_mst_port *port);
873 struct edid *drm_dp_mst_get_edid(struct drm_connector *connector,
874 struct drm_dp_mst_topology_mgr *mgr,
875 struct drm_dp_mst_port *port);
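/*
 * Example connector detection and EDID retrieval for an MST port (a minimal
 * sketch of a hypothetical ->detect_ctx()/->get_modes() pair; error handling
 * is omitted):
 *
 *	// ->detect_ctx()
 *	return drm_dp_mst_detect_port(connector, ctx, port->mgr, port);
 *
 *	// ->get_modes()
 *	const struct drm_edid *drm_edid;
 *
 *	drm_edid = drm_dp_mst_edid_read(connector, port->mgr, port);
 *	drm_edid_connector_update(connector, drm_edid);
 *	drm_edid_free(drm_edid);
 *	return drm_edid_connector_add_modes(connector);
 */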
876
877 fixed20_12 drm_dp_get_vc_payload_bw(int link_rate, int link_lane_count);
878
879 int drm_dp_calc_pbn_mode(int clock, int bpp);
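/*
 * Example PBN calculation for a mode (a minimal sketch, assuming this
 * version of the helper takes the pixel clock in kHz and the bpp value in
 * 1/16 bit-per-pixel units):
 *
 *	pbn = drm_dp_calc_pbn_mode(adjusted_mode->crtc_clock, bpp << 4);
 */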
880
881 void drm_dp_mst_update_slots(struct drm_dp_mst_topology_state *mst_state, uint8_t link_encoding_cap);
882
883 int drm_dp_add_payload_part1(struct drm_dp_mst_topology_mgr *mgr,
884 struct drm_dp_mst_topology_state *mst_state,
885 struct drm_dp_mst_atomic_payload *payload);
886 int drm_dp_add_payload_part2(struct drm_dp_mst_topology_mgr *mgr,
887 struct drm_dp_mst_atomic_payload *payload);
888 void drm_dp_remove_payload_part1(struct drm_dp_mst_topology_mgr *mgr,
889 struct drm_dp_mst_topology_state *mst_state,
890 struct drm_dp_mst_atomic_payload *payload);
891 void drm_dp_remove_payload_part2(struct drm_dp_mst_topology_mgr *mgr,
892 struct drm_dp_mst_topology_state *mst_state,
893 const struct drm_dp_mst_atomic_payload *old_payload,
894 struct drm_dp_mst_atomic_payload *new_payload);
895
896 int drm_dp_check_act_status(struct drm_dp_mst_topology_mgr *mgr);
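/*
 * Example commit-time payload enable sequence (a minimal sketch, loosely
 * modeled on existing drivers; step 2 - actually starting the stream - is
 * entirely driver/hardware specific):
 *
 *	// 1) allocate the time slots at the first downstream branch device
 *	drm_dp_add_payload_part1(mgr, mst_state, payload);
 *	// 2) the driver enables the stream on its own hardware here
 *	// 3) wait for the allocation change trigger to complete, then finish
 *	//    the allocation along the rest of the virtual channel
 *	drm_dp_check_act_status(mgr);
 *	drm_dp_add_payload_part2(mgr, payload);
 */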
897
898 void drm_dp_mst_dump_topology(struct seq_file *m,
899 struct drm_dp_mst_topology_mgr *mgr);
900
901 void drm_dp_mst_topology_queue_probe(struct drm_dp_mst_topology_mgr *mgr);
902
903 void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr);
904 int __must_check
905 drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr,
906 bool sync);
907
908 ssize_t drm_dp_mst_dpcd_read(struct drm_dp_aux *aux,
909 unsigned int offset, void *buffer, size_t size);
910 ssize_t drm_dp_mst_dpcd_write(struct drm_dp_aux *aux,
911 unsigned int offset, void *buffer, size_t size);
912
913 int drm_dp_mst_connector_late_register(struct drm_connector *connector,
914 struct drm_dp_mst_port *port);
915 void drm_dp_mst_connector_early_unregister(struct drm_connector *connector,
916 struct drm_dp_mst_port *port);
917
918 struct drm_dp_mst_topology_state *
919 drm_atomic_get_mst_topology_state(struct drm_atomic_state *state,
920 struct drm_dp_mst_topology_mgr *mgr);
921 struct drm_dp_mst_topology_state *
922 drm_atomic_get_old_mst_topology_state(struct drm_atomic_state *state,
923 struct drm_dp_mst_topology_mgr *mgr);
924 struct drm_dp_mst_topology_state *
925 drm_atomic_get_new_mst_topology_state(struct drm_atomic_state *state,
926 struct drm_dp_mst_topology_mgr *mgr);
927 struct drm_dp_mst_atomic_payload *
928 drm_atomic_get_mst_payload_state(struct drm_dp_mst_topology_state *state,
929 struct drm_dp_mst_port *port);
930 bool drm_dp_mst_port_downstream_of_parent(struct drm_dp_mst_topology_mgr *mgr,
931 struct drm_dp_mst_port *port,
932 struct drm_dp_mst_port *parent);
933 int __must_check
934 drm_dp_atomic_find_time_slots(struct drm_atomic_state *state,
935 struct drm_dp_mst_topology_mgr *mgr,
936 struct drm_dp_mst_port *port, int pbn);
937 int drm_dp_mst_atomic_enable_dsc(struct drm_atomic_state *state,
938 struct drm_dp_mst_port *port,
939 int pbn, bool enable);
940 int __must_check
941 drm_dp_mst_add_affected_dsc_crtcs(struct drm_atomic_state *state,
942 struct drm_dp_mst_topology_mgr *mgr);
943 int __must_check
944 drm_dp_atomic_release_time_slots(struct drm_atomic_state *state,
945 struct drm_dp_mst_topology_mgr *mgr,
946 struct drm_dp_mst_port *port);
947 void drm_dp_mst_atomic_wait_for_dependencies(struct drm_atomic_state *state);
948 int __must_check drm_dp_mst_atomic_setup_commit(struct drm_atomic_state *state);
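/*
 * Example atomic check flow for MST streams (a minimal sketch; @pbn is
 * assumed to have been computed with drm_dp_calc_pbn_mode(), and error
 * handling is trimmed down):
 *
 *	// when enabling/changing a stream
 *	slots = drm_dp_atomic_find_time_slots(state, mgr, port, pbn);
 *	if (slots < 0)
 *		return slots;
 *
 *	// when disabling a stream
 *	ret = drm_dp_atomic_release_time_slots(state, mgr, port);
 *	if (ret)
 *		return ret;
 *
 *	// once every stream has been dealt with, validate the topologies
 *	ret = drm_dp_mst_atomic_check(state);
 */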
949 int drm_dp_send_power_updown_phy(struct drm_dp_mst_topology_mgr *mgr,
950 struct drm_dp_mst_port *port, bool power_up);
951 int drm_dp_send_query_stream_enc_status(struct drm_dp_mst_topology_mgr *mgr,
952 struct drm_dp_mst_port *port,
953 struct drm_dp_query_stream_enc_status_ack_reply *status);
954 int __must_check drm_dp_mst_atomic_check_mgr(struct drm_atomic_state *state,
955 struct drm_dp_mst_topology_mgr *mgr,
956 struct drm_dp_mst_topology_state *mst_state,
957 struct drm_dp_mst_port **failing_port);
958 int __must_check drm_dp_mst_atomic_check(struct drm_atomic_state *state);
959 int __must_check drm_dp_mst_root_conn_atomic_check(struct drm_connector_state *new_conn_state,
960 struct drm_dp_mst_topology_mgr *mgr);
961
962 void drm_dp_mst_get_port_malloc(struct drm_dp_mst_port *port);
963 void drm_dp_mst_put_port_malloc(struct drm_dp_mst_port *port);
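/*
 * Example of keeping a &struct drm_dp_mst_port allocation alive while a
 * driver-private structure holds a pointer to it (a minimal sketch; my_conn
 * is hypothetical driver state):
 *
 *	my_conn->port = port;
 *	drm_dp_mst_get_port_malloc(port);
 *	...
 *	// in the matching destroy path
 *	drm_dp_mst_put_port_malloc(my_conn->port);
 */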
964
965 static inline
966 bool drm_dp_mst_port_is_logical(struct drm_dp_mst_port *port)
967 {
968 return port->port_num >= DP_MST_LOGICAL_PORT_0;
969 }
970
971 struct drm_dp_aux *drm_dp_mst_aux_for_parent(struct drm_dp_mst_port *port);
972 struct drm_dp_aux *drm_dp_mst_dsc_aux_for_port(struct drm_dp_mst_port *port);
973
974 static inline struct drm_dp_mst_topology_state *
975 to_drm_dp_mst_topology_state(struct drm_private_state *state)
976 {
977 return container_of(state, struct drm_dp_mst_topology_state, base);
978 }
979
980 extern const struct drm_private_state_funcs drm_dp_mst_topology_state_funcs;
981
982 /**
983 * __drm_dp_mst_state_iter_get - private atomic state iterator function for
984 * macro-internal use
985 * @state: &struct drm_atomic_state pointer
986 * @mgr: pointer to the &struct drm_dp_mst_topology_mgr iteration cursor
987 * @old_state: optional pointer to the old &struct drm_dp_mst_topology_state
988 * iteration cursor
989 * @new_state: optional pointer to the new &struct drm_dp_mst_topology_state
990 * iteration cursor
991 * @i: int iteration cursor, for macro-internal use
992 *
993 * Used by for_each_oldnew_mst_mgr_in_state(),
994 * for_each_old_mst_mgr_in_state(), and for_each_new_mst_mgr_in_state(). Don't
995 * call this directly.
996 *
997 * Returns:
998 * True if the current &struct drm_private_obj is a &struct
999 * drm_dp_mst_topology_mgr, false otherwise.
1000 */
1001 static inline bool
1002 __drm_dp_mst_state_iter_get(struct drm_atomic_state *state,
1003 struct drm_dp_mst_topology_mgr **mgr,
1004 struct drm_dp_mst_topology_state **old_state,
1005 struct drm_dp_mst_topology_state **new_state,
1006 int i)
1007 {
1008 struct __drm_private_objs_state *objs_state = &state->private_objs[i];
1009
1010 if (objs_state->ptr->funcs != &drm_dp_mst_topology_state_funcs)
1011 return false;
1012
1013 *mgr = to_dp_mst_topology_mgr(objs_state->ptr);
1014 if (old_state)
1015 *old_state = to_dp_mst_topology_state(objs_state->old_state);
1016 if (new_state)
1017 *new_state = to_dp_mst_topology_state(objs_state->new_state);
1018
1019 return true;
1020 }
1021
1022 /**
1023 * for_each_oldnew_mst_mgr_in_state - iterate over all DP MST topology
1024 * managers in an atomic update
1025 * @__state: &struct drm_atomic_state pointer
1026 * @mgr: &struct drm_dp_mst_topology_mgr iteration cursor
1027 * @old_state: &struct drm_dp_mst_topology_state iteration cursor for the old
1028 * state
1029 * @new_state: &struct drm_dp_mst_topology_state iteration cursor for the new
1030 * state
1031 * @__i: int iteration cursor, for macro-internal use
1032 *
1033 * This iterates over all DRM DP MST topology managers in an atomic update,
1034 * tracking both old and new state. This is useful in places where the state
1035 * delta needs to be considered, for example in atomic check functions.
1036 */
1037 #define for_each_oldnew_mst_mgr_in_state(__state, mgr, old_state, new_state, __i) \
1038 for ((__i) = 0; (__i) < (__state)->num_private_objs; (__i)++) \
1039 for_each_if(__drm_dp_mst_state_iter_get((__state), &(mgr), &(old_state), &(new_state), (__i)))
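/*
 * Example usage (a minimal sketch): walking every MST manager touched by an
 * atomic update from a driver's atomic check function:
 *
 *	struct drm_dp_mst_topology_mgr *mgr;
 *	struct drm_dp_mst_topology_state *old_mst_state, *new_mst_state;
 *	int i;
 *
 *	for_each_oldnew_mst_mgr_in_state(state, mgr, old_mst_state,
 *					 new_mst_state, i) {
 *		// compare old vs. new payloads for this topology here
 *	}
 */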
1040
1041 /**
1042 * for_each_old_mst_mgr_in_state - iterate over all DP MST topology managers
1043 * in an atomic update
1044 * @__state: &struct drm_atomic_state pointer
1045 * @mgr: &struct drm_dp_mst_topology_mgr iteration cursor
1046 * @old_state: &struct drm_dp_mst_topology_state iteration cursor for the old
1047 * state
1048 * @__i: int iteration cursor, for macro-internal use
1049 *
1050 * This iterates over all DRM DP MST topology managers in an atomic update,
1051 * tracking only the old state. This is useful in disable functions, where we
1052 * need the old state the hardware is still in.
1053 */
1054 #define for_each_old_mst_mgr_in_state(__state, mgr, old_state, __i) \
1055 for ((__i) = 0; (__i) < (__state)->num_private_objs; (__i)++) \
1056 for_each_if(__drm_dp_mst_state_iter_get((__state), &(mgr), &(old_state), NULL, (__i)))
1057
1058 /**
1059 * for_each_new_mst_mgr_in_state - iterate over all DP MST topology managers
1060 * in an atomic update
1061 * @__state: &struct drm_atomic_state pointer
1062 * @mgr: &struct drm_dp_mst_topology_mgr iteration cursor
1063 * @new_state: &struct drm_dp_mst_topology_state iteration cursor for the new
1064 * state
1065 * @__i: int iteration cursor, for macro-internal use
1066 *
1067 * This iterates over all DRM DP MST topology managers in an atomic update,
1068 * tracking only the new state. This is useful in enable functions, where we
1069 * need the new state the hardware should be in when the atomic commit
1070 * operation has completed.
1071 */
1072 #define for_each_new_mst_mgr_in_state(__state, mgr, new_state, __i) \
1073 for ((__i) = 0; (__i) < (__state)->num_private_objs; (__i)++) \
1074 for_each_if(__drm_dp_mst_state_iter_get((__state), &(mgr), NULL, &(new_state), (__i)))
1075
1076 #endif
1077