1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Debugfs interface
4  *
5  * Copyright (C) 2020, Intel Corporation
6  * Authors: Gil Fine <[email protected]>
7  *	    Mika Westerberg <[email protected]>
8  */
9 
10 #include <linux/array_size.h>
11 #include <linux/bitfield.h>
12 #include <linux/debugfs.h>
13 #include <linux/delay.h>
14 #include <linux/pm_runtime.h>
15 #include <linux/uaccess.h>
16 
17 #include "tb.h"
18 #include "sb_regs.h"
19 
20 #define PORT_CAP_V1_PCIE_LEN	1
21 #define PORT_CAP_V2_PCIE_LEN	2
22 #define PORT_CAP_POWER_LEN	2
23 #define PORT_CAP_LANE_LEN	3
24 #define PORT_CAP_USB3_LEN	5
25 #define PORT_CAP_DP_V1_LEN	9
26 #define PORT_CAP_DP_V2_LEN	14
27 #define PORT_CAP_TMU_V1_LEN	8
28 #define PORT_CAP_TMU_V2_LEN	10
29 #define PORT_CAP_BASIC_LEN	9
30 #define PORT_CAP_USB4_LEN	20
31 
32 #define SWITCH_CAP_TMU_LEN	26
33 #define SWITCH_CAP_BASIC_LEN	27
34 
35 #define PATH_LEN		2
36 
37 #define COUNTER_SET_LEN		3
38 
39 /*
40  * The USB4 spec doesn't specify a dwell range; the range of 100 ms to
41  * 500 ms was found to give good results.
42  */
43 #define MIN_DWELL_TIME		100 /* ms */
44 #define MAX_DWELL_TIME		500 /* ms */
45 #define DWELL_SAMPLE_INTERVAL	10 /* ms */
46 
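/*
 * Normalized view of the independent voltage/time margin capabilities so
 * that the generation specific encodings can be reported uniformly.
 */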
47 enum usb4_margin_cap_voltage_indp {
48 	USB4_MARGIN_CAP_VOLTAGE_INDP_GEN_2_3_MIN,
49 	USB4_MARGIN_CAP_VOLTAGE_INDP_GEN_2_3_HL,
50 	USB4_MARGIN_CAP_VOLTAGE_INDP_GEN_2_3_BOTH,
51 	USB4_MARGIN_CAP_VOLTAGE_INDP_GEN_4_MIN,
52 	USB4_MARGIN_CAP_VOLTAGE_INDP_GEN_4_BOTH,
53 	USB4_MARGIN_CAP_VOLTAGE_INDP_UNKNOWN,
54 };
55 
56 enum usb4_margin_cap_time_indp {
57 	USB4_MARGIN_CAP_TIME_INDP_GEN_2_3_MIN,
58 	USB4_MARGIN_CAP_TIME_INDP_GEN_2_3_LR,
59 	USB4_MARGIN_CAP_TIME_INDP_GEN_2_3_BOTH,
60 	USB4_MARGIN_CAP_TIME_INDP_GEN_4_MIN,
61 	USB4_MARGIN_CAP_TIME_INDP_GEN_4_BOTH,
62 	USB4_MARGIN_CAP_TIME_INDP_UNKNOWN,
63 };
64 
65 /* Sideband registers and their sizes as defined in the USB4 spec */
66 struct sb_reg {
67 	unsigned int reg;
68 	unsigned int size;
69 };
70 
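/* Large enough to hold the biggest sideband register (USB4_SB_DATA) */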
71 #define SB_MAX_SIZE		64
72 
73 /* Sideband registers for router */
74 static const struct sb_reg port_sb_regs[] = {
75 	{ USB4_SB_VENDOR_ID, 4 },
76 	{ USB4_SB_PRODUCT_ID, 4 },
77 	{ USB4_SB_DEBUG_CONF, 4 },
78 	{ USB4_SB_DEBUG, 54 },
79 	{ USB4_SB_LRD_TUNING, 4 },
80 	{ USB4_SB_OPCODE, 4 },
81 	{ USB4_SB_METADATA, 4 },
82 	{ USB4_SB_LINK_CONF, 3 },
83 	{ USB4_SB_GEN23_TXFFE, 4 },
84 	{ USB4_SB_GEN4_TXFFE, 4 },
85 	{ USB4_SB_VERSION, 4 },
86 	{ USB4_SB_DATA, 64 },
87 };
88 
89 /* Sideband registers for retimer */
90 static const struct sb_reg retimer_sb_regs[] = {
91 	{ USB4_SB_VENDOR_ID, 4 },
92 	{ USB4_SB_PRODUCT_ID, 4 },
93 	{ USB4_SB_FW_VERSION, 4 },
94 	{ USB4_SB_LRD_TUNING, 4 },
95 	{ USB4_SB_OPCODE, 4 },
96 	{ USB4_SB_METADATA, 4 },
97 	{ USB4_SB_GEN23_TXFFE, 4 },
98 	{ USB4_SB_GEN4_TXFFE, 4 },
99 	{ USB4_SB_VERSION, 4 },
100 	{ USB4_SB_DATA, 64 },
101 };
102 
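/*
 * Generates a <__space>_open() helper and <__space>_fops for a seq_file
 * based debugfs attribute: reads go through <__space>_show() via
 * single_open() and writes through the optional @__write handler.
 */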
103 #define DEBUGFS_ATTR(__space, __write)					\
104 static int __space ## _open(struct inode *inode, struct file *file)	\
105 {									\
106 	return single_open(file, __space ## _show, inode->i_private);	\
107 }									\
108 									\
109 static const struct file_operations __space ## _fops = {		\
110 	.owner = THIS_MODULE,						\
111 	.open = __space ## _open,					\
112 	.release = single_release,					\
113 	.read  = seq_read,						\
114 	.write = __write,						\
115 	.llseek = seq_lseek,						\
116 }
117 
118 #define DEBUGFS_ATTR_RO(__space)					\
119 	DEBUGFS_ATTR(__space, NULL)
120 
121 #define DEBUGFS_ATTR_RW(__space)					\
122 	DEBUGFS_ATTR(__space, __space ## _write)
123 
124 static struct dentry *tb_debugfs_root;
125 
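/*
 * Copies at most one page of user data into a freshly zeroed page. The
 * caller must release the buffer with free_page() and *count is updated
 * to the number of bytes actually copied.
 */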
126 static void *validate_and_copy_from_user(const void __user *user_buf,
127 					 size_t *count)
128 {
129 	size_t nbytes;
130 	void *buf;
131 
132 	if (!*count)
133 		return ERR_PTR(-EINVAL);
134 
135 	if (!access_ok(user_buf, *count))
136 		return ERR_PTR(-EFAULT);
137 
138 	buf = (void *)get_zeroed_page(GFP_KERNEL);
139 	if (!buf)
140 		return ERR_PTR(-ENOMEM);
141 
142 	nbytes = min_t(size_t, *count, PAGE_SIZE);
143 	if (copy_from_user(buf, user_buf, nbytes)) {
144 		free_page((unsigned long)buf);
145 		return ERR_PTR(-EFAULT);
146 	}
147 
148 	*count = nbytes;
149 	return buf;
150 }
151 
152 static bool parse_line(char **line, u32 *offs, u32 *val, int short_fmt_len,
153 		       int long_fmt_len)
154 {
155 	char *token;
156 	u32 v[5];
157 	int ret;
158 
159 	token = strsep(line, "\n");
160 	if (!token)
161 		return false;
162 
163 	/*
164 	 * For Adapter/Router configuration space:
165 	 * Short format is: offset value\n
166 	 *		    v[0]   v[1]
167 	 * Long format as produced from the read side:
168 	 * offset relative_offset cap_id vs_cap_id value\n
169 	 * v[0]   v[1]            v[2]   v[3]      v[4]
170 	 *
171 	 * For Path configuration space:
172 	 * Short format is: offset value\n
173 	 *		    v[0]   v[1]
174 	 * Long format as produced from the read side:
175 	 * offset relative_offset in_hop_id value\n
176 	 * v[0]   v[1]            v[2]      v[3]
177 	 *
178 	 * For Counter configuration space:
179 	 * Short format is: offset\n
180 	 *		    v[0]
181 	 * Long format as produced from the read side:
182 	 * offset relative_offset counter_id value\n
183 	 * v[0]   v[1]            v[2]       v[3]
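	 *
	 * For example (illustrative values), "0x3 0x12345678" is a valid
	 * short format line for the Adapter/Router configuration space.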
184 	 */
185 	ret = sscanf(token, "%i %i %i %i %i", &v[0], &v[1], &v[2], &v[3], &v[4]);
186 	/* In case of Counters (clear counter), the "val" content is not used */
187 	if (ret == short_fmt_len) {
188 		*offs = v[0];
189 		*val = v[short_fmt_len - 1];
190 		return true;
191 	} else if (ret == long_fmt_len) {
192 		*offs = v[0];
193 		*val = v[long_fmt_len - 1];
194 		return true;
195 	}
196 
197 	return false;
198 }
199 
200 #if IS_ENABLED(CONFIG_USB4_DEBUGFS_WRITE)
201 /*
202  * Path registers need to be written in double word pairs and both of them
203  * must be read before being written. This writes one double word in the
204  * path config space following the spec flow.
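 * For example, writing the double word at offset 5 reads the pair at
 * offsets 4-5, updates the second word of the pair and writes both back.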
205  */
206 static int path_write_one(struct tb_port *port, u32 val, u32 offset)
207 {
208 	u32 index = offset % PATH_LEN;
209 	u32 offs = offset - index;
210 	u32 data[PATH_LEN];
211 	int ret;
212 
213 	ret = tb_port_read(port, data, TB_CFG_HOPS, offs, PATH_LEN);
214 	if (ret)
215 		return ret;
216 	data[index] = val;
217 	return tb_port_write(port, data, TB_CFG_HOPS, offs, PATH_LEN);
218 }
219 
220 static ssize_t regs_write(struct tb_switch *sw, struct tb_port *port,
221 			  enum tb_cfg_space space, const char __user *user_buf,
222 			  size_t count, loff_t *ppos)
223 {
224 	int long_fmt_len, ret = 0;
225 	struct tb *tb = sw->tb;
226 	char *line, *buf;
227 	u32 val, offset;
228 
229 	buf = validate_and_copy_from_user(user_buf, &count);
230 	if (IS_ERR(buf))
231 		return PTR_ERR(buf);
232 
233 	pm_runtime_get_sync(&sw->dev);
234 
235 	if (mutex_lock_interruptible(&tb->lock)) {
236 		ret = -ERESTARTSYS;
237 		goto out;
238 	}
239 
240 	/* User did hardware changes behind the driver's back */
241 	add_taint(TAINT_USER, LOCKDEP_STILL_OK);
242 
243 	if (space == TB_CFG_HOPS)
244 		long_fmt_len = 4;
245 	else
246 		long_fmt_len = 5;
247 
248 	line = buf;
249 	while (parse_line(&line, &offset, &val, 2, long_fmt_len)) {
250 		if (port) {
251 			if (space == TB_CFG_HOPS)
252 				ret = path_write_one(port, val, offset);
253 			else
254 				ret = tb_port_write(port, &val, space, offset, 1);
255 		} else {
256 			ret = tb_sw_write(sw, &val, TB_CFG_SWITCH, offset, 1);
257 		}
258 		if (ret)
259 			break;
260 	}
261 
262 	mutex_unlock(&tb->lock);
263 
264 out:
265 	pm_runtime_mark_last_busy(&sw->dev);
266 	pm_runtime_put_autosuspend(&sw->dev);
267 	free_page((unsigned long)buf);
268 
269 	return ret < 0 ? ret : count;
270 }
271 
272 static ssize_t port_regs_write(struct file *file, const char __user *user_buf,
273 			       size_t count, loff_t *ppos)
274 {
275 	struct seq_file *s = file->private_data;
276 	struct tb_port *port = s->private;
277 
278 	return regs_write(port->sw, port, TB_CFG_PORT, user_buf, count, ppos);
279 }
280 
281 static ssize_t path_write(struct file *file, const char __user *user_buf,
282 			  size_t count, loff_t *ppos)
283 {
284 	struct seq_file *s = file->private_data;
285 	struct tb_port *port = s->private;
286 
287 	return regs_write(port->sw, port, TB_CFG_HOPS, user_buf, count, ppos);
288 }
289 
290 static ssize_t switch_regs_write(struct file *file, const char __user *user_buf,
291 				 size_t count, loff_t *ppos)
292 {
293 	struct seq_file *s = file->private_data;
294 	struct tb_switch *sw = s->private;
295 
296 	return regs_write(sw, NULL, TB_CFG_SWITCH, user_buf, count, ppos);
297 }
298 
299 static bool parse_sb_line(char **line, u8 *reg, u8 *data, size_t data_size,
300 			  size_t *bytes_read)
301 {
302 	char *field, *token;
303 	int i;
304 
305 	token = strsep(line, "\n");
306 	if (!token)
307 		return false;
308 
309 	/* Parse the register first */
310 	field = strsep(&token, " ");
311 	if (!field)
312 		return false;
313 	if (kstrtou8(field, 0, reg))
314 		return false;
315 
316 	/* Then the values for the register, up to data_size */
317 	for (i = 0; i < data_size; i++) {
318 		field = strsep(&token, " ");
319 		if (!field)
320 			break;
321 		if (kstrtou8(field, 0, &data[i]))
322 			return false;
323 	}
324 
325 	*bytes_read = i;
326 	return true;
327 }
328 
329 static ssize_t sb_regs_write(struct tb_port *port, const struct sb_reg *sb_regs,
330 			     size_t size, enum usb4_sb_target target, u8 index,
331 			     char *buf, size_t count, loff_t *ppos)
332 {
333 	u8 reg, data[SB_MAX_SIZE];
334 	size_t bytes_read;
335 	char *line = buf;
336 
337 	/* User did hardware changes behind the driver's back */
338 	add_taint(TAINT_USER, LOCKDEP_STILL_OK);
339 
340 	/*
341 	 * For sideband registers we accept:
342 	 * reg b0 b1 b2...\n
343 	 *
344 	 * Here "reg" is the byte offset of the sideband register and "b0"..
345 	 * are the byte values. There can be fewer byte values than the
346 	 * register size; the remaining bytes are not overwritten.
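	 *
	 * For example (illustrative values; the offset must match one of the
	 * registers in the tables above), "0x12 0xaa 0xbb" writes bytes 0xaa
	 * and 0xbb starting at sideband register offset 0x12.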
347 	 */
348 	while (parse_sb_line(&line, &reg, data, ARRAY_SIZE(data), &bytes_read)) {
349 		const struct sb_reg *sb_reg;
350 		int ret;
351 
352 		/* At least one byte must be passed */
353 		if (bytes_read < 1)
354 			return -EINVAL;
355 
356 		/* Find the register */
357 		sb_reg = NULL;
358 		for (int i = 0; i < size; i++) {
359 			if (sb_regs[i].reg == reg) {
360 				sb_reg = &sb_regs[i];
361 				break;
362 			}
363 		}
364 
365 		if (!sb_reg)
366 			return -EINVAL;
367 
368 		if (bytes_read > sb_reg->size)
369 			return -E2BIG;
370 
371 		ret = usb4_port_sb_write(port, target, index, sb_reg->reg, data,
372 					 bytes_read);
373 		if (ret)
374 			return ret;
375 	}
376 
377 	return 0;
378 }
379 
380 static ssize_t port_sb_regs_write(struct file *file, const char __user *user_buf,
381 				  size_t count, loff_t *ppos)
382 {
383 	struct seq_file *s = file->private_data;
384 	struct tb_port *port = s->private;
385 	struct tb_switch *sw = port->sw;
386 	struct tb *tb = sw->tb;
387 	char *buf;
388 	int ret;
389 
390 	buf = validate_and_copy_from_user(user_buf, &count);
391 	if (IS_ERR(buf))
392 		return PTR_ERR(buf);
393 
394 	pm_runtime_get_sync(&sw->dev);
395 
396 	if (mutex_lock_interruptible(&tb->lock)) {
397 		ret = -ERESTARTSYS;
398 		goto out;
399 	}
400 
401 	ret = sb_regs_write(port, port_sb_regs, ARRAY_SIZE(port_sb_regs),
402 			    USB4_SB_TARGET_ROUTER, 0, buf, count, ppos);
403 
404 	mutex_unlock(&tb->lock);
405 out:
406 	pm_runtime_mark_last_busy(&sw->dev);
407 	pm_runtime_put_autosuspend(&sw->dev);
408 	free_page((unsigned long)buf);
409 
410 	return ret < 0 ? ret : count;
411 }
412 
413 static ssize_t retimer_sb_regs_write(struct file *file,
414 				     const char __user *user_buf,
415 				     size_t count, loff_t *ppos)
416 {
417 	struct seq_file *s = file->private_data;
418 	struct tb_retimer *rt = s->private;
419 	struct tb *tb = rt->tb;
420 	char *buf;
421 	int ret;
422 
423 	buf = validate_and_copy_from_user(user_buf, &count);
424 	if (IS_ERR(buf))
425 		return PTR_ERR(buf);
426 
427 	pm_runtime_get_sync(&rt->dev);
428 
429 	if (mutex_lock_interruptible(&tb->lock)) {
430 		ret = -ERESTARTSYS;
431 		goto out;
432 	}
433 
434 	ret = sb_regs_write(rt->port, retimer_sb_regs, ARRAY_SIZE(retimer_sb_regs),
435 			    USB4_SB_TARGET_RETIMER, rt->index, buf, count, ppos);
436 
437 	mutex_unlock(&tb->lock);
438 out:
439 	pm_runtime_mark_last_busy(&rt->dev);
440 	pm_runtime_put_autosuspend(&rt->dev);
441 	free_page((unsigned long)buf);
442 
443 	return ret < 0 ? ret : count;
444 }
445 #define DEBUGFS_MODE		0600
446 #else
447 #define port_regs_write		NULL
448 #define path_write		NULL
449 #define switch_regs_write	NULL
450 #define port_sb_regs_write	NULL
451 #define retimer_sb_regs_write	NULL
452 #define DEBUGFS_MODE		0400
453 #endif
454 
455 #if IS_ENABLED(CONFIG_USB4_DEBUGFS_MARGINING)
456 /**
457  * struct tb_margining - Lane margining support
458  * @port: USB4 port through which the margining operations are run
459  * @target: Sideband target
460  * @index: Retimer index if target is %USB4_SB_TARGET_RETIMER
461  * @dev: Pointer to the device that is the target (USB4 port or retimer)
462  * @gen: Link generation
463  * @asym_rx: %true if @port supports asymmetric link with 3 Rx
464  * @caps: Port lane margining capabilities
465  * @results: Last lane margining results
466  * @lanes: %USB4_MARGINING_LANE_RX0, %USB4_MARGINING_LANE_RX1, %USB4_MARGINING_LANE_RX2 or %USB4_MARGINING_LANE_ALL
467  * @min_ber_level: Minimum supported BER level contour value
468  * @max_ber_level: Maximum supported BER level contour value
469  * @ber_level: Current BER level contour value
470  * @voltage_steps: Number of mandatory voltage steps
471  * @max_voltage_offset: Maximum mandatory voltage offset (in mV)
472  * @voltage_steps_optional_range: Number of voltage steps for optional range
473  * @max_voltage_offset_optional_range: Maximum voltage offset for the optional
474  *					range (in mV).
475  * @time_steps: Number of time margin steps
476  * @max_time_offset: Maximum time margin offset (in mUI)
477  * @voltage_time_offset: Offset for voltage / time for software margining
478  * @dwell_time: Dwell time for software margining (in ms)
479  * @error_counter: Error counter operation for software margining
480  * @optional_voltage_offset_range: Enable optional extended voltage range
481  * @software: %true if software margining is used instead of hardware
482  * @time: %true if time margining is used instead of voltage
483  * @right_high: %false if left/low margin test is performed, %true if
484  *		right/high
485  * @upper_eye: %false if the lower PAM3 eye is used, %true if the upper
486  *	       eye is used
487  */
488 struct tb_margining {
489 	struct tb_port *port;
490 	enum usb4_sb_target target;
491 	u8 index;
492 	struct device *dev;
493 	unsigned int gen;
494 	bool asym_rx;
495 	u32 caps[3];
496 	u32 results[3];
497 	enum usb4_margining_lane lanes;
498 	unsigned int min_ber_level;
499 	unsigned int max_ber_level;
500 	unsigned int ber_level;
501 	unsigned int voltage_steps;
502 	unsigned int max_voltage_offset;
503 	unsigned int voltage_steps_optional_range;
504 	unsigned int max_voltage_offset_optional_range;
505 	unsigned int time_steps;
506 	unsigned int max_time_offset;
507 	unsigned int voltage_time_offset;
508 	unsigned int dwell_time;
509 	enum usb4_margin_sw_error_counter error_counter;
510 	bool optional_voltage_offset_range;
511 	bool software;
512 	bool time;
513 	bool right_high;
514 	bool upper_eye;
515 };
516 
517 static int margining_modify_error_counter(struct tb_margining *margining,
518 	u32 lanes, enum usb4_margin_sw_error_counter error_counter)
519 {
520 	struct usb4_port_margining_params params = { 0 };
521 	struct tb_port *port = margining->port;
522 	u32 result;
523 
524 	if (error_counter != USB4_MARGIN_SW_ERROR_COUNTER_CLEAR &&
525 	    error_counter != USB4_MARGIN_SW_ERROR_COUNTER_STOP)
526 		return -EOPNOTSUPP;
527 
528 	params.error_counter = error_counter;
529 	params.lanes = lanes;
530 
531 	return usb4_port_sw_margin(port, margining->target, margining->index,
532 				   &params, &result);
533 }
534 
535 static bool supports_software(const struct tb_margining *margining)
536 {
537 	if (margining->gen < 4)
538 		return margining->caps[0] & USB4_MARGIN_CAP_0_MODES_SW;
539 	return margining->caps[2] & USB4_MARGIN_CAP_2_MODES_SW;
540 }
541 
542 static bool supports_hardware(const struct tb_margining *margining)
543 {
544 	if (margining->gen < 4)
545 		return margining->caps[0] & USB4_MARGIN_CAP_0_MODES_HW;
546 	return margining->caps[2] & USB4_MARGIN_CAP_2_MODES_HW;
547 }
548 
549 static bool all_lanes(const struct tb_margining *margining)
550 {
551 	return margining->caps[0] & USB4_MARGIN_CAP_0_ALL_LANES;
552 }
553 
554 static enum usb4_margin_cap_voltage_indp
555 independent_voltage_margins(const struct tb_margining *margining)
556 {
557 	if (margining->gen < 4) {
558 		switch (FIELD_GET(USB4_MARGIN_CAP_0_VOLTAGE_INDP_MASK, margining->caps[0])) {
559 		case USB4_MARGIN_CAP_0_VOLTAGE_MIN:
560 			return USB4_MARGIN_CAP_VOLTAGE_INDP_GEN_2_3_MIN;
561 		case USB4_MARGIN_CAP_0_VOLTAGE_HL:
562 			return USB4_MARGIN_CAP_VOLTAGE_INDP_GEN_2_3_HL;
563 		case USB4_MARGIN_CAP_1_TIME_BOTH:
564 			return USB4_MARGIN_CAP_VOLTAGE_INDP_GEN_2_3_BOTH;
565 		}
566 	} else {
567 		switch (FIELD_GET(USB4_MARGIN_CAP_2_VOLTAGE_INDP_MASK, margining->caps[2])) {
568 		case USB4_MARGIN_CAP_2_VOLTAGE_MIN:
569 			return USB4_MARGIN_CAP_VOLTAGE_INDP_GEN_4_MIN;
570 		case USB4_MARGIN_CAP_2_VOLTAGE_BOTH:
571 			return USB4_MARGIN_CAP_VOLTAGE_INDP_GEN_4_BOTH;
572 		}
573 	}
574 	return USB4_MARGIN_CAP_VOLTAGE_INDP_UNKNOWN;
575 }
576 
577 static bool supports_time(const struct tb_margining *margining)
578 {
579 	if (margining->gen < 4)
580 		return margining->caps[0] & USB4_MARGIN_CAP_0_TIME;
581 	return margining->caps[2] & USB4_MARGIN_CAP_2_TIME;
582 }
583 
584 /* Only applicable if supports_time() returns true */
585 static enum usb4_margin_cap_time_indp
586 independent_time_margins(const struct tb_margining *margining)
587 {
588 	if (margining->gen < 4) {
589 		switch (FIELD_GET(USB4_MARGIN_CAP_1_TIME_INDP_MASK, margining->caps[1])) {
590 		case USB4_MARGIN_CAP_1_TIME_MIN:
591 			return USB4_MARGIN_CAP_TIME_INDP_GEN_2_3_MIN;
592 		case USB4_MARGIN_CAP_1_TIME_LR:
593 			return USB4_MARGIN_CAP_TIME_INDP_GEN_2_3_LR;
594 		case USB4_MARGIN_CAP_1_TIME_BOTH:
595 			return USB4_MARGIN_CAP_TIME_INDP_GEN_2_3_BOTH;
596 		}
597 	} else {
598 		switch (FIELD_GET(USB4_MARGIN_CAP_2_TIME_INDP_MASK, margining->caps[2])) {
599 		case USB4_MARGIN_CAP_2_TIME_MIN:
600 			return USB4_MARGIN_CAP_TIME_INDP_GEN_4_MIN;
601 		case USB4_MARGIN_CAP_2_TIME_BOTH:
602 			return USB4_MARGIN_CAP_TIME_INDP_GEN_4_BOTH;
603 		}
604 	}
605 	return USB4_MARGIN_CAP_TIME_INDP_UNKNOWN;
606 }
607 
608 static bool
609 supports_optional_voltage_offset_range(const struct tb_margining *margining)
610 {
611 	return margining->caps[0] & USB4_MARGIN_CAP_0_OPT_VOLTAGE_SUPPORT;
612 }
613 
614 static ssize_t
615 margining_ber_level_write(struct file *file, const char __user *user_buf,
616 			   size_t count, loff_t *ppos)
617 {
618 	struct seq_file *s = file->private_data;
619 	struct tb_margining *margining = s->private;
620 	struct tb *tb = margining->port->sw->tb;
621 	unsigned int val;
622 	int ret = 0;
623 	char *buf;
624 
625 	if (mutex_lock_interruptible(&tb->lock))
626 		return -ERESTARTSYS;
627 
628 	if (margining->software) {
629 		ret = -EINVAL;
630 		goto out_unlock;
631 	}
632 
633 	buf = validate_and_copy_from_user(user_buf, &count);
634 	if (IS_ERR(buf)) {
635 		ret = PTR_ERR(buf);
636 		goto out_unlock;
637 	}
638 
639 	buf[count - 1] = '\0';
640 
641 	ret = kstrtouint(buf, 10, &val);
642 	if (ret)
643 		goto out_free;
644 
645 	if (val < margining->min_ber_level ||
646 	    val > margining->max_ber_level) {
647 		ret = -EINVAL;
648 		goto out_free;
649 	}
650 
651 	margining->ber_level = val;
652 
653 out_free:
654 	free_page((unsigned long)buf);
655 out_unlock:
656 	mutex_unlock(&tb->lock);
657 
658 	return ret < 0 ? ret : count;
659 }
660 
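/*
 * BER level contour values are encoded in half-decade steps relative to
 * 1e-12: even values print as 1e(-12 + val / 2) and odd values as
 * 3 * 1e(-12 + (val + 1) / 2).
 */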
661 static void ber_level_show(struct seq_file *s, unsigned int val)
662 {
663 	if (val % 2)
664 		seq_printf(s, "3 * 1e%d (%u)\n", -12 + (val + 1) / 2, val);
665 	else
666 		seq_printf(s, "1e%d (%u)\n", -12 + val / 2, val);
667 }
668 
669 static int margining_ber_level_show(struct seq_file *s, void *not_used)
670 {
671 	const struct tb_margining *margining = s->private;
672 
673 	if (margining->software)
674 		return -EINVAL;
675 	ber_level_show(s, margining->ber_level);
676 	return 0;
677 }
678 DEBUGFS_ATTR_RW(margining_ber_level);
679 
680 static int margining_caps_show(struct seq_file *s, void *not_used)
681 {
682 	struct tb_margining *margining = s->private;
683 	struct tb *tb = margining->port->sw->tb;
684 	int ret = 0;
685 
686 	if (mutex_lock_interruptible(&tb->lock))
687 		return -ERESTARTSYS;
688 
689 	/* Dump the raw caps first */
690 	for (int i = 0; i < ARRAY_SIZE(margining->caps); i++)
691 		seq_printf(s, "0x%08x\n", margining->caps[i]);
692 
693 	seq_printf(s, "# software margining: %s\n",
694 		   supports_software(margining) ? "yes" : "no");
695 	if (supports_hardware(margining)) {
696 		seq_puts(s, "# hardware margining: yes\n");
697 		seq_puts(s, "# minimum BER level contour: ");
698 		ber_level_show(s, margining->min_ber_level);
699 		seq_puts(s, "# maximum BER level contour: ");
700 		ber_level_show(s, margining->max_ber_level);
701 	} else {
702 		seq_puts(s, "# hardware margining: no\n");
703 	}
704 
705 	seq_printf(s, "# all lanes simultaneously: %s\n",
706 		  str_yes_no(all_lanes(margining)));
707 	seq_printf(s, "# voltage margin steps: %u\n",
708 		   margining->voltage_steps);
709 	seq_printf(s, "# maximum voltage offset: %u mV\n",
710 		   margining->max_voltage_offset);
711 	seq_printf(s, "# optional voltage offset range support: %s\n",
712 		   str_yes_no(supports_optional_voltage_offset_range(margining)));
713 	if (supports_optional_voltage_offset_range(margining)) {
714 		seq_printf(s, "# voltage margin steps, optional range: %u\n",
715 			   margining->voltage_steps_optional_range);
716 		seq_printf(s, "# maximum voltage offset, optional range: %u mV\n",
717 			   margining->max_voltage_offset_optional_range);
718 	}
719 
720 	switch (independent_voltage_margins(margining)) {
721 	case USB4_MARGIN_CAP_VOLTAGE_INDP_GEN_2_3_MIN:
722 		seq_puts(s, "# returns minimum between high and low voltage margins\n");
723 		break;
724 	case USB4_MARGIN_CAP_VOLTAGE_INDP_GEN_2_3_HL:
725 		seq_puts(s, "# returns high or low voltage margin\n");
726 		break;
727 	case USB4_MARGIN_CAP_VOLTAGE_INDP_GEN_2_3_BOTH:
728 		seq_puts(s, "# returns both high and low margins\n");
729 		break;
730 	case USB4_MARGIN_CAP_VOLTAGE_INDP_GEN_4_MIN:
731 		seq_puts(s, "# returns minimum between high and low voltage margins in both lower and upper eye\n");
732 		break;
733 	case USB4_MARGIN_CAP_VOLTAGE_INDP_GEN_4_BOTH:
734 		seq_puts(s, "# returns both high and low margins of both upper and lower eye\n");
735 		break;
736 	case USB4_MARGIN_CAP_VOLTAGE_INDP_UNKNOWN:
737 		tb_port_warn(margining->port,
738 			     "failed to parse independent voltage margining capabilities\n");
739 		ret = -EIO;
740 		goto out;
741 	}
742 
743 	if (supports_time(margining)) {
744 		seq_puts(s, "# time margining: yes\n");
745 		seq_printf(s, "# time margining is destructive: %s\n",
746 			   str_yes_no(margining->caps[1] & USB4_MARGIN_CAP_1_TIME_DESTR));
747 
748 		switch (independent_time_margins(margining)) {
749 		case USB4_MARGIN_CAP_TIME_INDP_GEN_2_3_MIN:
750 			seq_puts(s, "# returns minimum between left and right time margins\n");
751 			break;
752 		case USB4_MARGIN_CAP_TIME_INDP_GEN_2_3_LR:
753 			seq_puts(s, "# returns left or right margin\n");
754 			break;
755 		case USB4_MARGIN_CAP_TIME_INDP_GEN_2_3_BOTH:
756 			seq_puts(s, "# returns both left and right margins\n");
757 			break;
758 		case USB4_MARGIN_CAP_TIME_INDP_GEN_4_MIN:
759 			seq_puts(s, "# returns minimum between left and right time margins in both lower and upper eye\n");
760 			break;
761 		case USB4_MARGIN_CAP_TIME_INDP_GEN_4_BOTH:
762 			seq_puts(s, "# returns both left and right margins of both upper and lower eye\n");
763 			break;
764 		case USB4_MARGIN_CAP_TIME_INDP_UNKNOWN:
765 			tb_port_warn(margining->port,
766 				     "failed to parse independent time margining capabilities\n");
767 			ret = -EIO;
768 			goto out;
769 		}
770 
771 		seq_printf(s, "# time margin steps: %u\n",
772 			   margining->time_steps);
773 		seq_printf(s, "# maximum time offset: %u mUI\n",
774 			   margining->max_time_offset);
775 	} else {
776 		seq_puts(s, "# time margining: no\n");
777 	}
778 
779 out:
780 	mutex_unlock(&tb->lock);
781 	return ret;
782 }
783 DEBUGFS_ATTR_RO(margining_caps);
784 
785 static const struct {
786 	enum usb4_margining_lane lane;
787 	const char *name;
788 } lane_names[] = {
789 	{
790 		.lane = USB4_MARGINING_LANE_RX0,
791 		.name = "0",
792 	},
793 	{
794 		.lane = USB4_MARGINING_LANE_RX1,
795 		.name = "1",
796 	},
797 	{
798 		.lane = USB4_MARGINING_LANE_RX2,
799 		.name = "2",
800 	},
801 	{
802 		.lane = USB4_MARGINING_LANE_ALL,
803 		.name = "all",
804 	},
805 };
806 
807 static ssize_t
808 margining_lanes_write(struct file *file, const char __user *user_buf,
809 		      size_t count, loff_t *ppos)
810 {
811 	struct seq_file *s = file->private_data;
812 	struct tb_margining *margining = s->private;
813 	struct tb_port *port = margining->port;
814 	struct tb *tb = port->sw->tb;
815 	int lane = -1;
816 	char *buf;
817 
818 	buf = validate_and_copy_from_user(user_buf, &count);
819 	if (IS_ERR(buf))
820 		return PTR_ERR(buf);
821 
822 	buf[count - 1] = '\0';
823 
824 	for (int i = 0; i < ARRAY_SIZE(lane_names); i++) {
825 		if (!strcmp(buf, lane_names[i].name)) {
826 			lane = lane_names[i].lane;
827 			break;
828 		}
829 	}
830 
831 	free_page((unsigned long)buf);
832 
833 	if (lane == -1)
834 		return -EINVAL;
835 
836 	scoped_cond_guard(mutex_intr, return -ERESTARTSYS, &tb->lock) {
837 		if (lane == USB4_MARGINING_LANE_ALL && !all_lanes(margining))
838 			return -EINVAL;
839 		/*
840 		 * Enabling on RX2 requires that it is supported by the
841 		 * USB4 port.
842 		 */
843 		if (lane == USB4_MARGINING_LANE_RX2 && !margining->asym_rx)
844 			return -EINVAL;
845 
846 		margining->lanes = lane;
847 	}
848 
849 	return count;
850 }
851 
852 static int margining_lanes_show(struct seq_file *s, void *not_used)
853 {
854 	struct tb_margining *margining = s->private;
855 	struct tb_port *port = margining->port;
856 	struct tb *tb = port->sw->tb;
857 
858 	scoped_cond_guard(mutex_intr, return -ERESTARTSYS, &tb->lock) {
859 		for (int i = 0; i < ARRAY_SIZE(lane_names); i++) {
860 			if (lane_names[i].lane == USB4_MARGINING_LANE_ALL &&
861 			    !all_lanes(margining))
862 				continue;
863 			if (lane_names[i].lane == USB4_MARGINING_LANE_RX2 &&
864 			    !margining->asym_rx)
865 				continue;
866 
867 			if (i != 0)
868 				seq_putc(s, ' ');
869 
870 			if (lane_names[i].lane == margining->lanes)
871 				seq_printf(s, "[%s]", lane_names[i].name);
872 			else
873 				seq_printf(s, "%s", lane_names[i].name);
874 		}
875 		seq_puts(s, "\n");
876 	}
877 
878 	return 0;
879 }
880 DEBUGFS_ATTR_RW(margining_lanes);
881 
882 static ssize_t
883 margining_voltage_time_offset_write(struct file *file,
884 				    const char __user *user_buf,
885 				    size_t count, loff_t *ppos)
886 {
887 	struct seq_file *s = file->private_data;
888 	struct tb_margining *margining = s->private;
889 	struct tb *tb = margining->port->sw->tb;
890 	unsigned int max_margin;
891 	unsigned int val;
892 	int ret;
893 
894 	ret = kstrtouint_from_user(user_buf, count, 10, &val);
895 	if (ret)
896 		return ret;
897 
898 	scoped_cond_guard(mutex_intr, return -ERESTARTSYS, &tb->lock) {
899 		if (!margining->software)
900 			return -EOPNOTSUPP;
901 
902 		if (margining->time)
903 			max_margin = margining->time_steps;
904 		else
905 			if (margining->optional_voltage_offset_range)
906 				max_margin = margining->voltage_steps_optional_range;
907 			else
908 				max_margin = margining->voltage_steps;
909 
910 		margining->voltage_time_offset = clamp(val, 0, max_margin);
911 	}
912 
913 	return count;
914 }
915 
916 static int margining_voltage_time_offset_show(struct seq_file *s,
917 					      void *not_used)
918 {
919 	const struct tb_margining *margining = s->private;
920 	struct tb *tb = margining->port->sw->tb;
921 
922 	scoped_cond_guard(mutex_intr, return -ERESTARTSYS, &tb->lock) {
923 		if (!margining->software)
924 			return -EOPNOTSUPP;
925 
926 		seq_printf(s, "%d\n", margining->voltage_time_offset);
927 	}
928 
929 	return 0;
930 }
931 DEBUGFS_ATTR_RW(margining_voltage_time_offset);
932 
933 static ssize_t
934 margining_error_counter_write(struct file *file, const char __user *user_buf,
935 			      size_t count, loff_t *ppos)
936 {
937 	enum usb4_margin_sw_error_counter error_counter;
938 	struct seq_file *s = file->private_data;
939 	struct tb_margining *margining = s->private;
940 	struct tb *tb = margining->port->sw->tb;
941 	char *buf;
942 
943 	buf = validate_and_copy_from_user(user_buf, &count);
944 	if (IS_ERR(buf))
945 		return PTR_ERR(buf);
946 
947 	buf[count - 1] = '\0';
948 
949 	if (!strcmp(buf, "nop"))
950 		error_counter = USB4_MARGIN_SW_ERROR_COUNTER_NOP;
951 	else if (!strcmp(buf, "clear"))
952 		error_counter = USB4_MARGIN_SW_ERROR_COUNTER_CLEAR;
953 	else if (!strcmp(buf, "start"))
954 		error_counter = USB4_MARGIN_SW_ERROR_COUNTER_START;
955 	else if (!strcmp(buf, "stop"))
956 		error_counter = USB4_MARGIN_SW_ERROR_COUNTER_STOP;
957 	else
958 		return -EINVAL;
959 
960 	scoped_cond_guard(mutex_intr, return -ERESTARTSYS, &tb->lock) {
961 		if (!margining->software)
962 			return -EOPNOTSUPP;
963 
964 		margining->error_counter = error_counter;
965 	}
966 
967 	return count;
968 }
969 
970 static int margining_error_counter_show(struct seq_file *s, void *not_used)
971 {
972 	const struct tb_margining *margining = s->private;
973 	struct tb *tb = margining->port->sw->tb;
974 
975 	scoped_cond_guard(mutex_intr, return -ERESTARTSYS, &tb->lock) {
976 		if (!margining->software)
977 			return -EOPNOTSUPP;
978 
979 		switch (margining->error_counter) {
980 		case USB4_MARGIN_SW_ERROR_COUNTER_NOP:
981 			seq_puts(s, "[nop] clear start stop\n");
982 			break;
983 		case USB4_MARGIN_SW_ERROR_COUNTER_CLEAR:
984 			seq_puts(s, "nop [clear] start stop\n");
985 			break;
986 		case USB4_MARGIN_SW_ERROR_COUNTER_START:
987 			seq_puts(s, "nop clear [start] stop\n");
988 			break;
989 		case USB4_MARGIN_SW_ERROR_COUNTER_STOP:
990 			seq_puts(s, "nop clear start [stop]\n");
991 			break;
992 		}
993 	}
994 
995 	return 0;
996 }
997 DEBUGFS_ATTR_RW(margining_error_counter);
998 
999 static ssize_t
1000 margining_dwell_time_write(struct file *file, const char __user *user_buf,
1001 			   size_t count, loff_t *ppos)
1002 {
1003 	struct seq_file *s = file->private_data;
1004 	struct tb_margining *margining = s->private;
1005 	struct tb *tb = margining->port->sw->tb;
1006 	unsigned int val;
1007 	int ret;
1008 
1009 	ret = kstrtouint_from_user(user_buf, count, 10, &val);
1010 	if (ret)
1011 		return ret;
1012 
1013 	scoped_cond_guard(mutex_intr, return -ERESTARTSYS, &tb->lock) {
1014 		if (!margining->software)
1015 			return -EOPNOTSUPP;
1016 
1017 		margining->dwell_time = clamp(val, MIN_DWELL_TIME, MAX_DWELL_TIME);
1018 	}
1019 
1020 	return count;
1021 }
1022 
1023 static int margining_dwell_time_show(struct seq_file *s, void *not_used)
1024 {
1025 	struct tb_margining *margining = s->private;
1026 	struct tb *tb = margining->port->sw->tb;
1027 
1028 	scoped_cond_guard(mutex_intr, return -ERESTARTSYS, &tb->lock) {
1029 		if (!margining->software)
1030 			return -EOPNOTSUPP;
1031 
1032 		seq_printf(s, "%d\n", margining->dwell_time);
1033 	}
1034 
1035 	return 0;
1036 }
1037 DEBUGFS_ATTR_RW(margining_dwell_time);
1038 
1039 static ssize_t
1040 margining_optional_voltage_offset_write(struct file *file, const char __user *user_buf,
1041 					size_t count, loff_t *ppos)
1042 {
1043 	struct seq_file *s = file->private_data;
1044 	struct tb_margining *margining = s->private;
1045 	struct tb *tb = margining->port->sw->tb;
1046 	bool val;
1047 	int ret;
1048 
1049 	ret = kstrtobool_from_user(user_buf, count, &val);
1050 	if (ret)
1051 		return ret;
1052 
1053 	scoped_cond_guard(mutex_intr, return -ERESTARTSYS, &tb->lock) {
1054 		margining->optional_voltage_offset_range = val;
1055 	}
1056 
1057 	return count;
1058 }
1059 
1060 static int margining_optional_voltage_offset_show(struct seq_file *s,
1061 						  void *not_used)
1062 {
1063 	struct tb_margining *margining = s->private;
1064 	struct tb *tb = margining->port->sw->tb;
1065 
1066 	scoped_cond_guard(mutex_intr, return -ERESTARTSYS, &tb->lock) {
1067 		seq_printf(s, "%u\n", margining->optional_voltage_offset_range);
1068 	}
1069 
1070 	return 0;
1071 }
1072 DEBUGFS_ATTR_RW(margining_optional_voltage_offset);
1073 
1074 static ssize_t margining_mode_write(struct file *file,
1075 				   const char __user *user_buf,
1076 				   size_t count, loff_t *ppos)
1077 {
1078 	struct seq_file *s = file->private_data;
1079 	struct tb_margining *margining = s->private;
1080 	struct tb *tb = margining->port->sw->tb;
1081 	int ret = 0;
1082 	char *buf;
1083 
1084 	buf = validate_and_copy_from_user(user_buf, &count);
1085 	if (IS_ERR(buf))
1086 		return PTR_ERR(buf);
1087 
1088 	buf[count - 1] = '\0';
1089 
1090 	if (mutex_lock_interruptible(&tb->lock)) {
1091 		ret = -ERESTARTSYS;
1092 		goto out_free;
1093 	}
1094 
1095 	if (!strcmp(buf, "software")) {
1096 		if (supports_software(margining))
1097 			margining->software = true;
1098 		else
1099 			ret = -EINVAL;
1100 	} else if (!strcmp(buf, "hardware")) {
1101 		if (supports_hardware(margining))
1102 			margining->software = false;
1103 		else
1104 			ret = -EINVAL;
1105 	} else {
1106 		ret = -EINVAL;
1107 	}
1108 
1109 	mutex_unlock(&tb->lock);
1110 
1111 out_free:
1112 	free_page((unsigned long)buf);
1113 	return ret ? ret : count;
1114 }
1115 
1116 static int margining_mode_show(struct seq_file *s, void *not_used)
1117 {
1118 	struct tb_margining *margining = s->private;
1119 	struct tb *tb = margining->port->sw->tb;
1120 	const char *space = "";
1121 
1122 	if (mutex_lock_interruptible(&tb->lock))
1123 		return -ERESTARTSYS;
1124 
1125 	if (supports_software(margining)) {
1126 		if (margining->software)
1127 			seq_puts(s, "[software]");
1128 		else
1129 			seq_puts(s, "software");
1130 		space = " ";
1131 	}
1132 	if (supports_hardware(margining)) {
1133 		if (margining->software)
1134 			seq_printf(s, "%shardware", space);
1135 		else
1136 			seq_printf(s, "%s[hardware]", space);
1137 	}
1138 
1139 	mutex_unlock(&tb->lock);
1140 
1141 	seq_puts(s, "\n");
1142 	return 0;
1143 }
1144 DEBUGFS_ATTR_RW(margining_mode);
1145 
1146 static int margining_run_sw(struct tb_margining *margining,
1147 			    struct usb4_port_margining_params *params)
1148 {
1149 	u32 nsamples = margining->dwell_time / DWELL_SAMPLE_INTERVAL;
1150 	int ret, i;
1151 
1152 	ret = usb4_port_sw_margin(margining->port, margining->target, margining->index,
1153 				  params, margining->results);
1154 	if (ret)
1155 		goto out_stop;
1156 
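	/*
	 * Poll the error counters every DWELL_SAMPLE_INTERVAL ms until the
	 * dwell time has elapsed or the selected lane(s) report errors.
	 */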
1157 	for (i = 0; i <= nsamples; i++) {
1158 		u32 errors = 0;
1159 
1160 		ret = usb4_port_sw_margin_errors(margining->port, margining->target,
1161 						 margining->index, &margining->results[1]);
1162 		if (ret)
1163 			break;
1164 
1165 		if (margining->lanes == USB4_MARGINING_LANE_RX0)
1166 			errors = FIELD_GET(USB4_MARGIN_SW_ERR_COUNTER_LANE_0_MASK,
1167 					   margining->results[1]);
1168 		else if (margining->lanes == USB4_MARGINING_LANE_RX1)
1169 			errors = FIELD_GET(USB4_MARGIN_SW_ERR_COUNTER_LANE_1_MASK,
1170 					   margining->results[1]);
1171 		else if (margining->lanes == USB4_MARGINING_LANE_RX2)
1172 			errors = FIELD_GET(USB4_MARGIN_SW_ERR_COUNTER_LANE_2_MASK,
1173 					   margining->results[1]);
1174 		else if (margining->lanes == USB4_MARGINING_LANE_ALL)
1175 			errors = margining->results[1];
1176 
1177 		/* Any errors stop the test */
1178 		if (errors)
1179 			break;
1180 
1181 		fsleep(DWELL_SAMPLE_INTERVAL * USEC_PER_MSEC);
1182 	}
1183 
1184 out_stop:
1185 	/*
1186 	 * Stop the counters but don't clear them to allow the
1187 	 * different error counter configurations.
1188 	 */
1189 	margining_modify_error_counter(margining, margining->lanes,
1190 				       USB4_MARGIN_SW_ERROR_COUNTER_STOP);
1191 	return ret;
1192 }
1193 
1194 static int validate_margining(struct tb_margining *margining)
1195 {
1196 	/*
1197 	 * For running on RX2 the link must be asymmetric with 3
1198 	 * receivers. Because this can change dynamically, check it
1199 	 * here before we start the margining and report an error if
1200 	 * expectations are not met.
1201 	 */
1202 	if (margining->lanes == USB4_MARGINING_LANE_RX2) {
1203 		int ret;
1204 
1205 		ret = tb_port_get_link_width(margining->port);
1206 		if (ret < 0)
1207 			return ret;
1208 		if (ret != TB_LINK_WIDTH_ASYM_RX) {
1209 			tb_port_warn(margining->port, "link is %s expected %s",
1210 				     tb_width_name(ret),
1211 				     tb_width_name(TB_LINK_WIDTH_ASYM_RX));
1212 			return -EINVAL;
1213 		}
1214 	}
1215 
1216 	return 0;
1217 }
1218 
1219 static int margining_run_write(void *data, u64 val)
1220 {
1221 	struct tb_margining *margining = data;
1222 	struct tb_port *port = margining->port;
1223 	struct device *dev = margining->dev;
1224 	struct tb_switch *sw = port->sw;
1225 	struct tb_switch *down_sw;
1226 	struct tb *tb = sw->tb;
1227 	int ret, clx;
1228 
1229 	if (val != 1)
1230 		return -EINVAL;
1231 
1232 	pm_runtime_get_sync(dev);
1233 
1234 	if (mutex_lock_interruptible(&tb->lock)) {
1235 		ret = -ERESTARTSYS;
1236 		goto out_rpm_put;
1237 	}
1238 
1239 	ret = validate_margining(margining);
1240 	if (ret)
1241 		goto out_unlock;
1242 
1243 	if (tb_is_upstream_port(port))
1244 		down_sw = sw;
1245 	else if (port->remote)
1246 		down_sw = port->remote->sw;
1247 	else
1248 		down_sw = NULL;
1249 
1250 	if (down_sw) {
1251 		/*
1252 		 * CL states may interfere with lane margining so
1253 		 * disable them temporarily now.
1254 		 */
1255 		ret = tb_switch_clx_disable(down_sw);
1256 		if (ret < 0) {
1257 			tb_sw_warn(down_sw, "failed to disable CL states\n");
1258 			goto out_unlock;
1259 		}
1260 		clx = ret;
1261 	}
1262 
1263 	/* Clear the results */
1264 	memset(margining->results, 0, sizeof(margining->results));
1265 
1266 	if (margining->software) {
1267 		struct usb4_port_margining_params params = {
1268 			.error_counter = USB4_MARGIN_SW_ERROR_COUNTER_CLEAR,
1269 			.lanes = margining->lanes,
1270 			.time = margining->time,
1271 			.voltage_time_offset = margining->voltage_time_offset,
1272 			.right_high = margining->right_high,
1273 			.upper_eye = margining->upper_eye,
1274 			.optional_voltage_offset_range = margining->optional_voltage_offset_range,
1275 		};
1276 
1277 		tb_port_dbg(port,
1278 			    "running software %s lane margining for %s lanes %u\n",
1279 			    margining->time ? "time" : "voltage", dev_name(dev),
1280 			    margining->lanes);
1281 
1282 		ret = margining_run_sw(margining, &params);
1283 	} else {
1284 		struct usb4_port_margining_params params = {
1285 			.ber_level = margining->ber_level,
1286 			.lanes = margining->lanes,
1287 			.time = margining->time,
1288 			.right_high = margining->right_high,
1289 			.upper_eye = margining->upper_eye,
1290 			.optional_voltage_offset_range = margining->optional_voltage_offset_range,
1291 		};
1292 
1293 		tb_port_dbg(port,
1294 			    "running hardware %s lane margining for %s lanes %u\n",
1295 			    margining->time ? "time" : "voltage", dev_name(dev),
1296 			    margining->lanes);
1297 
1298 		ret = usb4_port_hw_margin(port, margining->target, margining->index, &params,
1299 					  margining->results, ARRAY_SIZE(margining->results));
1300 	}
1301 
1302 	if (down_sw)
1303 		tb_switch_clx_enable(down_sw, clx);
1304 out_unlock:
1305 	mutex_unlock(&tb->lock);
1306 out_rpm_put:
1307 	pm_runtime_mark_last_busy(dev);
1308 	pm_runtime_put_autosuspend(dev);
1309 
1310 	return ret;
1311 }
1312 DEFINE_DEBUGFS_ATTRIBUTE(margining_run_fops, NULL, margining_run_write,
1313 			 "%llu\n");
1314 
1315 static ssize_t margining_results_write(struct file *file,
1316 				       const char __user *user_buf,
1317 				       size_t count, loff_t *ppos)
1318 {
1319 	struct seq_file *s = file->private_data;
1320 	struct tb_margining *margining = s->private;
1321 	struct tb *tb = margining->port->sw->tb;
1322 
1323 	if (mutex_lock_interruptible(&tb->lock))
1324 		return -ERESTARTSYS;
1325 
1326 	/* Just clear the results */
1327 	memset(margining->results, 0, sizeof(margining->results));
1328 
1329 	if (margining->software) {
1330 		/* Clear the error counters */
1331 		margining_modify_error_counter(margining,
1332 					       USB4_MARGINING_LANE_ALL,
1333 					       USB4_MARGIN_SW_ERROR_COUNTER_CLEAR);
1334 	}
1335 
1336 	mutex_unlock(&tb->lock);
1337 	return count;
1338 }
1339 
1340 static void voltage_margin_show(struct seq_file *s,
1341 				const struct tb_margining *margining, u8 val)
1342 {
1343 	unsigned int tmp, voltage;
1344 
1345 	tmp = FIELD_GET(USB4_MARGIN_HW_RES_MARGIN_MASK, val);
1346 	voltage = tmp * margining->max_voltage_offset / margining->voltage_steps;
1347 	seq_printf(s, "%u mV (%u)", voltage, tmp);
1348 	if (val & USB4_MARGIN_HW_RES_EXCEEDS)
1349 		seq_puts(s, " exceeds maximum");
1350 	seq_puts(s, "\n");
1351 	if (margining->optional_voltage_offset_range)
1352 		seq_puts(s, " optional voltage offset range enabled\n");
1353 }
1354 
1355 static void time_margin_show(struct seq_file *s,
1356 			     const struct tb_margining *margining, u8 val)
1357 {
1358 	unsigned int tmp, interval;
1359 
1360 	tmp = FIELD_GET(USB4_MARGIN_HW_RES_MARGIN_MASK, val);
1361 	interval = tmp * margining->max_time_offset / margining->time_steps;
1362 	seq_printf(s, "%u mUI (%u)", interval, tmp);
1363 	if (val & USB4_MARGIN_HW_RES_EXCEEDS)
1364 		seq_puts(s, " exceeds maximum");
1365 	seq_puts(s, "\n");
1366 }
1367 
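/*
 * Hardware margining results for lanes 0 and 1 are packed into the second
 * result dword (lane 1 in the upper half) and lane 2 into the third one.
 * Within each lane value the right/high margin occupies the low bits and
 * the left/low margin follows at USB4_MARGIN_HW_RES_LL_SHIFT.
 */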
1368 static u8 margining_hw_result_val(const u32 *results,
1369 				  enum usb4_margining_lane lane,
1370 				  bool right_high)
1371 {
1372 	u32 val;
1373 
1374 	if (lane == USB4_MARGINING_LANE_RX0)
1375 		val = results[1];
1376 	else if (lane == USB4_MARGINING_LANE_RX1)
1377 		val = results[1] >> USB4_MARGIN_HW_RES_LANE_SHIFT;
1378 	else if (lane == USB4_MARGINING_LANE_RX2)
1379 		val = results[2];
1380 	else
1381 		val = 0;
1382 
1383 	return right_high ? val : val >> USB4_MARGIN_HW_RES_LL_SHIFT;
1384 }
1385 
1386 static void margining_hw_result_format(struct seq_file *s,
1387 				       const struct tb_margining *margining,
1388 				       enum usb4_margining_lane lane)
1389 {
1390 	u8 val;
1391 
1392 	if (margining->time) {
1393 		val = margining_hw_result_val(margining->results, lane, true);
1394 		seq_printf(s, "# lane %u right time margin: ", lane);
1395 		time_margin_show(s, margining, val);
1396 		val = margining_hw_result_val(margining->results, lane, false);
1397 		seq_printf(s, "# lane %u left time margin: ", lane);
1398 		time_margin_show(s, margining, val);
1399 	} else {
1400 		val = margining_hw_result_val(margining->results, lane, true);
1401 		seq_printf(s, "# lane %u high voltage margin: ", lane);
1402 		voltage_margin_show(s, margining, val);
1403 		val = margining_hw_result_val(margining->results, lane, false);
1404 		seq_printf(s, "# lane %u low voltage margin: ", lane);
1405 		voltage_margin_show(s, margining, val);
1406 	}
1407 }
1408 
1409 static int margining_results_show(struct seq_file *s, void *not_used)
1410 {
1411 	struct tb_margining *margining = s->private;
1412 	struct tb *tb = margining->port->sw->tb;
1413 
1414 	if (mutex_lock_interruptible(&tb->lock))
1415 		return -ERESTARTSYS;
1416 
1417 	/* Dump the raw results first */
1418 	seq_printf(s, "0x%08x\n", margining->results[0]);
1419 	/* Only the hardware margining fills in the additional result dwords */
1420 	if (!margining->software) {
1421 		for (int i = 1; i < ARRAY_SIZE(margining->results); i++)
1422 			seq_printf(s, "0x%08x\n", margining->results[i]);
1423 
1424 		if (margining->lanes == USB4_MARGINING_LANE_ALL) {
1425 			margining_hw_result_format(s, margining,
1426 						   USB4_MARGINING_LANE_RX0);
1427 			margining_hw_result_format(s, margining,
1428 						   USB4_MARGINING_LANE_RX1);
1429 			if (margining->asym_rx)
1430 				margining_hw_result_format(s, margining,
1431 						USB4_MARGINING_LANE_RX2);
1432 		} else {
1433 			margining_hw_result_format(s, margining,
1434 						   margining->lanes);
1435 		}
1436 	} else {
1437 		u32 lane_errors, result;
1438 
1439 		seq_printf(s, "0x%08x\n", margining->results[1]);
1440 
1441 		result = FIELD_GET(USB4_MARGIN_SW_LANES_MASK, margining->results[0]);
1442 		if (result == USB4_MARGINING_LANE_RX0 ||
1443 		    result == USB4_MARGINING_LANE_ALL) {
1444 			lane_errors = FIELD_GET(USB4_MARGIN_SW_ERR_COUNTER_LANE_0_MASK,
1445 						margining->results[1]);
1446 			seq_printf(s, "# lane 0 errors: %u\n", lane_errors);
1447 		}
1448 		if (result == USB4_MARGINING_LANE_RX1 ||
1449 		    result == USB4_MARGINING_LANE_ALL) {
1450 			lane_errors = FIELD_GET(USB4_MARGIN_SW_ERR_COUNTER_LANE_1_MASK,
1451 						margining->results[1]);
1452 			seq_printf(s, "# lane 1 errors: %u\n", lane_errors);
1453 		}
1454 		if (margining->asym_rx &&
1455 		    (result == USB4_MARGINING_LANE_RX2 ||
1456 		     result == USB4_MARGINING_LANE_ALL)) {
1457 			lane_errors = FIELD_GET(USB4_MARGIN_SW_ERR_COUNTER_LANE_2_MASK,
1458 						margining->results[1]);
1459 			seq_printf(s, "# lane 2 errors: %u\n", lane_errors);
1460 		}
1461 	}
1462 
1463 	mutex_unlock(&tb->lock);
1464 	return 0;
1465 }
1466 DEBUGFS_ATTR_RW(margining_results);
1467 
1468 static ssize_t margining_test_write(struct file *file,
1469 				    const char __user *user_buf,
1470 				    size_t count, loff_t *ppos)
1471 {
1472 	struct seq_file *s = file->private_data;
1473 	struct tb_margining *margining = s->private;
1474 	struct tb *tb = margining->port->sw->tb;
1475 	int ret = 0;
1476 	char *buf;
1477 
1478 	buf = validate_and_copy_from_user(user_buf, &count);
1479 	if (IS_ERR(buf))
1480 		return PTR_ERR(buf);
1481 
1482 	buf[count - 1] = '\0';
1483 
1484 	if (mutex_lock_interruptible(&tb->lock)) {
1485 		ret = -ERESTARTSYS;
1486 		goto out_free;
1487 	}
1488 
1489 	if (!strcmp(buf, "time") && supports_time(margining))
1490 		margining->time = true;
1491 	else if (!strcmp(buf, "voltage"))
1492 		margining->time = false;
1493 	else
1494 		ret = -EINVAL;
1495 
1496 	mutex_unlock(&tb->lock);
1497 
1498 out_free:
1499 	free_page((unsigned long)buf);
1500 	return ret ? ret : count;
1501 }
1502 
1503 static int margining_test_show(struct seq_file *s, void *not_used)
1504 {
1505 	struct tb_margining *margining = s->private;
1506 	struct tb *tb = margining->port->sw->tb;
1507 
1508 	if (mutex_lock_interruptible(&tb->lock))
1509 		return -ERESTARTSYS;
1510 
1511 	if (supports_time(margining)) {
1512 		if (margining->time)
1513 			seq_puts(s, "voltage [time]\n");
1514 		else
1515 			seq_puts(s, "[voltage] time\n");
1516 	} else {
1517 		seq_puts(s, "[voltage]\n");
1518 	}
1519 
1520 	mutex_unlock(&tb->lock);
1521 	return 0;
1522 }
1523 DEBUGFS_ATTR_RW(margining_test);
1524 
1525 static ssize_t margining_margin_write(struct file *file,
1526 				    const char __user *user_buf,
1527 				    size_t count, loff_t *ppos)
1528 {
1529 	struct seq_file *s = file->private_data;
1530 	struct tb_margining *margining = s->private;
1531 	struct tb *tb = margining->port->sw->tb;
1532 	int ret = 0;
1533 	char *buf;
1534 
1535 	buf = validate_and_copy_from_user(user_buf, &count);
1536 	if (IS_ERR(buf))
1537 		return PTR_ERR(buf);
1538 
1539 	buf[count - 1] = '\0';
1540 
1541 	if (mutex_lock_interruptible(&tb->lock)) {
1542 		ret = -ERESTARTSYS;
1543 		goto out_free;
1544 	}
1545 
1546 	if (margining->time) {
1547 		if (!strcmp(buf, "left"))
1548 			margining->right_high = false;
1549 		else if (!strcmp(buf, "right"))
1550 			margining->right_high = true;
1551 		else
1552 			ret = -EINVAL;
1553 	} else {
1554 		if (!strcmp(buf, "low"))
1555 			margining->right_high = false;
1556 		else if (!strcmp(buf, "high"))
1557 			margining->right_high = true;
1558 		else
1559 			ret = -EINVAL;
1560 	}
1561 
1562 	mutex_unlock(&tb->lock);
1563 
1564 out_free:
1565 	free_page((unsigned long)buf);
1566 	return ret ? ret : count;
1567 }
1568 
1569 static int margining_margin_show(struct seq_file *s, void *not_used)
1570 {
1571 	struct tb_margining *margining = s->private;
1572 	struct tb *tb = margining->port->sw->tb;
1573 
1574 	if (mutex_lock_interruptible(&tb->lock))
1575 		return -ERESTARTSYS;
1576 
1577 	if (margining->time) {
1578 		if (margining->right_high)
1579 			seq_puts(s, "left [right]\n");
1580 		else
1581 			seq_puts(s, "[left] right\n");
1582 	} else {
1583 		if (margining->right_high)
1584 			seq_puts(s, "low [high]\n");
1585 		else
1586 			seq_puts(s, "[low] high\n");
1587 	}
1588 
1589 	mutex_unlock(&tb->lock);
1590 	return 0;
1591 }
1592 DEBUGFS_ATTR_RW(margining_margin);
1593 
1594 static ssize_t margining_eye_write(struct file *file,
1595 				   const char __user *user_buf,
1596 				   size_t count, loff_t *ppos)
1597 {
1598 	struct seq_file *s = file->private_data;
1599 	struct tb_port *port = s->private;
1600 	struct usb4_port *usb4 = port->usb4;
1601 	struct tb *tb = port->sw->tb;
1602 	int ret = 0;
1603 	char *buf;
1604 
1605 	buf = validate_and_copy_from_user(user_buf, &count);
1606 	if (IS_ERR(buf))
1607 		return PTR_ERR(buf);
1608 
1609 	buf[count - 1] = '\0';
1610 
1611 	scoped_cond_guard(mutex_intr, ret = -ERESTARTSYS, &tb->lock) {
1612 		if (!strcmp(buf, "lower"))
1613 			usb4->margining->upper_eye = false;
1614 		else if (!strcmp(buf, "upper"))
1615 			usb4->margining->upper_eye = true;
1616 		else
1617 			ret = -EINVAL;
1618 	}
1619 
1620 	free_page((unsigned long)buf);
1621 	return ret ? ret : count;
1622 }
1623 
1624 static int margining_eye_show(struct seq_file *s, void *not_used)
1625 {
1626 	struct tb_port *port = s->private;
1627 	struct usb4_port *usb4 = port->usb4;
1628 	struct tb *tb = port->sw->tb;
1629 
1630 	scoped_guard(mutex_intr, &tb->lock) {
1631 		if (usb4->margining->upper_eye)
1632 			seq_puts(s, "lower [upper]\n");
1633 		else
1634 			seq_puts(s, "[lower] upper\n");
1635 
1636 		return 0;
1637 	}
1638 
1639 	return -ERESTARTSYS;
1640 }
1641 DEBUGFS_ATTR_RW(margining_eye);
1642 
1643 static struct tb_margining *margining_alloc(struct tb_port *port,
1644 					    struct device *dev,
1645 					    enum usb4_sb_target target,
1646 					    u8 index, struct dentry *parent)
1647 {
1648 	struct tb_margining *margining;
1649 	struct dentry *dir;
1650 	unsigned int val;
1651 	int ret;
1652 
1653 	ret = tb_port_get_link_generation(port);
1654 	if (ret < 0) {
1655 		tb_port_warn(port, "failed to read link generation\n");
1656 		return NULL;
1657 	}
1658 
1659 	margining = kzalloc(sizeof(*margining), GFP_KERNEL);
1660 	if (!margining)
1661 		return NULL;
1662 
1663 	margining->port = port;
1664 	margining->target = target;
1665 	margining->index = index;
1666 	margining->dev = dev;
1667 	margining->gen = ret;
1668 	margining->asym_rx = tb_port_width_supported(port, TB_LINK_WIDTH_ASYM_RX);
1669 
1670 	ret = usb4_port_margining_caps(port, target, index, margining->caps,
1671 				       ARRAY_SIZE(margining->caps));
1672 	if (ret) {
1673 		kfree(margining);
1674 		return NULL;
1675 	}
1676 
1677 	/* Set the initial mode */
1678 	if (supports_software(margining))
1679 		margining->software = true;
1680 
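	/*
	 * The maximum voltage offset is encoded as 74 mV plus 2 mV times the
	 * capability field value; only the dword holding the fields differs
	 * between link generations.
	 */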
1681 	if (margining->gen < 4) {
1682 		val = FIELD_GET(USB4_MARGIN_CAP_0_VOLTAGE_STEPS_MASK, margining->caps[0]);
1683 		margining->voltage_steps = val;
1684 		val = FIELD_GET(USB4_MARGIN_CAP_0_MAX_VOLTAGE_OFFSET_MASK, margining->caps[0]);
1685 		margining->max_voltage_offset = 74 + val * 2;
1686 	} else {
1687 		val = FIELD_GET(USB4_MARGIN_CAP_2_VOLTAGE_STEPS_MASK, margining->caps[2]);
1688 		margining->voltage_steps = val;
1689 		val = FIELD_GET(USB4_MARGIN_CAP_2_MAX_VOLTAGE_OFFSET_MASK, margining->caps[2]);
1690 		margining->max_voltage_offset = 74 + val * 2;
1691 	}
1692 
1693 	if (supports_optional_voltage_offset_range(margining)) {
1694 		val = FIELD_GET(USB4_MARGIN_CAP_0_VOLT_STEPS_OPT_MASK,
1695 				margining->caps[0]);
1696 		margining->voltage_steps_optional_range = val;
1697 		val = FIELD_GET(USB4_MARGIN_CAP_1_MAX_VOLT_OFS_OPT_MASK,
1698 				margining->caps[1]);
1699 		margining->max_voltage_offset_optional_range = 74 + val * 2;
1700 	}
1701 
1702 	if (supports_time(margining)) {
1703 		val = FIELD_GET(USB4_MARGIN_CAP_1_TIME_STEPS_MASK, margining->caps[1]);
1704 		margining->time_steps = val;
1705 		val = FIELD_GET(USB4_MARGIN_CAP_1_TIME_OFFSET_MASK, margining->caps[1]);
1706 		/*
1707 		 * Store the maximum time offset in mUI (milli unit
1708 		 * intervals) so the value stays an integer.
1709 		 */
1710 		margining->max_time_offset = 200 + 10 * val;
1711 	}
1712 
1713 	dir = debugfs_create_dir("margining", parent);
1714 	if (supports_hardware(margining)) {
1715 		val = FIELD_GET(USB4_MARGIN_CAP_1_MIN_BER_MASK, margining->caps[1]);
1716 		margining->min_ber_level = val;
1717 		val = FIELD_GET(USB4_MARGIN_CAP_1_MAX_BER_MASK, margining->caps[1]);
1718 		margining->max_ber_level = val;
1719 
1720 		/* Set the default to minimum */
1721 		margining->ber_level = margining->min_ber_level;
1722 
1723 		debugfs_create_file("ber_level_contour", 0400, dir, margining,
1724 				    &margining_ber_level_fops);
1725 	}
1726 	debugfs_create_file("caps", 0400, dir, margining, &margining_caps_fops);
1727 	debugfs_create_file("lanes", 0600, dir, margining, &margining_lanes_fops);
1728 	debugfs_create_file("mode", 0600, dir, margining, &margining_mode_fops);
1729 	debugfs_create_file("run", 0600, dir, margining, &margining_run_fops);
1730 	debugfs_create_file("results", 0600, dir, margining,
1731 			    &margining_results_fops);
1732 	debugfs_create_file("test", 0600, dir, margining, &margining_test_fops);
1733 	if (independent_voltage_margins(margining) == USB4_MARGIN_CAP_VOLTAGE_INDP_GEN_2_3_HL ||
1734 	    (supports_time(margining) &&
1735 	     independent_time_margins(margining) == USB4_MARGIN_CAP_TIME_INDP_GEN_2_3_LR))
1736 		debugfs_create_file("margin", 0600, dir, margining, &margining_margin_fops);
1737 
1738 	margining->error_counter = USB4_MARGIN_SW_ERROR_COUNTER_CLEAR;
1739 	margining->dwell_time = MIN_DWELL_TIME;
1740 
1741 	if (supports_optional_voltage_offset_range(margining))
1742 		debugfs_create_file("optional_voltage_offset", DEBUGFS_MODE, dir, margining,
1743 				    &margining_optional_voltage_offset_fops);
1744 
1745 	if (supports_software(margining)) {
1746 		debugfs_create_file("voltage_time_offset", DEBUGFS_MODE, dir, margining,
1747 				    &margining_voltage_time_offset_fops);
1748 		debugfs_create_file("error_counter", DEBUGFS_MODE, dir, margining,
1749 				    &margining_error_counter_fops);
1750 		debugfs_create_file("dwell_time", DEBUGFS_MODE, dir, margining,
1751 				    &margining_dwell_time_fops);
1752 	}
1753 
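	/* Gen 4 links have two eyes (PAM-3 signaling) so expose upper/lower eye selection */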
1754 	if (margining->gen >= 4)
1755 		debugfs_create_file("eye", 0600, dir, port, &margining_eye_fops);
1756 
1757 	return margining;
1758 }
1759 
1760 static void margining_port_init(struct tb_port *port)
1761 {
1762 	struct dentry *parent;
1763 	char dir_name[10];
1764 
1765 	if (!port->usb4)
1766 		return;
1767 
1768 	snprintf(dir_name, sizeof(dir_name), "port%d", port->port);
1769 	parent = debugfs_lookup(dir_name, port->sw->debugfs_dir);
1770 	port->usb4->margining = margining_alloc(port, &port->usb4->dev,
1771 						USB4_SB_TARGET_ROUTER, 0,
1772 						parent);
1773 }
1774 
1775 static void margining_port_remove(struct tb_port *port)
1776 {
1777 	struct dentry *parent;
1778 	char dir_name[10];
1779 
1780 	if (!port->usb4)
1781 		return;
1782 
1783 	snprintf(dir_name, sizeof(dir_name), "port%d", port->port);
1784 	parent = debugfs_lookup(dir_name, port->sw->debugfs_dir);
1785 	if (parent)
1786 		debugfs_lookup_and_remove("margining", parent);
1787 
1788 	kfree(port->usb4->margining);
1789 	port->usb4->margining = NULL;
1790 }
1791 
1792 static void margining_switch_init(struct tb_switch *sw)
1793 {
1794 	struct tb_port *upstream, *downstream;
1795 	struct tb_switch *parent_sw;
1796 	u64 route = tb_route(sw);
1797 
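	/* The host router (route 0) has no upstream link to margin */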
1798 	if (!route)
1799 		return;
1800 
1801 	upstream = tb_upstream_port(sw);
1802 	parent_sw = tb_switch_parent(sw);
1803 	downstream = tb_port_at(route, parent_sw);
1804 
1805 	margining_port_init(downstream);
1806 	margining_port_init(upstream);
1807 }
1808 
1809 static void margining_switch_remove(struct tb_switch *sw)
1810 {
1811 	struct tb_port *upstream, *downstream;
1812 	struct tb_switch *parent_sw;
1813 	u64 route = tb_route(sw);
1814 
1815 	if (!route)
1816 		return;
1817 
1818 	upstream = tb_upstream_port(sw);
1819 	parent_sw = tb_switch_parent(sw);
1820 	downstream = tb_port_at(route, parent_sw);
1821 
1822 	margining_port_remove(upstream);
1823 	margining_port_remove(downstream);
1824 }
1825 
1826 static void margining_xdomain_init(struct tb_xdomain *xd)
1827 {
1828 	struct tb_switch *parent_sw;
1829 	struct tb_port *downstream;
1830 
1831 	parent_sw = tb_xdomain_parent(xd);
1832 	downstream = tb_port_at(xd->route, parent_sw);
1833 
1834 	margining_port_init(downstream);
1835 }
1836 
1837 static void margining_xdomain_remove(struct tb_xdomain *xd)
1838 {
1839 	struct tb_switch *parent_sw;
1840 	struct tb_port *downstream;
1841 
1842 	parent_sw = tb_xdomain_parent(xd);
1843 	downstream = tb_port_at(xd->route, parent_sw);
1844 	margining_port_remove(downstream);
1845 }
1846 
1847 static void margining_retimer_init(struct tb_retimer *rt, struct dentry *debugfs_dir)
1848 {
1849 	rt->margining = margining_alloc(rt->port, &rt->dev,
1850 					USB4_SB_TARGET_RETIMER, rt->index,
1851 					debugfs_dir);
1852 }
1853 
1854 static void margining_retimer_remove(struct tb_retimer *rt)
1855 {
1856 	kfree(rt->margining);
1857 	rt->margining = NULL;
1858 }
1859 #else
1860 static inline void margining_switch_init(struct tb_switch *sw) { }
1861 static inline void margining_switch_remove(struct tb_switch *sw) { }
1862 static inline void margining_xdomain_init(struct tb_xdomain *xd) { }
1863 static inline void margining_xdomain_remove(struct tb_xdomain *xd) { }
1864 static inline void margining_retimer_init(struct tb_retimer *rt,
1865 					  struct dentry *debugfs_dir) { }
1866 static inline void margining_retimer_remove(struct tb_retimer *rt) { }
1867 #endif
1868 
1869 static int port_clear_all_counters(struct tb_port *port)
1870 {
1871 	u32 *buf;
1872 	int ret;
1873 
1874 	buf = kcalloc(COUNTER_SET_LEN * port->config.max_counters, sizeof(u32),
1875 		      GFP_KERNEL);
1876 	if (!buf)
1877 		return -ENOMEM;
1878 
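	/* buf is zero-filled by kcalloc() so this write clears every counter set */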
1879 	ret = tb_port_write(port, buf, TB_CFG_COUNTERS, 0,
1880 			    COUNTER_SET_LEN * port->config.max_counters);
1881 	kfree(buf);
1882 
1883 	return ret;
1884 }
1885 
1886 static ssize_t counters_write(struct file *file, const char __user *user_buf,
1887 			      size_t count, loff_t *ppos)
1888 {
1889 	struct seq_file *s = file->private_data;
1890 	struct tb_port *port = s->private;
1891 	struct tb_switch *sw = port->sw;
1892 	struct tb *tb = port->sw->tb;
1893 	char *buf;
1894 	int ret;
1895 
1896 	buf = validate_and_copy_from_user(user_buf, &count);
1897 	if (IS_ERR(buf))
1898 		return PTR_ERR(buf);
1899 
1900 	pm_runtime_get_sync(&sw->dev);
1901 
1902 	if (mutex_lock_interruptible(&tb->lock)) {
1903 		ret = -ERESTARTSYS;
1904 		goto out;
1905 	}
1906 
1907 	/* If only the delimiter was written, clear all counters in one shot */
1908 	if (buf[0] == '\n') {
1909 		ret = port_clear_all_counters(port);
1910 	} else {
1911 		char *line = buf;
1912 		u32 val, offset;
1913 
1914 		ret = -EINVAL;
1915 		while (parse_line(&line, &offset, &val, 1, 4)) {
1916 			ret = tb_port_write(port, &val, TB_CFG_COUNTERS,
1917 					    offset, 1);
1918 			if (ret)
1919 				break;
1920 		}
1921 	}
1922 
1923 	mutex_unlock(&tb->lock);
1924 
1925 out:
1926 	pm_runtime_mark_last_busy(&sw->dev);
1927 	pm_runtime_put_autosuspend(&sw->dev);
1928 	free_page((unsigned long)buf);
1929 
1930 	return ret < 0 ? ret : count;
1931 }
1932 
1933 static void cap_show_by_dw(struct seq_file *s, struct tb_switch *sw,
1934 			   struct tb_port *port, unsigned int cap,
1935 			   unsigned int offset, u8 cap_id, u8 vsec_id,
1936 			   int dwords)
1937 {
1938 	int i, ret;
1939 	u32 data;
1940 
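	/* Read one dword at a time so an inaccessible register does not hide the rest */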
1941 	for (i = 0; i < dwords; i++) {
1942 		if (port)
1943 			ret = tb_port_read(port, &data, TB_CFG_PORT, cap + offset + i, 1);
1944 		else
1945 			ret = tb_sw_read(sw, &data, TB_CFG_SWITCH, cap + offset + i, 1);
1946 		if (ret) {
1947 			seq_printf(s, "0x%04x <not accessible>\n", cap + offset + i);
1948 			continue;
1949 		}
1950 
1951 		seq_printf(s, "0x%04x %4d 0x%02x 0x%02x 0x%08x\n", cap + offset + i,
1952 			   offset + i, cap_id, vsec_id, data);
1953 	}
1954 }
1955 
1956 static void cap_show(struct seq_file *s, struct tb_switch *sw,
1957 		     struct tb_port *port, unsigned int cap, u8 cap_id,
1958 		     u8 vsec_id, int length)
1959 {
1960 	int ret, offset = 0;
1961 
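	/*
	 * Dump the capability in TB_MAX_CONFIG_RW_LENGTH chunks, falling
	 * back to dword-by-dword reads if a chunk is not accessible.
	 */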
1962 	while (length > 0) {
1963 		int i, dwords = min(length, TB_MAX_CONFIG_RW_LENGTH);
1964 		u32 data[TB_MAX_CONFIG_RW_LENGTH];
1965 
1966 		if (port)
1967 			ret = tb_port_read(port, data, TB_CFG_PORT, cap + offset,
1968 					   dwords);
1969 		else
1970 			ret = tb_sw_read(sw, data, TB_CFG_SWITCH, cap + offset, dwords);
1971 		if (ret) {
1972 			cap_show_by_dw(s, sw, port, cap, offset, cap_id, vsec_id, length);
1973 			return;
1974 		}
1975 
1976 		for (i = 0; i < dwords; i++) {
1977 			seq_printf(s, "0x%04x %4d 0x%02x 0x%02x 0x%08x\n",
1978 				   cap + offset + i, offset + i,
1979 				   cap_id, vsec_id, data[i]);
1980 		}
1981 
1982 		length -= dwords;
1983 		offset += dwords;
1984 	}
1985 }
1986 
1987 static void port_cap_show(struct tb_port *port, struct seq_file *s,
1988 			  unsigned int cap)
1989 {
1990 	struct tb_cap_any header;
1991 	u8 vsec_id = 0;
1992 	size_t length;
1993 	int ret;
1994 
1995 	ret = tb_port_read(port, &header, TB_CFG_PORT, cap, 1);
1996 	if (ret) {
1997 		seq_printf(s, "0x%04x <capability read failed>\n", cap);
1998 		return;
1999 	}
2000 
2001 	switch (header.basic.cap) {
2002 	case TB_PORT_CAP_PHY:
2003 		length = PORT_CAP_LANE_LEN;
2004 		break;
2005 
2006 	case TB_PORT_CAP_TIME1:
2007 		if (usb4_switch_version(port->sw) < 2)
2008 			length = PORT_CAP_TMU_V1_LEN;
2009 		else
2010 			length = PORT_CAP_TMU_V2_LEN;
2011 		break;
2012 
2013 	case TB_PORT_CAP_POWER:
2014 		length = PORT_CAP_POWER_LEN;
2015 		break;
2016 
2017 	case TB_PORT_CAP_ADAP:
2018 		if (tb_port_is_pcie_down(port) || tb_port_is_pcie_up(port)) {
2019 			if (usb4_switch_version(port->sw) < 2)
2020 				length = PORT_CAP_V1_PCIE_LEN;
2021 			else
2022 				length = PORT_CAP_V2_PCIE_LEN;
2023 		} else if (tb_port_is_dpin(port)) {
2024 			if (usb4_switch_version(port->sw) < 2)
2025 				length = PORT_CAP_DP_V1_LEN;
2026 			else
2027 				length = PORT_CAP_DP_V2_LEN;
2028 		} else if (tb_port_is_dpout(port)) {
2029 			length = PORT_CAP_DP_V1_LEN;
2030 		} else if (tb_port_is_usb3_down(port) ||
2031 			   tb_port_is_usb3_up(port)) {
2032 			length = PORT_CAP_USB3_LEN;
2033 		} else {
2034 			seq_printf(s, "0x%04x <unsupported capability 0x%02x>\n",
2035 				   cap, header.basic.cap);
2036 			return;
2037 		}
2038 		break;
2039 
2040 	case TB_PORT_CAP_VSE:
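		/*
		 * A zero length in the short VSE header means the long
		 * format is used and the real length is in the next dword.
		 */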
2041 		if (!header.extended_short.length) {
2042 			ret = tb_port_read(port, (u32 *)&header + 1, TB_CFG_PORT,
2043 					   cap + 1, 1);
2044 			if (ret) {
2045 				seq_printf(s, "0x%04x <capability read failed>\n",
2046 					   cap + 1);
2047 				return;
2048 			}
2049 			length = header.extended_long.length;
2050 			vsec_id = header.extended_short.vsec_id;
2051 		} else {
2052 			length = header.extended_short.length;
2053 			vsec_id = header.extended_short.vsec_id;
2054 		}
2055 		break;
2056 
2057 	case TB_PORT_CAP_USB4:
2058 		length = PORT_CAP_USB4_LEN;
2059 		break;
2060 
2061 	default:
2062 		seq_printf(s, "0x%04x <unsupported capability 0x%02x>\n",
2063 			   cap, header.basic.cap);
2064 		return;
2065 	}
2066 
2067 	cap_show(s, NULL, port, cap, header.basic.cap, vsec_id, length);
2068 }
2069 
2070 static void port_caps_show(struct tb_port *port, struct seq_file *s)
2071 {
2072 	int cap;
2073 
2074 	cap = tb_port_next_cap(port, 0);
2075 	while (cap > 0) {
2076 		port_cap_show(port, s, cap);
2077 		cap = tb_port_next_cap(port, cap);
2078 	}
2079 }
2080 
2081 static int port_basic_regs_show(struct tb_port *port, struct seq_file *s)
2082 {
2083 	u32 data[PORT_CAP_BASIC_LEN];
2084 	int ret, i;
2085 
2086 	ret = tb_port_read(port, data, TB_CFG_PORT, 0, ARRAY_SIZE(data));
2087 	if (ret)
2088 		return ret;
2089 
2090 	for (i = 0; i < ARRAY_SIZE(data); i++)
2091 		seq_printf(s, "0x%04x %4d 0x00 0x00 0x%08x\n", i, i, data[i]);
2092 
2093 	return 0;
2094 }
2095 
2096 static int port_regs_show(struct seq_file *s, void *not_used)
2097 {
2098 	struct tb_port *port = s->private;
2099 	struct tb_switch *sw = port->sw;
2100 	struct tb *tb = sw->tb;
2101 	int ret;
2102 
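	/* Config spaces cannot be accessed while the router is runtime suspended */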
2103 	pm_runtime_get_sync(&sw->dev);
2104 
2105 	if (mutex_lock_interruptible(&tb->lock)) {
2106 		ret = -ERESTARTSYS;
2107 		goto out_rpm_put;
2108 	}
2109 
2110 	seq_puts(s, "# offset relative_offset cap_id vs_cap_id value\n");
2111 
2112 	ret = port_basic_regs_show(port, s);
2113 	if (ret)
2114 		goto out_unlock;
2115 
2116 	port_caps_show(port, s);
2117 
2118 out_unlock:
2119 	mutex_unlock(&tb->lock);
2120 out_rpm_put:
2121 	pm_runtime_mark_last_busy(&sw->dev);
2122 	pm_runtime_put_autosuspend(&sw->dev);
2123 
2124 	return ret;
2125 }
2126 DEBUGFS_ATTR_RW(port_regs);
2127 
2128 static void switch_cap_show(struct tb_switch *sw, struct seq_file *s,
2129 			    unsigned int cap)
2130 {
2131 	struct tb_cap_any header;
2132 	int ret, length;
2133 	u8 vsec_id = 0;
2134 
2135 	ret = tb_sw_read(sw, &header, TB_CFG_SWITCH, cap, 1);
2136 	if (ret) {
2137 		seq_printf(s, "0x%04x <capability read failed>\n", cap);
2138 		return;
2139 	}
2140 
2141 	if (header.basic.cap == TB_SWITCH_CAP_VSE) {
2142 		if (!header.extended_short.length) {
2143 			ret = tb_sw_read(sw, (u32 *)&header + 1, TB_CFG_SWITCH,
2144 					 cap + 1, 1);
2145 			if (ret) {
2146 				seq_printf(s, "0x%04x <capability read failed>\n",
2147 					   cap + 1);
2148 				return;
2149 			}
2150 			length = header.extended_long.length;
2151 		} else {
2152 			length = header.extended_short.length;
2153 		}
2154 		vsec_id = header.extended_short.vsec_id;
2155 	} else {
2156 		if (header.basic.cap == TB_SWITCH_CAP_TMU) {
2157 			length = SWITCH_CAP_TMU_LEN;
2158 		} else {
2159 			seq_printf(s, "0x%04x <unknown capability 0x%02x>\n",
2160 				   cap, header.basic.cap);
2161 			return;
2162 		}
2163 	}
2164 
2165 	cap_show(s, sw, NULL, cap, header.basic.cap, vsec_id, length);
2166 }
2167 
2168 static void switch_caps_show(struct tb_switch *sw, struct seq_file *s)
2169 {
2170 	int cap;
2171 
2172 	cap = tb_switch_next_cap(sw, 0);
2173 	while (cap > 0) {
2174 		switch_cap_show(sw, s, cap);
2175 		cap = tb_switch_next_cap(sw, cap);
2176 	}
2177 }
2178 
2179 static int switch_basic_regs_show(struct tb_switch *sw, struct seq_file *s)
2180 {
2181 	u32 data[SWITCH_CAP_BASIC_LEN];
2182 	size_t dwords;
2183 	int ret, i;
2184 
2185 	/* Only USB4 has the additional registers */
2186 	if (tb_switch_is_usb4(sw))
2187 		dwords = ARRAY_SIZE(data);
2188 	else
2189 		dwords = 5;
2190 
2191 	ret = tb_sw_read(sw, data, TB_CFG_SWITCH, 0, dwords);
2192 	if (ret)
2193 		return ret;
2194 
2195 	for (i = 0; i < dwords; i++)
2196 		seq_printf(s, "0x%04x %4d 0x00 0x00 0x%08x\n", i, i, data[i]);
2197 
2198 	return 0;
2199 }
2200 
2201 static int switch_regs_show(struct seq_file *s, void *not_used)
2202 {
2203 	struct tb_switch *sw = s->private;
2204 	struct tb *tb = sw->tb;
2205 	int ret;
2206 
2207 	pm_runtime_get_sync(&sw->dev);
2208 
2209 	if (mutex_lock_interruptible(&tb->lock)) {
2210 		ret = -ERESTARTSYS;
2211 		goto out_rpm_put;
2212 	}
2213 
2214 	seq_puts(s, "# offset relative_offset cap_id vs_cap_id value\n");
2215 
2216 	ret = switch_basic_regs_show(sw, s);
2217 	if (ret)
2218 		goto out_unlock;
2219 
2220 	switch_caps_show(sw, s);
2221 
2222 out_unlock:
2223 	mutex_unlock(&tb->lock);
2224 out_rpm_put:
2225 	pm_runtime_mark_last_busy(&sw->dev);
2226 	pm_runtime_put_autosuspend(&sw->dev);
2227 
2228 	return ret;
2229 }
2230 DEBUGFS_ATTR_RW(switch_regs);
2231 
2232 static int path_show_one(struct tb_port *port, struct seq_file *s, int hopid)
2233 {
2234 	u32 data[PATH_LEN];
2235 	int ret, i;
2236 
2237 	ret = tb_port_read(port, data, TB_CFG_HOPS, hopid * PATH_LEN,
2238 			   ARRAY_SIZE(data));
2239 	if (ret) {
2240 		seq_printf(s, "0x%04x <not accessible>\n", hopid * PATH_LEN);
2241 		return ret;
2242 	}
2243 
2244 	for (i = 0; i < ARRAY_SIZE(data); i++) {
2245 		seq_printf(s, "0x%04x %4d 0x%02x 0x%08x\n",
2246 			   hopid * PATH_LEN + i, i, hopid, data[i]);
2247 	}
2248 
2249 	return 0;
2250 }
2251 
2252 static int path_show(struct seq_file *s, void *not_used)
2253 {
2254 	struct tb_port *port = s->private;
2255 	struct tb_switch *sw = port->sw;
2256 	struct tb *tb = sw->tb;
2257 	int start, i, ret = 0;
2258 
2259 	pm_runtime_get_sync(&sw->dev);
2260 
2261 	if (mutex_lock_interruptible(&tb->lock)) {
2262 		ret = -ERESTARTSYS;
2263 		goto out_rpm_put;
2264 	}
2265 
2266 	seq_puts(s, "# offset relative_offset in_hop_id value\n");
2267 
2268 	/* NHI and lane adapters have an entry for path 0 */
2269 	if (tb_port_is_null(port) || tb_port_is_nhi(port)) {
2270 		ret = path_show_one(port, s, 0);
2271 		if (ret)
2272 			goto out_unlock;
2273 	}
2274 
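	/*
	 * Lane adapters reserve HopIDs below TB_PATH_MIN_HOPID whereas the
	 * NHI can use HopIDs starting from 1.
	 */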
2275 	start = tb_port_is_nhi(port) ? 1 : TB_PATH_MIN_HOPID;
2276 
2277 	for (i = start; i <= port->config.max_in_hop_id; i++) {
2278 		ret = path_show_one(port, s, i);
2279 		if (ret)
2280 			break;
2281 	}
2282 
2283 out_unlock:
2284 	mutex_unlock(&tb->lock);
2285 out_rpm_put:
2286 	pm_runtime_mark_last_busy(&sw->dev);
2287 	pm_runtime_put_autosuspend(&sw->dev);
2288 
2289 	return ret;
2290 }
2291 DEBUGFS_ATTR_RO(path);
2292 
2293 static int counter_set_regs_show(struct tb_port *port, struct seq_file *s,
2294 				 int counter)
2295 {
2296 	u32 data[COUNTER_SET_LEN];
2297 	int ret, i;
2298 
2299 	ret = tb_port_read(port, data, TB_CFG_COUNTERS,
2300 			   counter * COUNTER_SET_LEN, ARRAY_SIZE(data));
2301 	if (ret) {
2302 		seq_printf(s, "0x%04x <not accessible>\n",
2303 			   counter * COUNTER_SET_LEN);
2304 		return ret;
2305 	}
2306 
2307 	for (i = 0; i < ARRAY_SIZE(data); i++) {
2308 		seq_printf(s, "0x%04x %4d 0x%02x 0x%08x\n",
2309 			   counter * COUNTER_SET_LEN + i, i, counter, data[i]);
2310 	}
2311 
2312 	return 0;
2313 }
2314 
2315 static int counters_show(struct seq_file *s, void *not_used)
2316 {
2317 	struct tb_port *port = s->private;
2318 	struct tb_switch *sw = port->sw;
2319 	struct tb *tb = sw->tb;
2320 	int i, ret = 0;
2321 
2322 	pm_runtime_get_sync(&sw->dev);
2323 
2324 	if (mutex_lock_interruptible(&tb->lock)) {
2325 		ret = -ERESTARTSYS;
2326 		goto out;
2327 	}
2328 
2329 	seq_puts(s, "# offset relative_offset counter_id value\n");
2330 
2331 	for (i = 0; i < port->config.max_counters; i++) {
2332 		ret = counter_set_regs_show(port, s, i);
2333 		if (ret)
2334 			break;
2335 	}
2336 
2337 	mutex_unlock(&tb->lock);
2338 
2339 out:
2340 	pm_runtime_mark_last_busy(&sw->dev);
2341 	pm_runtime_put_autosuspend(&sw->dev);
2342 
2343 	return ret;
2344 }
2345 DEBUGFS_ATTR_RW(counters);
2346 
2347 static int sb_regs_show(struct tb_port *port, const struct sb_reg *sb_regs,
2348 			size_t size, enum usb4_sb_target target, u8 index,
2349 			struct seq_file *s)
2350 {
2351 	int ret, i;
2352 
2353 	seq_puts(s, "# register value\n");
2354 
2355 	for (i = 0; i < size; i++) {
2356 		const struct sb_reg *regs = &sb_regs[i];
2357 		u8 data[SB_MAX_SIZE];
2358 		int j;
2359 
2360 		memset(data, 0, sizeof(data));
2361 		ret = usb4_port_sb_read(port, target, index, regs->reg, data,
2362 					regs->size);
2363 		if (ret)
2364 			return ret;
2365 
2366 		seq_printf(s, "0x%02x", regs->reg);
2367 		for (j = 0; j < regs->size; j++)
2368 			seq_printf(s, " 0x%02x", data[j]);
2369 		seq_puts(s, "\n");
2370 	}
2371 
2372 	return 0;
2373 }
2374 
2375 static int port_sb_regs_show(struct seq_file *s, void *not_used)
2376 {
2377 	struct tb_port *port = s->private;
2378 	struct tb_switch *sw = port->sw;
2379 	struct tb *tb = sw->tb;
2380 	int ret;
2381 
2382 	pm_runtime_get_sync(&sw->dev);
2383 
2384 	if (mutex_lock_interruptible(&tb->lock)) {
2385 		ret = -ERESTARTSYS;
2386 		goto out_rpm_put;
2387 	}
2388 
2389 	ret = sb_regs_show(port, port_sb_regs, ARRAY_SIZE(port_sb_regs),
2390 			   USB4_SB_TARGET_ROUTER, 0, s);
2391 
2392 	mutex_unlock(&tb->lock);
2393 out_rpm_put:
2394 	pm_runtime_mark_last_busy(&sw->dev);
2395 	pm_runtime_put_autosuspend(&sw->dev);
2396 
2397 	return ret;
2398 }
2399 DEBUGFS_ATTR_RW(port_sb_regs);
2400 
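/*
 * A sketch of the hierarchy created below, assuming debugfs is mounted at
 * /sys/kernel/debug:
 *
 *   thunderbolt/<router>/regs
 *   thunderbolt/<router>/drom                 (if the DROM has been read)
 *   thunderbolt/<router>/port<N>/regs
 *   thunderbolt/<router>/port<N>/path
 *   thunderbolt/<router>/port<N>/counters     (if the adapter supports them)
 *   thunderbolt/<router>/port<N>/sb_regs      (USB4 ports only)
 *   thunderbolt/<router>/port<N>/margining/   (lane adapters of a link)
 */
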
2401 /**
2402  * tb_switch_debugfs_init() - Add debugfs entries for router
2403  * @sw: Pointer to the router
2404  *
2405  * Adds debugfs directories and files for the given router.
2406  */
2407 void tb_switch_debugfs_init(struct tb_switch *sw)
2408 {
2409 	struct dentry *debugfs_dir;
2410 	struct tb_port *port;
2411 
2412 	debugfs_dir = debugfs_create_dir(dev_name(&sw->dev), tb_debugfs_root);
2413 	sw->debugfs_dir = debugfs_dir;
2414 	debugfs_create_file("regs", DEBUGFS_MODE, debugfs_dir, sw,
2415 			    &switch_regs_fops);
2416 	if (sw->drom)
2417 		debugfs_create_blob("drom", 0400, debugfs_dir, &sw->drom_blob);
2418 
2419 	tb_switch_for_each_port(sw, port) {
2420 		struct dentry *debugfs_dir;
2421 		char dir_name[10];
2422 
2423 		if (port->disabled)
2424 			continue;
2425 		if (port->config.type == TB_TYPE_INACTIVE)
2426 			continue;
2427 
2428 		snprintf(dir_name, sizeof(dir_name), "port%d", port->port);
2429 		debugfs_dir = debugfs_create_dir(dir_name, sw->debugfs_dir);
2430 		debugfs_create_file("regs", DEBUGFS_MODE, debugfs_dir,
2431 				    port, &port_regs_fops);
2432 		debugfs_create_file("path", 0400, debugfs_dir, port,
2433 				    &path_fops);
2434 		if (port->config.counters_support)
2435 			debugfs_create_file("counters", 0600, debugfs_dir, port,
2436 					    &counters_fops);
2437 		if (port->usb4)
2438 			debugfs_create_file("sb_regs", DEBUGFS_MODE, debugfs_dir,
2439 					    port, &port_sb_regs_fops);
2440 	}
2441 
2442 	margining_switch_init(sw);
2443 }
2444 
2445 /**
2446  * tb_switch_debugfs_remove() - Remove all router debugfs entries
2447  * @sw: Pointer to the router
2448  *
2449  * Removes all previously added debugfs entries under this router.
2450  */
2451 void tb_switch_debugfs_remove(struct tb_switch *sw)
2452 {
2453 	margining_switch_remove(sw);
2454 	debugfs_remove_recursive(sw->debugfs_dir);
2455 }
2456 
2457 void tb_xdomain_debugfs_init(struct tb_xdomain *xd)
2458 {
2459 	margining_xdomain_init(xd);
2460 }
2461 
2462 void tb_xdomain_debugfs_remove(struct tb_xdomain *xd)
2463 {
2464 	margining_xdomain_remove(xd);
2465 }
2466 
2467 /**
2468  * tb_service_debugfs_init() - Add debugfs directory for service
2469  * @svc: Thunderbolt service pointer
2470  *
2471  * Adds a debugfs directory for the service.
2472  */
2473 void tb_service_debugfs_init(struct tb_service *svc)
2474 {
2475 	svc->debugfs_dir = debugfs_create_dir(dev_name(&svc->dev),
2476 					      tb_debugfs_root);
2477 }
2478 
2479 /**
2480  * tb_service_debugfs_remove() - Remove service debugfs directory
2481  * @svc: Thunderbolt service pointer
2482  *
2483  * Removes the previously created debugfs directory for @svc.
2484  */
2485 void tb_service_debugfs_remove(struct tb_service *svc)
2486 {
2487 	debugfs_remove_recursive(svc->debugfs_dir);
2488 	svc->debugfs_dir = NULL;
2489 }
2490 
2491 static int retimer_sb_regs_show(struct seq_file *s, void *not_used)
2492 {
2493 	struct tb_retimer *rt = s->private;
2494 	struct tb *tb = rt->tb;
2495 	int ret;
2496 
2497 	pm_runtime_get_sync(&rt->dev);
2498 
2499 	if (mutex_lock_interruptible(&tb->lock)) {
2500 		ret = -ERESTARTSYS;
2501 		goto out_rpm_put;
2502 	}
2503 
2504 	ret = sb_regs_show(rt->port, retimer_sb_regs, ARRAY_SIZE(retimer_sb_regs),
2505 			   USB4_SB_TARGET_RETIMER, rt->index, s);
2506 
2507 	mutex_unlock(&tb->lock);
2508 out_rpm_put:
2509 	pm_runtime_mark_last_busy(&rt->dev);
2510 	pm_runtime_put_autosuspend(&rt->dev);
2511 
2512 	return ret;
2513 }
2514 DEBUGFS_ATTR_RW(retimer_sb_regs);
2515 
2516 /**
2517  * tb_retimer_debugfs_init() - Add debugfs directory for retimer
2518  * @rt: Pointer to retimer structure
2519  *
2520  * Adds and populates the retimer debugfs directory.
2521  */
2522 void tb_retimer_debugfs_init(struct tb_retimer *rt)
2523 {
2524 	struct dentry *debugfs_dir;
2525 
2526 	debugfs_dir = debugfs_create_dir(dev_name(&rt->dev), tb_debugfs_root);
2527 	debugfs_create_file("sb_regs", DEBUGFS_MODE, debugfs_dir, rt,
2528 			    &retimer_sb_regs_fops);
2529 	margining_retimer_init(rt, debugfs_dir);
2530 }
2531 
2532 /**
2533  * tb_retimer_debugfs_remove() - Remove retimer debugfs directory
2534  * @rt: Pointer to retimer structure
2535  *
2536  * Removes the retimer debugfs directory along with its contents.
2537  */
2538 void tb_retimer_debugfs_remove(struct tb_retimer *rt)
2539 {
2540 	debugfs_lookup_and_remove(dev_name(&rt->dev), tb_debugfs_root);
2541 	margining_retimer_remove(rt);
2542 }
2543 
2544 void tb_debugfs_init(void)
2545 {
2546 	tb_debugfs_root = debugfs_create_dir("thunderbolt", NULL);
2547 }
2548 
2549 void tb_debugfs_exit(void)
2550 {
2551 	debugfs_remove_recursive(tb_debugfs_root);
2552 }
2553