1 /*
2 * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33 #include <linux/clocksource.h>
34 #include <linux/highmem.h>
35 #include <linux/log2.h>
36 #include <linux/ptp_clock_kernel.h>
37 #include <rdma/mlx5-abi.h>
38 #include "lib/eq.h"
39 #include "en.h"
40 #include "clock.h"
41 #ifdef CONFIG_X86
42 #include <linux/timekeeping.h>
43 #include <linux/cpufeature.h>
44 #endif /* CONFIG_X86 */
45
/* MTPPS pin direction: input (external timestamp) or output (signal gen). */
enum {
	MLX5_PIN_MODE_IN = 0x0,
	MLX5_PIN_MODE_OUT = 0x1,
};

/* Waveform pattern for an output pin. */
enum {
	MLX5_OUT_PATTERN_PULSE = 0x0,
	MLX5_OUT_PATTERN_PERIODIC = 0x1,
};

/* Pin event generation mode, programmed via mlx5_set_mtppse(). */
enum {
	MLX5_EVENT_MODE_DISABLE = 0x0,
	MLX5_EVENT_MODE_REPETETIVE = 0x1,
	MLX5_EVENT_MODE_ONCE_TILL_ARM = 0x2,
};

/* field_select bits of the MTPPS register: which fields a write updates. */
enum {
	MLX5_MTPPS_FS_ENABLE = BIT(0x0),
	MLX5_MTPPS_FS_PATTERN = BIT(0x2),
	MLX5_MTPPS_FS_PIN_MODE = BIT(0x3),
	MLX5_MTPPS_FS_TIME_STAMP = BIT(0x4),
	MLX5_MTPPS_FS_OUT_PULSE_DURATION = BIT(0x5),
	MLX5_MTPPS_FS_ENH_OUT_PER_ADJ = BIT(0x7),
	MLX5_MTPPS_FS_NPPS_PERIOD = BIT(0x9),
	MLX5_MTPPS_FS_OUT_PULSE_DURATION_NS = BIT(0xa),
};

/* Bounds for the MTUTC ADJUST_TIME operation (see mlx5_ptp_getmaxphase());
 * the extended range applies only when the firmware advertises
 * mtutc_time_adjustment_extended_range.
 */
enum {
	MLX5_MTUTC_OPERATION_ADJUST_TIME_MIN = S16_MIN,
	MLX5_MTUTC_OPERATION_ADJUST_TIME_MAX = S16_MAX,
	MLX5_MTUTC_OPERATION_ADJUST_TIME_EXTENDED_MIN = -200000,
	MLX5_MTUTC_OPERATION_ADJUST_TIME_EXTENDED_MAX = 200000,
};
79
mlx5_real_time_mode(struct mlx5_core_dev * mdev)80 static bool mlx5_real_time_mode(struct mlx5_core_dev *mdev)
81 {
82 return (mlx5_is_real_time_rq(mdev) || mlx5_is_real_time_sq(mdev));
83 }
84
mlx5_npps_real_time_supported(struct mlx5_core_dev * mdev)85 static bool mlx5_npps_real_time_supported(struct mlx5_core_dev *mdev)
86 {
87 return (mlx5_real_time_mode(mdev) &&
88 MLX5_CAP_MCAM_FEATURE(mdev, npps_period) &&
89 MLX5_CAP_MCAM_FEATURE(mdev, out_pulse_duration_ns));
90 }
91
/* The hardware UTC clock (MTUTC register) may be modified only when the
 * firmware advertises the ptpcyc2realtime_modify capability.
 */
static bool mlx5_modify_mtutc_allowed(struct mlx5_core_dev *mdev)
{
	return MLX5_CAP_MCAM_FEATURE(mdev, ptpcyc2realtime_modify);
}
96
static u32 mlx5_ptp_shift_constant(u32 dev_freq_khz)
{
	/* Optimal shift constant leads to corrections above just 1 scaled ppm.
	 *
	 * Two sets of equations are needed to derive the optimal shift
	 * constant for the cyclecounter.
	 *
	 *    dev_freq_khz * 1000 / 2^shift_constant = 1 scaled_ppm
	 *    ppb = scaled_ppm * 1000 / 2^16
	 *
	 * Using the two equations together
	 *
	 *    dev_freq_khz * 1000 / 1 scaled_ppm = 2^shift_constant
	 *    dev_freq_khz * 2^16 / 1 ppb = 2^shift_constant
	 *    dev_freq_khz = 2^(shift_constant - 16)
	 *
	 * then yields
	 *
	 *    shift_constant = ilog2(dev_freq_khz) + 16
	 */

	/* The second term caps the shift; presumably it bounds the
	 * cyclecounter multiplier so scaled cycle deltas cannot overflow —
	 * NOTE(review): confirm against the cyclecounter mult/shift limits.
	 */
	return min(ilog2(dev_freq_khz) + 16,
		   ilog2((U32_MAX / NSEC_PER_MSEC) * dev_freq_khz));
}
121
mlx5_ptp_getmaxphase(struct ptp_clock_info * ptp)122 static s32 mlx5_ptp_getmaxphase(struct ptp_clock_info *ptp)
123 {
124 struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
125 struct mlx5_core_dev *mdev;
126
127 mdev = container_of(clock, struct mlx5_core_dev, clock);
128
129 return MLX5_CAP_MCAM_FEATURE(mdev, mtutc_time_adjustment_extended_range) ?
130 MLX5_MTUTC_OPERATION_ADJUST_TIME_EXTENDED_MAX :
131 MLX5_MTUTC_OPERATION_ADJUST_TIME_MAX;
132 }
133
mlx5_is_mtutc_time_adj_cap(struct mlx5_core_dev * mdev,s64 delta)134 static bool mlx5_is_mtutc_time_adj_cap(struct mlx5_core_dev *mdev, s64 delta)
135 {
136 s64 max = mlx5_ptp_getmaxphase(&mdev->clock.ptp_info);
137
138 if (delta < -max || delta > max)
139 return false;
140
141 return true;
142 }
143
/* Write the MTUTC register. Returns -EOPNOTSUPP when the register is not
 * advertised in MCAM, otherwise the access-register result.
 */
static int mlx5_set_mtutc(struct mlx5_core_dev *dev, u32 *mtutc, u32 size)
{
	u32 out[MLX5_ST_SZ_DW(mtutc_reg)] = {};

	if (!MLX5_CAP_MCAM_REG(dev, mtutc))
		return -EOPNOTSUPP;

	return mlx5_core_access_reg(dev, mtutc, size, out, sizeof(out),
				    MLX5_REG_MTUTC, 0, 1);
}
154
155 #ifdef CONFIG_X86
mlx5_is_ptm_source_time_available(struct mlx5_core_dev * dev)156 static bool mlx5_is_ptm_source_time_available(struct mlx5_core_dev *dev)
157 {
158 u32 out[MLX5_ST_SZ_DW(mtptm_reg)] = {0};
159 u32 in[MLX5_ST_SZ_DW(mtptm_reg)] = {0};
160 int err;
161
162 if (!MLX5_CAP_MCAM_REG3(dev, mtptm))
163 return false;
164
165 err = mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out), MLX5_REG_MTPTM,
166 0, 0);
167 if (err)
168 return false;
169
170 return !!MLX5_GET(mtptm_reg, out, psta);
171 }
172
/* Callback for get_device_system_crosststamp(): read one correlated pair of
 * (host ART counter, device clock) via the MTCTR register.
 *
 * The first timestamp is requested from the PTM root clock (ART); the second
 * from either the device real-time clock or its free-running counter,
 * depending on the current timestamping mode.
 *
 * Returns 0 on success, a negative errno on register access failure, or
 * -EINVAL if firmware flags either timestamp as invalid.
 */
static int mlx5_mtctr_syncdevicetime(ktime_t *device_time,
				     struct system_counterval_t *sys_counterval,
				     void *ctx)
{
	u32 out[MLX5_ST_SZ_DW(mtctr_reg)] = {0};
	u32 in[MLX5_ST_SZ_DW(mtctr_reg)] = {0};
	struct mlx5_core_dev *mdev = ctx;
	bool real_time_mode;
	u64 host, device;
	int err;

	real_time_mode = mlx5_real_time_mode(mdev);

	MLX5_SET(mtctr_reg, in, first_clock_timestamp_request,
		 MLX5_MTCTR_REQUEST_PTM_ROOT_CLOCK);
	MLX5_SET(mtctr_reg, in, second_clock_timestamp_request,
		 real_time_mode ? MLX5_MTCTR_REQUEST_REAL_TIME_CLOCK :
		 MLX5_MTCTR_REQUEST_FREE_RUNNING_COUNTER);

	err = mlx5_core_access_reg(mdev, in, sizeof(in), out, sizeof(out), MLX5_REG_MTCTR,
				   0, 0);
	if (err)
		return err;

	if (!MLX5_GET(mtctr_reg, out, first_clock_valid) ||
	    !MLX5_GET(mtctr_reg, out, second_clock_valid))
		return -EINVAL;

	/* Host side: ART counter value, flagged as already in nanoseconds. */
	host = MLX5_GET64(mtctr_reg, out, first_clock_timestamp);
	*sys_counterval = (struct system_counterval_t) {
			.cycles = host,
			.cs_id = CSID_X86_ART,
			.use_nsecs = true,
	};

	/* Device side: real-time values pack seconds in the high 32 bits and
	 * nanoseconds in the low 32 bits; free-running values are raw cycles
	 * that must go through the timecounter.
	 */
	device = MLX5_GET64(mtctr_reg, out, second_clock_timestamp);
	if (real_time_mode)
		*device_time = ns_to_ktime(REAL_TIME_TO_NS(device >> 32, device & U32_MAX));
	else
		*device_time = mlx5_timecounter_cyc2time(&mdev->clock, device);

	return 0;
}
216
/* ptp_clock_info.getcrosststamp: precise device/system cross-timestamp via
 * PCIe PTM. Returns -EBUSY when the PTM source time is not currently
 * available. A system time snapshot taken before the device read is passed
 * as history so the core can interpolate.
 */
static int mlx5_ptp_getcrosststamp(struct ptp_clock_info *ptp,
				   struct system_device_crosststamp *cts)
{
	struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
	struct system_time_snapshot history_begin = {0};
	struct mlx5_core_dev *mdev;

	mdev = container_of(clock, struct mlx5_core_dev, clock);

	if (!mlx5_is_ptm_source_time_available(mdev))
		return -EBUSY;

	ktime_get_snapshot(&history_begin);

	return get_device_system_crosststamp(mlx5_mtctr_syncdevicetime, mdev,
					     &history_begin, cts);
}
234 #endif /* CONFIG_X86 */
235
/* Read the 64-bit device time from the initialization segment: free-running
 * cycles, or (when @real_time) a sec/nsec pair converted to nanoseconds.
 *
 * The high word is sampled before and after the low word; if it changed, the
 * low word wrapped between the two samples and is re-read so it pairs with
 * the second high-word sample. @sts, when non-NULL, brackets the low-word
 * read with system timestamps (for PTP_SYS_OFFSET_EXTENDED-style queries).
 */
static u64 mlx5_read_time(struct mlx5_core_dev *dev,
			  struct ptp_system_timestamp *sts,
			  bool real_time)
{
	u32 timer_h, timer_h1, timer_l;

	timer_h = ioread32be(real_time ? &dev->iseg->real_time_h :
			     &dev->iseg->internal_timer_h);
	ptp_read_system_prets(sts);
	timer_l = ioread32be(real_time ? &dev->iseg->real_time_l :
			     &dev->iseg->internal_timer_l);
	ptp_read_system_postts(sts);
	timer_h1 = ioread32be(real_time ? &dev->iseg->real_time_h :
			      &dev->iseg->internal_timer_h);
	if (timer_h != timer_h1) {
		/* wrap around */
		ptp_read_system_prets(sts);
		timer_l = ioread32be(real_time ? &dev->iseg->real_time_l :
				     &dev->iseg->internal_timer_l);
		ptp_read_system_postts(sts);
	}

	return real_time ? REAL_TIME_TO_NS(timer_h1, timer_l) :
			   (u64)timer_l | (u64)timer_h1 << 32;
}
261
read_internal_timer(const struct cyclecounter * cc)262 static u64 read_internal_timer(const struct cyclecounter *cc)
263 {
264 struct mlx5_timer *timer = container_of(cc, struct mlx5_timer, cycles);
265 struct mlx5_clock *clock = container_of(timer, struct mlx5_clock, timer);
266 struct mlx5_core_dev *mdev = container_of(clock, struct mlx5_core_dev,
267 clock);
268
269 return mlx5_read_time(mdev, NULL, false) & cc->mask;
270 }
271
/* Publish the current timecounter state to the shared clock-info page
 * (mapped into user space for user-side cycle-to-time conversion); no-op if
 * the page was never allocated.
 *
 * Writer-side seqcount-like protocol: sign is made odd (KERNEL_UPDATING)
 * before the update and advanced to the next even value afterwards, with
 * store barriers, so concurrent readers can detect a torn read and retry.
 * All call sites in this file hold clock->lock while calling.
 */
static void mlx5_update_clock_info_page(struct mlx5_core_dev *mdev)
{
	struct mlx5_ib_clock_info *clock_info = mdev->clock_info;
	struct mlx5_clock *clock = &mdev->clock;
	struct mlx5_timer *timer;
	u32 sign;

	if (!clock_info)
		return;

	sign = smp_load_acquire(&clock_info->sign);
	smp_store_mb(clock_info->sign,
		     sign | MLX5_IB_CLOCK_INFO_KERNEL_UPDATING);

	timer = &clock->timer;
	clock_info->cycles = timer->tc.cycle_last;
	clock_info->mult = timer->cycles.mult;
	clock_info->nsec = timer->tc.nsec;
	clock_info->frac = timer->tc.frac;

	smp_store_release(&clock_info->sign,
			  sign + MLX5_IB_CLOCK_INFO_KERNEL_UPDATING * 2);
}
295
/* Work handler: re-arm pending PPS output events.
 *
 * For each pin, atomically consume its queued start timestamp under
 * clock->lock; if one was pending, program it into the MTPPS time_stamp
 * field for that pin.
 */
static void mlx5_pps_out(struct work_struct *work)
{
	struct mlx5_pps *pps_info = container_of(work, struct mlx5_pps,
						 out_work);
	struct mlx5_clock *clock = container_of(pps_info, struct mlx5_clock,
						pps_info);
	struct mlx5_core_dev *mdev = container_of(clock, struct mlx5_core_dev,
						  clock);
	u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
	unsigned long flags;
	int i;

	for (i = 0; i < clock->ptp_info.n_pins; i++) {
		u64 tstart;

		/* Take-and-clear under the lock so a concurrent event handler
		 * cannot double-program the same timestamp.
		 */
		write_seqlock_irqsave(&clock->lock, flags);
		tstart = clock->pps_info.start[i];
		clock->pps_info.start[i] = 0;
		write_sequnlock_irqrestore(&clock->lock, flags);
		if (!tstart)
			continue;

		MLX5_SET(mtpps_reg, in, pin, i);
		MLX5_SET64(mtpps_reg, in, time_stamp, tstart);
		MLX5_SET(mtpps_reg, in, field_select, MLX5_MTPPS_FS_TIME_STAMP);
		mlx5_set_mtpps(mdev, in, sizeof(in));
	}
}
324
/* Periodic PTP aux work: refresh the software timecounter before the
 * free-running cycle counter wraps, and republish the shared clock-info
 * page. Skipped while the device is in internal error (registers are not
 * readable then).
 *
 * Returns timer->overflow_period, the delay until the next invocation (as
 * interpreted by the PTP aux worker).
 */
static long mlx5_timestamp_overflow(struct ptp_clock_info *ptp_info)
{
	struct mlx5_core_dev *mdev;
	struct mlx5_timer *timer;
	struct mlx5_clock *clock;
	unsigned long flags;

	clock = container_of(ptp_info, struct mlx5_clock, ptp_info);
	mdev = container_of(clock, struct mlx5_core_dev, clock);
	timer = &clock->timer;

	if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
		goto out;

	write_seqlock_irqsave(&clock->lock, flags);
	timecounter_read(&timer->tc);
	mlx5_update_clock_info_page(mdev);
	write_sequnlock_irqrestore(&clock->lock, flags);

out:
	return timer->overflow_period;
}
347
mlx5_ptp_settime_real_time(struct mlx5_core_dev * mdev,const struct timespec64 * ts)348 static int mlx5_ptp_settime_real_time(struct mlx5_core_dev *mdev,
349 const struct timespec64 *ts)
350 {
351 u32 in[MLX5_ST_SZ_DW(mtutc_reg)] = {};
352
353 if (ts->tv_sec < 0 || ts->tv_sec > U32_MAX ||
354 ts->tv_nsec < 0 || ts->tv_nsec > NSEC_PER_SEC)
355 return -EINVAL;
356
357 MLX5_SET(mtutc_reg, in, operation, MLX5_MTUTC_OPERATION_SET_TIME_IMMEDIATE);
358 MLX5_SET(mtutc_reg, in, utc_sec, ts->tv_sec);
359 MLX5_SET(mtutc_reg, in, utc_nsec, ts->tv_nsec);
360
361 return mlx5_set_mtutc(mdev, in, sizeof(in));
362 }
363
/* ptp_clock_info.settime64: set the device clock.
 *
 * When MTUTC modification is supported, program the hardware real-time
 * clock first; a hardware failure aborts without touching the software
 * timecounter. The software timecounter is then re-initialized under the
 * seqlock and the shared clock-info page is refreshed.
 */
static int mlx5_ptp_settime(struct ptp_clock_info *ptp, const struct timespec64 *ts)
{
	struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
	struct mlx5_timer *timer = &clock->timer;
	struct mlx5_core_dev *mdev;
	unsigned long flags;

	mdev = container_of(clock, struct mlx5_core_dev, clock);

	if (mlx5_modify_mtutc_allowed(mdev)) {
		int err = mlx5_ptp_settime_real_time(mdev, ts);

		if (err)
			return err;
	}

	write_seqlock_irqsave(&clock->lock, flags);
	timecounter_init(&timer->tc, &timer->cycles, timespec64_to_ns(ts));
	mlx5_update_clock_info_page(mdev);
	write_sequnlock_irqrestore(&clock->lock, flags);

	return 0;
}
387
388 static
mlx5_ptp_gettimex_real_time(struct mlx5_core_dev * mdev,struct ptp_system_timestamp * sts)389 struct timespec64 mlx5_ptp_gettimex_real_time(struct mlx5_core_dev *mdev,
390 struct ptp_system_timestamp *sts)
391 {
392 struct timespec64 ts;
393 u64 time;
394
395 time = mlx5_read_time(mdev, sts, true);
396 ts = ns_to_timespec64(time);
397 return ts;
398 }
399
mlx5_ptp_gettimex(struct ptp_clock_info * ptp,struct timespec64 * ts,struct ptp_system_timestamp * sts)400 static int mlx5_ptp_gettimex(struct ptp_clock_info *ptp, struct timespec64 *ts,
401 struct ptp_system_timestamp *sts)
402 {
403 struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
404 struct mlx5_core_dev *mdev;
405 u64 cycles, ns;
406
407 mdev = container_of(clock, struct mlx5_core_dev, clock);
408 if (mlx5_real_time_mode(mdev)) {
409 *ts = mlx5_ptp_gettimex_real_time(mdev, sts);
410 goto out;
411 }
412
413 cycles = mlx5_read_time(mdev, sts, false);
414 ns = mlx5_timecounter_cyc2time(clock, cycles);
415 *ts = ns_to_timespec64(ns);
416 out:
417 return 0;
418 }
419
/* Adjust the hardware real-time clock by @delta nanoseconds via MTUTC.
 *
 * If @delta exceeds the hardware ADJUST_TIME range, fall back to reading
 * the clock, adding the delta, and setting the time absolutely (a non-
 * atomic read-modify-write, but the only option out of range).
 */
static int mlx5_ptp_adjtime_real_time(struct mlx5_core_dev *mdev, s64 delta)
{
	u32 in[MLX5_ST_SZ_DW(mtutc_reg)] = {};

	/* HW time adjustment range is checked. If out of range, settime instead */
	if (!mlx5_is_mtutc_time_adj_cap(mdev, delta)) {
		struct timespec64 ts;
		s64 ns;

		ts = mlx5_ptp_gettimex_real_time(mdev, NULL);
		ns = timespec64_to_ns(&ts) + delta;
		ts = ns_to_timespec64(ns);
		return mlx5_ptp_settime_real_time(mdev, &ts);
	}

	MLX5_SET(mtutc_reg, in, operation, MLX5_MTUTC_OPERATION_ADJUST_TIME);
	MLX5_SET(mtutc_reg, in, time_adjustment, delta);

	return mlx5_set_mtutc(mdev, in, sizeof(in));
}
440
/* ptp_clock_info.adjtime: shift the clock by @delta nanoseconds.
 *
 * Hardware real-time clock is adjusted first when supported (failure aborts
 * before the software state changes); the software timecounter is then
 * shifted under the seqlock and the clock-info page republished.
 */
static int mlx5_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
	struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
	struct mlx5_timer *timer = &clock->timer;
	struct mlx5_core_dev *mdev;
	unsigned long flags;

	mdev = container_of(clock, struct mlx5_core_dev, clock);

	if (mlx5_modify_mtutc_allowed(mdev)) {
		int err = mlx5_ptp_adjtime_real_time(mdev, delta);

		if (err)
			return err;
	}

	write_seqlock_irqsave(&clock->lock, flags);
	timecounter_adjtime(&timer->tc, delta);
	mlx5_update_clock_info_page(mdev);
	write_sequnlock_irqrestore(&clock->lock, flags);

	return 0;
}
464
/* ptp_clock_info.adjphase: phase offsets are applied to the hardware
 * real-time clock only; range is enforced by mlx5_ptp_adjtime_real_time()
 * against getmaxphase.
 */
static int mlx5_ptp_adjphase(struct ptp_clock_info *ptp, s32 delta)
{
	struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
	struct mlx5_core_dev *mdev = container_of(clock, struct mlx5_core_dev,
						  clock);

	return mlx5_ptp_adjtime_real_time(mdev, delta);
}
474
/* Program a frequency adjustment into the hardware real-time clock via
 * MTUTC ADJUST_FREQ_UTC.
 *
 * Uses the scaled-ppm unit directly when the device supports it and the
 * value fits in the 32-bit freq_adjustment field; otherwise converts to
 * ppb first.
 */
static int mlx5_ptp_freq_adj_real_time(struct mlx5_core_dev *mdev, long scaled_ppm)
{
	u32 in[MLX5_ST_SZ_DW(mtutc_reg)] = {};

	MLX5_SET(mtutc_reg, in, operation, MLX5_MTUTC_OPERATION_ADJUST_FREQ_UTC);

	if (MLX5_CAP_MCAM_FEATURE(mdev, mtutc_freq_adj_units) &&
	    scaled_ppm <= S32_MAX && scaled_ppm >= S32_MIN) {
		/* HW scaled_ppm support on mlx5 devices only supports a 32-bit value */
		MLX5_SET(mtutc_reg, in, freq_adj_units,
			 MLX5_MTUTC_FREQ_ADJ_UNITS_SCALED_PPM);
		MLX5_SET(mtutc_reg, in, freq_adjustment, (s32)scaled_ppm);
	} else {
		MLX5_SET(mtutc_reg, in, freq_adj_units, MLX5_MTUTC_FREQ_ADJ_UNITS_PPB);
		MLX5_SET(mtutc_reg, in, freq_adjustment, scaled_ppm_to_ppb(scaled_ppm));
	}

	return mlx5_set_mtutc(mdev, in, sizeof(in));
}
494
/* ptp_clock_info.adjfine: adjust clock frequency by @scaled_ppm.
 *
 * Hardware real-time clock is adjusted first when supported (failure aborts
 * before software state changes). The software cyclecounter multiplier is
 * then rescaled from the nominal value under the seqlock, and the overflow
 * worker is rescheduled.
 */
static int mlx5_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
	struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
	struct mlx5_timer *timer = &clock->timer;
	struct mlx5_core_dev *mdev;
	unsigned long flags;
	u32 mult;

	mdev = container_of(clock, struct mlx5_core_dev, clock);

	if (mlx5_modify_mtutc_allowed(mdev)) {
		int err = mlx5_ptp_freq_adj_real_time(mdev, scaled_ppm);

		if (err)
			return err;
	}

	mult = (u32)adjust_by_scaled_ppm(timer->nominal_c_mult, scaled_ppm);

	write_seqlock_irqsave(&clock->lock, flags);
	/* Catch up the timecounter before changing the rate, so past cycles
	 * are converted at the old multiplier.
	 */
	timecounter_read(&timer->tc);
	timer->cycles.mult = mult;
	mlx5_update_clock_info_page(mdev);
	write_sequnlock_irqrestore(&clock->lock, flags);
	ptp_schedule_worker(clock->ptp, timer->overflow_period);

	return 0;
}
523
/* ptp_clock_info.enable handler for external-timestamp (EXTTS) requests.
 *
 * Validates flags (single-edge only under PTP_STRICT_FLAGS), resolves the
 * channel to a hardware pin, programs the pin as an input via MTPPS, and
 * arms repetitive event generation via MTPPSE.
 */
static int mlx5_extts_configure(struct ptp_clock_info *ptp,
				struct ptp_clock_request *rq,
				int on)
{
	struct mlx5_clock *clock =
			container_of(ptp, struct mlx5_clock, ptp_info);
	struct mlx5_core_dev *mdev =
			container_of(clock, struct mlx5_core_dev, clock);
	u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
	u32 field_select = 0;
	u8 pin_mode = 0;
	u8 pattern = 0;
	int pin = -1;
	int err = 0;

	if (!MLX5_PPS_CAP(mdev))
		return -EOPNOTSUPP;

	/* Reject requests with unsupported flags */
	if (rq->extts.flags & ~(PTP_ENABLE_FEATURE |
				PTP_RISING_EDGE |
				PTP_FALLING_EDGE |
				PTP_STRICT_FLAGS))
		return -EOPNOTSUPP;

	/* Reject requests to enable time stamping on both edges. */
	if ((rq->extts.flags & PTP_STRICT_FLAGS) &&
	    (rq->extts.flags & PTP_ENABLE_FEATURE) &&
	    (rq->extts.flags & PTP_EXTTS_EDGES) == PTP_EXTTS_EDGES)
		return -EOPNOTSUPP;

	if (rq->extts.index >= clock->ptp_info.n_pins)
		return -EINVAL;

	pin = ptp_find_pin(clock->ptp, PTP_PF_EXTTS, rq->extts.index);
	if (pin < 0)
		return -EBUSY;

	if (on) {
		pin_mode = MLX5_PIN_MODE_IN;
		/* pattern selects the triggering edge: 0 rising, 1 falling */
		pattern = !!(rq->extts.flags & PTP_FALLING_EDGE);
		field_select = MLX5_MTPPS_FS_PIN_MODE |
			       MLX5_MTPPS_FS_PATTERN |
			       MLX5_MTPPS_FS_ENABLE;
	} else {
		field_select = MLX5_MTPPS_FS_ENABLE;
	}

	MLX5_SET(mtpps_reg, in, pin, pin);
	MLX5_SET(mtpps_reg, in, pin_mode, pin_mode);
	MLX5_SET(mtpps_reg, in, pattern, pattern);
	MLX5_SET(mtpps_reg, in, enable, on);
	MLX5_SET(mtpps_reg, in, field_select, field_select);

	err = mlx5_set_mtpps(mdev, in, sizeof(in));
	if (err)
		return err;

	/* Event mode is REPETETIVE (0x1) when enabling, DISABLE (0x0) when
	 * disabling — the bitwise AND with @on yields exactly that.
	 */
	return mlx5_set_mtppse(mdev, pin, 0,
			       MLX5_EVENT_MODE_REPETETIVE & on);
}
585
/* Translate an absolute target time (@target_ns, internal-timer domain)
 * into the free-running cycle count at which it will occur.
 *
 * Inverts the cyclecounter scaling: cycles = ns * 2^shift / mult. The
 * timecounter conversion and mult/shift reads are done under clock->lock
 * to stay consistent with concurrent adjfine/adjtime updates.
 */
static u64 find_target_cycles(struct mlx5_core_dev *mdev, s64 target_ns)
{
	struct mlx5_clock *clock = &mdev->clock;
	u64 cycles_now, cycles_delta;
	u64 nsec_now, nsec_delta;
	struct mlx5_timer *timer;
	unsigned long flags;

	timer = &clock->timer;

	cycles_now = mlx5_read_time(mdev, NULL, false);
	write_seqlock_irqsave(&clock->lock, flags);
	nsec_now = timecounter_cyc2time(&timer->tc, cycles_now);
	nsec_delta = target_ns - nsec_now;
	cycles_delta = div64_u64(nsec_delta << timer->cycles.shift,
				 timer->cycles.mult);
	write_sequnlock_irqrestore(&clock->lock, flags);

	return cycles_now + cycles_delta;
}
606
perout_conf_internal_timer(struct mlx5_core_dev * mdev,s64 sec)607 static u64 perout_conf_internal_timer(struct mlx5_core_dev *mdev, s64 sec)
608 {
609 struct timespec64 ts = {};
610 s64 target_ns;
611
612 ts.tv_sec = sec;
613 target_ns = timespec64_to_ns(&ts);
614
615 return find_target_cycles(mdev, target_ns);
616 }
617
perout_conf_real_time(s64 sec,u32 nsec)618 static u64 perout_conf_real_time(s64 sec, u32 nsec)
619 {
620 return (u64)nsec | (u64)sec << 32;
621 }
622
/* Validate a 1PPS periodic-output request and compute its start timestamp.
 *
 * Only a 1-second period is supported: (ns >> 1) == 500000000 matches
 * exactly 1000000000 ns (the shift also tolerates 1000000001 ns). The start
 * time is packed as sec<<32 in real-time mode, or converted to an
 * internal-timer cycle count otherwise.
 */
static int perout_conf_1pps(struct mlx5_core_dev *mdev, struct ptp_clock_request *rq,
			    u64 *time_stamp, bool real_time)
{
	struct timespec64 ts;
	s64 ns;

	ts.tv_nsec = rq->perout.period.nsec;
	ts.tv_sec = rq->perout.period.sec;
	ns = timespec64_to_ns(&ts);

	if ((ns >> 1) != 500000000LL)
		return -EINVAL;

	*time_stamp = real_time ? perout_conf_real_time(rq->perout.start.sec, 0) :
			perout_conf_internal_timer(mdev, rq->perout.start.sec);

	return 0;
}
641
/* Largest value representable in the MTPPS out_pulse_duration_ns field. */
#define MLX5_MAX_PULSE_DURATION (BIT(__mlx5_bit_sz(mtpps_reg, out_pulse_duration_ns)) - 1)
/* Compute the NPPS output pulse width in nanoseconds: the requested ON time
 * when PTP_PEROUT_DUTY_CYCLE is set, otherwise half the period (the default
 * 50% duty cycle). Rejects values outside
 * [min_out_pulse_duration_ns, MLX5_MAX_PULSE_DURATION].
 */
static int mlx5_perout_conf_out_pulse_duration(struct mlx5_core_dev *mdev,
					       struct ptp_clock_request *rq,
					       u32 *out_pulse_duration_ns)
{
	struct mlx5_pps *pps_info = &mdev->clock.pps_info;
	u32 out_pulse_duration;
	struct timespec64 ts;

	if (rq->perout.flags & PTP_PEROUT_DUTY_CYCLE) {
		ts.tv_sec = rq->perout.on.sec;
		ts.tv_nsec = rq->perout.on.nsec;
		out_pulse_duration = (u32)timespec64_to_ns(&ts);
	} else {
		/* out_pulse_duration_ns should be up to 50% of the
		 * pulse period as default
		 */
		ts.tv_sec = rq->perout.period.sec;
		ts.tv_nsec = rq->perout.period.nsec;
		out_pulse_duration = (u32)timespec64_to_ns(&ts) >> 1;
	}

	if (out_pulse_duration < pps_info->min_out_pulse_duration_ns ||
	    out_pulse_duration > MLX5_MAX_PULSE_DURATION) {
		mlx5_core_err(mdev, "NPPS pulse duration %u is not in [%llu, %lu]\n",
			      out_pulse_duration, pps_info->min_out_pulse_duration_ns,
			      MLX5_MAX_PULSE_DURATION);
		return -EINVAL;
	}
	*out_pulse_duration_ns = out_pulse_duration;

	return 0;
}
675
/* Build the NPPS (arbitrary-period) real-time output configuration:
 * validate the period against the device minimum, encode period and start
 * time in the packed sec<<32|nsec format, compute the pulse width, and add
 * the NPPS-specific field_select bits.
 */
static int perout_conf_npps_real_time(struct mlx5_core_dev *mdev, struct ptp_clock_request *rq,
				      u32 *field_select, u32 *out_pulse_duration_ns,
				      u64 *period, u64 *time_stamp)
{
	struct mlx5_pps *pps_info = &mdev->clock.pps_info;
	struct ptp_clock_time *time = &rq->perout.start;
	struct timespec64 ts;

	ts.tv_sec = rq->perout.period.sec;
	ts.tv_nsec = rq->perout.period.nsec;
	if (timespec64_to_ns(&ts) < pps_info->min_npps_period) {
		mlx5_core_err(mdev, "NPPS period is lower than minimal npps period %llu\n",
			      pps_info->min_npps_period);
		return -EINVAL;
	}
	*period = perout_conf_real_time(rq->perout.period.sec, rq->perout.period.nsec);

	if (mlx5_perout_conf_out_pulse_duration(mdev, rq, out_pulse_duration_ns))
		return -EINVAL;

	*time_stamp = perout_conf_real_time(time->sec, time->nsec);
	*field_select |= MLX5_MTPPS_FS_NPPS_PERIOD |
			 MLX5_MTPPS_FS_OUT_PULSE_DURATION_NS;

	return 0;
}
702
mlx5_perout_verify_flags(struct mlx5_core_dev * mdev,unsigned int flags)703 static bool mlx5_perout_verify_flags(struct mlx5_core_dev *mdev, unsigned int flags)
704 {
705 return ((!mlx5_npps_real_time_supported(mdev) && flags) ||
706 (mlx5_npps_real_time_supported(mdev) && flags & ~PTP_PEROUT_DUTY_CYCLE));
707 }
708
mlx5_perout_configure(struct ptp_clock_info * ptp,struct ptp_clock_request * rq,int on)709 static int mlx5_perout_configure(struct ptp_clock_info *ptp,
710 struct ptp_clock_request *rq,
711 int on)
712 {
713 struct mlx5_clock *clock =
714 container_of(ptp, struct mlx5_clock, ptp_info);
715 struct mlx5_core_dev *mdev =
716 container_of(clock, struct mlx5_core_dev, clock);
717 bool rt_mode = mlx5_real_time_mode(mdev);
718 u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
719 u32 out_pulse_duration_ns = 0;
720 u32 field_select = 0;
721 u64 npps_period = 0;
722 u64 time_stamp = 0;
723 u8 pin_mode = 0;
724 u8 pattern = 0;
725 int pin = -1;
726 int err = 0;
727
728 if (!MLX5_PPS_CAP(mdev))
729 return -EOPNOTSUPP;
730
731 /* Reject requests with unsupported flags */
732 if (mlx5_perout_verify_flags(mdev, rq->perout.flags))
733 return -EOPNOTSUPP;
734
735 if (rq->perout.index >= clock->ptp_info.n_pins)
736 return -EINVAL;
737
738 field_select = MLX5_MTPPS_FS_ENABLE;
739 pin = ptp_find_pin(clock->ptp, PTP_PF_PEROUT, rq->perout.index);
740 if (pin < 0)
741 return -EBUSY;
742
743 if (on) {
744 bool rt_mode = mlx5_real_time_mode(mdev);
745
746 pin_mode = MLX5_PIN_MODE_OUT;
747 pattern = MLX5_OUT_PATTERN_PERIODIC;
748
749 if (rt_mode && rq->perout.start.sec > U32_MAX)
750 return -EINVAL;
751
752 field_select |= MLX5_MTPPS_FS_PIN_MODE |
753 MLX5_MTPPS_FS_PATTERN |
754 MLX5_MTPPS_FS_TIME_STAMP;
755
756 if (mlx5_npps_real_time_supported(mdev))
757 err = perout_conf_npps_real_time(mdev, rq, &field_select,
758 &out_pulse_duration_ns, &npps_period,
759 &time_stamp);
760 else
761 err = perout_conf_1pps(mdev, rq, &time_stamp, rt_mode);
762 if (err)
763 return err;
764 }
765
766 MLX5_SET(mtpps_reg, in, pin, pin);
767 MLX5_SET(mtpps_reg, in, pin_mode, pin_mode);
768 MLX5_SET(mtpps_reg, in, pattern, pattern);
769 MLX5_SET(mtpps_reg, in, enable, on);
770 MLX5_SET64(mtpps_reg, in, time_stamp, time_stamp);
771 MLX5_SET(mtpps_reg, in, field_select, field_select);
772 MLX5_SET64(mtpps_reg, in, npps_period, npps_period);
773 MLX5_SET(mtpps_reg, in, out_pulse_duration_ns, out_pulse_duration_ns);
774 err = mlx5_set_mtpps(mdev, in, sizeof(in));
775 if (err)
776 return err;
777
778 if (rt_mode)
779 return 0;
780
781 return mlx5_set_mtppse(mdev, pin, 0,
782 MLX5_EVENT_MODE_REPETETIVE & on);
783 }
784
mlx5_pps_configure(struct ptp_clock_info * ptp,struct ptp_clock_request * rq,int on)785 static int mlx5_pps_configure(struct ptp_clock_info *ptp,
786 struct ptp_clock_request *rq,
787 int on)
788 {
789 struct mlx5_clock *clock =
790 container_of(ptp, struct mlx5_clock, ptp_info);
791
792 clock->pps_info.enabled = !!on;
793 return 0;
794 }
795
mlx5_ptp_enable(struct ptp_clock_info * ptp,struct ptp_clock_request * rq,int on)796 static int mlx5_ptp_enable(struct ptp_clock_info *ptp,
797 struct ptp_clock_request *rq,
798 int on)
799 {
800 switch (rq->type) {
801 case PTP_CLK_REQ_EXTTS:
802 return mlx5_extts_configure(ptp, rq, on);
803 case PTP_CLK_REQ_PEROUT:
804 return mlx5_perout_configure(ptp, rq, on);
805 case PTP_CLK_REQ_PPS:
806 return mlx5_pps_configure(ptp, rq, on);
807 default:
808 return -EOPNOTSUPP;
809 }
810 return 0;
811 }
812
/* cap_pin_x_mode bits reported by MTPPS: directions each pin supports. */
enum {
	MLX5_MTPPS_REG_CAP_PIN_X_MODE_SUPPORT_PPS_IN = BIT(0),
	MLX5_MTPPS_REG_CAP_PIN_X_MODE_SUPPORT_PPS_OUT = BIT(1),
};
817
/* ptp_clock_info.verify: 0 if @pin supports @func, non-zero otherwise.
 * Capability bits were cached from the MTPPS query in mlx5_get_pps_caps().
 * @chan is unused; PTP_PF_PHYSYNC and anything unknown is rejected.
 */
static int mlx5_ptp_verify(struct ptp_clock_info *ptp, unsigned int pin,
			   enum ptp_pin_function func, unsigned int chan)
{
	struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock,
						ptp_info);

	switch (func) {
	case PTP_PF_NONE:
		return 0;
	case PTP_PF_EXTTS:
		return !(clock->pps_info.pin_caps[pin] &
			 MLX5_MTPPS_REG_CAP_PIN_X_MODE_SUPPORT_PPS_IN);
	case PTP_PF_PEROUT:
		return !(clock->pps_info.pin_caps[pin] &
			 MLX5_MTPPS_REG_CAP_PIN_X_MODE_SUPPORT_PPS_OUT);
	default:
		return -EOPNOTSUPP;
	}
}
837
/* Template ptp_clock_info copied per device. Pin counts are zero and
 * enable/verify are NULL here; mlx5_get_pps_caps() and
 * mlx5_init_pin_config() fill them in when the device exposes PPS pins.
 */
static const struct ptp_clock_info mlx5_ptp_clock_info = {
	.owner = THIS_MODULE,
	.name = "mlx5_ptp",
	.max_adj = 50000000,
	.n_alarm = 0,
	.n_ext_ts = 0,
	.n_per_out = 0,
	.n_pins = 0,
	.pps = 0,
	.adjfine = mlx5_ptp_adjfine,
	.adjphase = mlx5_ptp_adjphase,
	.getmaxphase = mlx5_ptp_getmaxphase,
	.adjtime = mlx5_ptp_adjtime,
	.gettimex64 = mlx5_ptp_gettimex,
	.settime64 = mlx5_ptp_settime,
	.enable = NULL,
	.verify = NULL,
	.do_aux_work = mlx5_timestamp_overflow,
};
857
/* Query the MTPPS register for a single @pin, writing the raw register
 * contents into @mtpps (of @mtpps_size bytes).
 */
static int mlx5_query_mtpps_pin_mode(struct mlx5_core_dev *mdev, u8 pin,
				     u32 *mtpps, u32 mtpps_size)
{
	u32 query[MLX5_ST_SZ_DW(mtpps_reg)] = {};

	MLX5_SET(mtpps_reg, query, pin, pin);

	return mlx5_core_access_reg(mdev, query, sizeof(query), mtpps,
				    mtpps_size, MLX5_REG_MTPPS, 0, 0);
}
868
/* Map the current hardware state of @pin to a PTP pin function:
 * EXTTS for an enabled input, PEROUT for an enabled output, NONE when the
 * pin is disabled, in an unknown mode, or the query fails.
 */
static int mlx5_get_pps_pin_mode(struct mlx5_clock *clock, u8 pin)
{
	struct mlx5_core_dev *mdev = container_of(clock, struct mlx5_core_dev, clock);

	u32 out[MLX5_ST_SZ_DW(mtpps_reg)] = {};
	int err;

	err = mlx5_query_mtpps_pin_mode(mdev, pin, out, sizeof(out));
	if (err || !MLX5_GET(mtpps_reg, out, enable))
		return PTP_PF_NONE;

	switch (MLX5_GET(mtpps_reg, out, pin_mode)) {
	case MLX5_PIN_MODE_IN:
		return PTP_PF_EXTTS;
	case MLX5_PIN_MODE_OUT:
		return PTP_PF_PEROUT;
	default:
		return PTP_PF_NONE;
	}
}
890
mlx5_init_pin_config(struct mlx5_clock * clock)891 static void mlx5_init_pin_config(struct mlx5_clock *clock)
892 {
893 int i;
894
895 if (!clock->ptp_info.n_pins)
896 return;
897
898 clock->ptp_info.pin_config =
899 kcalloc(clock->ptp_info.n_pins,
900 sizeof(*clock->ptp_info.pin_config),
901 GFP_KERNEL);
902 if (!clock->ptp_info.pin_config)
903 return;
904 clock->ptp_info.enable = mlx5_ptp_enable;
905 clock->ptp_info.verify = mlx5_ptp_verify;
906 clock->ptp_info.pps = 1;
907
908 for (i = 0; i < clock->ptp_info.n_pins; i++) {
909 snprintf(clock->ptp_info.pin_config[i].name,
910 sizeof(clock->ptp_info.pin_config[i].name),
911 "mlx5_pps%d", i);
912 clock->ptp_info.pin_config[i].index = i;
913 clock->ptp_info.pin_config[i].func = mlx5_get_pps_pin_mode(clock, i);
914 clock->ptp_info.pin_config[i].chan = 0;
915 }
916 }
917
/* Cache the device PPS capabilities from one MTPPS query: pin counts,
 * minimum NPPS period / pulse duration (reported as log2), and the
 * per-pin direction capability bits used by mlx5_ptp_verify().
 */
static void mlx5_get_pps_caps(struct mlx5_core_dev *mdev)
{
	struct mlx5_clock *clock = &mdev->clock;
	u32 out[MLX5_ST_SZ_DW(mtpps_reg)] = {0};

	mlx5_query_mtpps(mdev, out, sizeof(out));

	clock->ptp_info.n_pins = MLX5_GET(mtpps_reg, out,
					  cap_number_of_pps_pins);
	clock->ptp_info.n_ext_ts = MLX5_GET(mtpps_reg, out,
					    cap_max_num_of_pps_in_pins);
	clock->ptp_info.n_per_out = MLX5_GET(mtpps_reg, out,
					     cap_max_num_of_pps_out_pins);

	if (MLX5_CAP_MCAM_FEATURE(mdev, npps_period))
		clock->pps_info.min_npps_period = 1 << MLX5_GET(mtpps_reg, out,
								cap_log_min_npps_period);
	if (MLX5_CAP_MCAM_FEATURE(mdev, out_pulse_duration_ns))
		clock->pps_info.min_out_pulse_duration_ns = 1 << MLX5_GET(mtpps_reg, out,
									  cap_log_min_out_pulse_duration_ns);

	clock->pps_info.pin_caps[0] = MLX5_GET(mtpps_reg, out, cap_pin_0_mode);
	clock->pps_info.pin_caps[1] = MLX5_GET(mtpps_reg, out, cap_pin_1_mode);
	clock->pps_info.pin_caps[2] = MLX5_GET(mtpps_reg, out, cap_pin_2_mode);
	clock->pps_info.pin_caps[3] = MLX5_GET(mtpps_reg, out, cap_pin_3_mode);
	clock->pps_info.pin_caps[4] = MLX5_GET(mtpps_reg, out, cap_pin_4_mode);
	clock->pps_info.pin_caps[5] = MLX5_GET(mtpps_reg, out, cap_pin_5_mode);
	clock->pps_info.pin_caps[6] = MLX5_GET(mtpps_reg, out, cap_pin_6_mode);
	clock->pps_info.pin_caps[7] = MLX5_GET(mtpps_reg, out, cap_pin_7_mode);
}
948
ts_next_sec(struct timespec64 * ts)949 static void ts_next_sec(struct timespec64 *ts)
950 {
951 ts->tv_sec += 1;
952 ts->tv_nsec = 0;
953 }
954
perout_conf_next_event_timer(struct mlx5_core_dev * mdev,struct mlx5_clock * clock)955 static u64 perout_conf_next_event_timer(struct mlx5_core_dev *mdev,
956 struct mlx5_clock *clock)
957 {
958 struct timespec64 ts;
959 s64 target_ns;
960
961 mlx5_ptp_gettimex(&clock->ptp_info, &ts, NULL);
962 ts_next_sec(&ts);
963 target_ns = timespec64_to_ns(&ts);
964
965 return find_target_cycles(mdev, target_ns);
966 }
967
mlx5_pps_event(struct notifier_block * nb,unsigned long type,void * data)968 static int mlx5_pps_event(struct notifier_block *nb,
969 unsigned long type, void *data)
970 {
971 struct mlx5_clock *clock = mlx5_nb_cof(nb, struct mlx5_clock, pps_nb);
972 struct ptp_clock_event ptp_event;
973 struct mlx5_eqe *eqe = data;
974 int pin = eqe->data.pps.pin;
975 struct mlx5_core_dev *mdev;
976 unsigned long flags;
977 u64 ns;
978
979 mdev = container_of(clock, struct mlx5_core_dev, clock);
980
981 switch (clock->ptp_info.pin_config[pin].func) {
982 case PTP_PF_EXTTS:
983 ptp_event.index = pin;
984 ptp_event.timestamp = mlx5_real_time_mode(mdev) ?
985 mlx5_real_time_cyc2time(clock,
986 be64_to_cpu(eqe->data.pps.time_stamp)) :
987 mlx5_timecounter_cyc2time(clock,
988 be64_to_cpu(eqe->data.pps.time_stamp));
989 if (clock->pps_info.enabled) {
990 ptp_event.type = PTP_CLOCK_PPSUSR;
991 ptp_event.pps_times.ts_real =
992 ns_to_timespec64(ptp_event.timestamp);
993 } else {
994 ptp_event.type = PTP_CLOCK_EXTTS;
995 }
996 /* TODOL clock->ptp can be NULL if ptp_clock_register fails */
997 ptp_clock_event(clock->ptp, &ptp_event);
998 break;
999 case PTP_PF_PEROUT:
1000 ns = perout_conf_next_event_timer(mdev, clock);
1001 write_seqlock_irqsave(&clock->lock, flags);
1002 clock->pps_info.start[pin] = ns;
1003 write_sequnlock_irqrestore(&clock->lock, flags);
1004 schedule_work(&clock->pps_info.out_work);
1005 break;
1006 default:
1007 mlx5_core_err(mdev, " Unhandled clock PPS event, func %d\n",
1008 clock->ptp_info.pin_config[pin].func);
1009 }
1010
1011 return NOTIFY_OK;
1012 }
1013
/* Initialize the cyclecounter/timecounter pair over the device's internal
 * free-running timer, derived from the device frequency capability, and
 * seed the timecounter with the current wall-clock time.
 */
static void mlx5_timecounter_init(struct mlx5_core_dev *mdev)
{
	struct mlx5_clock *clock = &mdev->clock;
	struct mlx5_timer *timer = &clock->timer;
	u32 dev_freq;

	dev_freq = MLX5_CAP_GEN(mdev, device_frequency_khz);
	timer->cycles.read = read_internal_timer;
	timer->cycles.shift = mlx5_ptp_shift_constant(dev_freq);
	timer->cycles.mult = clocksource_khz2mult(dev_freq,
						  timer->cycles.shift);
	/* Remember the unadjusted mult so frequency adjustments can be
	 * applied relative to the nominal value.
	 */
	timer->nominal_c_mult = timer->cycles.mult;
	/* NOTE(review): 41 presumably matches the width of the device's
	 * free-running counter — confirm against the PRM.
	 */
	timer->cycles.mask = CLOCKSOURCE_MASK(41);

	timecounter_init(&timer->tc, &timer->cycles,
			 ktime_to_ns(ktime_get_real()));
}
1031
/* Compute the period (in jiffies) at which the overflow watchdog must run
 * so the timecounter is read often enough to detect counter wrap-around.
 * The result is mirrored into the user-visible clock_info page when one
 * was allocated.
 */
static void mlx5_init_overflow_period(struct mlx5_clock *clock)
{
	struct mlx5_core_dev *mdev = container_of(clock, struct mlx5_core_dev, clock);
	struct mlx5_ib_clock_info *clock_info = mdev->clock_info;
	struct mlx5_timer *timer = &clock->timer;
	u64 overflow_cycles;
	u64 frac = 0;
	u64 ns;

	/* Calculate period in seconds to call the overflow watchdog - to make
	 * sure counter is checked at least twice every wrap around.
	 * The period is calculated as the minimum between max HW cycles count
	 * (The clock source mask) and max amount of cycles that can be
	 * multiplied by clock multiplier where the result doesn't exceed
	 * 64bits.
	 */
	overflow_cycles = div64_u64(~0ULL >> 1, timer->cycles.mult);
	overflow_cycles = min(overflow_cycles, div_u64(timer->cycles.mask, 3));

	ns = cyclecounter_cyc2ns(&timer->cycles, overflow_cycles,
				 frac, &frac);
	/* Convert nanoseconds to jiffies (NSEC_PER_SEC / HZ = ns per jiffy) */
	do_div(ns, NSEC_PER_SEC / HZ);
	timer->overflow_period = ns;

	/* A zero period would disable the watchdog; fall back to once per
	 * second rather than let the counter wrap unnoticed.
	 */
	if (!timer->overflow_period) {
		timer->overflow_period = HZ;
		mlx5_core_warn(mdev,
			       "invalid overflow period, overflow_work is scheduled once per second\n");
	}

	if (clock_info)
		clock_info->overflow_period = timer->overflow_period;
}
1065
mlx5_init_clock_info(struct mlx5_core_dev * mdev)1066 static void mlx5_init_clock_info(struct mlx5_core_dev *mdev)
1067 {
1068 struct mlx5_clock *clock = &mdev->clock;
1069 struct mlx5_ib_clock_info *info;
1070 struct mlx5_timer *timer;
1071
1072 mdev->clock_info = (struct mlx5_ib_clock_info *)get_zeroed_page(GFP_KERNEL);
1073 if (!mdev->clock_info) {
1074 mlx5_core_warn(mdev, "Failed to allocate IB clock info page\n");
1075 return;
1076 }
1077
1078 info = mdev->clock_info;
1079 timer = &clock->timer;
1080
1081 info->nsec = timer->tc.nsec;
1082 info->cycles = timer->tc.cycle_last;
1083 info->mask = timer->cycles.mask;
1084 info->mult = timer->nominal_c_mult;
1085 info->shift = timer->cycles.shift;
1086 info->frac = timer->tc.frac;
1087 }
1088
/* Read the MTUTC register and, if the device reports a maximum frequency
 * adjustment, use it to cap the max_adj advertised to the PTP core
 * (bounded by S32_MAX).  On query failure the default max_adj from
 * mlx5_ptp_clock_info is left untouched.
 */
static void mlx5_init_timer_max_freq_adjustment(struct mlx5_core_dev *mdev)
{
	struct mlx5_clock *clock = &mdev->clock;
	u32 out[MLX5_ST_SZ_DW(mtutc_reg)] = {};
	u32 in[MLX5_ST_SZ_DW(mtutc_reg)] = {};
	u8 log_max_freq_adjustment = 0;
	int err;

	err = mlx5_core_access_reg(mdev, in, sizeof(in), out, sizeof(out),
				   MLX5_REG_MTUTC, 0, 0);
	if (!err)
		log_max_freq_adjustment =
			MLX5_GET(mtutc_reg, out, log_max_freq_adjustment);

	/* NOTE(review): 1 << log_max_freq_adjustment is undefined behavior
	 * if the device ever reports a value >= 31; presumably the field is
	 * bounded well below that — confirm against the PRM.
	 */
	if (log_max_freq_adjustment)
		clock->ptp_info.max_adj =
			min(S32_MAX, 1 << log_max_freq_adjustment);
}
1107
/* Configure the PHC side of the mlx5 clock: copy the template ptp_clock_info,
 * apply device-specific limits and optional callbacks, then bring up the
 * timecounter.  Ordering matters: the clock_info page and overflow period
 * both derive from the timecounter initialized here.
 */
static void mlx5_init_timer_clock(struct mlx5_core_dev *mdev)
{
	struct mlx5_clock *clock = &mdev->clock;

	/* Configure the PHC */
	clock->ptp_info = mlx5_ptp_clock_info;

	if (MLX5_CAP_MCAM_REG(mdev, mtutc))
		mlx5_init_timer_max_freq_adjustment(mdev);

#ifdef CONFIG_X86
	/* Cross-timestamping needs both MTPTM/MTCTR register support and the
	 * CPU's Always Running Timer (ART) feature.
	 */
	if (MLX5_CAP_MCAM_REG3(mdev, mtptm) &&
	    MLX5_CAP_MCAM_REG3(mdev, mtctr) && boot_cpu_has(X86_FEATURE_ART))
		clock->ptp_info.getcrosststamp = mlx5_ptp_getcrosststamp;
#endif /* CONFIG_X86 */

	mlx5_timecounter_init(mdev);
	mlx5_init_clock_info(mdev);
	mlx5_init_overflow_period(clock);

	/* In real-time mode, seed the device clock with current wall time */
	if (mlx5_real_time_mode(mdev)) {
		struct timespec64 ts;

		ktime_get_real_ts64(&ts);
		mlx5_ptp_settime(&clock->ptp_info, &ts);
	}
}
1135
mlx5_init_pps(struct mlx5_core_dev * mdev)1136 static void mlx5_init_pps(struct mlx5_core_dev *mdev)
1137 {
1138 struct mlx5_clock *clock = &mdev->clock;
1139
1140 if (!MLX5_PPS_CAP(mdev))
1141 return;
1142
1143 mlx5_get_pps_caps(mdev);
1144 mlx5_init_pin_config(clock);
1145 }
1146
/* Top-level HW clock init: set up the timecounter and 1PPS structures,
 * register the PTP clock with the kernel and subscribe to PPS events.
 * Registration failure is tolerated — clock->ptp is left NULL and the
 * periodic worker is not scheduled.
 */
void mlx5_init_clock(struct mlx5_core_dev *mdev)
{
	struct mlx5_clock *clock = &mdev->clock;

	if (!MLX5_CAP_GEN(mdev, device_frequency_khz)) {
		mlx5_core_warn(mdev, "invalid device_frequency_khz, aborting HW clock init\n");
		return;
	}

	seqlock_init(&clock->lock);
	INIT_WORK(&clock->pps_info.out_work, mlx5_pps_out);

	/* Initialize the device clock */
	mlx5_init_timer_clock(mdev);

	/* Initialize 1PPS data structures */
	mlx5_init_pps(mdev);

	clock->ptp = ptp_clock_register(&clock->ptp_info,
					&mdev->pdev->dev);
	if (IS_ERR(clock->ptp)) {
		mlx5_core_warn(mdev, "ptp_clock_register failed %ld\n",
			       PTR_ERR(clock->ptp));
		/* NULL marks "no PTP clock" for the event path and cleanup */
		clock->ptp = NULL;
	}

	MLX5_NB_INIT(&clock->pps_nb, mlx5_pps_event, PPS_EVENT);
	mlx5_eq_notifier_register(mdev, &clock->pps_nb);

	if (clock->ptp)
		ptp_schedule_worker(clock->ptp, 0);
}
1179
/* Tear down the HW clock in reverse order of mlx5_init_clock(): stop new
 * PPS events first (notifier), then unregister the PTP clock, cancel any
 * in-flight output work and free the shared clock-info page and pin table.
 */
void mlx5_cleanup_clock(struct mlx5_core_dev *mdev)
{
	struct mlx5_clock *clock = &mdev->clock;

	/* Mirrors the early-out in mlx5_init_clock(): nothing was set up */
	if (!MLX5_CAP_GEN(mdev, device_frequency_khz))
		return;

	mlx5_eq_notifier_unregister(mdev, &clock->pps_nb);
	if (clock->ptp) {
		ptp_clock_unregister(clock->ptp);
		clock->ptp = NULL;
	}

	cancel_work_sync(&clock->pps_info.out_work);

	if (mdev->clock_info) {
		free_page((unsigned long)mdev->clock_info);
		mdev->clock_info = NULL;
	}

	kfree(clock->ptp_info.pin_config);
}
1202