// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */

#include <linux/dpll.h>
#include <linux/mlx5/driver.h>

/* This structure represents a reference to DPLL, one is created
 * per mdev instance.
 */
struct mlx5_dpll {
	struct dpll_device *dpll;
	struct dpll_pin *dpll_pin;
	struct mlx5_core_dev *mdev;
	struct workqueue_struct *wq;
	struct delayed_work work;
	struct {
		bool valid;
		enum dpll_lock_status lock_status;
		enum dpll_pin_state pin_state;
	} last;
	struct notifier_block mdev_nb;
	struct net_device *tracking_netdev;
};

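/* Read the local clock identity from the MSECQ register. It is used as the
 * clock id when looking up the DPLL device and pin that may be shared by
 * multiple mdev instances of the same device.
 */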
static int mlx5_dpll_clock_id_get(struct mlx5_core_dev *mdev, u64 *clock_id)
{
	u32 out[MLX5_ST_SZ_DW(msecq_reg)] = {};
	u32 in[MLX5_ST_SZ_DW(msecq_reg)] = {};
	int err;

	err = mlx5_core_access_reg(mdev, in, sizeof(in), out, sizeof(out),
				   MLX5_REG_MSECQ, 0, 0);
	if (err)
		return err;
	*clock_id = MLX5_GET64(msecq_reg, out, local_clock_identity);
	return 0;
}

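/* Snapshot of the SyncE state as reported by the MSEES register. */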
struct mlx5_dpll_synce_status {
	enum mlx5_msees_admin_status admin_status;
	enum mlx5_msees_oper_status oper_status;
	bool ho_acq;
	bool oper_freq_measure;
	enum mlx5_msees_failure_reason failure_reason;
	s32 frequency_diff;
};

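/* Query the current SyncE status from the MSEES register. */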
static int
mlx5_dpll_synce_status_get(struct mlx5_core_dev *mdev,
			   struct mlx5_dpll_synce_status *synce_status)
{
	u32 out[MLX5_ST_SZ_DW(msees_reg)] = {};
	u32 in[MLX5_ST_SZ_DW(msees_reg)] = {};
	int err;

	err = mlx5_core_access_reg(mdev, in, sizeof(in), out, sizeof(out),
				   MLX5_REG_MSEES, 0, 0);
	if (err)
		return err;
	synce_status->admin_status = MLX5_GET(msees_reg, out, admin_status);
	synce_status->oper_status = MLX5_GET(msees_reg, out, oper_status);
	synce_status->ho_acq = MLX5_GET(msees_reg, out, ho_acq);
	synce_status->oper_freq_measure = MLX5_GET(msees_reg, out, oper_freq_measure);
	synce_status->failure_reason = MLX5_GET(msees_reg, out, failure_reason);
	synce_status->frequency_diff = MLX5_GET(msees_reg, out, frequency_diff);
	return 0;
}

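/* Set the SyncE admin status and enable frequency measurement by writing
 * the MSEES register.
 */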
static int
mlx5_dpll_synce_status_set(struct mlx5_core_dev *mdev,
			   enum mlx5_msees_admin_status admin_status)
{
	u32 out[MLX5_ST_SZ_DW(msees_reg)] = {};
	u32 in[MLX5_ST_SZ_DW(msees_reg)] = {};

	MLX5_SET(msees_reg, in, field_select,
		 MLX5_MSEES_FIELD_SELECT_ENABLE |
		 MLX5_MSEES_FIELD_SELECT_ADMIN_FREQ_MEASURE |
		 MLX5_MSEES_FIELD_SELECT_ADMIN_STATUS);
	MLX5_SET(msees_reg, in, admin_status, admin_status);
	MLX5_SET(msees_reg, in, admin_freq_measure, true);
	return mlx5_core_access_reg(mdev, in, sizeof(in), out, sizeof(out),
				    MLX5_REG_MSEES, 0, 1);
}

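/* Map the MSEES operational status onto the dpll subsystem lock status. */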
static enum dpll_lock_status
mlx5_dpll_lock_status_get(struct mlx5_dpll_synce_status *synce_status)
{
	switch (synce_status->oper_status) {
	case MLX5_MSEES_OPER_STATUS_SELF_TRACK:
		fallthrough;
	case MLX5_MSEES_OPER_STATUS_OTHER_TRACK:
		return synce_status->ho_acq ? DPLL_LOCK_STATUS_LOCKED_HO_ACQ :
					      DPLL_LOCK_STATUS_LOCKED;
	case MLX5_MSEES_OPER_STATUS_HOLDOVER:
		fallthrough;
	case MLX5_MSEES_OPER_STATUS_FAIL_HOLDOVER:
		return DPLL_LOCK_STATUS_HOLDOVER;
	default:
		return DPLL_LOCK_STATUS_UNLOCKED;
	}
}

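/* Translate the MSEES failure reason into a dpll lock status error. */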
static enum dpll_lock_status_error
mlx5_dpll_lock_status_error_get(struct mlx5_dpll_synce_status *synce_status)
{
	switch (synce_status->oper_status) {
	case MLX5_MSEES_OPER_STATUS_FAIL_HOLDOVER:
		fallthrough;
	case MLX5_MSEES_OPER_STATUS_FAIL_FREE_RUNNING:
		switch (synce_status->failure_reason) {
		case MLX5_MSEES_FAILURE_REASON_PORT_DOWN:
			return DPLL_LOCK_STATUS_ERROR_MEDIA_DOWN;
		case MLX5_MSEES_FAILURE_REASON_TOO_HIGH_FREQUENCY_DIFF:
			return DPLL_LOCK_STATUS_ERROR_FRACTIONAL_FREQUENCY_OFFSET_TOO_HIGH;
		default:
			return DPLL_LOCK_STATUS_ERROR_UNDEFINED;
		}
	default:
		return DPLL_LOCK_STATUS_ERROR_NONE;
	}
}

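/* The pin is reported as connected only when tracking is both
 * administratively enabled and operationally active.
 */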
static enum dpll_pin_state
mlx5_dpll_pin_state_get(struct mlx5_dpll_synce_status *synce_status)
{
	return (synce_status->admin_status == MLX5_MSEES_ADMIN_STATUS_TRACK &&
		(synce_status->oper_status == MLX5_MSEES_OPER_STATUS_SELF_TRACK ||
		 synce_status->oper_status == MLX5_MSEES_OPER_STATUS_OTHER_TRACK)) ?
	       DPLL_PIN_STATE_CONNECTED : DPLL_PIN_STATE_DISCONNECTED;
}

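/* The fractional frequency offset is only meaningful while frequency
 * measurement is operational; otherwise return -ENODATA.
 */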
static int
mlx5_dpll_pin_ffo_get(struct mlx5_dpll_synce_status *synce_status,
		      s64 *ffo)
{
	if (!synce_status->oper_freq_measure)
		return -ENODATA;
	*ffo = synce_status->frequency_diff;
	return 0;
}

static int
mlx5_dpll_device_lock_status_get(const struct dpll_device *dpll, void *priv,
				 enum dpll_lock_status *status,
				 enum dpll_lock_status_error *status_error,
				 struct netlink_ext_ack *extack)
{
	struct mlx5_dpll_synce_status synce_status;
	struct mlx5_dpll *mdpll = priv;
	int err;

	err = mlx5_dpll_synce_status_get(mdpll->mdev, &synce_status);
	if (err)
		return err;
	*status = mlx5_dpll_lock_status_get(&synce_status);
	*status_error = mlx5_dpll_lock_status_error_get(&synce_status);
	return 0;
}

static int mlx5_dpll_device_mode_get(const struct dpll_device *dpll,
				     void *priv, enum dpll_mode *mode,
				     struct netlink_ext_ack *extack)
{
	*mode = DPLL_MODE_MANUAL;
	return 0;
}

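/* SyncE SSM and enhanced SSM quality level codes for network option 1.
 * Several quality levels share the same SSM code, so the regular and
 * enhanced codes are combined below to let a single switch statement
 * tell them apart.
 */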
enum {
	MLX5_DPLL_SSM_CODE_PRC = 0b0010,
	MLX5_DPLL_SSM_CODE_SSU_A = 0b0100,
	MLX5_DPLL_SSM_CODE_SSU_B = 0b1000,
	MLX5_DPLL_SSM_CODE_EEC1 = 0b1011,
	MLX5_DPLL_SSM_CODE_PRTC = 0b0010,
	MLX5_DPLL_SSM_CODE_EPRTC = 0b0010,
	MLX5_DPLL_SSM_CODE_EEEC = 0b1011,
	MLX5_DPLL_SSM_CODE_EPRC = 0b0010,
};

enum {
	MLX5_DPLL_ENHANCED_SSM_CODE_PRC = 0xff,
	MLX5_DPLL_ENHANCED_SSM_CODE_SSU_A = 0xff,
	MLX5_DPLL_ENHANCED_SSM_CODE_SSU_B = 0xff,
	MLX5_DPLL_ENHANCED_SSM_CODE_EEC1 = 0xff,
	MLX5_DPLL_ENHANCED_SSM_CODE_PRTC = 0x20,
	MLX5_DPLL_ENHANCED_SSM_CODE_EPRTC = 0x21,
	MLX5_DPLL_ENHANCED_SSM_CODE_EEEC = 0x22,
	MLX5_DPLL_ENHANCED_SSM_CODE_EPRC = 0x23,
};

#define __MLX5_DPLL_SSM_COMBINED_CODE(ssm_code, enhanced_ssm_code)	\
	((ssm_code) | ((enhanced_ssm_code) << 8))

#define MLX5_DPLL_SSM_COMBINED_CODE(type)				\
	__MLX5_DPLL_SSM_COMBINED_CODE(MLX5_DPLL_SSM_CODE_##type,	\
				      MLX5_DPLL_ENHANCED_SSM_CODE_##type)

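/* Report the clock quality level derived from the local SSM and enhanced
 * SSM codes read from the MSECQ register. Only network option 1 is
 * supported.
 */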
static int mlx5_dpll_clock_quality_level_get(const struct dpll_device *dpll,
					     void *priv, unsigned long *qls,
					     struct netlink_ext_ack *extack)
{
	u8 network_option, ssm_code, enhanced_ssm_code;
	u32 out[MLX5_ST_SZ_DW(msecq_reg)] = {};
	u32 in[MLX5_ST_SZ_DW(msecq_reg)] = {};
	struct mlx5_dpll *mdpll = priv;
	int err;

	err = mlx5_core_access_reg(mdpll->mdev, in, sizeof(in),
				   out, sizeof(out), MLX5_REG_MSECQ, 0, 0);
	if (err)
		return err;
	network_option = MLX5_GET(msecq_reg, out, network_option);
	if (network_option != 1)
		goto errout;
	ssm_code = MLX5_GET(msecq_reg, out, local_ssm_code);
	enhanced_ssm_code = MLX5_GET(msecq_reg, out, local_enhanced_ssm_code);

	switch (__MLX5_DPLL_SSM_COMBINED_CODE(ssm_code, enhanced_ssm_code)) {
	case MLX5_DPLL_SSM_COMBINED_CODE(PRC):
		__set_bit(DPLL_CLOCK_QUALITY_LEVEL_ITU_OPT1_PRC, qls);
		return 0;
	case MLX5_DPLL_SSM_COMBINED_CODE(SSU_A):
		__set_bit(DPLL_CLOCK_QUALITY_LEVEL_ITU_OPT1_SSU_A, qls);
		return 0;
	case MLX5_DPLL_SSM_COMBINED_CODE(SSU_B):
		__set_bit(DPLL_CLOCK_QUALITY_LEVEL_ITU_OPT1_SSU_B, qls);
		return 0;
	case MLX5_DPLL_SSM_COMBINED_CODE(EEC1):
		__set_bit(DPLL_CLOCK_QUALITY_LEVEL_ITU_OPT1_EEC1, qls);
		return 0;
	case MLX5_DPLL_SSM_COMBINED_CODE(PRTC):
		__set_bit(DPLL_CLOCK_QUALITY_LEVEL_ITU_OPT1_PRTC, qls);
		return 0;
	case MLX5_DPLL_SSM_COMBINED_CODE(EPRTC):
		__set_bit(DPLL_CLOCK_QUALITY_LEVEL_ITU_OPT1_EPRTC, qls);
		return 0;
	case MLX5_DPLL_SSM_COMBINED_CODE(EEEC):
		__set_bit(DPLL_CLOCK_QUALITY_LEVEL_ITU_OPT1_EEEC, qls);
		return 0;
	case MLX5_DPLL_SSM_COMBINED_CODE(EPRC):
		__set_bit(DPLL_CLOCK_QUALITY_LEVEL_ITU_OPT1_EPRC, qls);
		return 0;
	}
errout:
	NL_SET_ERR_MSG_MOD(extack, "Invalid clock quality level obtained from firmware\n");
	return -EINVAL;
}

static const struct dpll_device_ops mlx5_dpll_device_ops = {
	.lock_status_get = mlx5_dpll_device_lock_status_get,
	.mode_get = mlx5_dpll_device_mode_get,
	.clock_quality_level_get = mlx5_dpll_clock_quality_level_get,
};

static int mlx5_dpll_pin_direction_get(const struct dpll_pin *pin,
				       void *pin_priv,
				       const struct dpll_device *dpll,
				       void *dpll_priv,
				       enum dpll_pin_direction *direction,
				       struct netlink_ext_ack *extack)
{
	*direction = DPLL_PIN_DIRECTION_INPUT;
	return 0;
}

static int mlx5_dpll_state_on_dpll_get(const struct dpll_pin *pin,
				       void *pin_priv,
				       const struct dpll_device *dpll,
				       void *dpll_priv,
				       enum dpll_pin_state *state,
				       struct netlink_ext_ack *extack)
{
	struct mlx5_dpll_synce_status synce_status;
	struct mlx5_dpll *mdpll = pin_priv;
	int err;

	err = mlx5_dpll_synce_status_get(mdpll->mdev, &synce_status);
	if (err)
		return err;
	*state = mlx5_dpll_pin_state_get(&synce_status);
	return 0;
}

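/* Connecting the pin enables SyncE tracking; disconnecting it returns the
 * device to free-running mode.
 */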
static int mlx5_dpll_state_on_dpll_set(const struct dpll_pin *pin,
				       void *pin_priv,
				       const struct dpll_device *dpll,
				       void *dpll_priv,
				       enum dpll_pin_state state,
				       struct netlink_ext_ack *extack)
{
	struct mlx5_dpll *mdpll = pin_priv;

	return mlx5_dpll_synce_status_set(mdpll->mdev,
					  state == DPLL_PIN_STATE_CONNECTED ?
					  MLX5_MSEES_ADMIN_STATUS_TRACK :
					  MLX5_MSEES_ADMIN_STATUS_FREE_RUNNING);
}

static int mlx5_dpll_ffo_get(const struct dpll_pin *pin, void *pin_priv,
			     const struct dpll_device *dpll, void *dpll_priv,
			     s64 *ffo, struct netlink_ext_ack *extack)
{
	struct mlx5_dpll_synce_status synce_status;
	struct mlx5_dpll *mdpll = pin_priv;
	int err;

	err = mlx5_dpll_synce_status_get(mdpll->mdev, &synce_status);
	if (err)
		return err;
	return mlx5_dpll_pin_ffo_get(&synce_status, ffo);
}

static const struct dpll_pin_ops mlx5_dpll_pins_ops = {
	.direction_get = mlx5_dpll_pin_direction_get,
	.state_on_dpll_get = mlx5_dpll_state_on_dpll_get,
	.state_on_dpll_set = mlx5_dpll_state_on_dpll_set,
	.ffo_get = mlx5_dpll_ffo_get,
};

static const struct dpll_pin_properties mlx5_dpll_pin_properties = {
	.type = DPLL_PIN_TYPE_SYNCE_ETH_PORT,
	.capabilities = DPLL_PIN_CAPABILITIES_STATE_CAN_CHANGE,
};

#define MLX5_DPLL_PERIODIC_WORK_INTERVAL 500 /* ms */

static void mlx5_dpll_periodic_work_queue(struct mlx5_dpll *mdpll)
{
	queue_delayed_work(mdpll->wq, &mdpll->work,
			   msecs_to_jiffies(MLX5_DPLL_PERIODIC_WORK_INTERVAL));
}

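/* Poll the SyncE status periodically and send dpll netlink notifications
 * whenever the lock status or the pin state changed since the last valid
 * sample.
 */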
static void mlx5_dpll_periodic_work(struct work_struct *work)
{
	struct mlx5_dpll *mdpll = container_of(work, struct mlx5_dpll,
					       work.work);
	struct mlx5_dpll_synce_status synce_status;
	enum dpll_lock_status lock_status;
	enum dpll_pin_state pin_state;
	int err;

	err = mlx5_dpll_synce_status_get(mdpll->mdev, &synce_status);
	if (err)
		goto err_out;
	lock_status = mlx5_dpll_lock_status_get(&synce_status);
	pin_state = mlx5_dpll_pin_state_get(&synce_status);

	if (!mdpll->last.valid)
		goto invalid_out;

	if (mdpll->last.lock_status != lock_status)
		dpll_device_change_ntf(mdpll->dpll);
	if (mdpll->last.pin_state != pin_state)
		dpll_pin_change_ntf(mdpll->dpll_pin);

invalid_out:
	mdpll->last.lock_status = lock_status;
	mdpll->last.pin_state = pin_state;
	mdpll->last.valid = true;
err_out:
	mlx5_dpll_periodic_work_queue(mdpll);
}

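/* Associate the DPLL pin with the uplink netdevice so the pin is reachable
 * through the netdev, and remember the netdevice for later clearing.
 */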
static void mlx5_dpll_netdev_dpll_pin_set(struct mlx5_dpll *mdpll,
					  struct net_device *netdev)
{
	if (mdpll->tracking_netdev)
		return;
	dpll_netdev_pin_set(netdev, mdpll->dpll_pin);
	mdpll->tracking_netdev = netdev;
}

static void mlx5_dpll_netdev_dpll_pin_clear(struct mlx5_dpll *mdpll)
{
	if (!mdpll->tracking_netdev)
		return;
	dpll_netdev_pin_clear(mdpll->tracking_netdev);
	mdpll->tracking_netdev = NULL;
}

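/* Uplink netdev events drive the pin association: set the pin when the
 * uplink netdevice appears, clear it when the netdevice goes away.
 */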
static int mlx5_dpll_mdev_notifier_event(struct notifier_block *nb,
					 unsigned long event, void *data)
{
	struct mlx5_dpll *mdpll = container_of(nb, struct mlx5_dpll, mdev_nb);
	struct net_device *netdev = data;

	switch (event) {
	case MLX5_DRIVER_EVENT_UPLINK_NETDEV:
		if (netdev)
			mlx5_dpll_netdev_dpll_pin_set(mdpll, netdev);
		else
			mlx5_dpll_netdev_dpll_pin_clear(mdpll);
		break;
	default:
		return NOTIFY_DONE;
	}

	return NOTIFY_OK;
}

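/* Register for uplink netdev events and replay the current state so a
 * netdevice that is already present gets its pin set right away.
 */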
static void mlx5_dpll_mdev_netdev_track(struct mlx5_dpll *mdpll,
					struct mlx5_core_dev *mdev)
{
	mdpll->mdev_nb.notifier_call = mlx5_dpll_mdev_notifier_event;
	mlx5_blocking_notifier_register(mdev, &mdpll->mdev_nb);
	mlx5_core_uplink_netdev_event_replay(mdev);
}

static void mlx5_dpll_mdev_netdev_untrack(struct mlx5_dpll *mdpll,
					  struct mlx5_core_dev *mdev)
{
	mlx5_blocking_notifier_unregister(mdev, &mdpll->mdev_nb);
	mlx5_dpll_netdev_dpll_pin_clear(mdpll);
}

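/* Probe: start in free-running mode, get and register the (possibly shared)
 * DPLL device and pin, then begin netdev tracking and the periodic status
 * poll.
 */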
static int mlx5_dpll_probe(struct auxiliary_device *adev,
			   const struct auxiliary_device_id *id)
{
	struct mlx5_adev *edev = container_of(adev, struct mlx5_adev, adev);
	struct mlx5_core_dev *mdev = edev->mdev;
	struct mlx5_dpll *mdpll;
	u64 clock_id;
	int err;

	err = mlx5_dpll_synce_status_set(mdev,
					 MLX5_MSEES_ADMIN_STATUS_FREE_RUNNING);
	if (err)
		return err;

	err = mlx5_dpll_clock_id_get(mdev, &clock_id);
	if (err)
		return err;

	mdpll = kzalloc(sizeof(*mdpll), GFP_KERNEL);
	if (!mdpll)
		return -ENOMEM;
	mdpll->mdev = mdev;
	auxiliary_set_drvdata(adev, mdpll);

	/* Multiple mdev instances might share one DPLL device. */
	mdpll->dpll = dpll_device_get(clock_id, 0, THIS_MODULE);
	if (IS_ERR(mdpll->dpll)) {
		err = PTR_ERR(mdpll->dpll);
		goto err_free_mdpll;
	}

	err = dpll_device_register(mdpll->dpll, DPLL_TYPE_EEC,
				   &mlx5_dpll_device_ops, mdpll);
	if (err)
		goto err_put_dpll_device;

	/* Multiple mdev instances might share one DPLL pin. */
	mdpll->dpll_pin = dpll_pin_get(clock_id, mlx5_get_dev_index(mdev),
				       THIS_MODULE, &mlx5_dpll_pin_properties);
	if (IS_ERR(mdpll->dpll_pin)) {
		err = PTR_ERR(mdpll->dpll_pin);
		goto err_unregister_dpll_device;
	}

	err = dpll_pin_register(mdpll->dpll, mdpll->dpll_pin,
				&mlx5_dpll_pins_ops, mdpll);
	if (err)
		goto err_put_dpll_pin;

	mdpll->wq = create_singlethread_workqueue("mlx5_dpll");
	if (!mdpll->wq) {
		err = -ENOMEM;
		goto err_unregister_dpll_pin;
	}

	mlx5_dpll_mdev_netdev_track(mdpll, mdev);

	INIT_DELAYED_WORK(&mdpll->work, &mlx5_dpll_periodic_work);
	mlx5_dpll_periodic_work_queue(mdpll);

	return 0;

err_unregister_dpll_pin:
	dpll_pin_unregister(mdpll->dpll, mdpll->dpll_pin,
			    &mlx5_dpll_pins_ops, mdpll);
err_put_dpll_pin:
	dpll_pin_put(mdpll->dpll_pin);
err_unregister_dpll_device:
	dpll_device_unregister(mdpll->dpll, &mlx5_dpll_device_ops, mdpll);
err_put_dpll_device:
	dpll_device_put(mdpll->dpll);
err_free_mdpll:
	kfree(mdpll);
	return err;
}

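/* Tear down in the reverse order of probe and finally put the device back
 * into free-running mode.
 */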
static void mlx5_dpll_remove(struct auxiliary_device *adev)
{
	struct mlx5_dpll *mdpll = auxiliary_get_drvdata(adev);
	struct mlx5_core_dev *mdev = mdpll->mdev;

	cancel_delayed_work_sync(&mdpll->work);
	mlx5_dpll_mdev_netdev_untrack(mdpll, mdev);
	destroy_workqueue(mdpll->wq);
	dpll_pin_unregister(mdpll->dpll, mdpll->dpll_pin,
			    &mlx5_dpll_pins_ops, mdpll);
	dpll_pin_put(mdpll->dpll_pin);
	dpll_device_unregister(mdpll->dpll, &mlx5_dpll_device_ops, mdpll);
	dpll_device_put(mdpll->dpll);
	kfree(mdpll);

	mlx5_dpll_synce_status_set(mdev,
				   MLX5_MSEES_ADMIN_STATUS_FREE_RUNNING);
}

static int mlx5_dpll_suspend(struct auxiliary_device *adev, pm_message_t state)
{
	return 0;
}

static int mlx5_dpll_resume(struct auxiliary_device *adev)
{
	return 0;
}

static const struct auxiliary_device_id mlx5_dpll_id_table[] = {
	{ .name = MLX5_ADEV_NAME ".dpll", },
	{},
};

MODULE_DEVICE_TABLE(auxiliary, mlx5_dpll_id_table);

static struct auxiliary_driver mlx5_dpll_driver = {
	.name = "dpll",
	.probe = mlx5_dpll_probe,
	.remove = mlx5_dpll_remove,
	.suspend = mlx5_dpll_suspend,
	.resume = mlx5_dpll_resume,
	.id_table = mlx5_dpll_id_table,
};

static int __init mlx5_dpll_init(void)
{
	return auxiliary_driver_register(&mlx5_dpll_driver);
}

static void __exit mlx5_dpll_exit(void)
{
	auxiliary_driver_unregister(&mlx5_dpll_driver);
}

module_init(mlx5_dpll_init);
module_exit(mlx5_dpll_exit);

MODULE_AUTHOR("Jiri Pirko <[email protected]>");
MODULE_DESCRIPTION("Mellanox 5th generation network adapters (ConnectX series) DPLL driver");
MODULE_LICENSE("Dual BSD/GPL");