// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2024 Marvell. */

#include <linux/interrupt.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/module.h>
#include <linux/iommu.h>
#include "octep_vdpa.h"

#define OCTEP_VDPA_DRIVER_NAME "octep_vdpa"

struct octep_pf {
	u8 __iomem *base[PCI_STD_NUM_BARS];
	struct pci_dev *pdev;
	struct resource res;
	u64 vf_base;
	int enabled_vfs;
	u32 vf_stride;
	u16 vf_devid;
};

struct octep_vdpa {
	struct vdpa_device vdpa;
	struct octep_hw *oct_hw;
	struct pci_dev *pdev;
};

struct octep_vdpa_mgmt_dev {
	struct vdpa_mgmt_dev mdev;
	struct octep_hw oct_hw;
	struct pci_dev *pdev;
	/* Work entry to handle device setup */
	struct work_struct setup_task;
	/* Device status */
	atomic_t status;
};

static struct octep_hw *vdpa_to_octep_hw(struct vdpa_device *vdpa_dev)
{
	struct octep_vdpa *oct_vdpa;

	oct_vdpa = container_of(vdpa_dev, struct octep_vdpa, vdpa);

	return oct_vdpa->oct_hw;
}

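/*
 * Shared handler for all MSI-X vectors. Each vector services the rings
 * mapped to it (see the round-robin description below) and, on vector 0,
 * the device configuration change interrupt as well.
 */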
static irqreturn_t octep_vdpa_intr_handler(int irq, void *data)
{
	struct octep_hw *oct_hw = data;
	int i;

	/* Each device has multiple interrupts (nb_irqs) shared among rings
	 * (nr_vring). Device interrupts are mapped to the rings in a
	 * round-robin fashion.
	 *
	 * For example, if nb_irqs = 8 and nr_vring = 64:
	 * 0 -> 0, 8, 16, 24, 32, 40, 48, 56;
	 * 1 -> 1, 9, 17, 25, 33, 41, 49, 57;
	 * ...
	 * 7 -> 7, 15, 23, 31, 39, 47, 55, 63;
	 */

	for (i = irq - oct_hw->irqs[0]; i < oct_hw->nr_vring; i += oct_hw->nb_irqs) {
		if (ioread8(oct_hw->vqs[i].cb_notify_addr)) {
			/* Acknowledge the per ring notification to the device */
			iowrite8(0, oct_hw->vqs[i].cb_notify_addr);

			if (likely(oct_hw->vqs[i].cb.callback))
				oct_hw->vqs[i].cb.callback(oct_hw->vqs[i].cb.private);
			break;
		}
	}

	/* Check for config interrupt. Config uses the first interrupt */
	if (unlikely(irq == oct_hw->irqs[0] && ioread8(oct_hw->isr))) {
		iowrite8(0, oct_hw->isr);

		if (oct_hw->config_cb.callback)
			oct_hw->config_cb.callback(oct_hw->config_cb.private);
	}

	return IRQ_HANDLED;
}

static void octep_free_irqs(struct octep_hw *oct_hw)
{
	struct pci_dev *pdev = oct_hw->pdev;
	int irq;

	if (!oct_hw->irqs)
		return;

	for (irq = 0; irq < oct_hw->nb_irqs; irq++) {
		if (!oct_hw->irqs[irq])
			break;

		devm_free_irq(&pdev->dev, oct_hw->irqs[irq], oct_hw);
	}

	pci_free_irq_vectors(pdev);
	devm_kfree(&pdev->dev, oct_hw->irqs);
	oct_hw->irqs = NULL;
}

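/*
 * Allocate up to nb_irqs MSI-X vectors and attach octep_vdpa_intr_handler()
 * to each of them. Called on the DRIVER_OK transition from
 * octep_vdpa_set_status(); octep_free_irqs() undoes both steps on failure.
 */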
static int octep_request_irqs(struct octep_hw *oct_hw)
{
	struct pci_dev *pdev = oct_hw->pdev;
	int ret, irq, idx;

	oct_hw->irqs = devm_kcalloc(&pdev->dev, oct_hw->nb_irqs, sizeof(int), GFP_KERNEL);
	if (!oct_hw->irqs)
		return -ENOMEM;

	ret = pci_alloc_irq_vectors(pdev, 1, oct_hw->nb_irqs, PCI_IRQ_MSIX);
	if (ret < 0) {
		dev_err(&pdev->dev, "Failed to alloc msix vector\n");
		return ret;
	}

	for (idx = 0; idx < oct_hw->nb_irqs; idx++) {
		irq = pci_irq_vector(pdev, idx);
		ret = devm_request_irq(&pdev->dev, irq, octep_vdpa_intr_handler, 0,
				       dev_name(&pdev->dev), oct_hw);
		if (ret) {
			dev_err(&pdev->dev, "Failed to register interrupt handler\n");
			goto free_irqs;
		}
		oct_hw->irqs[idx] = irq;
	}

	return 0;

free_irqs:
	octep_free_irqs(oct_hw);
	return ret;
}

static u64 octep_vdpa_get_device_features(struct vdpa_device *vdpa_dev)
{
	struct octep_hw *oct_hw = vdpa_to_octep_hw(vdpa_dev);

	return oct_hw->features;
}

static int octep_vdpa_set_driver_features(struct vdpa_device *vdpa_dev, u64 features)
{
	struct octep_hw *oct_hw = vdpa_to_octep_hw(vdpa_dev);
	int ret;

	pr_debug("Driver Features: %llx\n", features);

	ret = octep_verify_features(features);
	if (ret) {
		dev_warn(&oct_hw->pdev->dev,
			 "Must negotiate minimum features 0x%llx for this device\n",
			 BIT_ULL(VIRTIO_F_VERSION_1) | BIT_ULL(VIRTIO_F_NOTIFICATION_DATA) |
			 BIT_ULL(VIRTIO_F_RING_PACKED));
		return ret;
	}
	octep_hw_set_drv_features(oct_hw, features);

	return 0;
}

static u64 octep_vdpa_get_driver_features(struct vdpa_device *vdpa_dev)
{
	struct octep_hw *oct_hw = vdpa_to_octep_hw(vdpa_dev);

	return octep_hw_get_drv_features(oct_hw);
}

static u8 octep_vdpa_get_status(struct vdpa_device *vdpa_dev)
{
	struct octep_hw *oct_hw = vdpa_to_octep_hw(vdpa_dev);

	return octep_hw_get_status(oct_hw);
}

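/*
 * IRQs are requested lazily, on the transition to DRIVER_OK; if that
 * allocation fails, VIRTIO_CONFIG_S_FAILED is reported to the device
 * instead of the requested status.
 */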
static void octep_vdpa_set_status(struct vdpa_device *vdpa_dev, u8 status)
{
	struct octep_hw *oct_hw = vdpa_to_octep_hw(vdpa_dev);
	u8 status_old;

	status_old = octep_hw_get_status(oct_hw);

	if (status_old == status)
		return;

	if ((status & VIRTIO_CONFIG_S_DRIVER_OK) &&
	    !(status_old & VIRTIO_CONFIG_S_DRIVER_OK)) {
		if (octep_request_irqs(oct_hw))
			status = status_old | VIRTIO_CONFIG_S_FAILED;
	}
	octep_hw_set_status(oct_hw, status);
}

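/*
 * Drop all ring and config callbacks before resetting the hardware, and
 * release the IRQs if the device had reached DRIVER_OK.
 */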
static int octep_vdpa_reset(struct vdpa_device *vdpa_dev)
{
	struct octep_hw *oct_hw = vdpa_to_octep_hw(vdpa_dev);
	u8 status = octep_hw_get_status(oct_hw);
	u16 qid;

	if (status == 0)
		return 0;

	for (qid = 0; qid < oct_hw->nr_vring; qid++) {
		oct_hw->vqs[qid].cb.callback = NULL;
		oct_hw->vqs[qid].cb.private = NULL;
	}
	oct_hw->config_cb.callback = NULL;
	oct_hw->config_cb.private = NULL;
	octep_hw_reset(oct_hw);

	if (status & VIRTIO_CONFIG_S_DRIVER_OK)
		octep_free_irqs(oct_hw);

	return 0;
}

static u16 octep_vdpa_get_vq_num_max(struct vdpa_device *vdpa_dev)
{
	struct octep_hw *oct_hw = vdpa_to_octep_hw(vdpa_dev);

	return octep_get_vq_size(oct_hw);
}

static int octep_vdpa_get_vq_state(struct vdpa_device *vdpa_dev, u16 qid,
				   struct vdpa_vq_state *state)
{
	struct octep_hw *oct_hw = vdpa_to_octep_hw(vdpa_dev);

	return octep_get_vq_state(oct_hw, qid, state);
}

static int octep_vdpa_set_vq_state(struct vdpa_device *vdpa_dev, u16 qid,
				   const struct vdpa_vq_state *state)
{
	struct octep_hw *oct_hw = vdpa_to_octep_hw(vdpa_dev);

	return octep_set_vq_state(oct_hw, qid, state);
}

static void octep_vdpa_set_vq_cb(struct vdpa_device *vdpa_dev, u16 qid, struct vdpa_callback *cb)
{
	struct octep_hw *oct_hw = vdpa_to_octep_hw(vdpa_dev);

	oct_hw->vqs[qid].cb = *cb;
}

static void octep_vdpa_set_vq_ready(struct vdpa_device *vdpa_dev, u16 qid, bool ready)
{
	struct octep_hw *oct_hw = vdpa_to_octep_hw(vdpa_dev);

	octep_set_vq_ready(oct_hw, qid, ready);
}

static bool octep_vdpa_get_vq_ready(struct vdpa_device *vdpa_dev, u16 qid)
{
	struct octep_hw *oct_hw = vdpa_to_octep_hw(vdpa_dev);

	return octep_get_vq_ready(oct_hw, qid);
}

static void octep_vdpa_set_vq_num(struct vdpa_device *vdpa_dev, u16 qid, u32 num)
{
	struct octep_hw *oct_hw = vdpa_to_octep_hw(vdpa_dev);

	octep_set_vq_num(oct_hw, qid, num);
}

static int octep_vdpa_set_vq_address(struct vdpa_device *vdpa_dev, u16 qid, u64 desc_area,
				     u64 driver_area, u64 device_area)
{
	struct octep_hw *oct_hw = vdpa_to_octep_hw(vdpa_dev);

	pr_debug("qid[%d]: desc_area: %llx\n", qid, desc_area);
	pr_debug("qid[%d]: driver_area: %llx\n", qid, driver_area);
	pr_debug("qid[%d]: device_area: %llx\n\n", qid, device_area);

	return octep_set_vq_address(oct_hw, qid, desc_area, driver_area, device_area);
}

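/*
 * Dataless kicks are never used: octep_verify_features() makes
 * VIRTIO_F_NOTIFICATION_DATA mandatory, so queue notifications always
 * arrive through octep_vdpa_kick_vq_with_data() below.
 */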
static void octep_vdpa_kick_vq(struct vdpa_device *vdpa_dev, u16 qid)
{
	/* Not supported */
}

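/* Write the full 32-bit notification data to the queue's notify register;
 * the low 16 bits carry the vq index used to select that register.
 */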
static void octep_vdpa_kick_vq_with_data(struct vdpa_device *vdpa_dev, u32 data)
{
	struct octep_hw *oct_hw = vdpa_to_octep_hw(vdpa_dev);
	u16 idx = data & 0xFFFF;

	vp_iowrite32(data, oct_hw->vqs[idx].notify_addr);
}

static u32 octep_vdpa_get_generation(struct vdpa_device *vdpa_dev)
{
	struct octep_hw *oct_hw = vdpa_to_octep_hw(vdpa_dev);

	return vp_ioread8(&oct_hw->common_cfg->config_generation);
}

static u32 octep_vdpa_get_device_id(struct vdpa_device *vdpa_dev)
{
	struct octep_hw *oct_hw = vdpa_to_octep_hw(vdpa_dev);

	return oct_hw->dev_id;
}

static u32 octep_vdpa_get_vendor_id(struct vdpa_device *vdpa_dev)
{
	return PCI_VENDOR_ID_CAVIUM;
}

static u32 octep_vdpa_get_vq_align(struct vdpa_device *vdpa_dev)
{
	return PAGE_SIZE;
}

static size_t octep_vdpa_get_config_size(struct vdpa_device *vdpa_dev)
{
	struct octep_hw *oct_hw = vdpa_to_octep_hw(vdpa_dev);

	return oct_hw->config_size;
}

static void octep_vdpa_get_config(struct vdpa_device *vdpa_dev, unsigned int offset, void *buf,
				  unsigned int len)
{
	struct octep_hw *oct_hw = vdpa_to_octep_hw(vdpa_dev);

	octep_read_dev_config(oct_hw, offset, buf, len);
}

static void octep_vdpa_set_config(struct vdpa_device *vdpa_dev, unsigned int offset,
				  const void *buf, unsigned int len)
{
	/* Not supported */
}

static void octep_vdpa_set_config_cb(struct vdpa_device *vdpa_dev, struct vdpa_callback *cb)
{
	struct octep_hw *oct_hw = vdpa_to_octep_hw(vdpa_dev);

	oct_hw->config_cb.callback = cb->callback;
	oct_hw->config_cb.private = cb->private;
}

static struct vdpa_notification_area octep_get_vq_notification(struct vdpa_device *vdpa_dev,
							       u16 idx)
{
	struct octep_hw *oct_hw = vdpa_to_octep_hw(vdpa_dev);
	struct vdpa_notification_area area;

	area.addr = oct_hw->vqs[idx].notify_pa;
	area.size = PAGE_SIZE;

	return area;
}

static struct vdpa_config_ops octep_vdpa_ops = {
	.get_device_features = octep_vdpa_get_device_features,
	.set_driver_features = octep_vdpa_set_driver_features,
	.get_driver_features = octep_vdpa_get_driver_features,
	.get_status = octep_vdpa_get_status,
	.set_status = octep_vdpa_set_status,
	.reset = octep_vdpa_reset,
	.get_vq_num_max = octep_vdpa_get_vq_num_max,
	.get_vq_state = octep_vdpa_get_vq_state,
	.set_vq_state = octep_vdpa_set_vq_state,
	.set_vq_cb = octep_vdpa_set_vq_cb,
	.set_vq_ready = octep_vdpa_set_vq_ready,
	.get_vq_ready = octep_vdpa_get_vq_ready,
	.set_vq_num = octep_vdpa_set_vq_num,
	.set_vq_address = octep_vdpa_set_vq_address,
	.get_vq_irq = NULL,
	.kick_vq = octep_vdpa_kick_vq,
	.kick_vq_with_data = octep_vdpa_kick_vq_with_data,
	.get_generation = octep_vdpa_get_generation,
	.get_device_id = octep_vdpa_get_device_id,
	.get_vendor_id = octep_vdpa_get_vendor_id,
	.get_vq_align = octep_vdpa_get_vq_align,
	.get_config_size = octep_vdpa_get_config_size,
	.get_config = octep_vdpa_get_config,
	.set_config = octep_vdpa_set_config,
	.set_config_cb = octep_vdpa_set_config_cb,
	.get_vq_notification = octep_get_vq_notification,
};

static int octep_iomap_region(struct pci_dev *pdev, u8 __iomem **tbl, u8 bar)
{
	int ret;

	ret = pci_request_region(pdev, bar, OCTEP_VDPA_DRIVER_NAME);
	if (ret) {
		dev_err(&pdev->dev, "Failed to request BAR:%u region\n", bar);
		return ret;
	}

	tbl[bar] = pci_iomap(pdev, bar, pci_resource_len(pdev, bar));
	if (!tbl[bar]) {
		dev_err(&pdev->dev, "Failed to iomap BAR:%u\n", bar);
		pci_release_region(pdev, bar);
		ret = -ENOMEM;
	}

	return ret;
}

static void octep_iounmap_region(struct pci_dev *pdev, u8 __iomem **tbl, u8 bar)
{
	pci_iounmap(pdev, tbl[bar]);
	pci_release_region(pdev, bar);
}

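/*
 * Save the PF's BAR4 window and then shrink it to zero size (end = start - 1)
 * so the PF no longer claims the region; octep_vdpa_assign_barspace() later
 * hands out vf_stride-sized slices of the saved window to the VFs.
 */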
static void octep_vdpa_pf_bar_shrink(struct octep_pf *octpf)
{
	struct pci_dev *pf_dev = octpf->pdev;
	struct resource *res = pf_dev->resource + PCI_STD_RESOURCES + 4;
	struct pci_bus_region bus_region;

	octpf->res.start = res->start;
	octpf->res.end = res->end;
	octpf->vf_base = res->start;

	bus_region.start = res->start;
	bus_region.end = res->start - 1;

	pcibios_bus_to_resource(pf_dev->bus, res, &bus_region);
}

static void octep_vdpa_pf_bar_expand(struct octep_pf *octpf)
{
	struct pci_dev *pf_dev = octpf->pdev;
	struct resource *res = pf_dev->resource + PCI_STD_RESOURCES + 4;
	struct pci_bus_region bus_region;

	bus_region.start = octpf->res.start;
	bus_region.end = octpf->res.end;

	pcibios_bus_to_resource(pf_dev->bus, res, &bus_region);
}

static void octep_vdpa_remove_pf(struct pci_dev *pdev)
{
	struct octep_pf *octpf = pci_get_drvdata(pdev);

	pci_disable_sriov(pdev);

	if (octpf->base[OCTEP_HW_CAPS_BAR])
		octep_iounmap_region(pdev, octpf->base, OCTEP_HW_CAPS_BAR);

	if (octpf->base[OCTEP_HW_MBOX_BAR])
		octep_iounmap_region(pdev, octpf->base, OCTEP_HW_MBOX_BAR);

	octep_vdpa_pf_bar_expand(octpf);
}

static void octep_vdpa_vf_bar_shrink(struct pci_dev *pdev)
{
	struct resource *vf_res = pdev->resource + PCI_STD_RESOURCES + 4;

	memset(vf_res, 0, sizeof(*vf_res));
}

static void octep_vdpa_remove_vf(struct pci_dev *pdev)
{
	struct octep_vdpa_mgmt_dev *mgmt_dev = pci_get_drvdata(pdev);
	struct octep_hw *oct_hw;
	int status;

	oct_hw = &mgmt_dev->oct_hw;
	status = atomic_read(&mgmt_dev->status);
	atomic_set(&mgmt_dev->status, OCTEP_VDPA_DEV_STATUS_UNINIT);

	cancel_work_sync(&mgmt_dev->setup_task);
	if (status == OCTEP_VDPA_DEV_STATUS_READY)
		vdpa_mgmtdev_unregister(&mgmt_dev->mdev);

	if (oct_hw->base[OCTEP_HW_CAPS_BAR])
		octep_iounmap_region(pdev, oct_hw->base, OCTEP_HW_CAPS_BAR);

	if (oct_hw->base[OCTEP_HW_MBOX_BAR])
		octep_iounmap_region(pdev, oct_hw->base, OCTEP_HW_MBOX_BAR);

	octep_vdpa_vf_bar_shrink(pdev);
}

static void octep_vdpa_remove(struct pci_dev *pdev)
{
	if (pdev->is_virtfn)
		octep_vdpa_remove_vf(pdev);
	else
		octep_vdpa_remove_pf(pdev);
}

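/*
 * vdpa management op: create the vDPA device for this VF, optionally
 * masking the device features down to the set provisioned by userspace.
 */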
static int octep_vdpa_dev_add(struct vdpa_mgmt_dev *mdev, const char *name,
			      const struct vdpa_dev_set_config *config)
{
	struct octep_vdpa_mgmt_dev *mgmt_dev = container_of(mdev, struct octep_vdpa_mgmt_dev, mdev);
	struct octep_hw *oct_hw = &mgmt_dev->oct_hw;
	struct pci_dev *pdev = oct_hw->pdev;
	struct vdpa_device *vdpa_dev;
	struct octep_vdpa *oct_vdpa;
	u64 device_features;
	int ret;

	oct_vdpa = vdpa_alloc_device(struct octep_vdpa, vdpa, &pdev->dev, &octep_vdpa_ops, 1, 1,
				     NULL, false);
	if (IS_ERR(oct_vdpa)) {
		dev_err(&pdev->dev, "Failed to allocate vDPA structure for octep vdpa device\n");
		return PTR_ERR(oct_vdpa);
	}

	oct_vdpa->pdev = pdev;
	oct_vdpa->vdpa.dma_dev = &pdev->dev;
	oct_vdpa->vdpa.mdev = mdev;
	oct_vdpa->oct_hw = oct_hw;
	vdpa_dev = &oct_vdpa->vdpa;

	device_features = oct_hw->features;
	if (config->mask & BIT_ULL(VDPA_ATTR_DEV_FEATURES)) {
		if (config->device_features & ~device_features) {
			dev_err(&pdev->dev, "The provisioned features 0x%llx are not supported by this device with features 0x%llx\n",
				config->device_features, device_features);
			ret = -EINVAL;
			goto vdpa_dev_put;
		}
		device_features &= config->device_features;
	}

	oct_hw->features = device_features;
	dev_info(&pdev->dev, "vDPA management device features: 0x%llx\n", device_features);

	ret = octep_verify_features(device_features);
	if (ret) {
		dev_warn(mdev->device,
			 "Must provision minimum features 0x%llx for this device\n",
			 BIT_ULL(VIRTIO_F_VERSION_1) | BIT_ULL(VIRTIO_F_ACCESS_PLATFORM) |
			 BIT_ULL(VIRTIO_F_NOTIFICATION_DATA) | BIT_ULL(VIRTIO_F_RING_PACKED));
		goto vdpa_dev_put;
	}

	if (name)
		ret = dev_set_name(&vdpa_dev->dev, "%s", name);
	else
		ret = dev_set_name(&vdpa_dev->dev, "vdpa%u", vdpa_dev->index);
	if (ret)
		goto vdpa_dev_put;

	ret = _vdpa_register_device(&oct_vdpa->vdpa, oct_hw->nr_vring);
	if (ret) {
		dev_err(&pdev->dev, "Failed to register to vDPA bus\n");
		goto vdpa_dev_put;
	}
	return 0;

vdpa_dev_put:
	put_device(&oct_vdpa->vdpa.dev);
	return ret;
}

static void octep_vdpa_dev_del(struct vdpa_mgmt_dev *mdev, struct vdpa_device *vdpa_dev)
{
	_vdpa_unregister_device(vdpa_dev);
}

static const struct vdpa_mgmtdev_ops octep_vdpa_mgmt_dev_ops = {
	.dev_add = octep_vdpa_dev_add,
	.dev_del = octep_vdpa_dev_del
};

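/*
 * The PF driver posts OCTEP_DEV_READY_SIGNATURE into the VF mailbox once
 * the VF's BAR slice has been assigned (see octep_sriov_enable()). Reading
 * the signature also clears it so a stale value is not seen again.
 */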
static bool get_device_ready_status(u8 __iomem *addr)
{
	u64 signature = readq(addr + OCTEP_VF_MBOX_DATA(0));

	if (signature == OCTEP_DEV_READY_SIGNATURE) {
		writeq(0, addr + OCTEP_VF_MBOX_DATA(0));
		return true;
	}

	return false;
}

static struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID },
	{ 0 },
};

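/*
 * Deferred VF setup: poll for the device-ready signature (up to 5 seconds),
 * map the capabilities BAR, read the hardware capabilities and finally
 * register the vdpa management device.
 */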
static void octep_vdpa_setup_task(struct work_struct *work)
{
	struct octep_vdpa_mgmt_dev *mgmt_dev = container_of(work, struct octep_vdpa_mgmt_dev,
							    setup_task);
	struct pci_dev *pdev = mgmt_dev->pdev;
	struct device *dev = &pdev->dev;
	struct octep_hw *oct_hw;
	unsigned long timeout;
	u64 val;
	int ret;

	oct_hw = &mgmt_dev->oct_hw;

	atomic_set(&mgmt_dev->status, OCTEP_VDPA_DEV_STATUS_WAIT_FOR_BAR_INIT);

	/* Wait for a maximum of 5 sec */
	timeout = jiffies + msecs_to_jiffies(5000);
	while (!time_after(jiffies, timeout)) {
		if (get_device_ready_status(oct_hw->base[OCTEP_HW_MBOX_BAR])) {
			atomic_set(&mgmt_dev->status, OCTEP_VDPA_DEV_STATUS_INIT);
			break;
		}

		if (atomic_read(&mgmt_dev->status) >= OCTEP_VDPA_DEV_STATUS_READY) {
			dev_info(dev, "Stopping vDPA setup task.\n");
			return;
		}

		usleep_range(1000, 1500);
	}

	if (atomic_read(&mgmt_dev->status) != OCTEP_VDPA_DEV_STATUS_INIT) {
		dev_err(dev, "BAR initialization timed out\n");
		return;
	}

	ret = octep_iomap_region(pdev, oct_hw->base, OCTEP_HW_CAPS_BAR);
	if (ret)
		return;

	val = readq(oct_hw->base[OCTEP_HW_MBOX_BAR] + OCTEP_VF_IN_CTRL(0));
	oct_hw->nb_irqs = OCTEP_VF_IN_CTRL_RPVF(val);
	if (!oct_hw->nb_irqs || oct_hw->nb_irqs > OCTEP_MAX_CB_INTR) {
		dev_err(dev, "Invalid number of interrupts %d\n", oct_hw->nb_irqs);
		goto unmap_region;
	}

	ret = octep_hw_caps_read(oct_hw, pdev);
	if (ret < 0)
		goto unmap_region;

	mgmt_dev->mdev.ops = &octep_vdpa_mgmt_dev_ops;
	mgmt_dev->mdev.id_table = id_table;
	mgmt_dev->mdev.max_supported_vqs = oct_hw->nr_vring;
	mgmt_dev->mdev.supported_features = oct_hw->features;
	mgmt_dev->mdev.config_attr_mask = (1 << VDPA_ATTR_DEV_FEATURES);
	mgmt_dev->mdev.device = dev;

	ret = vdpa_mgmtdev_register(&mgmt_dev->mdev);
	if (ret) {
		dev_err(dev, "Failed to register vdpa management interface\n");
		goto unmap_region;
	}

	atomic_set(&mgmt_dev->status, OCTEP_VDPA_DEV_STATUS_READY);

	return;

unmap_region:
	octep_iounmap_region(pdev, oct_hw->base, OCTEP_HW_CAPS_BAR);
	oct_hw->base[OCTEP_HW_CAPS_BAR] = NULL;
}

static int octep_vdpa_probe_vf(struct pci_dev *pdev)
{
	struct octep_vdpa_mgmt_dev *mgmt_dev;
	struct device *dev = &pdev->dev;
	int ret;

	ret = pcim_enable_device(pdev);
	if (ret) {
		dev_err(dev, "Failed to enable device\n");
		return ret;
	}

	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
	if (ret) {
		dev_err(dev, "No usable DMA configuration\n");
		return ret;
	}
	pci_set_master(pdev);

	mgmt_dev = devm_kzalloc(dev, sizeof(struct octep_vdpa_mgmt_dev), GFP_KERNEL);
	if (!mgmt_dev)
		return -ENOMEM;

	ret = octep_iomap_region(pdev, mgmt_dev->oct_hw.base, OCTEP_HW_MBOX_BAR);
	if (ret)
		return ret;

	mgmt_dev->pdev = pdev;
	pci_set_drvdata(pdev, mgmt_dev);

	atomic_set(&mgmt_dev->status, OCTEP_VDPA_DEV_STATUS_ALLOC);
	INIT_WORK(&mgmt_dev->setup_task, octep_vdpa_setup_task);
	schedule_work(&mgmt_dev->setup_task);
	dev_info(&pdev->dev, "octep vdpa mgmt device setup task is queued\n");

	return 0;
}

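/*
 * Point the VF's BAR4 resource at its idx'th vf_stride-sized slice of the
 * window saved from the PF in octep_vdpa_pf_bar_shrink().
 */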
static void octep_vdpa_assign_barspace(struct pci_dev *vf_dev, struct pci_dev *pf_dev, u8 idx)
{
	struct resource *vf_res = vf_dev->resource + PCI_STD_RESOURCES + 4;
	struct resource *pf_res = pf_dev->resource + PCI_STD_RESOURCES + 4;
	struct octep_pf *pf = pci_get_drvdata(pf_dev);
	struct pci_bus_region bus_region;

	vf_res->name = pci_name(vf_dev);
	vf_res->flags = pf_res->flags;
	vf_res->parent = (pf_dev->resource + PCI_STD_RESOURCES)->parent;

	bus_region.start = pf->vf_base + idx * pf->vf_stride;
	bus_region.end = bus_region.start + pf->vf_stride - 1;
	pcibios_bus_to_resource(vf_dev->bus, vf_res, &bus_region);
}

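/*
 * Enable SR-IOV, assign each VF its BAR slice and, once all VFs are set up,
 * post the ready signature to every VF mailbox so the setup tasks on the
 * VF side can proceed.
 */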
static int octep_sriov_enable(struct pci_dev *pdev, int num_vfs)
{
	struct octep_pf *pf = pci_get_drvdata(pdev);
	u8 __iomem *addr = pf->base[OCTEP_HW_MBOX_BAR];
	struct pci_dev *vf_pdev = NULL;
	bool done = false;
	int index = 0;
	int ret, i;

	ret = pci_enable_sriov(pdev, num_vfs);
	if (ret)
		return ret;

	pf->enabled_vfs = num_vfs;

	while ((vf_pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM, PCI_ANY_ID, vf_pdev))) {
		if (vf_pdev->device != pf->vf_devid)
			continue;

		octep_vdpa_assign_barspace(vf_pdev, pdev, index);
		if (++index == num_vfs) {
			done = true;
			break;
		}
	}

	if (done) {
		for (i = 0; i < pf->enabled_vfs; i++)
			writeq(OCTEP_DEV_READY_SIGNATURE, addr + OCTEP_PF_MBOX_DATA(i));
	}

	return num_vfs;
}

static int octep_sriov_disable(struct pci_dev *pdev)
{
	struct octep_pf *pf = pci_get_drvdata(pdev);

	if (!pci_num_vf(pdev))
		return 0;

	pci_disable_sriov(pdev);
	pf->enabled_vfs = 0;

	return 0;
}

static int octep_vdpa_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	if (num_vfs > 0)
		return octep_sriov_enable(pdev, num_vfs);
	else
		return octep_sriov_disable(pdev);
}

static u16 octep_get_vf_devid(struct pci_dev *pdev)
{
	u16 did;

	switch (pdev->device) {
	case OCTEP_VDPA_DEVID_CN106K_PF:
		did = OCTEP_VDPA_DEVID_CN106K_VF;
		break;
	case OCTEP_VDPA_DEVID_CN105K_PF:
		did = OCTEP_VDPA_DEVID_CN105K_VF;
		break;
	case OCTEP_VDPA_DEVID_CN103K_PF:
		did = OCTEP_VDPA_DEVID_CN103K_VF;
		break;
	default:
		did = 0xFFFF;
		break;
	}

	return did;
}

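/*
 * Validate the PF configuration and compute the per-VF stride from the
 * capabilities BAR length, then hide the PF's BAR4 window so it can be
 * parceled out to the VFs.
 */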
static int octep_vdpa_pf_setup(struct octep_pf *octpf)
{
	u8 __iomem *addr = octpf->base[OCTEP_HW_MBOX_BAR];
	struct pci_dev *pdev = octpf->pdev;
	int totalvfs;
	size_t len;
	u64 val;

	totalvfs = pci_sriov_get_totalvfs(pdev);
	if (unlikely(!totalvfs)) {
		dev_info(&pdev->dev, "Total VFs are %d in PF sriov configuration\n", totalvfs);
		return 0;
	}

	val = readq(addr + OCTEP_EPF_RINFO(0));
	if (val == 0) {
		dev_err(&pdev->dev, "Invalid device configuration\n");
		return -EINVAL;
	}

	len = pci_resource_len(pdev, OCTEP_HW_CAPS_BAR);

	octpf->vf_stride = len / totalvfs;
	octpf->vf_devid = octep_get_vf_devid(pdev);

	octep_vdpa_pf_bar_shrink(octpf);

	return 0;
}

static int octep_vdpa_probe_pf(struct pci_dev *pdev)
{
	struct device *dev = &pdev->dev;
	struct octep_pf *octpf;
	int ret;

	ret = pcim_enable_device(pdev);
	if (ret) {
		dev_err(dev, "Failed to enable device\n");
		return ret;
	}

	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
	if (ret) {
		dev_err(dev, "No usable DMA configuration\n");
		return ret;
	}
	octpf = devm_kzalloc(dev, sizeof(*octpf), GFP_KERNEL);
	if (!octpf)
		return -ENOMEM;

	ret = octep_iomap_region(pdev, octpf->base, OCTEP_HW_MBOX_BAR);
	if (ret)
		return ret;

	pci_set_master(pdev);
	pci_set_drvdata(pdev, octpf);
	octpf->pdev = pdev;

	ret = octep_vdpa_pf_setup(octpf);
	if (ret)
		goto unmap_region;

	return 0;

unmap_region:
	octep_iounmap_region(pdev, octpf->base, OCTEP_HW_MBOX_BAR);
	return ret;
}

static int octep_vdpa_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	if (pdev->is_virtfn)
		return octep_vdpa_probe_vf(pdev);
	else
		return octep_vdpa_probe_pf(pdev);
}

static struct pci_device_id octep_pci_vdpa_map[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OCTEP_VDPA_DEVID_CN106K_PF) },
	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OCTEP_VDPA_DEVID_CN106K_VF) },
	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OCTEP_VDPA_DEVID_CN105K_PF) },
	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OCTEP_VDPA_DEVID_CN105K_VF) },
	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OCTEP_VDPA_DEVID_CN103K_PF) },
	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OCTEP_VDPA_DEVID_CN103K_VF) },
	{ 0 },
};

static struct pci_driver octep_pci_vdpa = {
	.name = OCTEP_VDPA_DRIVER_NAME,
	.id_table = octep_pci_vdpa_map,
	.probe = octep_vdpa_probe,
	.remove = octep_vdpa_remove,
	.sriov_configure = octep_vdpa_sriov_configure
};

module_pci_driver(octep_pci_vdpa);

MODULE_AUTHOR("Marvell");
MODULE_DESCRIPTION("Marvell Octeon PCIe endpoint vDPA driver");
MODULE_LICENSE("GPL");