Lines Matching +full:cfg +full:- +full:space
1 // SPDX-License-Identifier: GPL-2.0-only
20 #define MBOX_RSP_TO_ERR(val) (-(((val) & MBOX_RC_MASK) >> 2))
53 return (struct octep_mbox __iomem *)(oct_hw->dev_cfg + MBOX_OFFSET); in octep_get_mbox()
60 return readx_poll_timeout(ioread32, &mbox->sts, val, MBOX_AVAIL(val), 10, in octep_wait_for_mbox_avail()
68 return readx_poll_timeout(ioread32, &mbox->sts, val, MBOX_RSP(val), 10, in octep_wait_for_mbox_rsp()
74 iowrite16(id, &mbox->hdr.id); in octep_write_hdr()
75 iowrite16(sig, &mbox->hdr.sig); in octep_write_hdr()
80 return ioread16(&mbox->hdr.sig); in octep_read_sig()
85 iowrite32(sts, &mbox->sts); in octep_write_sts()
90 return ioread32(&mbox->sts); in octep_read_sts()
95 return ioread32(&mbox->data[word_idx]); in octep_read32_word()
100 iowrite32(word, &mbox->data[word_idx]); in octep_write32_word()
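
The 16- and 32-bit accessors above imply a mailbox register layout roughly like the sketch below. This is an inference from the access widths only, not the driver's actual header; field order and the size of data[] are not visible in this listing.

/* Sketch only: inferred from octep_write_hdr(), octep_read_sts() and
 * octep_read32_word() above; the real definition may add reserved fields.
 */
struct octep_mbox_hdr {
	u16 id;		/* command id, written by octep_write_hdr() */
	u16 sig;	/* signature, echoed back in the response */
};

struct octep_mbox {
	struct octep_mbox_hdr hdr;
	u32 sts;	/* status word polled via MBOX_AVAIL()/MBOX_RSP() */
	u32 data[];	/* 32-bit payload words; count not shown here */
};
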
107 struct pci_dev *pdev = oct_hw->pdev; in octep_process_mbox()
114 return -EINVAL; in octep_process_mbox()
116 /* Make sure mbox space is available */ in octep_process_mbox()
119 dev_warn(&pdev->dev, "Timeout waiting for previous mbox data to be consumed\n"); in octep_process_mbox()
137 dev_warn(&pdev->dev, "Timeout waiting for mbox : %d response\n", id); in octep_process_mbox()
143 dev_warn(&pdev->dev, "Invalid Signature from mbox : %d response\n", id); in octep_process_mbox()
144 return -EINVAL; in octep_process_mbox()
150 dev_warn(&pdev->dev, "Error while processing mbox : %d, err %d\n", id, ret); in octep_process_mbox()
163 iowrite32(1, &mbox->sts); in octep_mbox_init()
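
Pieced together, the octep_process_mbox() fragments describe a request/response handshake over this shared mailbox, with octep_mbox_init() writing 1 to sts presumably marking the mailbox available. A minimal sketch of the sequence, assuming MBOX_AVAIL()/MBOX_RSP() test bits in sts and that the helper signatures match their call sites above (octep_mbox_xfer_sketch is a hypothetical name):

static int octep_mbox_xfer_sketch(struct octep_hw *oct_hw, u16 id, u16 sig,
				  const u32 *words, int nwords)
{
	struct octep_mbox __iomem *mbox = octep_get_mbox(oct_hw);
	int i, ret;

	/* 1. Make sure mbox space is available. */
	ret = octep_wait_for_mbox_avail(mbox);
	if (ret) {
		dev_warn(&oct_hw->pdev->dev,
			 "Timeout waiting for previous mbox data to be consumed\n");
		return ret;
	}

	/* 2. Write header and payload words. The real function also
	 * updates sts here to post the request; the value written is
	 * not visible in this listing.
	 */
	octep_write_hdr(mbox, id, sig);
	for (i = 0; i < nwords; i++)
		octep_write32_word(mbox, i, words[i]);

	/* 3. Wait for the device firmware to post a response. */
	ret = octep_wait_for_mbox_rsp(mbox);
	if (ret)
		return ret;

	/* 4. Check the echoed signature, then decode the return code
	 * carried in sts into a negative errno via MBOX_RSP_TO_ERR().
	 */
	if (octep_read_sig(mbox) != sig)
		return -EINVAL;

	return MBOX_RSP_TO_ERR(octep_read_sts(mbox));
}
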
170 return -EOPNOTSUPP; in octep_verify_features()
173 return -EOPNOTSUPP; in octep_verify_features()
176 return -EOPNOTSUPP; in octep_verify_features()
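
octep_verify_features() evidently rejects a feature set three times over with -EOPNOTSUPP; the specific bits it requires are not in this listing. A sketch of the shape of such a check, assuming VIRTIO_F_VERSION_1 (from <linux/virtio_config.h>) is one of the required bits:

/* Sketch: only VIRTIO_F_VERSION_1 is assumed here; the other required
 * bits in the real function are not visible in this listing.
 */
static int octep_verify_features_sketch(u64 features)
{
	if (!(features & BIT_ULL(VIRTIO_F_VERSION_1)))
		return -EOPNOTSUPP;	/* modern virtio interface required */

	return 0;
}
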
183 return ioread8(&oct_hw->common_cfg->device_status); in octep_hw_get_status()
188 iowrite8(status, &oct_hw->common_cfg->device_status); in octep_hw_set_status()
196 if (readx_poll_timeout(ioread8, &oct_hw->common_cfg->device_status, val, !val, 10, in octep_hw_reset()
198 dev_warn(&oct_hw->pdev->dev, "Octeon device reset timeout\n"); in octep_hw_reset()
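
Reset follows the standard virtio-pci sequence: write 0 to device_status, then poll until the device reads back 0 to acknowledge. A sketch around the poll above, with a hypothetical timeout constant:

#define OCTEP_RESET_TIMEOUT_US	10000	/* assumption; real value not shown */

static void octep_hw_reset_sketch(struct octep_hw *oct_hw)
{
	u8 val;

	iowrite8(0, &oct_hw->common_cfg->device_status);	/* request reset */

	/* Device acknowledges by returning 0 on readback; poll every 10us. */
	if (readx_poll_timeout(ioread8, &oct_hw->common_cfg->device_status,
			       val, !val, 10, OCTEP_RESET_TIMEOUT_US))
		dev_warn(&oct_hw->pdev->dev, "Octeon device reset timeout\n");
}
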
210 dev_warn(&oct_hw->pdev->dev, "Feature select%d write timeout\n", select); in feature_sel_write_with_timeout()
211 return -1; in feature_sel_write_with_timeout()
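
feature_sel_write_with_timeout() is a write-then-readback guard: the select register is written, then polled until the device echoes the value back, so the feature window accessed next is known to target the right 32-bit bank. A sketch with assumed poll interval and timeout:

static int feature_sel_write_with_timeout_sketch(struct octep_hw *oct_hw,
						 u32 select, void __iomem *addr)
{
	u32 val;

	iowrite32(select, addr);

	/* Wait until the device reflects the new select value back. */
	if (readx_poll_timeout(ioread32, addr, val, val == select, 10, 1000)) {
		dev_warn(&oct_hw->pdev->dev,
			 "Feature select%d write timeout\n", select);
		return -1;
	}
	return 0;
}
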
220 if (feature_sel_write_with_timeout(oct_hw, 0, &oct_hw->common_cfg->device_feature_select)) in octep_hw_get_dev_features()
223 features_lo = ioread32(&oct_hw->common_cfg->device_feature); in octep_hw_get_dev_features()
225 if (feature_sel_write_with_timeout(oct_hw, 1, &oct_hw->common_cfg->device_feature_select)) in octep_hw_get_dev_features()
228 features_hi = ioread32(&oct_hw->common_cfg->device_feature); in octep_hw_get_dev_features()
237 if (feature_sel_write_with_timeout(oct_hw, 0, &oct_hw->common_cfg->guest_feature_select)) in octep_hw_get_drv_features()
240 features_lo = ioread32(&oct_hw->common_cfg->guest_feature); in octep_hw_get_drv_features()
242 if (feature_sel_write_with_timeout(oct_hw, 1, &oct_hw->common_cfg->guest_feature_select)) in octep_hw_get_drv_features()
245 features_hi = ioread32(&oct_hw->common_cfg->guest_feature); in octep_hw_get_drv_features()
252 if (feature_sel_write_with_timeout(oct_hw, 0, &oct_hw->common_cfg->guest_feature_select)) in octep_hw_set_drv_features()
255 iowrite32(features & (BIT_ULL(32) - 1), &oct_hw->common_cfg->guest_feature); in octep_hw_set_drv_features()
257 if (feature_sel_write_with_timeout(oct_hw, 1, &oct_hw->common_cfg->guest_feature_select)) in octep_hw_set_drv_features()
260 iowrite32(features >> 32, &oct_hw->common_cfg->guest_feature); in octep_hw_set_drv_features()
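
Feature words are 64-bit, but common_cfg exposes them through a 32-bit window: select 0 maps the low half, select 1 the high half. The read path ORs the halves together, and the write path splits them with features & (BIT_ULL(32) - 1) and features >> 32, as the set_drv_features fragments above show. A sketch of the read side for the device features (the guest_feature accessors mirror it):

static u64 octep_dev_features_read_sketch(struct octep_hw *oct_hw)
{
	u32 lo, hi;

	if (feature_sel_write_with_timeout(oct_hw, 0,
					   &oct_hw->common_cfg->device_feature_select))
		return 0;	/* select timed out; report no features */
	lo = ioread32(&oct_hw->common_cfg->device_feature);

	if (feature_sel_write_with_timeout(oct_hw, 1,
					   &oct_hw->common_cfg->device_feature_select))
		return 0;
	hi = ioread32(&oct_hw->common_cfg->device_feature);

	return ((u64)hi << 32) | lo;
}
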
267 iowrite16(queue_id | BIT(QUEUE_SEL_ACK_BIT), &oct_hw->common_cfg->queue_select); in octep_write_queue_select()
269 if (readx_poll_timeout(ioread16, &oct_hw->common_cfg->queue_select, val, val == queue_id, in octep_write_queue_select()
271 dev_warn(&oct_hw->pdev->dev, "Queue select write timeout\n"); in octep_write_queue_select()
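
Queue selection uses an ack handshake rather than a bare write: the driver sets QUEUE_SEL_ACK_BIT alongside the queue id, and the device is presumed to clear that bit once the selection has latched, so polling for an exact queue_id readback confirms completion. Sketch with assumed timing values:

static void octep_write_queue_select_sketch(struct octep_hw *oct_hw, u16 queue_id)
{
	u16 val;

	iowrite16(queue_id | BIT(QUEUE_SEL_ACK_BIT),
		  &oct_hw->common_cfg->queue_select);

	/* Readback equals the bare id only after the ack bit clears. */
	if (readx_poll_timeout(ioread16, &oct_hw->common_cfg->queue_select,
			       val, val == queue_id, 10, 1000))
		dev_warn(&oct_hw->pdev->dev, "Queue select write timeout\n");
}
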
278 iowrite16(qid, oct_hw->vqs[qid].notify_addr); in octep_notify_queue()
286 if (WARN_ON(offset + length > oct_hw->config_size)) in octep_read_dev_config()
290 old_gen = ioread8(&oct_hw->common_cfg->config_generation); in octep_read_dev_config()
293 *p++ = ioread8(oct_hw->dev_cfg + offset + i); in octep_read_dev_config()
295 new_gen = ioread8(&oct_hw->common_cfg->config_generation); in octep_read_dev_config()
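
Config-space reads are made tear-free with the virtio config_generation counter: sample it, copy the bytes, sample again, and retry if it changed in between. A sketch of the loop these fragments belong to (the retry structure is the standard virtio pattern, assumed here):

static void octep_read_dev_config_sketch(struct octep_hw *oct_hw,
					 u64 offset, void *dst, int length)
{
	u8 old_gen, new_gen, *p;
	int i;

	do {
		old_gen = ioread8(&oct_hw->common_cfg->config_generation);
		p = dst;
		for (i = 0; i < length; i++)
			*p++ = ioread8(oct_hw->dev_cfg + offset + i);
		new_gen = ioread8(&oct_hw->common_cfg->config_generation);
	} while (old_gen != new_gen);	/* device updated config mid-copy */
}
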
302 struct virtio_pci_common_cfg __iomem *cfg = oct_hw->common_cfg; in octep_set_vq_address() local
305 vp_iowrite64_twopart(desc_area, &cfg->queue_desc_lo, in octep_set_vq_address()
306 &cfg->queue_desc_hi); in octep_set_vq_address()
307 vp_iowrite64_twopart(driver_area, &cfg->queue_avail_lo, in octep_set_vq_address()
308 &cfg->queue_avail_hi); in octep_set_vq_address()
309 vp_iowrite64_twopart(device_area, &cfg->queue_used_lo, in octep_set_vq_address()
310 &cfg->queue_used_hi); in octep_set_vq_address()
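
The ring addresses are 64-bit, but virtio_pci_common_cfg only provides paired 32-bit registers for them; vp_iowrite64_twopart() (a virtio_pci_modern helper) performs the split. Its effect is equivalent to:

/* Equivalent-effect sketch of vp_iowrite64_twopart(). */
static void iowrite64_twopart_sketch(u64 val,
				     __le32 __iomem *lo, __le32 __iomem *hi)
{
	iowrite32((u32)val, lo);	/* low 32 bits first */
	iowrite32(val >> 32, hi);	/* then the high 32 bits */
}
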
332 struct virtio_pci_common_cfg __iomem *cfg = oct_hw->common_cfg; in octep_set_vq_num() local
335 iowrite16(num, &cfg->queue_size); in octep_set_vq_num()
340 struct virtio_pci_common_cfg __iomem *cfg = oct_hw->common_cfg; in octep_set_vq_ready() local
343 iowrite16(ready, &cfg->queue_enable); in octep_set_vq_ready()
348 struct virtio_pci_common_cfg __iomem *cfg = oct_hw->common_cfg; in octep_get_vq_ready() local
351 return ioread16(&cfg->queue_enable); in octep_get_vq_ready()
357 return ioread16(&oct_hw->common_cfg->queue_size); in octep_get_vq_size()
362 switch (oct_hw->dev_id) { in octep_get_config_size()
374 struct device *dev = &oct_hw->pdev->dev; in octep_get_cap_addr()
375 u32 length = le32_to_cpu(cap->length); in octep_get_cap_addr()
376 u32 offset = le32_to_cpu(cap->offset); in octep_get_cap_addr()
377 u8 bar = cap->bar; in octep_get_cap_addr()
389 len = pci_resource_len(oct_hw->pdev, bar); in octep_get_cap_addr()
391 dev_err(dev, "invalid cap: overflows bar space: %u > %u\n", in octep_get_cap_addr()
395 return oct_hw->base[bar] + offset; in octep_get_cap_addr()
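
octep_get_cap_addr() turns a virtio PCI capability into a mapped address only after bounds-checking it against the BAR, which is what the "overflows bar space" warning above guards. A plausible reconstruction from the fragments (the bar-count check is an assumption):

static void __iomem *octep_get_cap_addr_sketch(struct octep_hw *oct_hw,
					       struct virtio_pci_cap *cap)
{
	struct device *dev = &oct_hw->pdev->dev;
	u32 length = le32_to_cpu(cap->length);
	u32 offset = le32_to_cpu(cap->offset);
	u8 bar = cap->bar;
	u32 len;

	if (bar >= PCI_STD_NUM_BARS) {	/* assumed bound; not in listing */
		dev_err(dev, "invalid bar: %u\n", bar);
		return NULL;
	}

	len = pci_resource_len(oct_hw->pdev, bar);
	if (offset + length > len) {
		dev_err(dev, "invalid cap: overflows bar space: %u > %u\n",
			offset + length, len);
		return NULL;
	}

	return oct_hw->base[bar] + offset;
}
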
398 /* In Octeon DPU device, the virtio config space is completely
404 u8 __iomem *bar = oct_hw->base[OCTEP_HW_CAPS_BAR]; in octep_pci_caps_read()
419 return -1; in octep_pci_signature_verify()
422 return -1; in octep_pci_signature_verify()
432 switch (vndr_data->id) { in octep_vndr_data_process()
434 oct_hw->dev_id = (u8)vndr_data->data; in octep_vndr_data_process()
437 dev_err(&oct_hw->pdev->dev, "Invalid vendor data id %u\n", in octep_vndr_data_process()
438 vndr_data->id); in octep_vndr_data_process()
447 struct device *dev = &pdev->dev; in octep_hw_caps_read()
453 oct_hw->pdev = pdev; in octep_hw_caps_read()
457 return -EIO; in octep_hw_caps_read()
472 dev_info(dev, "[%2x] cfg type: %u, bar: %u, offset: %04x, len: %u\n", in octep_hw_caps_read()
477 oct_hw->common_cfg = octep_get_cap_addr(oct_hw, &cap); in octep_hw_caps_read()
480 octep_pci_caps_read(oct_hw, &oct_hw->notify_off_multiplier, in octep_hw_caps_read()
483 oct_hw->notify_base = octep_get_cap_addr(oct_hw, &cap); in octep_hw_caps_read()
484 oct_hw->notify_bar = cap.bar; in octep_hw_caps_read()
485 oct_hw->notify_base_pa = pci_resource_start(pdev, cap.bar) + in octep_hw_caps_read()
489 oct_hw->dev_cfg = octep_get_cap_addr(oct_hw, &cap); in octep_hw_caps_read()
492 oct_hw->isr = octep_get_cap_addr(oct_hw, &cap); in octep_hw_caps_read()
498 return -EINVAL; in octep_hw_caps_read()
507 if (!oct_hw->common_cfg || !oct_hw->notify_base || in octep_hw_caps_read()
508 !oct_hw->dev_cfg || !oct_hw->isr) { in octep_hw_caps_read()
510 return -EIO; in octep_hw_caps_read()
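
The fragments above come from a walk over the device's PCI capability list, read through octep_pci_caps_read() because the firmware emulates config space; each vendor capability's cfg_type selects which mapped region it describes. A sketch of the dispatch, where cap and its config-space offset pos are assumed to come from that (not shown) loop:

/* Sketch; 'cap' is a struct virtio_pci_cap read via octep_pci_caps_read()
 * at config-space offset 'pos' by the surrounding loop.
 */
switch (cap.cfg_type) {
case VIRTIO_PCI_CAP_COMMON_CFG:
	oct_hw->common_cfg = octep_get_cap_addr(oct_hw, &cap);
	break;
case VIRTIO_PCI_CAP_NOTIFY_CFG:
	/* notify_off_multiplier is the extra u32 following the cap. */
	octep_pci_caps_read(oct_hw, &oct_hw->notify_off_multiplier,
			    4, pos + sizeof(cap));
	oct_hw->notify_base = octep_get_cap_addr(oct_hw, &cap);
	oct_hw->notify_bar = cap.bar;
	oct_hw->notify_base_pa = pci_resource_start(oct_hw->pdev, cap.bar) +
				 le32_to_cpu(cap.offset);
	break;
case VIRTIO_PCI_CAP_DEVICE_CFG:
	oct_hw->dev_cfg = octep_get_cap_addr(oct_hw, &cap);
	break;
case VIRTIO_PCI_CAP_ISR_CFG:
	oct_hw->isr = octep_get_cap_addr(oct_hw, &cap);
	break;
}
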
512 dev_info(dev, "common cfg mapped at: %p\n", oct_hw->common_cfg); in octep_hw_caps_read()
513 dev_info(dev, "device cfg mapped at: %p\n", oct_hw->dev_cfg); in octep_hw_caps_read()
514 dev_info(dev, "isr cfg mapped at: %p\n", oct_hw->isr); in octep_hw_caps_read()
516 oct_hw->notify_base, oct_hw->notify_off_multiplier); in octep_hw_caps_read()
518 oct_hw->config_size = octep_get_config_size(oct_hw); in octep_hw_caps_read()
519 oct_hw->features = octep_hw_get_dev_features(oct_hw); in octep_hw_caps_read()
521 ret = octep_verify_features(oct_hw->features); in octep_hw_caps_read()
523 dev_err(&pdev->dev, "Couldn't read features from the device FW\n"); in octep_hw_caps_read()
526 oct_hw->nr_vring = vp_ioread16(&oct_hw->common_cfg->num_queues); in octep_hw_caps_read()
528 oct_hw->vqs = devm_kcalloc(&pdev->dev, oct_hw->nr_vring, sizeof(*oct_hw->vqs), GFP_KERNEL); in octep_hw_caps_read()
529 if (!oct_hw->vqs) in octep_hw_caps_read()
530 return -ENOMEM; in octep_hw_caps_read()
532 dev_info(&pdev->dev, "Device features : %llx\n", oct_hw->features); in octep_hw_caps_read()
533 dev_info(&pdev->dev, "Maximum queues : %u\n", oct_hw->nr_vring); in octep_hw_caps_read()
535 for (i = 0; i < oct_hw->nr_vring; i++) { in octep_hw_caps_read()
537 notify_off = vp_ioread16(&oct_hw->common_cfg->queue_notify_off); in octep_hw_caps_read()
538 oct_hw->vqs[i].notify_addr = oct_hw->notify_base + in octep_hw_caps_read()
539 notify_off * oct_hw->notify_off_multiplier; in octep_hw_caps_read()
540 oct_hw->vqs[i].cb_notify_addr = (u32 __iomem *)oct_hw->vqs[i].notify_addr + 1; in octep_hw_caps_read()
541 oct_hw->vqs[i].notify_pa = oct_hw->notify_base_pa + in octep_hw_caps_read()
542 notify_off * oct_hw->notify_off_multiplier; in octep_hw_caps_read()
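
Each virtqueue's doorbell address follows the virtio-pci notify capability rule applied in the loop above: notify_base plus the queue's queue_notify_off scaled by notify_off_multiplier. octep_notify_queue() then kicks a queue by writing its id to that address. A compact restatement (octep_write_queue_select()'s signature is assumed from its use above):

static void __iomem *octep_vq_notify_addr_sketch(struct octep_hw *oct_hw, u16 qid)
{
	u16 notify_off;

	octep_write_queue_select(oct_hw, qid);
	notify_off = vp_ioread16(&oct_hw->common_cfg->queue_notify_off);

	/* Same arithmetic as vqs[i].notify_addr in the loop above. */
	return oct_hw->notify_base + notify_off * oct_hw->notify_off_multiplier;
}
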