Lines Matching +full:vp +full:- +full:p
1 // SPDX-License-Identifier: GPL-2.0-or-later
30 #include <asm/xive-regs.h>
34 #include "xive-internal.h"
60 return -EINVAL; in xive_native_populate_irq_data()
65 data->flags |= XIVE_IRQ_FLAG_STORE_EOI; in xive_native_populate_irq_data()
67 data->flags |= XIVE_IRQ_FLAG_STORE_EOI; in xive_native_populate_irq_data()
69 data->flags |= XIVE_IRQ_FLAG_LSI; in xive_native_populate_irq_data()
70 data->eoi_page = be64_to_cpu(eoi_page); in xive_native_populate_irq_data()
71 data->trig_page = be64_to_cpu(trig_page); in xive_native_populate_irq_data()
72 data->esb_shift = be32_to_cpu(esb_shift); in xive_native_populate_irq_data()
73 data->src_chip = be32_to_cpu(src_chip); in xive_native_populate_irq_data()
75 data->eoi_mmio = ioremap(data->eoi_page, 1u << data->esb_shift); in xive_native_populate_irq_data()
76 if (!data->eoi_mmio) { in xive_native_populate_irq_data()
78 return -ENOMEM; in xive_native_populate_irq_data()
81 data->hw_irq = hw_irq; in xive_native_populate_irq_data()
83 if (!data->trig_page) in xive_native_populate_irq_data()
85 if (data->trig_page == data->eoi_page) { in xive_native_populate_irq_data()
86 data->trig_mmio = data->eoi_mmio; in xive_native_populate_irq_data()
90 data->trig_mmio = ioremap(data->trig_page, 1u << data->esb_shift); in xive_native_populate_irq_data()
91 if (!data->trig_mmio) { in xive_native_populate_irq_data()
93 return -ENOMEM; in xive_native_populate_irq_data()
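The matches above all fall in xive_native_populate_irq_data(), which asks OPAL for the ESB pages of a hardware interrupt and maps them. A condensed sketch of the surrounding flow, reconstructed from the matched lines; the flag tests and dropped error messages outside those lines are assumptions based on the OPAL XIVE API, not a verbatim copy:

int xive_native_populate_irq_data(u32 hw_irq, struct xive_irq_data *data)
{
        __be64 flags, eoi_page, trig_page;
        __be32 esb_shift, src_chip;
        u64 opal_flags;
        s64 rc;

        memset(data, 0, sizeof(*data));

        /* Query the ESB pages and geometry for this HW interrupt */
        rc = opal_xive_get_irq_info(hw_irq, &flags, &eoi_page, &trig_page,
                                    &esb_shift, &src_chip);
        if (rc)
                return -EINVAL;

        /* Translate OPAL flags into the generic XIVE flags */
        opal_flags = be64_to_cpu(flags);
        if (opal_flags & OPAL_XIVE_IRQ_STORE_EOI)
                data->flags |= XIVE_IRQ_FLAG_STORE_EOI;
        if (opal_flags & OPAL_XIVE_IRQ_LSI)
                data->flags |= XIVE_IRQ_FLAG_LSI;
        data->eoi_page = be64_to_cpu(eoi_page);
        data->trig_page = be64_to_cpu(trig_page);
        data->esb_shift = be32_to_cpu(esb_shift);
        data->src_chip = be32_to_cpu(src_chip);

        /* The EOI page is mandatory, map it */
        data->eoi_mmio = ioremap(data->eoi_page, 1u << data->esb_shift);
        if (!data->eoi_mmio)
                return -ENOMEM;

        data->hw_irq = hw_irq;

        /* The trigger page is optional and may alias the EOI page */
        if (!data->trig_page)
                return 0;
        if (data->trig_page == data->eoi_page) {
                data->trig_mmio = data->eoi_mmio;
                return 0;
        }
        data->trig_mmio = ioremap(data->trig_page, 1u << data->esb_shift);
        if (!data->trig_mmio)
                return -ENOMEM;
        return 0;
}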
109 return rc == 0 ? 0 : -ENXIO; in xive_native_configure_irq()
117 __be64 vp; in xive_native_get_irq_config() local
120 rc = opal_xive_get_irq_config(hw_irq, &vp, prio, &lirq); in xive_native_get_irq_config()
122 *target = be64_to_cpu(vp); in xive_native_get_irq_config()
125 return rc == 0 ? 0 : -ENXIO; in xive_native_get_irq_config()
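xive_native_configure_irq() and xive_native_get_irq_config() are thin wrappers around the OPAL set/get calls: the set side retries while OPAL is busy, the get side only byte-swaps the wide fields. A minimal sketch, with the retry delay and out-parameter names assumed:

static int xive_native_configure_irq(u32 hw_irq, u32 target, u8 prio, u32 sw_irq)
{
        s64 rc;

        for (;;) {
                rc = opal_xive_set_irq_config(hw_irq, target, prio, sw_irq);
                if (rc != OPAL_BUSY)
                        break;
                msleep(OPAL_BUSY_DELAY_MS);
        }
        return rc == 0 ? 0 : -ENXIO;
}

static int xive_native_get_irq_config(u32 hw_irq, u32 *target, u8 *prio,
                                      u32 *sw_irq)
{
        __be64 vp;
        __be32 lirq;
        s64 rc;

        rc = opal_xive_get_irq_config(hw_irq, &vp, prio, &lirq);

        /* Byte-swap the wide fields; prio is a plain u8 */
        *target = be64_to_cpu(vp);
        *sw_irq = be32_to_cpu(lirq);

        return rc == 0 ? 0 : -ENXIO;
}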
128 #define vp_err(vp, fmt, ...) pr_err("VP[0x%x]: " fmt, vp, ##__VA_ARGS__) argument
142 return -EINVAL; in xive_native_configure_queue()
148 q->msk = order ? ((1u << (order - 2)) - 1) : 0; in xive_native_configure_queue()
149 q->idx = 0; in xive_native_configure_queue()
150 q->toggle = 0; in xive_native_configure_queue()
158 rc = -EIO; in xive_native_configure_queue()
161 q->eoi_phys = be64_to_cpu(qeoi_page_be); in xive_native_configure_queue()
168 q->esc_irq = be32_to_cpu(esc_irq_be); in xive_native_configure_queue()
181 rc = -EIO; in xive_native_configure_queue()
185 * q->qpage is set due to how it manages IPI EOIs in xive_native_configure_queue()
188 q->qpage = qpage; in xive_native_configure_queue()
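xive_native_configure_queue() sizes the software view of the event queue and then hands the page to OPAL, retrying on OPAL_BUSY. Queue entries are 4 bytes, so a 2^order-byte page holds 2^(order-2) entries and q->msk is that count minus one. A condensed sketch; the flag handling and busy back-off outside the matched lines are assumptions:

int xive_native_configure_queue(u32 vp_id, struct xive_q *q, u8 prio,
                                __be32 *qpage, u32 order, bool can_escalate)
{
        __be64 qeoi_page_be;
        __be32 esc_irq_be;
        u64 flags, qpage_phys;
        s64 rc;

        if (order) {
                if (WARN_ON(!qpage))
                        return -EINVAL;
                qpage_phys = __pa(qpage);
        } else
                qpage_phys = 0;

        /* 4-byte entries: the mask covers 2^(order - 2) slots */
        q->msk = order ? ((1u << (order - 2)) - 1) : 0;
        q->idx = 0;
        q->toggle = 0;

        rc = opal_xive_get_queue_info(vp_id, prio, NULL, NULL,
                                      &qeoi_page_be, &esc_irq_be, NULL);
        if (rc) {
                vp_err(vp_id, "Failed to get queue %d info : %lld\n", prio, rc);
                return -EIO;
        }
        q->eoi_phys = be64_to_cpu(qeoi_page_be);

        flags = OPAL_XIVE_EQ_ALWAYS_NOTIFY;
        if (can_escalate) {
                q->esc_irq = be32_to_cpu(esc_irq_be);
                flags |= OPAL_XIVE_EQ_ESCALATE;
        }

        /* Configure and enable the queue in HW, retrying while OPAL is busy */
        for (;;) {
                rc = opal_xive_set_queue_info(vp_id, prio, qpage_phys, order, flags);
                if (rc != OPAL_BUSY)
                        break;
                msleep(OPAL_BUSY_DELAY_MS);
        }
        if (rc) {
                vp_err(vp_id, "Failed to set queue %d info: %lld\n", prio, rc);
                return -EIO;
        }

        /*
         * KVM code requires all of the above to be visible before
         * q->qpage is set due to how it manages IPI EOIs
         */
        wmb();
        q->qpage = qpage;
        return 0;
}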
218 struct xive_q *q = &xc->queue[prio]; in xive_native_setup_queue()
231 struct xive_q *q = &xc->queue[prio]; in xive_native_cleanup_queue()
240 free_pages((unsigned long)q->qpage, alloc_order); in xive_native_cleanup_queue()
241 q->qpage = NULL; in xive_native_cleanup_queue()
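The setup/cleanup pair around these matches allocates the per-CPU queue page (via the common xive_queue_page_alloc() helper feeding xive_native_configure_queue()) and releases it again; disabling goes through opal_xive_set_queue_info() with a zero page. A sketch of the cleanup side, assuming the helper names used by the common XIVE code (xive_alloc_order(), get_hard_smp_processor_id()):

static void xive_native_cleanup_queue(unsigned int cpu, struct xive_cpu *xc,
                                      u8 prio)
{
        struct xive_q *q = &xc->queue[prio];
        unsigned int alloc_order;

        /* Disable the queue in HW: zero page, zero size, zero flags */
        for (;;) {
                s64 rc = opal_xive_set_queue_info(get_hard_smp_processor_id(cpu),
                                                  prio, 0, 0, 0);
                if (rc != OPAL_BUSY)
                        break;
                msleep(OPAL_BUSY_DELAY_MS);
        }

        /* Free the queue page and forget it */
        alloc_order = xive_alloc_order(xive_queue_shift);
        free_pages((unsigned long)q->qpage, alloc_order);
        q->qpage = NULL;
}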
246 return of_device_is_compatible(node, "ibm,opal-xive-vc"); in xive_native_match()
267 irq = opal_xive_allocate_irq(xc->chip_id); in xive_native_get_ipi()
274 return -ENXIO; in xive_native_get_ipi()
276 xc->hw_ipi = irq; in xive_native_get_ipi()
316 if (xc->hw_ipi == XIVE_BAD_IRQ) in xive_native_put_ipi()
319 rc = opal_xive_free_irq(xc->hw_ipi); in xive_native_put_ipi()
324 xc->hw_ipi = XIVE_BAD_IRQ; in xive_native_put_ipi()
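IPI allocation and release both loop on OPAL_BUSY; a released IPI is marked with XIVE_BAD_IRQ so it is never freed twice. A sketch of the pair, with the retry delay assumed:

static int xive_native_get_ipi(unsigned int cpu, struct xive_cpu *xc)
{
        s64 irq;

        /* Allocate an IPI from the local chip's pool */
        for (;;) {
                irq = opal_xive_allocate_irq(xc->chip_id);
                if (irq == OPAL_BUSY) {
                        msleep(OPAL_BUSY_DELAY_MS);
                        continue;
                }
                if (irq < 0) {
                        pr_err("Failed to allocate IPI on CPU %d\n", cpu);
                        return -ENXIO;
                }
                xc->hw_ipi = irq;
                break;
        }
        return 0;
}

static void xive_native_put_ipi(unsigned int cpu, struct xive_cpu *xc)
{
        s64 rc;

        if (xc->hw_ipi == XIVE_BAD_IRQ)
                return;
        for (;;) {
                rc = opal_xive_free_irq(xc->hw_ipi);
                if (rc == OPAL_BUSY) {
                        msleep(OPAL_BUSY_DELAY_MS);
                        continue;
                }
                xc->hw_ipi = XIVE_BAD_IRQ;
                break;
        }
}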
365 xc->pending_prio |= 1 << cppr; in xive_native_update_pending()
371 if (cppr >= xc->cppr) in xive_native_update_pending()
373 smp_processor_id(), cppr, xc->cppr); in xive_native_update_pending()
376 xc->cppr = cppr; in xive_native_update_pending()
388 xc->chip_id = cpu_to_chip_id(cpu); in xive_native_prepare_cpu()
394 u32 vp; in xive_native_setup_cpu() local
401 /* Check if pool VP already active, if it is, pull it */ in xive_native_setup_cpu()
405 /* Enable the pool VP */ in xive_native_setup_cpu()
406 vp = xive_pool_vps + cpu; in xive_native_setup_cpu()
408 rc = opal_xive_set_vp_info(vp, OPAL_XIVE_VP_ENABLED, 0); in xive_native_setup_cpu()
414 pr_err("Failed to enable pool VP on CPU %d\n", cpu); in xive_native_setup_cpu()
419 rc = opal_xive_get_vp_info(vp, NULL, &vp_cam_be, NULL, NULL); in xive_native_setup_cpu()
421 pr_err("Failed to get pool VP info CPU %d\n", cpu); in xive_native_setup_cpu()
434 u32 vp; in xive_native_teardown_cpu() local
439 /* Pull the pool VP from the CPU */ in xive_native_teardown_cpu()
443 vp = xive_pool_vps + cpu; in xive_native_teardown_cpu()
445 rc = opal_xive_set_vp_info(vp, 0, 0); in xive_native_teardown_cpu()
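CPU bring-up enables one pool VP per CPU (xive_pool_vps + cpu), fetches its CAM value from OPAL and pushes it into the pool context of the thread interrupt management area; teardown pulls the context and disables the VP again. A condensed sketch, assuming the TIMA register names from asm/xive-regs.h:

static void xive_native_setup_cpu(unsigned int cpu, struct xive_cpu *xc)
{
        __be64 vp_cam_be;
        u64 vp_cam;
        u32 vp;
        s64 rc;

        if (xive_pool_vps == XIVE_INVALID_VP)
                return;

        /* Check if pool VP already active, if it is, pull it */
        if (in_be32(xive_tima + TM_QW2_HV_POOL + TM_WORD2) & TM_QW2W2_VP)
                in_be64(xive_tima + TM_SPC_PULL_POOL_CTX);

        /* Enable the pool VP */
        vp = xive_pool_vps + cpu;
        for (;;) {
                rc = opal_xive_set_vp_info(vp, OPAL_XIVE_VP_ENABLED, 0);
                if (rc != OPAL_BUSY)
                        break;
                msleep(OPAL_BUSY_DELAY_MS);
        }
        if (rc) {
                pr_err("Failed to enable pool VP on CPU %d\n", cpu);
                return;
        }

        /* Grab the CAM value and push it into the HV pool context */
        rc = opal_xive_get_vp_info(vp, NULL, &vp_cam_be, NULL, NULL);
        if (rc) {
                pr_err("Failed to get pool VP info CPU %d\n", cpu);
                return;
        }
        vp_cam = be64_to_cpu(vp_cam_be);
        out_be32(xive_tima + TM_QW2_HV_POOL + TM_WORD0, 0xff);
        out_be32(xive_tima + TM_QW2_HV_POOL + TM_WORD2, TM_QW2W2_VP | vp_cam);
}

static void xive_native_teardown_cpu(unsigned int cpu, struct xive_cpu *xc)
{
        u32 vp;
        s64 rc;

        if (xive_pool_vps == XIVE_INVALID_VP)
                return;

        /* Pull the pool VP from the CPU, then disable it */
        in_be64(xive_tima + TM_SPC_PULL_POOL_CTX);
        vp = xive_pool_vps + cpu;
        for (;;) {
                rc = opal_xive_set_vp_info(vp, 0, 0);
                if (rc != OPAL_BUSY)
                        break;
                msleep(OPAL_BUSY_DELAY_MS);
        }
}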
467 debugfs_create_bool("save-restore", 0600, xive_dir, &xive_has_save_restore); in xive_native_debug_create()
499 if (of_property_read_u32(np, "ibm,xive-provision-page-size", in xive_parse_provisioning()
502 rc = of_property_count_elems_of_size(np, "ibm,xive-provision-chips", 4); in xive_parse_provisioning()
516 rc = of_property_read_u32_array(np, "ibm,xive-provision-chips", in xive_parse_provisioning()
524 xive_provision_cache = kmem_cache_create("xive-provision", in xive_parse_provisioning()
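xive_parse_provisioning() reads the optional provisioning properties: the page size OPAL wants, the list of chips that may request pages, and then creates a slab cache sized accordingly. A condensed sketch; the global variable names not visible in the matched lines (xive_provision_size, xive_provision_chips, xive_provision_chip_count) are assumptions:

static bool __init xive_parse_provisioning(struct device_node *np)
{
        int rc;

        /* No provisioning page size means no provisioning is needed */
        if (of_property_read_u32(np, "ibm,xive-provision-page-size",
                                 &xive_provision_size) < 0)
                return true;

        /* Count the chips that may request provisioning pages */
        rc = of_property_count_elems_of_size(np, "ibm,xive-provision-chips", 4);
        if (rc < 0) {
                pr_err("Error %d getting provision chips array\n", rc);
                return false;
        }
        xive_provision_chip_count = rc;
        if (rc == 0)
                return true;

        xive_provision_chips = kcalloc(4, xive_provision_chip_count, GFP_KERNEL);
        if (WARN_ON(!xive_provision_chips))
                return false;

        rc = of_property_read_u32_array(np, "ibm,xive-provision-chips",
                                        xive_provision_chips,
                                        xive_provision_chip_count);
        if (rc < 0) {
                pr_err("Error %d reading provision chips array\n", rc);
                return false;
        }

        /* Pages donated to OPAL must match the advertised size and alignment */
        xive_provision_cache = kmem_cache_create("xive-provision",
                                                 xive_provision_size,
                                                 xive_provision_size,
                                                 0, NULL);
        if (!xive_provision_cache)
                pr_err("Failed to allocate provision cache\n");
        return xive_provision_cache != NULL;
}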
538 pr_debug("Allocating VP block for pool size %u\n", nr_cpu_ids); in xive_native_setup_pools()
542 pr_err("Failed to allocate pool VP, KVM might not function\n"); in xive_native_setup_pools()
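The pool VPs indexed by the per-CPU setup above are carved out of a single VP block sized by nr_cpu_ids. A short sketch of xive_native_setup_pools(), assuming xive_pool_vps is the global consumed by xive_native_setup_cpu():

static bool __init xive_native_setup_pools(void)
{
        /* Allocate a pool big enough for all possible CPUs */
        pr_debug("Allocating VP block for pool size %u\n", nr_cpu_ids);

        xive_pool_vps = xive_native_alloc_vp_block(nr_cpu_ids);
        if (WARN_ON(xive_pool_vps == XIVE_INVALID_VP))
                pr_err("Failed to allocate pool VP, KVM might not function\n");

        pr_debug("Pool VPs allocated at 0x%x for %u max CPUs\n",
                 xive_pool_vps, nr_cpu_ids);
        return true;
}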
570 np = of_find_compatible_node(NULL, NULL, "ibm,opal-xive-pe"); in xive_native_init()
589 if (of_property_read_u32(np, "ibm,xive-#priorities", &val) == 0) in xive_native_init()
590 max_prio = val - 1; in xive_native_init()
593 of_property_for_each_u32(np, "ibm,xive-eq-sizes", val) { in xive_native_init()
600 xive_has_single_esc = of_property_read_bool(np, "single-escalation-support"); in xive_native_init()
602 xive_has_save_restore = of_property_read_bool(np, "vp-save-restore"); in xive_native_init()
636 pr_info("Using %dkB queues\n", 1 << (xive_queue_shift - 10)); in xive_native_init()
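xive_native_init() locates the "ibm,opal-xive-pe" node and derives its tunables from device-tree properties: the number of usable priorities, the supported event-queue sizes (preferring one that matches PAGE_SHIFT), and the single-escalation and save/restore capabilities. A sketch of just that parsing fragment; the local declarations and the default priority value are assumptions:

        struct device_node *np;
        u32 val;

        np = of_find_compatible_node(NULL, NULL, "ibm,opal-xive-pe");
        if (!np)
                return false;

        /* Highest usable priority is one less than the advertised count */
        max_prio = 7;
        if (of_property_read_u32(np, "ibm,xive-#priorities", &val) == 0)
                max_prio = val - 1;

        /* Pick a supported queue size, preferring one page per queue */
        of_property_for_each_u32(np, "ibm,xive-eq-sizes", val) {
                xive_queue_shift = val;
                if (val == PAGE_SHIFT)
                        break;
        }

        /* Optional firmware capabilities */
        xive_has_single_esc = of_property_read_bool(np, "single-escalation-support");
        xive_has_save_restore = of_property_read_bool(np, "vp-save-restore");

        pr_info("Using %dkB queues\n", 1 << (xive_queue_shift - 10));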
647 void *p; in xive_native_provision_pages() local
656 p = kmem_cache_alloc(xive_provision_cache, GFP_KERNEL); in xive_native_provision_pages()
657 if (!p) { in xive_native_provision_pages()
661 kmemleak_ignore(p); in xive_native_provision_pages()
662 opal_xive_donate_page(chip, __pa(p)); in xive_native_provision_pages()
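When a VP block allocation comes back with a provisioning request, the kernel donates one page per provisioned chip from the slab cache created above. A sketch of xive_native_provision_pages(), using the same assumed chip-array globals as the provisioning sketch:

static bool xive_native_provision_pages(void)
{
        void *p;
        u32 i;

        for (i = 0; i < xive_provision_chip_count; i++) {
                u32 chip = xive_provision_chips[i];

                p = kmem_cache_alloc(xive_provision_cache, GFP_KERNEL);
                if (!p) {
                        pr_err("Failed to allocate provisioning page\n");
                        return false;
                }
                /* The page now belongs to OPAL; don't report it as leaked */
                kmemleak_ignore(p);
                opal_xive_donate_page(chip, __pa(p));
        }
        return true;
}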
672 order = fls(max_vcpus) - 1; in xive_native_alloc_vp_block()
676 pr_debug("VP block alloc, for max VCPUs %d use order %d\n", in xive_native_alloc_vp_block()
710 pr_warn("OPAL error %lld freeing VP block\n", rc); in xive_native_free_vp_block()
728 vp_err(vp_id, "Failed to enable VP : %lld\n", rc); in xive_native_enable_vp()
729 return rc ? -EIO : 0; in xive_native_enable_vp()
744 vp_err(vp_id, "Failed to disable VP : %lld\n", rc); in xive_native_disable_vp()
745 return rc ? -EIO : 0; in xive_native_disable_vp()
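VP blocks are allocated by order (the power of two covering max_vcpus); OPAL may answer with a provisioning request mid-allocation, which is where xive_native_provision_pages() above comes in. Enabling or disabling an individual VP is then a flags update through opal_xive_set_vp_info(). A condensed sketch:

u32 xive_native_alloc_vp_block(u32 max_vcpus)
{
        u32 order;
        s64 rc;

        /* Round max_vcpus up to a power of two and derive the order */
        order = fls(max_vcpus) - 1;
        if (max_vcpus > (1 << order))
                order++;

        pr_debug("VP block alloc, for max VCPUs %d use order %d\n",
                 max_vcpus, order);

        for (;;) {
                rc = opal_xive_alloc_vp_block(order);
                switch (rc) {
                case OPAL_BUSY:
                        msleep(OPAL_BUSY_DELAY_MS);
                        break;
                case OPAL_XIVE_PROVISIONING:
                        if (!xive_native_provision_pages())
                                return XIVE_INVALID_VP;
                        break;
                default:
                        if (rc < 0) {
                                pr_err("OPAL failed to allocate VCPUs order %d, err %lld\n",
                                       order, rc);
                                return XIVE_INVALID_VP;
                        }
                        return rc;      /* First VP of the block */
                }
        }
}

int xive_native_enable_vp(u32 vp_id, bool single_escalation)
{
        u64 flags = OPAL_XIVE_VP_ENABLED;
        s64 rc;

        if (single_escalation)
                flags |= OPAL_XIVE_VP_SINGLE_ESCALATION;
        for (;;) {
                rc = opal_xive_set_vp_info(vp_id, flags, 0);
                if (rc != OPAL_BUSY)
                        break;
                msleep(OPAL_BUSY_DELAY_MS);
        }
        if (rc)
                vp_err(vp_id, "Failed to enable VP : %lld\n", rc);
        return rc ? -EIO : 0;
}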
757 vp_err(vp_id, "Failed to get VP info : %lld\n", rc); in xive_native_get_vp_info()
758 return -EIO; in xive_native_get_vp_info()
797 return -EIO; in xive_native_get_queue_info()
825 return -EIO; in xive_native_get_queue_state()
844 return -EIO; in xive_native_set_queue_state()
865 vp_err(vp_id, "failed to get vp state : %lld\n", rc); in xive_native_get_vp_state()
866 return -EIO; in xive_native_get_vp_state()
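The queue and VP state accessors at the bottom of the file exist for KVM save/restore: they read or write the EQ toggle/index and the VP state word through OPAL and only byte-swap the results. A sketch of the VP state getter, with the out_state parameter name assumed:

int xive_native_get_vp_state(u32 vp_id, u64 *out_state)
{
        __be64 state;
        s64 rc;

        rc = opal_xive_get_vp_state(vp_id, &state);
        if (rc) {
                vp_err(vp_id, "failed to get vp state : %lld\n", rc);
                return -EIO;
        }

        if (out_state)
                *out_state = be64_to_cpu(state);
        return 0;
}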