Lines Matching +full:free +full:- +full:flowing
1 // SPDX-License-Identifier: GPL-2.0
6 * Copyright (C) 2020-2022 OpenSynergy.
7 * Copyright (C) 2021-2024 ARM Ltd.
13 * The scmi-virtio transport implements a driver for the virtio SCMI device.
36 ((ci)->max_msg_size + SCMI_MSG_MAX_PROT_OVERHEAD)
40 * struct scmi_vio_channel - Transport channel information
49 * @pending_cmds_list: List of pre-fetched commands queued for later processing
60 /* lock to protect access to the free list. */
87 * struct scmi_vio_msg - Transport PDU information
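
The excerpts above and below reference a number of fields of these two structures. Pulled together, a simplified sketch of their layout could look as follows; field names are inferred from this listing only, the buffer pointers are reduced to void * and the real upstream definitions differ in detail:

struct scmi_vio_channel {
        struct virtqueue *vqueue;               /* TX or RX virtqueue backing this channel */
        struct scmi_chan_info *cinfo;           /* SCMI core channel descriptor */
        bool is_rx;                             /* true for the P2A (receive) channel */
        unsigned int max_msg;                   /* number of pre-allocated message slots */
        refcount_t users;                       /* channel lifetime refcount */
        spinlock_t lock;                        /* protects vqueue operations and cinfo */
        spinlock_t free_lock;                   /* lock to protect access to the free list */
        struct list_head free_list;             /* pool of unused scmi_vio_msg buffers */
        spinlock_t pending_lock;                /* protects pending_cmds_list */
        struct list_head pending_cmds_list;     /* pre-fetched commands queued for later processing */
        struct work_struct deferred_tx_work;    /* deferred processing of pre-fetched replies */
        struct workqueue_struct *deferred_tx_wq;
        struct completion *shutdown_done;       /* signalled by the last user on channel teardown */
};

struct scmi_vio_msg {
        void *request;                          /* A2P command buffer (device-readable) */
        void *input;                            /* P2A reply/notification buffer (device-writable) */
        struct list_head list;                  /* links into free_list or pending_cmds_list */
        unsigned int rx_len;                    /* length returned by the device */
        unsigned int max_len;                   /* VIRTIO_SCMI_MAX_PDU_SIZE(cinfo) */
        refcount_t users;                       /* message lifetime refcount */
        spinlock_t poll_lock;                   /* protects poll_status */
        int poll_status;                        /* VIO_MSG_NOT_POLLED/POLLING/POLL_DONE/POLL_TIMEOUT */
        unsigned int poll_idx;                  /* ring snapshot for virtqueue_poll() */
};
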
124 spin_lock_irqsave(&vioch->lock, flags); in scmi_vio_channel_ready()
125 cinfo->transport_info = vioch; in scmi_vio_channel_ready()
127 vioch->cinfo = cinfo; in scmi_vio_channel_ready()
128 spin_unlock_irqrestore(&vioch->lock, flags); in scmi_vio_channel_ready()
130 refcount_set(&vioch->users, 1); in scmi_vio_channel_ready()
135 return refcount_inc_not_zero(&vioch->users); in scmi_vio_channel_acquire()
140 if (refcount_dec_and_test(&vioch->users)) { in scmi_vio_channel_release()
143 spin_lock_irqsave(&vioch->lock, flags); in scmi_vio_channel_release()
144 if (vioch->shutdown_done) { in scmi_vio_channel_release()
145 vioch->cinfo = NULL; in scmi_vio_channel_release()
146 complete(vioch->shutdown_done); in scmi_vio_channel_release()
148 spin_unlock_irqrestore(&vioch->lock, flags); in scmi_vio_channel_release()
161 spin_lock_irqsave(&vioch->lock, flags); in scmi_vio_channel_cleanup_sync()
162 if (!vioch->cinfo || vioch->shutdown_done) { in scmi_vio_channel_cleanup_sync()
163 spin_unlock_irqrestore(&vioch->lock, flags); in scmi_vio_channel_cleanup_sync()
167 vioch->shutdown_done = &vioch_shutdown_done; in scmi_vio_channel_cleanup_sync()
168 if (!vioch->is_rx && vioch->deferred_tx_wq) in scmi_vio_channel_cleanup_sync()
170 vioch->deferred_tx_wq = NULL; in scmi_vio_channel_cleanup_sync()
171 spin_unlock_irqrestore(&vioch->lock, flags); in scmi_vio_channel_cleanup_sync()
176 wait_for_completion(vioch->shutdown_done); in scmi_vio_channel_cleanup_sync()
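
The three helpers above implement a refcount-plus-completion teardown handshake: the channel starts life with one reference, every concurrent user takes an extra one via scmi_vio_channel_acquire(), and cleanup installs an on-stack completion, drops the initial reference and waits for the last user to signal it. A minimal, self-contained sketch of the same pattern, using hypothetical demo_* names rather than the driver's code:

#include <linux/completion.h>
#include <linux/refcount.h>
#include <linux/spinlock.h>

struct demo_chan {
        refcount_t users;
        spinlock_t lock;
        struct completion *shutdown_done;
};

static void demo_chan_init(struct demo_chan *ch)
{
        spin_lock_init(&ch->lock);
        ch->shutdown_done = NULL;
        refcount_set(&ch->users, 1);    /* the channel itself holds the first reference */
}

static bool demo_chan_acquire(struct demo_chan *ch)
{
        /* Fails once teardown has let the count drop to zero */
        return refcount_inc_not_zero(&ch->users);
}

static void demo_chan_release(struct demo_chan *ch)
{
        if (refcount_dec_and_test(&ch->users)) {
                unsigned long flags;

                spin_lock_irqsave(&ch->lock, flags);
                if (ch->shutdown_done)
                        complete(ch->shutdown_done);
                spin_unlock_irqrestore(&ch->lock, flags);
        }
}

static void demo_chan_cleanup_sync(struct demo_chan *ch)
{
        unsigned long flags;
        DECLARE_COMPLETION_ONSTACK(done);

        spin_lock_irqsave(&ch->lock, flags);
        ch->shutdown_done = &done;
        spin_unlock_irqrestore(&ch->lock, flags);

        demo_chan_release(ch);          /* drop the initial reference ... */
        wait_for_completion(&done);     /* ... and wait for any in-flight users */
}
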
186 spin_lock_irqsave(&vioch->free_lock, flags); in scmi_virtio_get_free_msg()
187 if (list_empty(&vioch->free_list)) { in scmi_virtio_get_free_msg()
188 spin_unlock_irqrestore(&vioch->free_lock, flags); in scmi_virtio_get_free_msg()
192 msg = list_first_entry(&vioch->free_list, typeof(*msg), list); in scmi_virtio_get_free_msg()
193 list_del_init(&msg->list); in scmi_virtio_get_free_msg()
194 spin_unlock_irqrestore(&vioch->free_lock, flags); in scmi_virtio_get_free_msg()
197 msg->poll_status = VIO_MSG_NOT_POLLED; in scmi_virtio_get_free_msg()
198 refcount_set(&msg->users, 1); in scmi_virtio_get_free_msg()
205 return refcount_inc_not_zero(&msg->users); in scmi_vio_msg_acquire()
214 ret = refcount_dec_and_test(&msg->users); in scmi_vio_msg_release()
218 spin_lock_irqsave(&vioch->free_lock, flags); in scmi_vio_msg_release()
219 list_add_tail(&msg->list, &vioch->free_list); in scmi_vio_msg_release()
220 spin_unlock_irqrestore(&vioch->free_lock, flags); in scmi_vio_msg_release()
237 struct device *dev = &vioch->vqueue->vdev->dev; in scmi_vio_feed_vq_rx()
239 sg_init_one(&sg_in, msg->input, msg->max_len); in scmi_vio_feed_vq_rx()
241 spin_lock_irqsave(&vioch->lock, flags); in scmi_vio_feed_vq_rx()
243 rc = virtqueue_add_inbuf(vioch->vqueue, &sg_in, 1, msg, GFP_ATOMIC); in scmi_vio_feed_vq_rx()
247 virtqueue_kick(vioch->vqueue); in scmi_vio_feed_vq_rx()
249 spin_unlock_irqrestore(&vioch->lock, flags); in scmi_vio_feed_vq_rx()
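
scmi_vio_feed_vq_rx() re-arms the receive virtqueue by posting the message's input buffer back to the device and kicking it. The same virtio API sequence, outside of the SCMI specifics, looks roughly like this (a sketch, with a hypothetical demo_refill() wrapper):

#include <linux/scatterlist.h>
#include <linux/virtio.h>

/* Sketch: hand one receive buffer of @len bytes back to the device on @vq. */
static int demo_refill(struct virtqueue *vq, void *buf, unsigned int len, void *token)
{
        struct scatterlist sg;
        int rc;

        sg_init_one(&sg, buf, len);

        /* GFP_ATOMIC, since callers may hold a spinlock as in the driver above */
        rc = virtqueue_add_inbuf(vq, &sg, 1, token, GFP_ATOMIC);
        if (rc)
                return rc;

        /* Notify the device only after the buffer has been exposed */
        virtqueue_kick(vq);

        return 0;
}
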
256 * vioch->lock MUST NOT have been already acquired.
261 if (vioch->is_rx) in scmi_finalize_message()
275 if (WARN_ON_ONCE(!vqueue->vdev->priv)) in scmi_vio_complete_cb()
277 vioch = &((struct scmi_vio_channel *)vqueue->vdev->priv)[vqueue->index]; in scmi_vio_complete_cb()
283 spin_lock_irqsave(&vioch->lock, flags); in scmi_vio_complete_cb()
292 spin_unlock_irqrestore(&vioch->lock, flags); in scmi_vio_complete_cb()
298 spin_unlock_irqrestore(&vioch->lock, flags); in scmi_vio_complete_cb()
301 msg->rx_len = length; in scmi_vio_complete_cb()
302 core->rx_callback(vioch->cinfo, in scmi_vio_complete_cb()
303 core->msg->read_header(msg->input), in scmi_vio_complete_cb()
332 * Process pre-fetched messages: these could be non-polled messages or in scmi_vio_deferred_tx_worker()
333 * late timed-out replies to polled messages dequeued by chance while in scmi_vio_deferred_tx_worker()
335 * the valid non-expired messages and, in any case, finally free all of them. in scmi_vio_deferred_tx_worker()
337 spin_lock_irqsave(&vioch->pending_lock, flags); in scmi_vio_deferred_tx_worker()
339 /* Scan the list of possibly pre-fetched messages during polling. */ in scmi_vio_deferred_tx_worker()
340 list_for_each_entry_safe(msg, tmp, &vioch->pending_cmds_list, list) { in scmi_vio_deferred_tx_worker()
341 list_del(&msg->list); in scmi_vio_deferred_tx_worker()
347 if (msg->poll_status == VIO_MSG_NOT_POLLED) in scmi_vio_deferred_tx_worker()
348 core->rx_callback(vioch->cinfo, in scmi_vio_deferred_tx_worker()
349 core->msg->read_header(msg->input), in scmi_vio_deferred_tx_worker()
352 /* Free the processed message once done */ in scmi_vio_deferred_tx_worker()
356 spin_unlock_irqrestore(&vioch->pending_lock, flags); in scmi_vio_deferred_tx_worker()
359 scmi_vio_complete_cb(vioch->vqueue); in scmi_vio_deferred_tx_worker()
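
The deferred worker above drains pending_cmds_list, which the polling path may have filled with buffers it dequeued but did not own: non-polled replies are delivered to the SCMI core, late timed-out polled replies are dropped, and every buffer is released back to the free pool. Stripped of the SCMI specifics, the drain loop follows this shape (a sketch with hypothetical demo_* types and callbacks):

#include <linux/list.h>
#include <linux/spinlock.h>

enum demo_poll { DEMO_NOT_POLLED, DEMO_POLLING, DEMO_POLL_DONE, DEMO_POLL_TIMEOUT };

struct demo_msg {
        struct list_head list;
        enum demo_poll poll_status;
};

struct demo_pending {
        spinlock_t lock;
        struct list_head list;
};

static void demo_drain_pending(struct demo_pending *p,
                               void (*deliver)(struct demo_msg *),
                               void (*release)(struct demo_msg *))
{
        unsigned long flags;
        struct demo_msg *msg, *tmp;

        spin_lock_irqsave(&p->lock, flags);
        list_for_each_entry_safe(msg, tmp, &p->list, list) {
                list_del(&msg->list);

                /* Deliver only buffers nobody was poll-waiting on ... */
                if (msg->poll_status == DEMO_NOT_POLLED)
                        deliver(msg);

                /* ... but always hand the buffer back to the free pool */
                release(msg);
        }
        spin_unlock_irqrestore(&p->lock, flags);
}
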
371 struct scmi_vio_channel *vioch = base_cinfo->transport_info; in virtio_get_max_msg()
373 return vioch->max_msg; in virtio_get_max_msg()
383 channels = (struct scmi_vio_channel *)scmi_vdev->priv; in virtio_chan_available()
397 return vioch && !vioch->cinfo; in virtio_chan_available()
413 return -EPROBE_DEFER; in virtio_chan_setup()
415 vioch = &((struct scmi_vio_channel *)scmi_vdev->priv)[index]; in virtio_chan_setup()
418 if (tx && !vioch->deferred_tx_wq) { in virtio_chan_setup()
421 vioch->deferred_tx_wq = in virtio_chan_setup()
422 alloc_workqueue(dev_name(&scmi_vdev->dev), in virtio_chan_setup()
425 if (!vioch->deferred_tx_wq) in virtio_chan_setup()
426 return -ENOMEM; in virtio_chan_setup()
429 vioch->deferred_tx_wq); in virtio_chan_setup()
433 INIT_WORK(&vioch->deferred_tx_work, in virtio_chan_setup()
437 for (i = 0; i < vioch->max_msg; i++) { in virtio_chan_setup()
442 return -ENOMEM; in virtio_chan_setup()
444 msg->max_len = VIRTIO_SCMI_MAX_PDU_SIZE(cinfo); in virtio_chan_setup()
446 msg->request = devm_kzalloc(dev, msg->max_len, in virtio_chan_setup()
448 if (!msg->request) in virtio_chan_setup()
449 return -ENOMEM; in virtio_chan_setup()
450 spin_lock_init(&msg->poll_lock); in virtio_chan_setup()
451 refcount_set(&msg->users, 1); in virtio_chan_setup()
454 msg->input = devm_kzalloc(dev, msg->max_len, GFP_KERNEL); in virtio_chan_setup()
455 if (!msg->input) in virtio_chan_setup()
456 return -ENOMEM; in virtio_chan_setup()
469 struct scmi_vio_channel *vioch = cinfo->transport_info; in virtio_chan_free()
472 * Break device to inhibit further traffic flowing while shutting down in virtio_chan_free()
473 * the channels: doing it later holding vioch->lock creates unsafe in virtio_chan_free()
476 virtio_break_device(vioch->vqueue->vdev); in virtio_chan_free()
485 struct scmi_vio_channel *vioch = cinfo->transport_info; in virtio_send_message()
494 return -EINVAL; in virtio_send_message()
499 return -EBUSY; in virtio_send_message()
502 core->msg->tx_prepare(msg->request, xfer); in virtio_send_message()
504 sg_init_one(&sg_out, msg->request, core->msg->command_size(xfer)); in virtio_send_message()
505 sg_init_one(&sg_in, msg->input, core->msg->response_size(xfer)); in virtio_send_message()
507 spin_lock_irqsave(&vioch->lock, flags); in virtio_send_message()
511 * - retrieve last used index (will be used as polling reference) in virtio_send_message()
512 * - bind the polled message to the xfer via .priv in virtio_send_message()
513 * - grab an additional msg refcount for the poll-path in virtio_send_message()
515 if (xfer->hdr.poll_completion) { in virtio_send_message()
516 msg->poll_idx = virtqueue_enable_cb_prepare(vioch->vqueue); in virtio_send_message()
518 msg->poll_status = VIO_MSG_POLLING; in virtio_send_message()
521 smp_store_mb(xfer->priv, msg); in virtio_send_message()
524 rc = virtqueue_add_sgs(vioch->vqueue, sgs, 1, 1, msg, GFP_ATOMIC); in virtio_send_message()
526 dev_err(vioch->cinfo->dev, in virtio_send_message()
529 virtqueue_kick(vioch->vqueue); in virtio_send_message()
531 spin_unlock_irqrestore(&vioch->lock, flags); in virtio_send_message()
534 /* Ensure order between xfer->priv clear and vq feeding */ in virtio_send_message()
535 smp_store_mb(xfer->priv, NULL); in virtio_send_message()
536 if (xfer->hdr.poll_completion) in virtio_send_message()
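
For each command, the send path above prepares exactly one device-readable buffer (the request) and one device-writable buffer (the reply slot) and hands both to the virtqueue in a single call; for polled transfers it additionally snapshots the ring state via virtqueue_enable_cb_prepare() so virtio_poll_done() can later ask whether anything new has arrived. A generic sketch of the submit step, with a hypothetical demo_send() rather than the driver's function:

#include <linux/scatterlist.h>
#include <linux/virtio.h>

/*
 * Sketch: post @cmd for the device to read and @reply for it to fill,
 * identified by the opaque @token returned later by virtqueue_get_buf().
 */
static int demo_send(struct virtqueue *vq, void *cmd, unsigned int cmd_len,
                     void *reply, unsigned int reply_len, void *token)
{
        struct scatterlist sg_out, sg_in;
        struct scatterlist *sgs[2] = { &sg_out, &sg_in };
        int rc;

        sg_init_one(&sg_out, cmd, cmd_len);
        sg_init_one(&sg_in, reply, reply_len);

        /* One readable sg followed by one writable sg, as in the driver */
        rc = virtqueue_add_sgs(vq, sgs, 1, 1, token, GFP_ATOMIC);
        if (!rc)
                virtqueue_kick(vq);

        return rc;
}
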
549 struct scmi_vio_msg *msg = xfer->priv; in virtio_fetch_response()
552 core->msg->fetch_response(msg->input, msg->rx_len, xfer); in virtio_fetch_response()
558 struct scmi_vio_msg *msg = xfer->priv; in virtio_fetch_notification()
561 core->msg->fetch_notification(msg->input, msg->rx_len, in virtio_fetch_notification()
566 * virtio_mark_txdone - Mark transmission done
568 * Free only completed polling transfer messages.
571 * outstanding but timed-out messages by forcibly re-adding them to the
572 * free-list inside the TX code path; we instead let IRQ/RX callbacks, or the
576 * This approach was deemed preferable since those pending timed-out buffers are
584 * For this same reason, here, we take care to free only the polled messages
586 * IRQ path - the initial scmi_vio_msg_release() takes care of this) and also
587 * any timed-out polled message if that indeed appears to have been at least
592 * Possible late replies to timed-out polled messages will be eventually freed
604 struct scmi_vio_channel *vioch = cinfo->transport_info; in virtio_mark_txdone()
605 struct scmi_vio_msg *msg = xfer->priv; in virtio_mark_txdone()
611 smp_store_mb(xfer->priv, NULL); in virtio_mark_txdone()
614 if (!xfer->hdr.poll_completion || scmi_vio_msg_release(vioch, msg)) { in virtio_mark_txdone()
619 spin_lock_irqsave(&msg->poll_lock, flags); in virtio_mark_txdone()
620 /* Do not free timed-out polled messages that are still inflight */ in virtio_mark_txdone()
621 if (ret != -ETIMEDOUT || msg->poll_status == VIO_MSG_POLL_DONE) in virtio_mark_txdone()
623 else if (msg->poll_status == VIO_MSG_POLLING) in virtio_mark_txdone()
624 msg->poll_status = VIO_MSG_POLL_TIMEOUT; in virtio_mark_txdone()
625 spin_unlock_irqrestore(&msg->poll_lock, flags); in virtio_mark_txdone()
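
Put differently: a polled message is freed here only when it is safe, i.e. when its transfer did not time out or polling has actually completed; a message that timed out while still inflight is merely re-labelled VIO_MSG_POLL_TIMEOUT so that whichever path eventually dequeues it (IRQ handler, polling loop or deferred worker) performs the final release. A condensed sketch of that decision, with hypothetical demo_* names and the refcounting simplified:

#include <linux/errno.h>
#include <linux/spinlock.h>

enum demo_poll { DEMO_NOT_POLLED, DEMO_POLLING, DEMO_POLL_DONE, DEMO_POLL_TIMEOUT };

struct demo_msg {
        spinlock_t poll_lock;
        enum demo_poll poll_status;
};

static void demo_polled_txdone(struct demo_msg *msg, int ret,
                               void (*release)(struct demo_msg *))
{
        unsigned long flags;

        spin_lock_irqsave(&msg->poll_lock, flags);
        if (ret != -ETIMEDOUT || msg->poll_status == DEMO_POLL_DONE)
                release(msg);                           /* no longer inflight: safe to free */
        else if (msg->poll_status == DEMO_POLLING)
                msg->poll_status = DEMO_POLL_TIMEOUT;   /* leave the final free to whoever dequeues it */
        spin_unlock_irqrestore(&msg->poll_lock, flags);
}
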
631 * virtio_poll_done - Provide polling support for VirtIO transport
643 * we were poll-waiting for: if that is the case such early fetched buffers are
647 * So, basically, once something new is spotted we proceed to de-queue all the
653 * busy-waiting helper.
655 * Finally, we delegate to the deferred worker also the final free of any timed
658 * Note that, since we do NOT have a per-message suppress-notification mechanism, in virtio_poll_done()
673 struct scmi_vio_msg *next_msg, *msg = xfer->priv; in virtio_poll_done()
674 struct scmi_vio_channel *vioch = cinfo->transport_info; in virtio_poll_done()
694 if (msg->poll_status == VIO_MSG_POLL_DONE) in virtio_poll_done()
701 pending = virtqueue_poll(vioch->vqueue, msg->poll_idx); in virtio_poll_done()
707 spin_lock_irqsave(&vioch->lock, flags); in virtio_poll_done()
708 virtqueue_disable_cb(vioch->vqueue); in virtio_poll_done()
711 * Process all new messages till the polled-for message is found OR in virtio_poll_done()
714 while ((next_msg = virtqueue_get_buf(vioch->vqueue, &length))) { in virtio_poll_done()
721 spin_lock(&next_msg->poll_lock); in virtio_poll_done()
722 if (next_msg->poll_status == VIO_MSG_POLLING) { in virtio_poll_done()
723 next_msg->poll_status = VIO_MSG_POLL_DONE; in virtio_poll_done()
726 spin_unlock(&next_msg->poll_lock); in virtio_poll_done()
728 next_msg->rx_len = length; in virtio_poll_done()
739 * Enqueue for later processing any non-polled message and any in virtio_poll_done()
740 * timed-out polled one that we happen to have dequeued. in virtio_poll_done()
742 spin_lock(&next_msg->poll_lock); in virtio_poll_done()
743 if (next_msg->poll_status == VIO_MSG_NOT_POLLED || in virtio_poll_done()
744 next_msg->poll_status == VIO_MSG_POLL_TIMEOUT) { in virtio_poll_done()
745 spin_unlock(&next_msg->poll_lock); in virtio_poll_done()
748 spin_lock(&vioch->pending_lock); in virtio_poll_done()
749 list_add_tail(&next_msg->list, in virtio_poll_done()
750 &vioch->pending_cmds_list); in virtio_poll_done()
751 spin_unlock(&vioch->pending_lock); in virtio_poll_done()
753 spin_unlock(&next_msg->poll_lock); in virtio_poll_done()
766 pending = !virtqueue_enable_cb(vioch->vqueue); in virtio_poll_done()
768 msg->poll_idx = virtqueue_enable_cb_prepare(vioch->vqueue); in virtio_poll_done()
769 pending = virtqueue_poll(vioch->vqueue, msg->poll_idx); in virtio_poll_done()
772 if (vioch->deferred_tx_wq && (any_prefetched || pending)) in virtio_poll_done()
773 queue_work(vioch->deferred_tx_wq, &vioch->deferred_tx_work); in virtio_poll_done()
775 spin_unlock_irqrestore(&vioch->lock, flags); in virtio_poll_done()
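
Condensed, the polling algorithm above is: fast-check the ring with virtqueue_poll() against the snapshot taken at send time, then, with callbacks disabled, drain the used ring until the awaited buffer shows up, parking everything else for the deferred worker, and finally re-enable callbacks. A stripped-down sketch of that drain, with hypothetical demo_* names and the channel locking omitted:

#include <linux/virtio.h>

/*
 * Sketch: return true once @awaited has been returned by the device on @vq.
 * Anything else dequeued meanwhile is handed to @park() for later processing,
 * mirroring the pending_cmds_list handling above.
 */
static bool demo_poll_for(struct virtqueue *vq, void *awaited,
                          unsigned int poll_idx, void (*park)(void *token))
{
        unsigned int len;
        void *token;
        bool found = false;

        /* Nothing new since the snapshot taken at submission time? */
        if (!virtqueue_poll(vq, poll_idx))
                return false;

        virtqueue_disable_cb(vq);

        while ((token = virtqueue_get_buf(vq, &len))) {
                if (token == awaited) {
                        found = true;
                        break;
                }
                /* Not the buffer we are waiting for: park it */
                park(token);
        }

        /*
         * Re-enable callbacks; if more buffers slipped in meanwhile the next
         * poll cycle (or the deferred worker) will pick them up.
         */
        virtqueue_enable_cb(vq);

        return found;
}
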
796 /* for non-realtime virtio devices */
804 { .compatible = "arm,scmi-virtio" },
813 struct device *dev = &vdev->dev; in scmi_vio_probe()
825 return -EBUSY; in scmi_vio_probe()
833 return -ENOMEM; in scmi_vio_probe()
869 vdev->priv = channels; in scmi_vio_probe()
876 vdev->priv = NULL; in scmi_vio_probe()
877 vdev->config->del_vqs(vdev); in scmi_vio_probe()
899 vdev->config->del_vqs(vdev); in scmi_vio_remove()
908 dev_err(&vdev->dev, in scmi_vio_validate()
910 return -EINVAL; in scmi_vio_validate()
927 .driver.name = "scmi-virtio",