Lines Matching full:cmdq
374 struct arm_smmu_cmdq *cmdq = NULL; in arm_smmu_get_cmdq() local
377 cmdq = smmu->impl_ops->get_secondary_cmdq(smmu, ent); in arm_smmu_get_cmdq()
379 return cmdq ?: &smmu->cmdq; in arm_smmu_get_cmdq()
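The matches at lines 374-379 show how a command queue is chosen per command: an implementation hook may hand back a secondary queue, otherwise the driver falls back to the main cmdq. A minimal userspace sketch of that fallback, with illustrative types and names (not the kernel's):

    /* Hedged model of the selection at lines 374-379.  "struct cmdq" and
     * "struct smmu_model" are stand-ins; only the fallback logic matters. */
    struct cmdq;                          /* opaque queue handle */

    struct smmu_model {
        struct cmdq *main_cmdq;
        /* optional hook; may return NULL when no secondary queue applies */
        struct cmdq *(*get_secondary_cmdq)(struct smmu_model *smmu,
                                           const void *ent);
    };

    static struct cmdq *pick_cmdq(struct smmu_model *smmu, const void *ent)
    {
        struct cmdq *q = NULL;

        if (smmu->get_secondary_cmdq)
            q = smmu->get_secondary_cmdq(smmu, ent);

        return q ? q : smmu->main_cmdq;   /* "cmdq ?: &smmu->cmdq", line 379 */
    }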
383 struct arm_smmu_cmdq *cmdq) in arm_smmu_cmdq_needs_busy_polling() argument
385 if (cmdq == &smmu->cmdq) in arm_smmu_cmdq_needs_busy_polling()
392 struct arm_smmu_cmdq *cmdq, u32 prod) in arm_smmu_cmdq_build_sync_cmd() argument
394 struct arm_smmu_queue *q = &cmdq->q; in arm_smmu_cmdq_build_sync_cmd()
409 if (arm_smmu_cmdq_needs_busy_polling(smmu, cmdq)) in arm_smmu_cmdq_build_sync_cmd()
414 struct arm_smmu_cmdq *cmdq) in __arm_smmu_cmdq_skip_err() argument
422 struct arm_smmu_queue *q = &cmdq->q; in __arm_smmu_cmdq_skip_err()
432 dev_err(smmu->dev, "CMDQ error (cons 0x%08x): %s\n", cons, in __arm_smmu_cmdq_skip_err()
456 * not to touch any of the shadow cmdq state. in __arm_smmu_cmdq_skip_err()
465 if (arm_smmu_cmdq_needs_busy_polling(smmu, cmdq)) in __arm_smmu_cmdq_skip_err()
473 __arm_smmu_cmdq_skip_err(smmu, &smmu->cmdq); in arm_smmu_cmdq_skip_err()
488 static void arm_smmu_cmdq_shared_lock(struct arm_smmu_cmdq *cmdq) in arm_smmu_cmdq_shared_lock() argument
498 if (atomic_fetch_inc_relaxed(&cmdq->lock) >= 0) in arm_smmu_cmdq_shared_lock()
502 val = atomic_cond_read_relaxed(&cmdq->lock, VAL >= 0); in arm_smmu_cmdq_shared_lock()
503 } while (atomic_cmpxchg_relaxed(&cmdq->lock, val, val + 1) != val); in arm_smmu_cmdq_shared_lock()
506 static void arm_smmu_cmdq_shared_unlock(struct arm_smmu_cmdq *cmdq) in arm_smmu_cmdq_shared_unlock() argument
508 (void)atomic_dec_return_release(&cmdq->lock); in arm_smmu_cmdq_shared_unlock()
511 static bool arm_smmu_cmdq_shared_tryunlock(struct arm_smmu_cmdq *cmdq) in arm_smmu_cmdq_shared_tryunlock() argument
513 if (atomic_read(&cmdq->lock) == 1) in arm_smmu_cmdq_shared_tryunlock()
516 arm_smmu_cmdq_shared_unlock(cmdq); in arm_smmu_cmdq_shared_tryunlock()
520 #define arm_smmu_cmdq_exclusive_trylock_irqsave(cmdq, flags) \ argument
524 __ret = !atomic_cmpxchg_relaxed(&cmdq->lock, 0, INT_MIN); \
530 #define arm_smmu_cmdq_exclusive_unlock_irqrestore(cmdq, flags) \ argument
532 atomic_set_release(&cmdq->lock, 0); \
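Lines 488-532 show both halves of the cmdq lock: shared acquisition increments an atomic counter, exclusive acquisition cmpxchg()es 0 to INT_MIN, and exclusive release stores 0 outright, which also wipes out any stray increments made by contended shared lockers. A self-contained C11 model of that protocol as it reads from these fragments; it is an illustration, not the kernel code:

    #include <limits.h>
    #include <stdatomic.h>
    #include <stdbool.h>

    struct cmdq_lock { atomic_int lock; };   /* 0 free, >0 shared holders,
                                              * INT_MIN-based = exclusive */

    static void shared_lock(struct cmdq_lock *l)
    {
        int val;

        /* Fast path (line 498): if the previous value was >= 0 there is no
         * exclusive holder and our increment already registered us. */
        if (atomic_fetch_add_explicit(&l->lock, 1, memory_order_relaxed) >= 0)
            return;

        /* Slow path (lines 502-503): wait for the exclusive holder to drop
         * the lock, then move val -> val + 1. */
        do {
            do {
                val = atomic_load_explicit(&l->lock, memory_order_relaxed);
            } while (val < 0);
        } while (!atomic_compare_exchange_weak_explicit(&l->lock, &val, val + 1,
                                                        memory_order_relaxed,
                                                        memory_order_relaxed));
    }

    static void shared_unlock(struct cmdq_lock *l)
    {
        atomic_fetch_sub_explicit(&l->lock, 1, memory_order_release);
    }

    static bool exclusive_trylock(struct cmdq_lock *l)
    {
        int expected = 0;
        /* Line 524: succeeds only when nobody holds the lock at all. */
        return atomic_compare_exchange_strong_explicit(&l->lock, &expected,
                                                       INT_MIN,
                                                       memory_order_relaxed,
                                                       memory_order_relaxed);
    }

    static void exclusive_unlock(struct cmdq_lock *l)
    {
        /* Line 532: a plain store of 0 both releases the lock and discards
         * increments made by shared lockers that hit the slow path. */
        atomic_store_explicit(&l->lock, 0, memory_order_release);
    }

In the listing, the shared side is taken before a producer marks its slots valid (lines 843-851), while the exclusive side is only taken opportunistically to refresh the shadow copy of cons from the CONS register (lines 649-651).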
576 static void __arm_smmu_cmdq_poll_set_valid_map(struct arm_smmu_cmdq *cmdq, in __arm_smmu_cmdq_poll_set_valid_map() argument
581 .max_n_shift = cmdq->q.llq.max_n_shift, in __arm_smmu_cmdq_poll_set_valid_map()
596 ptr = &cmdq->valid_map[swidx]; in __arm_smmu_cmdq_poll_set_valid_map()
623 static void arm_smmu_cmdq_set_valid_map(struct arm_smmu_cmdq *cmdq, in arm_smmu_cmdq_set_valid_map() argument
626 __arm_smmu_cmdq_poll_set_valid_map(cmdq, sprod, eprod, true); in arm_smmu_cmdq_set_valid_map()
630 static void arm_smmu_cmdq_poll_valid_map(struct arm_smmu_cmdq *cmdq, in arm_smmu_cmdq_poll_valid_map() argument
633 __arm_smmu_cmdq_poll_set_valid_map(cmdq, sprod, eprod, false); in arm_smmu_cmdq_poll_valid_map()
638 struct arm_smmu_cmdq *cmdq, in arm_smmu_cmdq_poll_until_not_full() argument
646 * Try to update our copy of cons by grabbing exclusive cmdq access. If in arm_smmu_cmdq_poll_until_not_full()
649 if (arm_smmu_cmdq_exclusive_trylock_irqsave(cmdq, flags)) { in arm_smmu_cmdq_poll_until_not_full()
650 WRITE_ONCE(cmdq->q.llq.cons, readl_relaxed(cmdq->q.cons_reg)); in arm_smmu_cmdq_poll_until_not_full()
651 arm_smmu_cmdq_exclusive_unlock_irqrestore(cmdq, flags); in arm_smmu_cmdq_poll_until_not_full()
652 llq->val = READ_ONCE(cmdq->q.llq.val); in arm_smmu_cmdq_poll_until_not_full()
658 llq->val = READ_ONCE(cmdq->q.llq.val); in arm_smmu_cmdq_poll_until_not_full()
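Lines 638-658 use that exclusive side: when the queue looks full, the caller tries to take the lock exclusively just long enough to re-read the hardware CONS register into the software copy; if someone else holds the lock, it simply re-reads the software copy and retries. A small sketch reusing the cmdq_lock model above, where "hw_cons" stands in for readl_relaxed(cmdq->q.cons_reg):

    #include <stdatomic.h>
    #include <stdint.h>

    struct q_model {
        struct cmdq_lock lock;         /* model defined in the sketch above */
        _Atomic uint32_t shadow_cons;  /* software copy, cmdq->q.llq.cons */
        _Atomic uint32_t hw_cons;      /* stand-in for the CONS register */
    };

    static void refresh_cons_if_possible(struct q_model *q)
    {
        if (exclusive_trylock(&q->lock)) {
            uint32_t cons = atomic_load_explicit(&q->hw_cons,
                                                 memory_order_relaxed);

            atomic_store_explicit(&q->shadow_cons, cons, memory_order_relaxed);
            exclusive_unlock(&q->lock);
        }
        /* Either way the caller re-reads shadow_cons and re-checks "full". */
    }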
670 * Must be called with the cmdq lock held in some capacity.
673 struct arm_smmu_cmdq *cmdq, in __arm_smmu_cmdq_poll_until_msi() argument
678 u32 *cmd = (u32 *)(Q_ENT(&cmdq->q, llq->prod)); in __arm_smmu_cmdq_poll_until_msi()
694 * Must be called with the cmdq lock held in some capacity.
697 struct arm_smmu_cmdq *cmdq, in __arm_smmu_cmdq_poll_until_consumed() argument
705 llq->val = READ_ONCE(cmdq->q.llq.val); in __arm_smmu_cmdq_poll_until_consumed()
720 * cmdq->q.llq.cons. Roughly speaking: in __arm_smmu_cmdq_poll_until_consumed()
740 llq->cons = readl(cmdq->q.cons_reg); in __arm_smmu_cmdq_poll_until_consumed()
747 struct arm_smmu_cmdq *cmdq, in arm_smmu_cmdq_poll_until_sync() argument
751 !arm_smmu_cmdq_needs_busy_polling(smmu, cmdq)) in arm_smmu_cmdq_poll_until_sync()
752 return __arm_smmu_cmdq_poll_until_msi(smmu, cmdq, llq); in arm_smmu_cmdq_poll_until_sync()
754 return __arm_smmu_cmdq_poll_until_consumed(smmu, cmdq, llq); in arm_smmu_cmdq_poll_until_sync()
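Lines 747-754 pick between the two CMD_SYNC completion strategies: wait for the MSI payload the SMMU writes back into the sync command's dwords (lines 673-678), or poll the consumer index until it moves past the sync (lines 697-740). A paraphrased decision helper; the real code tests an SMMU option bit for MSI-based polling, and per lines 383-386 the main cmdq never needs busy polling:

    #include <stdbool.h>

    enum sync_wait { SYNC_WAIT_MSI, SYNC_WAIT_CONS };

    static enum sync_wait pick_sync_wait(bool msi_poll_supported,
                                         bool queue_needs_busy_polling)
    {
        /* A queue that must be busy-polled (a property only the secondary,
         * implementation-provided queues can have here) always falls back
         * to polling CONS. */
        if (msi_poll_supported && !queue_needs_busy_polling)
            return SYNC_WAIT_MSI;
        return SYNC_WAIT_CONS;
    }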
757 static void arm_smmu_cmdq_write_entries(struct arm_smmu_cmdq *cmdq, u64 *cmds, in arm_smmu_cmdq_write_entries() argument
762 .max_n_shift = cmdq->q.llq.max_n_shift, in arm_smmu_cmdq_write_entries()
770 queue_write(Q_ENT(&cmdq->q, prod), cmd, CMDQ_ENT_DWORDS); in arm_smmu_cmdq_write_entries()
791 struct arm_smmu_cmdq *cmdq, u64 *cmds, int n, in arm_smmu_cmdq_issue_cmdlist() argument
801 llq.max_n_shift = cmdq->q.llq.max_n_shift; in arm_smmu_cmdq_issue_cmdlist()
805 llq.val = READ_ONCE(cmdq->q.llq.val); in arm_smmu_cmdq_issue_cmdlist()
811 if (arm_smmu_cmdq_poll_until_not_full(smmu, cmdq, &llq)) in arm_smmu_cmdq_issue_cmdlist()
812 dev_err_ratelimited(smmu->dev, "CMDQ timeout\n"); in arm_smmu_cmdq_issue_cmdlist()
820 old = cmpxchg_relaxed(&cmdq->q.llq.val, llq.val, head.val); in arm_smmu_cmdq_issue_cmdlist()
834 arm_smmu_cmdq_write_entries(cmdq, cmds, llq.prod, n); in arm_smmu_cmdq_issue_cmdlist()
837 arm_smmu_cmdq_build_sync_cmd(cmd_sync, smmu, cmdq, prod); in arm_smmu_cmdq_issue_cmdlist()
838 queue_write(Q_ENT(&cmdq->q, prod), cmd_sync, CMDQ_ENT_DWORDS); in arm_smmu_cmdq_issue_cmdlist()
843 * We achieve that by taking the cmdq lock as shared before in arm_smmu_cmdq_issue_cmdlist()
846 arm_smmu_cmdq_shared_lock(cmdq); in arm_smmu_cmdq_issue_cmdlist()
851 arm_smmu_cmdq_set_valid_map(cmdq, llq.prod, head.prod); in arm_smmu_cmdq_issue_cmdlist()
856 atomic_cond_read_relaxed(&cmdq->owner_prod, VAL == llq.prod); in arm_smmu_cmdq_issue_cmdlist()
860 &cmdq->q.llq.atomic.prod); in arm_smmu_cmdq_issue_cmdlist()
868 arm_smmu_cmdq_poll_valid_map(cmdq, llq.prod, prod); in arm_smmu_cmdq_issue_cmdlist()
874 writel_relaxed(prod, cmdq->q.prod_reg); in arm_smmu_cmdq_issue_cmdlist()
881 atomic_set_release(&cmdq->owner_prod, prod); in arm_smmu_cmdq_issue_cmdlist()
887 ret = arm_smmu_cmdq_poll_until_sync(smmu, cmdq, &llq); in arm_smmu_cmdq_issue_cmdlist()
892 readl_relaxed(cmdq->q.prod_reg), in arm_smmu_cmdq_issue_cmdlist()
893 readl_relaxed(cmdq->q.cons_reg)); in arm_smmu_cmdq_issue_cmdlist()
897 * Try to unlock the cmdq lock. This will fail if we're the last in arm_smmu_cmdq_issue_cmdlist()
898 * reader, in which case we can safely update cmdq->q.llq.cons in arm_smmu_cmdq_issue_cmdlist()
900 if (!arm_smmu_cmdq_shared_tryunlock(cmdq)) { in arm_smmu_cmdq_issue_cmdlist()
901 WRITE_ONCE(cmdq->q.llq.cons, llq.cons); in arm_smmu_cmdq_issue_cmdlist()
902 arm_smmu_cmdq_shared_unlock(cmdq); in arm_smmu_cmdq_issue_cmdlist()
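Lines 791-881 are the lock-free, multi-producer insertion path: each CPU reserves its slots by advancing a software prod value with a cmpxchg on cmdq->q.llq.val (line 820), writes its commands plus an optional sync (lines 834-838), publishes them through the valid map (line 851), and one reserver per stretch acts as the owner that waits on cmdq->owner_prod, polls the valid map, rings the PROD doorbell and hands ownership forward (lines 856-881). The reservation arithmetic uses the usual SMMUv3 queue convention of an index with a wrap bit just above it; a small self-contained sketch of that part, with illustrative macro names:

    #include <stdbool.h>
    #include <stdint.h>

    #define Q_IDX(shift, p)  ((p) & ((1u << (shift)) - 1))
    #define Q_WRP(shift, p)  ((p) & (1u << (shift)))

    /* Advance prod by n slots; a carry out of the index bits toggles the
     * wrap bit, and anything above the wrap bit is masked off again. */
    static uint32_t q_inc_prod_n(unsigned int shift, uint32_t prod, int n)
    {
        uint32_t next = (Q_WRP(shift, prod) | Q_IDX(shift, prod)) + (uint32_t)n;

        return Q_WRP(shift, next) | Q_IDX(shift, next);
    }

    /* Full: same index, opposite wrap bits.  This is what the reservation
     * loop re-checks before retrying the cmpxchg at line 820. */
    static bool q_full(unsigned int shift, uint32_t prod, uint32_t cons)
    {
        return Q_IDX(shift, prod) == Q_IDX(shift, cons) &&
               Q_WRP(shift, prod) != Q_WRP(shift, cons);
    }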
917 dev_warn(smmu->dev, "ignoring unknown CMDQ opcode 0x%x\n", in __arm_smmu_cmdq_issue_cmd()
943 cmds->cmdq = arm_smmu_get_cmdq(smmu, ent); in arm_smmu_cmdq_batch_init()
950 bool unsupported_cmd = !arm_smmu_cmdq_supports_cmd(cmds->cmdq, cmd); in arm_smmu_cmdq_batch_add()
956 arm_smmu_cmdq_issue_cmdlist(smmu, cmds->cmdq, cmds->cmds, in arm_smmu_cmdq_batch_add()
962 arm_smmu_cmdq_issue_cmdlist(smmu, cmds->cmdq, cmds->cmds, in arm_smmu_cmdq_batch_add()
969 dev_warn(smmu->dev, "ignoring unknown CMDQ opcode 0x%x\n", in arm_smmu_cmdq_batch_add()
980 return arm_smmu_cmdq_issue_cmdlist(smmu, cmds->cmdq, cmds->cmds, in arm_smmu_cmdq_batch_submit()
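Lines 943-980 show the batching layer that sits on top of arm_smmu_cmdq_issue_cmdlist(): arm_smmu_cmdq_batch_init() picks the queue (line 943), arm_smmu_cmdq_batch_add() appends an encoded command and flushes early when the buffer fills up or the selected queue does not support the command (lines 950-962), and arm_smmu_cmdq_batch_submit() does the final flush with a sync (line 980). A userspace model of just the accumulate-and-flush pattern; the constant and the issue() callback are stand-ins for CMDQ_BATCH_ENTRIES and arm_smmu_cmdq_issue_cmdlist():

    #include <stdbool.h>
    #include <stdint.h>
    #include <string.h>

    #define CMD_DWORDS     2      /* one command is two 64-bit words */
    #define BATCH_ENTRIES  64     /* stand-in for CMDQ_BATCH_ENTRIES */

    struct cmd_batch {
        uint64_t cmds[BATCH_ENTRIES * CMD_DWORDS];
        int num;
        /* issue(cmds, n, sync): push n commands, optionally ending in a sync */
        int (*issue)(uint64_t *cmds, int n, bool sync);
    };

    static void batch_add(struct cmd_batch *b, const uint64_t cmd[CMD_DWORDS])
    {
        if (b->num == BATCH_ENTRIES) {
            b->issue(b->cmds, b->num, false);   /* flush, no sync yet */
            b->num = 0;
        }
        memcpy(&b->cmds[b->num * CMD_DWORDS], cmd,
               CMD_DWORDS * sizeof(uint64_t));
        b->num++;
    }

    static int batch_submit(struct cmd_batch *b)
    {
        /* Final flush; the driver appends a CMD_SYNC and waits for it here. */
        return b->issue(b->cmds, b->num, true);
    }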
2058 dev_warn(smmu->dev, "CMDQ MSI write aborted\n"); in arm_smmu_gerror_handler()
3707 struct arm_smmu_cmdq *cmdq) in arm_smmu_cmdq_init() argument
3709 unsigned int nents = 1 << cmdq->q.llq.max_n_shift; in arm_smmu_cmdq_init()
3711 atomic_set(&cmdq->owner_prod, 0); in arm_smmu_cmdq_init()
3712 atomic_set(&cmdq->lock, 0); in arm_smmu_cmdq_init()
3714 cmdq->valid_map = (atomic_long_t *)devm_bitmap_zalloc(smmu->dev, nents, in arm_smmu_cmdq_init()
3716 if (!cmdq->valid_map) in arm_smmu_cmdq_init()
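Lines 3707-3716 size the per-queue state: owner_prod and the lock start at zero, and the valid map gets one bit per command-queue entry, where the entry count is 1 << max_n_shift. A tiny sketch of just the sizing, with calloc() standing in for devm_bitmap_zalloc():

    #include <stdlib.h>

    /* One valid-map bit per entry; nents = 1 << max_n_shift (line 3709). */
    static unsigned long *alloc_valid_map(unsigned int max_n_shift)
    {
        size_t nents = (size_t)1 << max_n_shift;
        size_t bits_per_long = 8 * sizeof(unsigned long);
        size_t nlongs = (nents + bits_per_long - 1) / bits_per_long;

        return calloc(nlongs, sizeof(unsigned long));
    }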
3726 /* cmdq */ in arm_smmu_init_queues()
3727 ret = arm_smmu_init_one_queue(smmu, &smmu->cmdq.q, smmu->base, in arm_smmu_init_queues()
3729 CMDQ_ENT_DWORDS, "cmdq"); in arm_smmu_init_queues()
3733 ret = arm_smmu_cmdq_init(smmu, &smmu->cmdq); in arm_smmu_init_queues()
3927 /* Allocate MSIs for evtq, gerror and priq. Ignore cmdq */ in arm_smmu_setup_msis()
4102 writeq_relaxed(smmu->cmdq.q.q_base, smmu->base + ARM_SMMU_CMDQ_BASE); in arm_smmu_device_reset()
4103 writel_relaxed(smmu->cmdq.q.llq.prod, smmu->base + ARM_SMMU_CMDQ_PROD); in arm_smmu_device_reset()
4104 writel_relaxed(smmu->cmdq.q.llq.cons, smmu->base + ARM_SMMU_CMDQ_CONS); in arm_smmu_device_reset()
4373 smmu->cmdq.q.llq.max_n_shift = min_t(u32, CMDQ_MAX_SZ_SHIFT, in arm_smmu_device_hw_probe()
4375 if (smmu->cmdq.q.llq.max_n_shift <= ilog2(CMDQ_BATCH_ENTRIES)) { in arm_smmu_device_hw_probe()
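Lines 4373-4375 derive the command-queue size at probe time: take the log2 entry count advertised by the hardware, cap it at the driver maximum (CMDQ_MAX_SZ_SHIFT), and treat a queue too small to hold a full batch plus a sync as an error (the <= ilog2(CMDQ_BATCH_ENTRIES) test). A sketch of that check with placeholder constants, since the actual values are not shown in this listing:

    #include <stdbool.h>

    static bool cmdq_shift_ok(unsigned int hw_log2_entries,
                              unsigned int driver_max_shift, /* CMDQ_MAX_SZ_SHIFT */
                              unsigned int batch_entries)    /* CMDQ_BATCH_ENTRIES */
    {
        unsigned int shift = hw_log2_entries < driver_max_shift ?
                             hw_log2_entries : driver_max_shift;
        unsigned int batch_shift = 0;

        while ((1u << (batch_shift + 1)) <= batch_entries)   /* ilog2() */
            batch_shift++;

        /* Line 4375 rejects shift <= ilog2(batch_entries). */
        return shift > batch_shift;
    }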