Lines matching full:qc (identifier search hits in drivers/ata/libata-core.c)

689 static inline void ata_set_tf_cdl(struct ata_queued_cmd *qc, int cdl)  in ata_set_tf_cdl()  argument
691 struct ata_taskfile *tf = &qc->tf; in ata_set_tf_cdl()
703 qc->flags |= ATA_QCFLAG_HAS_CDL | ATA_QCFLAG_RESULT_TF; in ata_set_tf_cdl()
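Between the two hits above (lines 691 and 703) the helper decides where the CDL index travels; a reconstruction of that elided branch, per upstream libata-core.c:

	if (tf->protocol == ATA_PROT_NCQ)
		tf->auxiliary |= cdl;	/* NCQ: CDL index rides in the auxiliary field */
	else
		tf->feature |= cdl;	/* non-NCQ: CDL index rides in the feature field */
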
708 * @qc: Metadata associated with the taskfile to build
718 * Build ATA taskfile for the command @qc for read/write request described
726 int ata_build_rw_tf(struct ata_queued_cmd *qc, u64 block, u32 n_block, in ata_build_rw_tf() argument
729 struct ata_taskfile *tf = &qc->tf; in ata_build_rw_tf()
730 struct ata_device *dev = qc->dev; in ata_build_rw_tf()
748 tf->nsect = qc->hw_tag << 3; in ata_build_rw_tf()
768 ata_set_tf_cdl(qc, cdl); in ata_build_rw_tf()
774 ata_set_tf_cdl(qc, cdl); in ata_build_rw_tf()
778 !(qc->flags & ATA_QCFLAG_HAS_CDL) && in ata_build_rw_tf()
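The hit at line 748 (tf->nsect = qc->hw_tag << 3) is the NCQ command layout: the queue tag occupies bits 7:3 of the sector count register. A hedged caller-side sketch follows; everything after n_block is not visible in these hits and is assumed from the upstream signature:

	int rc = ata_build_rw_tf(qc, block, n_block, tf_flags, cdl, class);
	if (rc)
		return rc;	/* e.g. -ERANGE when block/n_block don't fit the addressing mode */
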
1493 static void ata_qc_complete_internal(struct ata_queued_cmd *qc) in ata_qc_complete_internal() argument
1495 struct completion *waiting = qc->private_data; in ata_qc_complete_internal()
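Only the wake-up itself is elided from this callback; the full upstream body (reconstruction):

	static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
	{
		struct completion *waiting = qc->private_data;

		complete(waiting);
	}
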
1530 struct ata_queued_cmd *qc; in ata_exec_internal() local
1553 /* Initialize internal qc */ in ata_exec_internal()
1554 qc = __ata_qc_from_tag(ap, ATA_TAG_INTERNAL); in ata_exec_internal()
1556 qc->tag = ATA_TAG_INTERNAL; in ata_exec_internal()
1557 qc->hw_tag = 0; in ata_exec_internal()
1558 qc->scsicmd = NULL; in ata_exec_internal()
1559 qc->ap = ap; in ata_exec_internal()
1560 qc->dev = dev; in ata_exec_internal()
1561 ata_qc_reinit(qc); in ata_exec_internal()
1572 /* Prepare and issue qc */ in ata_exec_internal()
1573 qc->tf = *tf; in ata_exec_internal()
1575 memcpy(qc->cdb, cdb, ATAPI_CDB_LEN); in ata_exec_internal()
1580 qc->tf.feature |= ATAPI_DMADIR; in ata_exec_internal()
1582 qc->flags |= ATA_QCFLAG_RESULT_TF; in ata_exec_internal()
1583 qc->dma_dir = dma_dir; in ata_exec_internal()
1586 ata_sg_init(qc, &sgl, 1); in ata_exec_internal()
1587 qc->nbytes = buflen; in ata_exec_internal()
1590 qc->private_data = &wait; in ata_exec_internal()
1591 qc->complete_fn = ata_qc_complete_internal; in ata_exec_internal()
1593 ata_qc_issue(qc); in ata_exec_internal()
1617 * prevents us from completing the qc twice. If we win, the port in ata_exec_internal()
1621 if (qc->flags & ATA_QCFLAG_ACTIVE) { in ata_exec_internal()
1622 qc->err_mask |= AC_ERR_TIMEOUT; in ata_exec_internal()
1624 ata_dev_warn(dev, "qc timeout after %u msecs (cmd 0x%x)\n", in ata_exec_internal()
1631 ap->ops->post_internal_cmd(qc); in ata_exec_internal()
1634 if (qc->flags & ATA_QCFLAG_EH) { in ata_exec_internal()
1635 if (qc->result_tf.status & (ATA_ERR | ATA_DF)) in ata_exec_internal()
1636 qc->err_mask |= AC_ERR_DEV; in ata_exec_internal()
1638 if (!qc->err_mask) in ata_exec_internal()
1639 qc->err_mask |= AC_ERR_OTHER; in ata_exec_internal()
1641 if (qc->err_mask & ~AC_ERR_OTHER) in ata_exec_internal()
1642 qc->err_mask &= ~AC_ERR_OTHER; in ata_exec_internal()
1643 } else if (qc->tf.command == ATA_CMD_REQ_SENSE_DATA) { in ata_exec_internal()
1644 qc->result_tf.status |= ATA_SENSE; in ata_exec_internal()
1650 *tf = qc->result_tf; in ata_exec_internal()
1651 err_mask = qc->err_mask; in ata_exec_internal()
1653 ata_qc_free(qc); in ata_exec_internal()
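A hedged usage sketch of this synchronous internal-command path, modeled on ata_dev_read_id(): fill a taskfile, call ata_exec_internal(), check the returned err_mask. The (buf, buflen, timeout) tail follows the upstream signature and is an assumption as far as these hits go:

	u16 id[ATA_ID_WORDS];		/* 512-byte IDENTIFY buffer (on stack only for the sketch) */
	struct ata_taskfile tf;
	unsigned int err_mask;

	ata_tf_init(dev, &tf);
	tf.command  = ATA_CMD_ID_ATA;
	tf.protocol = ATA_PROT_PIO;
	tf.flags   |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
				     id, sizeof(id), 0 /* 0 = default timeout */);
	if (err_mask)
		ata_dev_warn(dev, "IDENTIFY failed (err_mask=0x%x)\n", err_mask);
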
4565 * @qc: Metadata associated with taskfile to check
4577 int atapi_check_dma(struct ata_queued_cmd *qc) in atapi_check_dma() argument
4579 struct ata_port *ap = qc->ap; in atapi_check_dma()
4584 if (!(qc->dev->quirks & ATA_QUIRK_ATAPI_MOD16_DMA) && in atapi_check_dma()
4585 unlikely(qc->nbytes & 15)) in atapi_check_dma()
4589 return ap->ops->check_atapi_dma(qc); in atapi_check_dma()
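A nonzero return means "do not DMA this ATAPI command". Roughly how the translation path consumes it (hedged sketch; using_pio is a hypothetical local standing in for the real one in libata-scsi.c):

	/* fall back to PIO if the mod-16 quirk check or the LLD's
	 * ->check_atapi_dma() hook vetoes DMA for this command */
	if (!using_pio && atapi_check_dma(qc))
		using_pio = 1;
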
4595 * ata_std_qc_defer - Check whether a qc needs to be deferred
4596 * @qc: ATA command in question
4601 * whether a new command @qc can be issued.
4609 int ata_std_qc_defer(struct ata_queued_cmd *qc) in ata_std_qc_defer() argument
4611 struct ata_link *link = qc->dev->link; in ata_std_qc_defer()
4613 if (ata_is_ncq(qc->tf.protocol)) { in ata_std_qc_defer()
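Only the NCQ branch shows up in the hits. The full deferral rule in upstream libata (reconstruction): an NCQ command may issue only if no non-NCQ command holds the link, and a non-NCQ command only if the link is completely idle:

	if (ata_is_ncq(qc->tf.protocol)) {
		if (!ata_tag_valid(link->active_tag))
			return 0;	/* no non-NCQ command in flight */
	} else {
		if (!ata_tag_valid(link->active_tag) && !link->sactive)
			return 0;	/* link fully idle */
	}

	return ATA_DEFER_LINK;
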
4627 * @qc: Command to be associated
4631 * Initialize the data-related elements of queued_cmd @qc
4638 void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg, in ata_sg_init() argument
4641 qc->sg = sg; in ata_sg_init()
4642 qc->n_elem = n_elem; in ata_sg_init()
4643 qc->cursg = qc->sg; in ata_sg_init()
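Usage is visible in the ata_exec_internal() hits above (lines 1586-1587): a flat buffer becomes a one-entry scatterlist. As a sketch:

	struct scatterlist sgl;

	sg_init_one(&sgl, buf, buflen);	/* standard scatterlist helper */
	ata_sg_init(qc, &sgl, 1);
	qc->nbytes = buflen;
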
4650 * @qc: Command containing DMA memory to be released
4657 static void ata_sg_clean(struct ata_queued_cmd *qc) in ata_sg_clean() argument
4659 struct ata_port *ap = qc->ap; in ata_sg_clean()
4660 struct scatterlist *sg = qc->sg; in ata_sg_clean()
4661 int dir = qc->dma_dir; in ata_sg_clean()
4665 if (qc->n_elem) in ata_sg_clean()
4666 dma_unmap_sg(ap->dev, sg, qc->orig_n_elem, dir); in ata_sg_clean()
4668 qc->flags &= ~ATA_QCFLAG_DMAMAP; in ata_sg_clean()
4669 qc->sg = NULL; in ata_sg_clean()
4674 * @qc: Command with scatter-gather table to be mapped.
4676 * DMA-map the scatter-gather table associated with queued_cmd @qc.
4685 static int ata_sg_setup(struct ata_queued_cmd *qc) in ata_sg_setup() argument
4687 struct ata_port *ap = qc->ap; in ata_sg_setup()
4690 n_elem = dma_map_sg(ap->dev, qc->sg, qc->n_elem, qc->dma_dir); in ata_sg_setup()
4694 qc->orig_n_elem = qc->n_elem; in ata_sg_setup()
4695 qc->n_elem = n_elem; in ata_sg_setup()
4696 qc->flags |= ATA_QCFLAG_DMAMAP; in ata_sg_setup()
4703 static inline void ata_sg_clean(struct ata_queued_cmd *qc) {} in ata_sg_clean() argument
4704 static inline int ata_sg_setup(struct ata_queued_cmd *qc) { return -1; } in ata_sg_setup() argument
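Lines 4703-4704 are the !CONFIG_HAS_DMA stubs: cleaning is a no-op and setup always fails. On the real path, the failure check elided between lines 4690 and 4694 is (reconstruction):

	if (n_elem < 1)
		return -1;	/* dma_map_sg() failed; the caller aborts the qc */
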
4732 * @qc: Command to complete
4740 void ata_qc_free(struct ata_queued_cmd *qc) in ata_qc_free() argument
4742 qc->flags = 0; in ata_qc_free()
4743 if (ata_tag_valid(qc->tag)) in ata_qc_free()
4744 qc->tag = ATA_TAG_POISON; in ata_qc_free()
4747 void __ata_qc_complete(struct ata_queued_cmd *qc) in __ata_qc_complete() argument
4752 if (WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE))) in __ata_qc_complete()
4755 ap = qc->ap; in __ata_qc_complete()
4756 link = qc->dev->link; in __ata_qc_complete()
4758 if (likely(qc->flags & ATA_QCFLAG_DMAMAP)) in __ata_qc_complete()
4759 ata_sg_clean(qc); in __ata_qc_complete()
4761 /* command should be marked inactive atomically with qc completion */ in __ata_qc_complete()
4762 if (ata_is_ncq(qc->tf.protocol)) { in __ata_qc_complete()
4763 link->sactive &= ~(1 << qc->hw_tag); in __ata_qc_complete()
4772 if (unlikely(qc->flags & ATA_QCFLAG_CLEAR_EXCL && in __ata_qc_complete()
4777 * Mark qc as inactive to prevent the port interrupt handler from in __ata_qc_complete()
4781 qc->flags &= ~ATA_QCFLAG_ACTIVE; in __ata_qc_complete()
4782 ap->qc_active &= ~(1ULL << qc->tag); in __ata_qc_complete()
4785 qc->complete_fn(qc); in __ata_qc_complete()
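The hits show only the NCQ half of the "mark inactive" step (line 4763). The full branch in upstream libata (reconstruction) also poisons the active tag for non-NCQ commands:

	if (ata_is_ncq(qc->tf.protocol)) {
		link->sactive &= ~(1 << qc->hw_tag);
		if (!link->sactive)
			ap->nr_active_links--;
	} else {
		link->active_tag = ATA_TAG_POISON;
		ap->nr_active_links--;
	}
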
4788 static void fill_result_tf(struct ata_queued_cmd *qc) in fill_result_tf() argument
4790 struct ata_port *ap = qc->ap; in fill_result_tf()
4796 if (qc->flags & ATA_QCFLAG_RTF_FILLED) in fill_result_tf()
4799 qc->result_tf.flags = qc->tf.flags; in fill_result_tf()
4800 ap->ops->qc_fill_rtf(qc); in fill_result_tf()
4801 qc->flags |= ATA_QCFLAG_RTF_FILLED; in fill_result_tf()
4804 static void ata_verify_xfer(struct ata_queued_cmd *qc) in ata_verify_xfer() argument
4806 struct ata_device *dev = qc->dev; in ata_verify_xfer()
4808 if (!ata_is_data(qc->tf.protocol)) in ata_verify_xfer()
4811 if ((dev->mwdma_mask || dev->udma_mask) && ata_is_pio(qc->tf.protocol)) in ata_verify_xfer()
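Both hits are early-out guards; when neither fires, the transfer mode has proven itself and the function clears the mark (reconstruction of the remainder):

	dev->flags &= ~ATA_DFLAG_DUBIOUS_XFER;
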
4819 * @qc: Command to complete
4832 void ata_qc_complete(struct ata_queued_cmd *qc) in ata_qc_complete() argument
4834 struct ata_port *ap = qc->ap; in ata_qc_complete()
4835 struct ata_device *dev = qc->dev; in ata_qc_complete()
4839 ledtrig_disk_activity(!!(qc->tf.flags & ATA_TFLAG_WRITE)); in ata_qc_complete()
4842 * In order to synchronize EH with the regular execution path, a qc that in ata_qc_complete()
4845 * The normal execution path is responsible for not accessing a qc owned in ata_qc_complete()
4849 if (unlikely(qc->err_mask)) in ata_qc_complete()
4850 qc->flags |= ATA_QCFLAG_EH; in ata_qc_complete()
4856 if (unlikely(ata_tag_internal(qc->tag))) { in ata_qc_complete()
4857 fill_result_tf(qc); in ata_qc_complete()
4858 trace_ata_qc_complete_internal(qc); in ata_qc_complete()
4859 __ata_qc_complete(qc); in ata_qc_complete()
4863 /* Non-internal qc has failed. Fill the result TF and summon EH. */ in ata_qc_complete()
4864 if (unlikely(qc->flags & ATA_QCFLAG_EH)) { in ata_qc_complete()
4865 fill_result_tf(qc); in ata_qc_complete()
4866 trace_ata_qc_complete_failed(qc); in ata_qc_complete()
4867 ata_qc_schedule_eh(qc); in ata_qc_complete()
4874 if (qc->flags & ATA_QCFLAG_RESULT_TF) in ata_qc_complete()
4875 fill_result_tf(qc); in ata_qc_complete()
4877 trace_ata_qc_complete_done(qc); in ata_qc_complete()
4885 if (qc->flags & ATA_QCFLAG_HAS_CDL && in ata_qc_complete()
4886 qc->result_tf.status & ATA_SENSE) { in ata_qc_complete()
4891 qc->scsicmd->flags |= SCMD_FORCE_EH_SUCCESS; in ata_qc_complete()
4892 qc->flags |= ATA_QCFLAG_EH_SUCCESS_CMD; in ata_qc_complete()
4900 ata_qc_schedule_eh(qc); in ata_qc_complete()
4905 switch (qc->tf.command) { in ata_qc_complete()
4907 if (qc->tf.feature != SETFEATURES_WC_ON && in ata_qc_complete()
4908 qc->tf.feature != SETFEATURES_WC_OFF && in ata_qc_complete()
4909 qc->tf.feature != SETFEATURES_RA_ON && in ata_qc_complete()
4910 qc->tf.feature != SETFEATURES_RA_OFF) in ata_qc_complete()
4926 ata_verify_xfer(qc); in ata_qc_complete()
4928 __ata_qc_complete(qc); in ata_qc_complete()
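Hedged LLD-side sketch of the normal completion call: an interrupt handler looks up the active command, folds the device status into err_mask, and completes it. ata_qc_from_tag(), ac_err_mask() and ata_sff_check_status() are standard libata helpers; the surrounding control flow is schematic:

	struct ata_queued_cmd *qc;
	u8 status = ata_sff_check_status(ap);	/* SFF example; LLD-specific in general */

	qc = ata_qc_from_tag(ap, ap->link.active_tag);
	if (qc) {
		qc->err_mask |= ac_err_mask(status);
		ata_qc_complete(qc);
	}
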
4958 * @qc: command to issue to device
4968 void ata_qc_issue(struct ata_queued_cmd *qc) in ata_qc_issue() argument
4970 struct ata_port *ap = qc->ap; in ata_qc_issue()
4971 struct ata_link *link = qc->dev->link; in ata_qc_issue()
4972 u8 prot = qc->tf.protocol; in ata_qc_issue()
4978 WARN_ON_ONCE(link->sactive & (1 << qc->hw_tag)); in ata_qc_issue()
4982 link->sactive |= 1 << qc->hw_tag; in ata_qc_issue()
4987 link->active_tag = qc->tag; in ata_qc_issue()
4990 qc->flags |= ATA_QCFLAG_ACTIVE; in ata_qc_issue()
4991 ap->qc_active |= 1ULL << qc->tag; in ata_qc_issue()
4997 if (ata_is_data(prot) && (!qc->sg || !qc->n_elem || !qc->nbytes)) in ata_qc_issue()
5002 if (ata_sg_setup(qc)) in ata_qc_issue()
5006 if (unlikely(qc->dev->flags & ATA_DFLAG_SLEEPING)) { in ata_qc_issue()
5014 trace_ata_qc_prep(qc); in ata_qc_issue()
5015 qc->err_mask |= ap->ops->qc_prep(qc); in ata_qc_issue()
5016 if (unlikely(qc->err_mask)) in ata_qc_issue()
5020 trace_ata_qc_issue(qc); in ata_qc_issue()
5021 qc->err_mask |= ap->ops->qc_issue(qc); in ata_qc_issue()
5022 if (unlikely(qc->err_mask)) in ata_qc_issue()
5027 qc->err_mask |= AC_ERR_SYSTEM; in ata_qc_issue()
5029 ata_qc_complete(qc); in ata_qc_issue()
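Note the sys_err path at lines 5027/5029: issue failures are reported through the command's own completion, so ata_qc_issue() consumes the qc unconditionally. A hedged caller sketch (my_done is a hypothetical callback):

	qc->complete_fn = my_done;
	ata_qc_issue(qc);	/* returns void; errors arrive via complete_fn/EH */
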
6662 static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc) in ata_dummy_qc_issue() argument
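The dummy op's single elided line fails every command; upstream body (reconstruction):

	static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
	{
		return AC_ERR_SYSTEM;
	}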