Lines matching full:qc (drivers/ata/sata_nv.c)

301 static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc);
302 static enum ata_completion_errors nv_adma_qc_prep(struct ata_queued_cmd *qc);
303 static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc);
316 static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc);
325 static enum ata_completion_errors nv_swncq_qc_prep(struct ata_queued_cmd *qc);
326 static void nv_swncq_fill_sg(struct ata_queued_cmd *qc);
327 static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc);
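
For context, these prototypes are the driver's queued-command hooks. A minimal sketch of how the ADMA ones would be wired into an ata_port_operations table; the table name and the presence of other fields are assumptions, only the hook names above come from the matches:

static struct ata_port_operations nv_adma_ops = {
	/* ... inherited and unrelated fields elided ... */
	.check_atapi_dma	= nv_adma_check_atapi_dma,
	.qc_prep		= nv_adma_qc_prep,
	.qc_issue		= nv_adma_qc_issue,
	.post_internal_cmd	= nv_adma_post_internal_cmd,
};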
758 static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc) in nv_adma_check_atapi_dma() argument
760 struct nv_adma_port_priv *pp = qc->ap->private_data; in nv_adma_check_atapi_dma()
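
Only the signature and the private-data lookup match here; the body is a one-line gate. A plausible reconstruction, assuming this driver's NV_ADMA_ATAPI_SETUP_COMPLETE port flag: ATAPI commands may use DMA only after the ADMA ATAPI setup has completed.

static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;

	/* nonzero means "fall back to PIO for this ATAPI command" */
	return !(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);
}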
857 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag); in nv_host_intr() local
869 /* DEV interrupt w/ no active qc? */ in nv_host_intr()
870 if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) { in nv_host_intr()
876 return ata_bmdma_port_intr(ap, qc); in nv_host_intr()
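
Stitching the matched lines together, nv_host_intr() hands a device interrupt to the BMDMA handler only when a non-polled command is in flight. A sketch under the assumption that hotplug and not-our-interrupt cases are filtered first; the NV_INT_* masks are this driver's own and are not shown in the matches:

static int nv_host_intr(struct ata_port *ap, u8 irq_stat)
{
	struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);

	/* freeze the port if a hotplug event was signalled (assumed) */
	if (unlikely(irq_stat & (NV_INT_HOTPLUG | NV_INT_REMOVE))) {
		ata_port_freeze(ap);
		return 1;
	}

	/* bail out if this is not a device interrupt (assumed) */
	if (!(irq_stat & NV_INT_DEV))
		return 0;

	/* DEV interrupt w/ no active qc? */
	if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
		ata_sff_check_status(ap);
		return 1;
	}

	return ata_bmdma_port_intr(ap, qc);
}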
1088 static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc) in nv_adma_post_internal_cmd() argument
1090 struct nv_adma_port_priv *pp = qc->ap->private_data; in nv_adma_post_internal_cmd()
1093 ata_bmdma_post_internal_cmd(qc); in nv_adma_post_internal_cmd()
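
The matched lines omit the condition guarding the call; a plausible completion, assuming the standard cleanup is only needed when the port was left in register (BMDMA) mode:

static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;

	/* only register-mode commands went through the BMDMA engine */
	if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
		ata_bmdma_post_internal_cmd(qc);
}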
1294 static void nv_adma_fill_aprd(struct ata_queued_cmd *qc, in nv_adma_fill_aprd() argument
1300 if (qc->tf.flags & ATA_TFLAG_WRITE) in nv_adma_fill_aprd()
1302 if (idx == qc->n_elem - 1) in nv_adma_fill_aprd()
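
The two matched conditions set the direction and end-of-table flags on one ADMA PRD entry. A sketch of the whole helper, assuming the NV_APRD_* flag names used elsewhere in this driver:

static void nv_adma_fill_aprd(struct ata_queued_cmd *qc,
			      struct scatterlist *sg,
			      int idx,
			      struct nv_adma_prd *aprd)
{
	u8 flags = 0;

	if (qc->tf.flags & ATA_TFLAG_WRITE)
		flags |= NV_APRD_WRITE;
	if (idx == qc->n_elem - 1)
		flags |= NV_APRD_END;		/* last entry in the table */
	else if (idx != 4)
		flags |= NV_APRD_CONT;		/* more entries follow */

	aprd->addr  = cpu_to_le64((u64)sg_dma_address(sg));
	aprd->len   = cpu_to_le32((u32)sg_dma_len(sg));
	aprd->flags = flags;
	aprd->packet_len = 0;
}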
1313 static void nv_adma_fill_sg(struct ata_queued_cmd *qc, struct nv_adma_cpb *cpb) in nv_adma_fill_sg() argument
1315 struct nv_adma_port_priv *pp = qc->ap->private_data; in nv_adma_fill_sg()
1320 for_each_sg(qc->sg, sg, qc->n_elem, si) { in nv_adma_fill_sg()
1322 &pp->aprd[NV_ADMA_SGTBL_LEN * qc->hw_tag + (si-5)]; in nv_adma_fill_sg()
1323 nv_adma_fill_aprd(qc, sg, si, aprd); in nv_adma_fill_sg()
1326 cpb->next_aprd = cpu_to_le64(((u64)(pp->aprd_dma + NV_ADMA_SGTBL_SZ * qc->hw_tag))); in nv_adma_fill_sg()
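
The loop shows the key layout decision: the first five scatter/gather entries live inline in the CPB, and any overflow spills into the per-tag external APRD table whose bus address goes into next_aprd. A sketch filling in the elided lines; the si < 5 selection and the si > 5 test are assumptions consistent with the matched fragments:

static void nv_adma_fill_sg(struct ata_queued_cmd *qc, struct nv_adma_cpb *cpb)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	struct nv_adma_prd *aprd;
	struct scatterlist *sg;
	unsigned int si;

	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		aprd = (si < 5) ? &cpb->aprd[si] :
			&pp->aprd[NV_ADMA_SGTBL_LEN * qc->hw_tag + (si-5)];
		nv_adma_fill_aprd(qc, sg, si, aprd);
	}
	if (si > 5)
		cpb->next_aprd = cpu_to_le64((u64)(pp->aprd_dma +
					NV_ADMA_SGTBL_SZ * qc->hw_tag));
	else
		cpb->next_aprd = cpu_to_le64(0);
}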
1331 static int nv_adma_use_reg_mode(struct ata_queued_cmd *qc) in nv_adma_use_reg_mode() argument
1333 struct nv_adma_port_priv *pp = qc->ap->private_data; in nv_adma_use_reg_mode()
1338 (qc->tf.flags & ATA_TFLAG_POLLING)) in nv_adma_use_reg_mode()
1341 if ((qc->flags & ATA_QCFLAG_DMAMAP) || in nv_adma_use_reg_mode()
1342 (qc->tf.protocol == ATA_PROT_NODATA)) in nv_adma_use_reg_mode()
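
Both tests of the predicate are visible above; a sketch of the full function, assuming nonzero selects legacy register mode and zero selects the ADMA engine:

static int nv_adma_use_reg_mode(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;

	/* ATAPI-configured ports and polled commands must use the
	   legacy register interface */
	if ((pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
	    (qc->tf.flags & ATA_TFLAG_POLLING))
		return 1;

	/* DMA-mapped data commands and no-data commands can go
	   through the ADMA engine */
	if ((qc->flags & ATA_QCFLAG_DMAMAP) ||
	    (qc->tf.protocol == ATA_PROT_NODATA))
		return 0;

	return 1;
}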
1348 static enum ata_completion_errors nv_adma_qc_prep(struct ata_queued_cmd *qc) in nv_adma_qc_prep() argument
1350 struct nv_adma_port_priv *pp = qc->ap->private_data; in nv_adma_qc_prep()
1351 struct nv_adma_cpb *cpb = &pp->cpb[qc->hw_tag]; in nv_adma_qc_prep()
1355 if (nv_adma_use_reg_mode(qc)) { in nv_adma_qc_prep()
1357 (qc->flags & ATA_QCFLAG_DMAMAP)); in nv_adma_qc_prep()
1358 nv_adma_register_mode(qc->ap); in nv_adma_qc_prep()
1359 ata_bmdma_qc_prep(qc); in nv_adma_qc_prep()
1369 cpb->tag = qc->hw_tag; in nv_adma_qc_prep()
1373 if (qc->tf.protocol == ATA_PROT_NCQ) in nv_adma_qc_prep()
1376 nv_adma_tf_to_cpb(&qc->tf, cpb->tf); in nv_adma_qc_prep()
1378 if (qc->flags & ATA_QCFLAG_DMAMAP) { in nv_adma_qc_prep()
1379 nv_adma_fill_sg(qc, cpb); in nv_adma_qc_prep()
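
nv_adma_qc_prep() either falls back to the stock BMDMA prep (register mode) or builds a command parameter block (CPB) for the tag. A trimmed sketch of the ADMA path; the NV_CPB_CTL_* / NV_CPB_RESP_DONE names and the write-barrier ordering that keeps the CPB invalid until fully written are assumptions based on the rest of this driver:

static enum ata_completion_errors nv_adma_qc_prep(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	struct nv_adma_cpb *cpb = &pp->cpb[qc->hw_tag];
	u8 ctl_flags = NV_CPB_CTL_CPB_VALID | NV_CPB_CTL_IEN;

	if (nv_adma_use_reg_mode(qc)) {
		BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) &&
		       (qc->flags & ATA_QCFLAG_DMAMAP));
		nv_adma_register_mode(qc->ap);
		ata_bmdma_qc_prep(qc);
		return AC_ERR_OK;
	}

	/* invalidate the CPB before rewriting it */
	cpb->resp_flags = NV_CPB_RESP_DONE;
	wmb();
	cpb->ctl_flags = 0;
	wmb();

	cpb->tag = qc->hw_tag;

	if (qc->tf.protocol == ATA_PROT_NCQ)
		ctl_flags |= NV_CPB_CTL_FPDMA;

	nv_adma_tf_to_cpb(&qc->tf, cpb->tf);

	if (qc->flags & ATA_QCFLAG_DMAMAP) {
		nv_adma_fill_sg(qc, cpb);
		ctl_flags |= NV_CPB_CTL_APRD_VALID;
	}

	/* don't let the device see CPB_VALID until everything is written */
	wmb();
	cpb->ctl_flags = ctl_flags;
	wmb();
	cpb->resp_flags = 0;

	return AC_ERR_OK;
}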
1394 static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc) in nv_adma_qc_issue() argument
1396 struct nv_adma_port_priv *pp = qc->ap->private_data; in nv_adma_qc_issue()
1398 int curr_ncq = (qc->tf.protocol == ATA_PROT_NCQ); in nv_adma_qc_issue()
1403 if (unlikely(qc->tf.protocol == ATA_PROT_NCQ && in nv_adma_qc_issue()
1404 (qc->flags & ATA_QCFLAG_RESULT_TF))) { in nv_adma_qc_issue()
1405 ata_dev_err(qc->dev, "NCQ w/ RESULT_TF not allowed\n"); in nv_adma_qc_issue()
1409 if (nv_adma_use_reg_mode(qc)) { in nv_adma_qc_issue()
1412 (qc->flags & ATA_QCFLAG_DMAMAP)); in nv_adma_qc_issue()
1413 nv_adma_register_mode(qc->ap); in nv_adma_qc_issue()
1414 return ata_bmdma_qc_issue(qc); in nv_adma_qc_issue()
1416 nv_adma_mode(qc->ap); in nv_adma_qc_issue()
1429 writew(qc->hw_tag, mmio + NV_ADMA_APPEND); in nv_adma_qc_issue()
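
Issue mirrors prep: register-mode commands go through ata_bmdma_qc_issue(), while ADMA commands are kicked by writing the tag to the APPEND register. A sketch of the elided middle; pp->ctl_block, pp->last_issue_ncq, and the delay when alternating between NCQ and non-NCQ streams are assumptions not shown in the matches:

static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	int curr_ncq = (qc->tf.protocol == ATA_PROT_NCQ);

	/* reading the result taskfile would drop us out of ADMA mode
	   and abort the other queued commands */
	if (unlikely(qc->tf.protocol == ATA_PROT_NCQ &&
		     (qc->flags & ATA_QCFLAG_RESULT_TF))) {
		ata_dev_err(qc->dev, "NCQ w/ RESULT_TF not allowed\n");
		return AC_ERR_SYSTEM;
	}

	if (nv_adma_use_reg_mode(qc)) {
		BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) &&
		       (qc->flags & ATA_QCFLAG_DMAMAP));
		nv_adma_register_mode(qc->ap);
		return ata_bmdma_qc_issue(qc);
	}

	nv_adma_mode(qc->ap);
	wmb();

	/* assumed: the hardware wants a short pause when switching
	   between NCQ and non-NCQ command streams */
	if (curr_ncq != pp->last_issue_ncq) {
		udelay(20);
		pp->last_issue_ncq = curr_ncq;
	}

	writew(qc->hw_tag, mmio + NV_ADMA_APPEND);

	return 0;
}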
1445 struct ata_queued_cmd *qc; in nv_generic_interrupt() local
1447 qc = ata_qc_from_tag(ap, ap->link.active_tag); in nv_generic_interrupt()
1448 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) { in nv_generic_interrupt()
1449 handled += ata_bmdma_port_intr(ap, qc); in nv_generic_interrupt()
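
The generic handler simply walks every port and defers to the BMDMA interrupt path when a non-polled command is active. A sketch of the surrounding loop, assuming the usual host-lock discipline; the else branch clearing latched status is an assumption:

static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	unsigned int i, handled = 0;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		struct ata_queued_cmd *qc;

		qc = ata_qc_from_tag(ap, ap->link.active_tag);
		if (qc && !(qc->tf.flags & ATA_TFLAG_POLLING)) {
			handled += ata_bmdma_port_intr(ap, qc);
		} else {
			/* no request pending: clear interrupt status
			   anyway, in case one is latched (assumed) */
			ap->ops->sff_check_status(ap);
		}
	}

	spin_unlock_irqrestore(&host->lock, flags);

	return IRQ_RETVAL(handled);
}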
1684 static void nv_swncq_qc_to_dq(struct ata_port *ap, struct ata_queued_cmd *qc) in nv_swncq_qc_to_dq() argument
1691 dq->defer_bits |= (1 << qc->hw_tag); in nv_swncq_qc_to_dq()
1692 dq->tag[dq->tail++ & (ATA_MAX_QUEUE - 1)] = qc->hw_tag; in nv_swncq_qc_to_dq()
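
The defer queue is a small ring buffer plus a bitmap of deferred tags. A sketch of the complete enqueue helper; the struct defer_queue layout and the overflow WARN_ON are assumptions consistent with the matched lines:

static void nv_swncq_qc_to_dq(struct ata_port *ap, struct ata_queued_cmd *qc)
{
	struct nv_swncq_port_priv *pp = ap->private_data;
	struct defer_queue *dq = &pp->defer_queue;

	/* the ring holds at most ATA_MAX_QUEUE tags */
	WARN_ON(dq->tail - dq->head == ATA_MAX_QUEUE);
	dq->defer_bits |= (1 << qc->hw_tag);
	dq->tag[dq->tail++ & (ATA_MAX_QUEUE - 1)] = qc->hw_tag;
}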
1744 struct ata_queued_cmd qc; in __ata_bmdma_stop() local
1746 qc.ap = ap; in __ata_bmdma_stop()
1747 ata_bmdma_stop(&qc); in __ata_bmdma_stop()
1757 ata_port_err(ap, "EH in SWNCQ mode,QC:qc_active 0x%llX sactive 0x%X\n", in nv_swncq_ncq_stop()
1950 static enum ata_completion_errors nv_swncq_qc_prep(struct ata_queued_cmd *qc) in nv_swncq_qc_prep() argument
1952 if (qc->tf.protocol != ATA_PROT_NCQ) { in nv_swncq_qc_prep()
1953 ata_bmdma_qc_prep(qc); in nv_swncq_qc_prep()
1957 if (!(qc->flags & ATA_QCFLAG_DMAMAP)) in nv_swncq_qc_prep()
1960 nv_swncq_fill_sg(qc); in nv_swncq_qc_prep()
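
Prep for SWNCQ only has work to do for DMA-mapped NCQ commands; everything else is handed to the stock BMDMA prep. A plausible completion of the early-return structure around the matched lines:

static enum ata_completion_errors nv_swncq_qc_prep(struct ata_queued_cmd *qc)
{
	if (qc->tf.protocol != ATA_PROT_NCQ) {
		ata_bmdma_qc_prep(qc);
		return AC_ERR_OK;
	}

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return AC_ERR_OK;

	nv_swncq_fill_sg(qc);

	return AC_ERR_OK;
}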
1965 static void nv_swncq_fill_sg(struct ata_queued_cmd *qc) in nv_swncq_fill_sg() argument
1967 struct ata_port *ap = qc->ap; in nv_swncq_fill_sg()
1973 prd = pp->prd + ATA_MAX_PRD * qc->hw_tag; in nv_swncq_fill_sg()
1976 for_each_sg(qc->sg, sg, qc->n_elem, si) { in nv_swncq_fill_sg()
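
Each tag gets its own PRD table carved out of one allocation (pp->prd + ATA_MAX_PRD * tag). A sketch of the fill loop, assuming the standard BMDMA constraint that no entry may cross a 64 KiB boundary; the splitting logic is an assumption based on that constraint:

static void nv_swncq_fill_sg(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct nv_swncq_port_priv *pp = ap->private_data;
	struct ata_bmdma_prd *prd;
	struct scatterlist *sg;
	unsigned int si, idx;

	prd = pp->prd + ATA_MAX_PRD * qc->hw_tag;

	idx = 0;
	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		u32 addr = (u32)sg_dma_address(sg);
		u32 sg_len = sg_dma_len(sg);

		/* split entries that would cross a 64K boundary */
		while (sg_len) {
			u32 offset = addr & 0xffff;
			u32 len = sg_len;

			if ((offset + sg_len) > 0x10000)
				len = 0x10000 - offset;

			prd[idx].addr = cpu_to_le32(addr);
			prd[idx].flags_len = cpu_to_le32(len & 0xffff);

			idx++;
			sg_len -= len;
			addr += len;
		}
	}

	/* mark the last entry as end-of-table */
	prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
}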
2002 struct ata_queued_cmd *qc) in nv_swncq_issue_atacmd() argument
2006 if (qc == NULL) in nv_swncq_issue_atacmd()
2009 writel((1 << qc->hw_tag), pp->sactive_block); in nv_swncq_issue_atacmd()
2010 pp->last_issue_tag = qc->hw_tag; in nv_swncq_issue_atacmd()
2011 pp->dhfis_bits &= ~(1 << qc->hw_tag); in nv_swncq_issue_atacmd()
2012 pp->dmafis_bits &= ~(1 << qc->hw_tag); in nv_swncq_issue_atacmd()
2013 pp->qc_active |= (0x1 << qc->hw_tag); in nv_swncq_issue_atacmd()
2015 trace_ata_tf_load(ap, &qc->tf); in nv_swncq_issue_atacmd()
2016 ap->ops->sff_tf_load(ap, &qc->tf); /* load tf registers */ in nv_swncq_issue_atacmd()
2017 trace_ata_exec_command(ap, &qc->tf, qc->hw_tag); in nv_swncq_issue_atacmd()
2018 ap->ops->sff_exec_command(ap, &qc->tf); in nv_swncq_issue_atacmd()
2023 static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc) in nv_swncq_qc_issue() argument
2025 struct ata_port *ap = qc->ap; in nv_swncq_qc_issue()
2028 if (qc->tf.protocol != ATA_PROT_NCQ) in nv_swncq_qc_issue()
2029 return ata_bmdma_qc_issue(qc); in nv_swncq_qc_issue()
2032 nv_swncq_issue_atacmd(ap, qc); in nv_swncq_qc_issue()
2034 nv_swncq_qc_to_dq(ap, qc); /* add qc to defer queue */ in nv_swncq_qc_issue()
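
The issue path shows the core SWNCQ policy: if nothing is in flight, issue immediately; otherwise park the command on the defer queue. A plausible completion around the two matched calls, assuming pp->qc_active is the in-flight tag bitmap:

static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct nv_swncq_port_priv *pp = ap->private_data;

	if (qc->tf.protocol != ATA_PROT_NCQ)
		return ata_bmdma_qc_issue(qc);

	if (!pp->qc_active)
		nv_swncq_issue_atacmd(ap, qc);
	else
		nv_swncq_qc_to_dq(ap, qc);	/* add qc to defer queue */

	return 0;
}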
2066 struct ata_queued_cmd *qc; in nv_swncq_sdbfis() local
2113 ata_port_dbg(ap, "QC: qc_active 0x%llx," in nv_swncq_sdbfis()
2123 qc = ata_qc_from_tag(ap, pp->last_issue_tag); in nv_swncq_sdbfis()
2124 nv_swncq_issue_atacmd(ap, qc); in nv_swncq_sdbfis()
2130 qc = nv_swncq_qc_from_dq(ap); in nv_swncq_sdbfis()
2131 WARN_ON(qc == NULL); in nv_swncq_sdbfis()
2132 nv_swncq_issue_atacmd(ap, qc); in nv_swncq_sdbfis()
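
These matches are the tail of the SDB-FIS completion path: if the last issued command never produced its D2H register FIS, it is re-issued from last_issue_tag; otherwise the next deferred command, if any, is started. A heavily trimmed sketch of just that tail; the completion bookkeeping above it is elided and lack_dhfis is assumed to be this function's local flag for the missing-FIS case:

	/* ... completions for done_mask have been processed above ... */

	if (lack_dhfis) {
		/* the controller lost the D2H FIS: re-issue the command */
		qc = ata_qc_from_tag(ap, pp->last_issue_tag);
		nv_swncq_issue_atacmd(ap, qc);
		return 0;
	}

	if (pp->defer_queue.defer_bits) {
		/* start the next deferred command */
		qc = nv_swncq_qc_from_dq(ap);
		WARN_ON(qc == NULL);
		nv_swncq_issue_atacmd(ap, qc);
	}

	return 0;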
2149 struct ata_queued_cmd *qc; in nv_swncq_dmafis() local
2159 qc = ata_qc_from_tag(ap, tag); in nv_swncq_dmafis()
2161 if (unlikely(!qc)) in nv_swncq_dmafis()
2164 rw = qc->tf.flags & ATA_TFLAG_WRITE; in nv_swncq_dmafis()
2167 iowrite32(pp->prd_dma + ATA_PRD_TBL_SZ * qc->hw_tag, in nv_swncq_dmafis()
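
On a DMA-setup FIS the driver points the BMDMA engine at the per-tag PRD table and starts it. A sketch of the remainder, assuming nv_swncq_tag() extracts the active tag and the usual ATA_DMA_* register bits; the direction handling is an assumption from standard BMDMA semantics:

static void nv_swncq_dmafis(struct ata_port *ap)
{
	struct nv_swncq_port_priv *pp = ap->private_data;
	struct ata_queued_cmd *qc;
	unsigned int rw;
	u8 dmactl, tag;

	__ata_bmdma_stop(ap);
	tag = nv_swncq_tag(ap);

	qc = ata_qc_from_tag(ap, tag);
	if (unlikely(!qc))
		return;

	rw = qc->tf.flags & ATA_TFLAG_WRITE;

	/* load the PRD table address for this tag */
	iowrite32(pp->prd_dma + ATA_PRD_TBL_SZ * qc->hw_tag,
		  ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);

	/* set the transfer direction and start the engine;
	   ATA_DMA_WR means the engine writes to memory (a device read) */
	dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
	dmactl &= ~ATA_DMA_WR;
	if (!rw)
		dmactl |= ATA_DMA_WR;

	iowrite8(dmactl | ATA_DMA_START,
		 ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
}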
2182 struct ata_queued_cmd *qc; in nv_swncq_host_interrupt() local
2255 qc = nv_swncq_qc_from_dq(ap); in nv_swncq_host_interrupt()
2256 nv_swncq_issue_atacmd(ap, qc); in nv_swncq_host_interrupt()