Lines matching full:cqe (identifier search hits from the Linux kernel's drivers/infiniband/hw/cxgb4/cq.c, the Chelsio T4 iWARP completion-queue code)

186 struct t4_cqe cqe; in insert_recv_cqe() local
190 memset(&cqe, 0, sizeof(cqe)); in insert_recv_cqe()
191 cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) | in insert_recv_cqe()
196 cqe.bits_type_ts = cpu_to_be64(CQE_GENBIT_V((u64)cq->gen)); in insert_recv_cqe()
198 cqe.u.srcqe.abs_rqe_idx = cpu_to_be32(srqidx); in insert_recv_cqe()
199 cq->sw_queue[cq->sw_pidx] = cqe; in insert_recv_cqe()
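A minimal sketch of what these insert_recv_cqe() hits are doing: the driver fabricates a "software" CQE whose status is T4_ERR_SWFLUSH, tags it with the SWCQE bit so the poll path knows it never came from hardware, and parks it in the CQ's software shadow queue (the srqidx match shows that, for SRQs, the absolute RQE index is stamped into the CQE as well). The struct layout and bit positions below are simplified stand-ins for the real t4.h definitions, not the hardware format:

    #include <stdint.h>
    #include <string.h>

    #define CQE_STATUS_SHIFT 5            /* hypothetical bit layout */
    #define CQE_SWCQE_BIT    (1u << 31)   /* "software CQE" marker */
    #define ERR_SWFLUSH      0x8          /* flushed-by-software status */

    struct sw_cqe {
        uint32_t header;                  /* status | flags | qpid */
        uint64_t bits_type_ts;            /* generation bit lives here */
    };

    struct sw_cq {
        struct sw_cqe *sw_queue;          /* software-only shadow ring */
        uint16_t sw_pidx;                 /* software producer index */
        uint8_t  gen;                     /* current generation */
    };

    /* Fabricate a flush CQE for qpid and store it at the producer
     * index; the produce step itself is sketched further below. */
    static void insert_flush_cqe(struct sw_cq *cq, uint32_t qpid)
    {
        struct sw_cqe cqe;

        memset(&cqe, 0, sizeof(cqe));
        cqe.header = ((uint32_t)ERR_SWFLUSH << CQE_STATUS_SHIFT) |
                     CQE_SWCQE_BIT | qpid;
        cqe.bits_type_ts = (uint64_t)cq->gen;  /* stamp generation */
        cq->sw_queue[cq->sw_pidx] = cqe;
    }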
220 struct t4_cqe cqe; in insert_sq_cqe() local
224 memset(&cqe, 0, sizeof(cqe)); in insert_sq_cqe()
225 cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) | in insert_sq_cqe()
230 CQE_WRID_SQ_IDX(&cqe) = swcqe->idx; in insert_sq_cqe()
231 cqe.bits_type_ts = cpu_to_be64(CQE_GENBIT_V((u64)cq->gen)); in insert_sq_cqe()
232 cq->sw_queue[cq->sw_pidx] = cqe; in insert_sq_cqe()
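insert_sq_cqe() builds the same kind of software flush CQE but additionally records the work request's SQ index (the CQE_WRID_SQ_IDX(&cqe) = swcqe->idx match) so the poll path can tie the flush completion back to the right WR. Both insert routines finish by producing onto the software queue; a simplified stand-in for that produce step, assuming ring-wrap plus an in-use counter to catch overflow (the driver flags cq->error instead of returning false):

    #include <stdint.h>
    #include <stdbool.h>

    struct swcq_ring {
        uint16_t sw_pidx;      /* software producer index */
        uint16_t size;         /* ring capacity */
        int      sw_in_use;    /* outstanding software CQEs */
    };

    /* Advance the software producer index with wrap-around, refusing
     * to overflow the ring. */
    static bool swcq_produce(struct swcq_ring *cq)
    {
        if (cq->sw_in_use == cq->size)
            return false;      /* software CQ overflow */
        cq->sw_in_use++;
        if (++cq->sw_pidx == cq->size)
            cq->sw_pidx = 0;   /* ring wrap */
        return true;
    }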
284 * Insert this completed cqe into the swcq. in flush_completed_wrs()
286 pr_debug("moving cqe into swcq sq idx %u cq idx %u\n", in flush_completed_wrs()
288 swsqe->cqe.header |= htonl(CQE_SWCQE_V(1)); in flush_completed_wrs()
289 cq->sw_queue[cq->sw_pidx] = swsqe->cqe; in flush_completed_wrs()
392 * Don't write to the HWCQ; create a new read req CQE in c4iw_flush_hw_cq()
405 swsqe->cqe = *hw_cqe; in c4iw_flush_hw_cq()
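Two flush-path matches: c4iw_flush_hw_cq() saves each reaped hardware CQE into its software SQ entry (swsqe->cqe = *hw_cqe) instead of completing it immediately, and flush_completed_wrs() later replays the saved, completed entries onto the software CQ in submission order, OR-ing the SWCQE bit into the header (the htonl(CQE_SWCQE_V(1)) match) so the poll path can tell it is software-generated. A simplified sketch of that in-order replay, with stand-in types rather than the driver's t4_wq/t4_cq:

    #include <stdint.h>
    #include <stdbool.h>

    #define SWCQE_BIT (1u << 31)    /* hypothetical SWCQE flag bit */

    struct saved_cqe { uint32_t header; };

    struct sw_sqe {
        bool complete;              /* HW CQE already seen for it */
        bool flushed;               /* already replayed to the swcq */
        struct saved_cqe cqe;       /* copy made by the flush path */
    };

    /* Walk the software SQ from the flush cursor, move each saved,
     * completed CQE to the software CQ, and stop at the first
     * incomplete WR so completions stay in submission order. */
    static void flush_completed(struct sw_sqe *sq, uint16_t sq_size,
                                uint16_t *flush_cidx,
                                struct saved_cqe *swcq, uint16_t cq_size,
                                uint16_t *sw_pidx)
    {
        uint16_t cidx = *flush_cidx;

        while (sq[cidx].complete && !sq[cidx].flushed) {
            sq[cidx].cqe.header |= SWCQE_BIT;   /* mark SW origin */
            swcq[*sw_pidx] = sq[cidx].cqe;
            if (++*sw_pidx == cq_size)
                *sw_pidx = 0;
            sq[cidx].flushed = true;
            if (++cidx == sq_size)
                cidx = 0;
        }
        *flush_cidx = cidx;
    }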
422 static int cqe_completes_wr(struct t4_cqe *cqe, struct t4_wq *wq) in cqe_completes_wr() argument
424 if (DRAIN_CQE(cqe)) { in cqe_completes_wr()
425 WARN_ONCE(1, "Unexpected DRAIN CQE qp id %u!\n", wq->sq.qid); in cqe_completes_wr()
429 if (CQE_OPCODE(cqe) == FW_RI_TERMINATE) in cqe_completes_wr()
432 if ((CQE_OPCODE(cqe) == FW_RI_RDMA_WRITE) && RQ_TYPE(cqe)) in cqe_completes_wr()
435 if ((CQE_OPCODE(cqe) == FW_RI_READ_RESP) && SQ_TYPE(cqe)) in cqe_completes_wr()
438 if (CQE_SEND_OPCODE(cqe) && RQ_TYPE(cqe) && t4_rq_empty(wq)) in cqe_completes_wr()
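Taken together, these cqe_completes_wr() matches form a predicate: does this CQE actually retire a posted work request? A sketch of that decision on a decoded view of the CQE; the field and opcode names are simplified stand-ins for the driver's CQE_OPCODE()/RQ_TYPE()/SQ_TYPE() accessors, and CQE_SEND_OPCODE() in the driver matches all SEND variants, not just one:

    #include <stdbool.h>

    enum op { OP_SEND, OP_RDMA_WRITE, OP_READ_RESP, OP_TERMINATE };

    struct cqe_view {
        enum op opcode;
        bool    rq_type;    /* true: RQ completion, false: SQ */
    };

    static bool completes_wr(const struct cqe_view *cqe, bool rq_empty)
    {
        if (cqe->opcode == OP_TERMINATE)
            return false;   /* TERMINATE carries no WR */
        if (cqe->opcode == OP_RDMA_WRITE && cqe->rq_type)
            return false;   /* incoming WRITE consumes no RQ WR */
        if (cqe->opcode == OP_READ_RESP && !cqe->rq_type)
            return false;   /* read response is SQ bookkeeping */
        if (cqe->opcode == OP_SEND && cqe->rq_type && rq_empty)
            return false;   /* empty RQ: nothing left to retire */
        return true;
    }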
445 struct t4_cqe *cqe; in c4iw_count_rcqes() local
452 cqe = &cq->sw_queue[ptr]; in c4iw_count_rcqes()
453 if (RQ_TYPE(cqe) && (CQE_OPCODE(cqe) != FW_RI_READ_RESP) && in c4iw_count_rcqes()
454 (CQE_QPID(cqe) == wq->sq.qid) && cqe_completes_wr(cqe, wq)) in c4iw_count_rcqes()
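c4iw_count_rcqes() walks the software CQ ring from the consumer to the producer index, counting receive completions for one QP that will really retire a receive WR. A sketch of that walk over a decoded view of the entries; the driver instead tests RQ_TYPE(), CQE_OPCODE() and CQE_QPID() on the packed CQE, and the completes_wr flag here stands in for the predicate sketched above:

    #include <stdint.h>
    #include <stdbool.h>

    struct dec_cqe {
        uint32_t qpid;
        bool     rq_type;
        bool     is_read_resp;
        bool     completes_wr;
    };

    static int count_rcqes(const struct dec_cqe *ring, uint16_t size,
                           uint16_t cidx, uint16_t pidx, uint32_t qpid)
    {
        uint16_t ptr = cidx;
        int count = 0;

        while (ptr != pidx) {
            const struct dec_cqe *cqe = &ring[ptr];

            if (cqe->rq_type && !cqe->is_read_resp &&
                cqe->qpid == qpid && cqe->completes_wr)
                count++;
            if (++ptr == size)
                ptr = 0;            /* ring wrap */
        }
        return count;
    }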
500 …pr_debug("%s in order cqe rel_idx %u cidx %u pidx %u wq_pidx %u in_use %u rq_size %u wr_id %llx\n", in reap_srq_cqe()
517 …pr_debug("%s ooo cqe rel_idx %u cidx %u pidx %u wq_pidx %u in_use %u rq_size %u ooo_count %u wr_id… in reap_srq_cqe()
532 * check the validity of the first CQE,
536 * cqe_flushed: 1 iff the CQE is flushed.
537 * cqe: copy of the polled CQE.
540 * 0 CQE returned ok.
541 * -EAGAIN CQE skipped, try again.
544 static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe, in poll_cq() argument
557 …pr_debug("CQE OVF %u qpid 0x%0x genbit %u type %u status 0x%0x opcode 0x%0x len 0x%0x wrid_hi_stag… in poll_cq()
564 * skip cqe's not affiliated with a QP. in poll_cq()
572 * skip hw cqe's if the wq is flushed. in poll_cq()
588 * Special cqe for drain WR completions... in poll_cq()
592 *cqe = *hw_cqe; in poll_cq()
598 * 1) the cqe doesn't contain the sq_wptr from the wr. in poll_cq()
637 * Don't write to the HWCQ, so create a new read req CQE in poll_cq()
686 swsqe->cqe = *hw_cqe; in poll_cq()
693 *cqe = *hw_cqe; in poll_cq()
743 pr_debug("cq %p cqid 0x%x skip sw cqe cidx %u\n", in poll_cq()
747 pr_debug("cq %p cqid 0x%x skip hw cqe cidx %u\n", in poll_cq()
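The comment block above spells out poll_cq()'s contract: it copies out the polled CQE, reports cqe_flushed for flushed entries, returns 0 when a CQE is delivered and -EAGAIN when one was consumed but must be skipped (unaffiliated CQEs, stale HW CQEs on a flushed WQ, and the "skip sw/hw cqe" debug lines just above). A runnable toy showing how a caller is expected to treat -EAGAIN; the scripted stub stands in for the real poll and is not the driver's API:

    #include <errno.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Scripted stand-in for poll_cq(): 0 delivers a CQE, -EAGAIN
     * means "consumed but skipped, try again", -ENODATA means empty. */
    static int poll_cq_stub(int *cursor, uint32_t *cqe_out)
    {
        static const int script[] = { -EAGAIN, -EAGAIN, 0, -ENODATA };
        int ret = script[(*cursor)++];

        if (ret == 0)
            *cqe_out = 0xc0ffee;    /* pretend-CQE payload */
        return ret;
    }

    int main(void)
    {
        uint32_t cqe = 0;
        int cursor = 0, ret;

        /* Callers retry on -EAGAIN, exactly as the contract above
         * documents for skipped CQEs. */
        do {
            ret = poll_cq_stub(&cursor, &cqe);
        } while (ret == -EAGAIN);

        if (ret == 0)
            printf("polled cqe 0x%x\n", cqe);
        else
            printf("cq empty (%d)\n", ret);
        return 0;
    }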
757 struct t4_cqe cqe; in __c4iw_poll_cq_one() local
764 ret = poll_cq(wq, &(chp->cq), &cqe, &cqe_flushed, &cookie, &credit, in __c4iw_poll_cq_one()
771 wc->vendor_err = CQE_STATUS(&cqe); in __c4iw_poll_cq_one()
782 CQE_QPID(&cqe), in __c4iw_poll_cq_one()
783 CQE_TYPE(&cqe), CQE_OPCODE(&cqe), in __c4iw_poll_cq_one()
784 CQE_STATUS(&cqe), CQE_LEN(&cqe), in __c4iw_poll_cq_one()
785 CQE_WRID_HI(&cqe), CQE_WRID_LOW(&cqe), in __c4iw_poll_cq_one()
788 if (CQE_TYPE(&cqe) == 0) { in __c4iw_poll_cq_one()
789 if (!CQE_STATUS(&cqe)) in __c4iw_poll_cq_one()
790 wc->byte_len = CQE_LEN(&cqe); in __c4iw_poll_cq_one()
794 switch (CQE_OPCODE(&cqe)) { in __c4iw_poll_cq_one()
801 wc->ex.invalidate_rkey = CQE_WRID_STAG(&cqe); in __c4iw_poll_cq_one()
807 wc->ex.imm_data = CQE_IMM_DATA(&cqe); in __c4iw_poll_cq_one()
811 pr_err("Unexpected opcode %d in the CQE received for QPID=0x%0x\n", in __c4iw_poll_cq_one()
812 CQE_OPCODE(&cqe), CQE_QPID(&cqe)); in __c4iw_poll_cq_one()
817 switch (CQE_OPCODE(&cqe)) { in __c4iw_poll_cq_one()
824 wc->byte_len = CQE_LEN(&cqe); in __c4iw_poll_cq_one()
843 if (CQE_STATUS(&cqe) != T4_ERR_SUCCESS) in __c4iw_poll_cq_one()
845 CQE_WRID_FR_STAG(&cqe)); in __c4iw_poll_cq_one()
848 pr_err("Unexpected opcode %d in the CQE received for QPID=0x%0x\n", in __c4iw_poll_cq_one()
849 CQE_OPCODE(&cqe), CQE_QPID(&cqe)); in __c4iw_poll_cq_one()
859 switch (CQE_STATUS(&cqe)) { in __c4iw_poll_cq_one()
904 CQE_STATUS(&cqe), CQE_QPID(&cqe)); in __c4iw_poll_cq_one()
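The final switch in __c4iw_poll_cq_one() (the CQE_STATUS(&cqe) match above) translates hardware T4_ERR_* status codes into generic ib_wc_status values, for example T4_ERR_SUCCESS to IB_WC_SUCCESS and T4_ERR_SWFLUSH to IB_WC_WR_FLUSH_ERR. A compressed sketch with illustrative enum values, not the kernel's:

    enum t4_err  { T4_OK, T4_STAG_ERR, T4_SWFLUSH_ERR, T4_UNKNOWN };
    enum wc_stat { WC_SUCCESS, WC_LOC_ACCESS_ERR, WC_WR_FLUSH_ERR,
                   WC_FATAL_ERR };

    static enum wc_stat map_status(enum t4_err status)
    {
        switch (status) {
        case T4_OK:          return WC_SUCCESS;
        case T4_STAG_ERR:    return WC_LOC_ACCESS_ERR;  /* bad STag */
        case T4_SWFLUSH_ERR: return WC_WR_FLUSH_ERR;    /* flushed WR */
        default:             return WC_FATAL_ERR;       /* unknown */
        }
    }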
916 * 0 cqe returned
1002 int entries = attr->cqe; in c4iw_create_cq()
1087 chp->ibcq.cqe = entries - 2; in c4iw_create_cq()
1116 * kernel driver supports 64B CQE in c4iw_create_cq()
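The chp->ibcq.cqe = entries - 2 match reflects the sizing arithmetic earlier in c4iw_create_cq(): as I read the driver, one slot is reserved for the status page and one to distinguish a full queue from an empty one, with the total rounded up to the hardware's granularity, so the depth reported back to the consumer excludes the two reserved slots. A runnable sketch of that arithmetic (the multiple of 16 is the assumption here):

    #include <stdio.h>

    #define HW_CQ_MULTIPLE 16   /* assumed hardware sizing granularity */

    /* Return the usable depth reported via ibcq.cqe; write the number
     * of entries actually allocated through alloc_out. */
    static int usable_cq_depth(int requested, int *alloc_out)
    {
        int entries = requested;

        entries++;              /* status page slot */
        entries++;              /* slot to tell full from empty */
        entries = (entries + HW_CQ_MULTIPLE - 1) /
                  HW_CQ_MULTIPLE * HW_CQ_MULTIPLE;
        *alloc_out = entries;
        return entries - 2;     /* what ibcq.cqe reports back */
    }

    int main(void)
    {
        int alloc;
        int usable = usable_cq_depth(100, &alloc);

        printf("requested 100 -> allocate %d, report %d usable\n",
               alloc, usable);
        return 0;
    }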
1192 /* create a SRQ RECV CQE for srqidx */ in c4iw_flush_srqidx()
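c4iw_flush_srqidx() wraps the insert_recv_cqe() call whose matches open this listing: it takes the CQ lock before the QP lock and synthesizes the SRQ RECV flush CQE for srqidx under both. A user-space sketch of that lock-ordering pattern, with pthread mutexes standing in for the kernel spinlocks and the insert left as a placeholder:

    #include <pthread.h>
    #include <stdint.h>

    struct cq_ctx { pthread_mutex_t lock; };
    struct qp_ctx { pthread_mutex_t lock; };

    static void flush_srqidx(struct cq_ctx *cq, struct qp_ctx *qp,
                             uint32_t srqidx)
    {
        /* Locking hierarchy: CQ lock first, then QP lock. */
        pthread_mutex_lock(&cq->lock);
        pthread_mutex_lock(&qp->lock);

        /* ... insert_recv_cqe() happens here in the driver, producing
         * the software flush CQE for srqidx ... */
        (void)srqidx;

        pthread_mutex_unlock(&qp->lock);
        pthread_mutex_unlock(&cq->lock);
    }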