Lines Matching full:wq

184 static void insert_recv_cqe(struct t4_wq *wq, struct t4_cq *cq, u32 srqidx)  in insert_recv_cqe()  argument
188 pr_debug("wq %p cq %p sw_cidx %u sw_pidx %u\n", in insert_recv_cqe()
189 wq, cq, cq->sw_cidx, cq->sw_pidx); in insert_recv_cqe()
195 CQE_QPID_V(wq->sq.qid)); in insert_recv_cqe()
203 int c4iw_flush_rq(struct t4_wq *wq, struct t4_cq *cq, int count) in c4iw_flush_rq() argument
206 int in_use = wq->rq.in_use - count; in c4iw_flush_rq()
208 pr_debug("wq %p cq %p rq.in_use %u skip count %u\n", in c4iw_flush_rq()
209 wq, cq, wq->rq.in_use, count); in c4iw_flush_rq()
211 insert_recv_cqe(wq, cq, 0); in c4iw_flush_rq()
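
The matches above come from insert_recv_cqe() and c4iw_flush_rq(): when a QP is torn down, receive WRs still outstanding are completed in software by appending flush CQEs (stamped with the QP id from wq->sq.qid) to the CQ's shadow queue. Below is a minimal user-space sketch of that idea; mock_wq/mock_cq and ERR_SWFLUSH are illustrative stand-ins, not the driver's real t4_wq/t4_cq types or T4_ERR_SWFLUSH value, and the srqidx argument for SRQ flushing is omitted.

#include <stdio.h>

#define CQ_DEPTH    16
#define ERR_SWFLUSH  5        /* illustrative stand-in for T4_ERR_SWFLUSH */

struct mock_cqe { unsigned qpid; unsigned status; };

struct mock_cq {
	struct mock_cqe sw_queue[CQ_DEPTH];
	unsigned sw_pidx;                  /* software-queue producer index */
};

struct mock_wq {
	unsigned qid;
	unsigned rq_in_use;                /* posted but uncompleted recv WRs */
};

/* Append one software flush CQE for this QP to the CQ shadow queue. */
static void insert_recv_cqe(struct mock_wq *wq, struct mock_cq *cq)
{
	struct mock_cqe cqe = { .qpid = wq->qid, .status = ERR_SWFLUSH };

	cq->sw_queue[cq->sw_pidx] = cqe;
	cq->sw_pidx = (cq->sw_pidx + 1) % CQ_DEPTH;
}

/* Flush all but 'count' pending recv WRs, as c4iw_flush_rq() does. */
static int flush_rq(struct mock_wq *wq, struct mock_cq *cq, int count)
{
	int in_use = wq->rq_in_use - count;
	int flushed = 0;

	while (in_use-- > 0) {
		insert_recv_cqe(wq, cq);
		flushed++;
	}
	return flushed;
}

int main(void)
{
	struct mock_cq cq = { .sw_pidx = 0 };
	struct mock_wq wq = { .qid = 42, .rq_in_use = 5 };

	printf("flushed %d recv WRs\n", flush_rq(&wq, &cq, 2));  /* -> 3 */
	return 0;
}
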
217 static void insert_sq_cqe(struct t4_wq *wq, struct t4_cq *cq, in insert_sq_cqe() argument
222 pr_debug("wq %p cq %p sw_cidx %u sw_pidx %u\n", in insert_sq_cqe()
223 wq, cq, cq->sw_cidx, cq->sw_pidx); in insert_sq_cqe()
229 CQE_QPID_V(wq->sq.qid)); in insert_sq_cqe()
236 static void advance_oldest_read(struct t4_wq *wq);
241 struct t4_wq *wq = &qhp->wq; in c4iw_flush_sq() local
247 if (wq->sq.flush_cidx == -1) in c4iw_flush_sq()
248 wq->sq.flush_cidx = wq->sq.cidx; in c4iw_flush_sq()
249 idx = wq->sq.flush_cidx; in c4iw_flush_sq()
250 while (idx != wq->sq.pidx) { in c4iw_flush_sq()
251 swsqe = &wq->sq.sw_sq[idx]; in c4iw_flush_sq()
253 insert_sq_cqe(wq, cq, swsqe); in c4iw_flush_sq()
254 if (wq->sq.oldest_read == swsqe) { in c4iw_flush_sq()
255 advance_oldest_read(wq); in c4iw_flush_sq()
258 if (++idx == wq->sq.size) in c4iw_flush_sq()
261 wq->sq.flush_cidx += flushed; in c4iw_flush_sq()
262 if (wq->sq.flush_cidx >= wq->sq.size) in c4iw_flush_sq()
263 wq->sq.flush_cidx -= wq->sq.size; in c4iw_flush_sq()
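
insert_sq_cqe() and c4iw_flush_sq() do the same for the send queue: walk the software SQ from flush_cidx (lazily initialized from cidx on the first flush) up to pidx, emit one flush CQE per entry, and advance flush_cidx with explicit modular wraparound, exactly the arithmetic visible at 261-263 above. A compact sketch of the ring walk, under the same simplified mock-type assumptions:

#include <stdio.h>

#define SQ_SIZE 8

struct mock_swsqe { int flushed; };

struct mock_sq {
	struct mock_swsqe sw_sq[SQ_SIZE];
	int flush_cidx;                    /* -1 until the first flush */
	unsigned cidx, pidx;
};

static int flush_sq(struct mock_sq *sq)
{
	unsigned idx;
	int flushed = 0;

	if (sq->flush_cidx == -1)
		sq->flush_cidx = sq->cidx;
	idx = sq->flush_cidx;
	while (idx != sq->pidx) {
		sq->sw_sq[idx].flushed = 1;  /* stands in for insert_sq_cqe() */
		flushed++;
		if (++idx == SQ_SIZE)
			idx = 0;             /* ring wraparound */
	}
	/* Advance flush_cidx by the number flushed, modulo the ring size. */
	sq->flush_cidx += flushed;
	if (sq->flush_cidx >= SQ_SIZE)
		sq->flush_cidx -= SQ_SIZE;
	return flushed;
}

int main(void)
{
	struct mock_sq sq = { .flush_cidx = -1, .cidx = 6, .pidx = 2 };

	/* Flushes slots 6,7,0,1 -> returns 4, flush_cidx wraps to 2. */
	printf("flushed %d, flush_cidx now %d\n", flush_sq(&sq), sq.flush_cidx);
	return 0;
}
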
267 static void flush_completed_wrs(struct t4_wq *wq, struct t4_cq *cq) in flush_completed_wrs() argument
272 if (wq->sq.flush_cidx == -1) in flush_completed_wrs()
273 wq->sq.flush_cidx = wq->sq.cidx; in flush_completed_wrs()
274 cidx = wq->sq.flush_cidx; in flush_completed_wrs()
276 while (cidx != wq->sq.pidx) { in flush_completed_wrs()
277 swsqe = &wq->sq.sw_sq[cidx]; in flush_completed_wrs()
279 if (++cidx == wq->sq.size) in flush_completed_wrs()
292 if (++cidx == wq->sq.size) in flush_completed_wrs()
294 wq->sq.flush_cidx = cidx; in flush_completed_wrs()
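
flush_completed_wrs() drains completions in submission order: it walks from flush_cidx toward pidx, skips unsignaled WRs (they never produce a CQE), moves completed signaled WRs into the software CQ, and stops at the first signaled WR the hardware has not finished yet. The listing above shows only the loop skeleton, so the signaled/complete branch structure below is a hedged reconstruction with mock fields, not the driver's exact t4_swsqe bookkeeping:

#include <stdio.h>

#define SQ_SIZE 8

struct mock_swsqe { int signaled, complete; };

struct mock_sq {
	struct mock_swsqe sw_sq[SQ_SIZE];
	int flush_cidx;
	unsigned cidx, pidx;
};

static void flush_completed_wrs(struct mock_sq *sq)
{
	int cidx;

	if (sq->flush_cidx == -1)
		sq->flush_cidx = sq->cidx;
	cidx = sq->flush_cidx;

	while (cidx != (int)sq->pidx) {
		struct mock_swsqe *e = &sq->sw_sq[cidx];

		if (!e->signaled) {
			/* Unsignaled WRs produce no CQE; just skip them. */
			if (++cidx == SQ_SIZE)
				cidx = 0;
		} else if (e->complete) {
			printf("moving sq idx %d into swcq\n", cidx);
			if (++cidx == SQ_SIZE)
				cidx = 0;
			sq->flush_cidx = cidx;
		} else {
			break;  /* in-order: stop at first incomplete WR */
		}
	}
}

int main(void)
{
	struct mock_sq sq = { .flush_cidx = -1, .cidx = 0, .pidx = 3 };

	sq.sw_sq[0] = (struct mock_swsqe){ .signaled = 1, .complete = 1 };
	sq.sw_sq[1] = (struct mock_swsqe){ .signaled = 0 };
	sq.sw_sq[2] = (struct mock_swsqe){ .signaled = 1, .complete = 0 };
	flush_completed_wrs(&sq);  /* moves idx 0, skips 1, stops at 2 */
	return 0;
}
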
300 static void create_read_req_cqe(struct t4_wq *wq, struct t4_cqe *hw_cqe, in create_read_req_cqe() argument
303 read_cqe->u.scqe.cidx = wq->sq.oldest_read->idx; in create_read_req_cqe()
304 read_cqe->len = htonl(wq->sq.oldest_read->read_len); in create_read_req_cqe()
312 static void advance_oldest_read(struct t4_wq *wq) in advance_oldest_read() argument
315 u32 rptr = wq->sq.oldest_read - wq->sq.sw_sq + 1; in advance_oldest_read()
317 if (rptr == wq->sq.size) in advance_oldest_read()
319 while (rptr != wq->sq.pidx) { in advance_oldest_read()
320 wq->sq.oldest_read = &wq->sq.sw_sq[rptr]; in advance_oldest_read()
322 if (wq->sq.oldest_read->opcode == FW_RI_READ_REQ) in advance_oldest_read()
324 if (++rptr == wq->sq.size) in advance_oldest_read()
327 wq->sq.oldest_read = NULL; in advance_oldest_read()
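
create_read_req_cqe() re-targets a READ response completion at the READ request WR that caused it (copying the request's sw_sq index and read length, per lines 303-304), and advance_oldest_read() then scans the ring for the next outstanding READ, or sets oldest_read to NULL if none remains. A sketch of the scan, with OP_READ_REQ as an illustrative stand-in for FW_RI_READ_REQ:

#include <stdio.h>

#define SQ_SIZE      8
#define OP_READ_REQ  4   /* illustrative stand-in for FW_RI_READ_REQ */

struct mock_swsqe { int opcode; };

struct mock_sq {
	struct mock_swsqe sw_sq[SQ_SIZE];
	struct mock_swsqe *oldest_read;
	unsigned pidx;
};

static void advance_oldest_read(struct mock_sq *sq)
{
	/* Pointer arithmetic recovers the ring index just past oldest_read. */
	unsigned rptr = sq->oldest_read - sq->sw_sq + 1;

	if (rptr == SQ_SIZE)
		rptr = 0;
	while (rptr != sq->pidx) {
		sq->oldest_read = &sq->sw_sq[rptr];
		if (sq->oldest_read->opcode == OP_READ_REQ)
			return;              /* found the next READ request */
		if (++rptr == SQ_SIZE)
			rptr = 0;
	}
	sq->oldest_read = NULL;  /* no outstanding READ requests left */
}

int main(void)
{
	struct mock_sq sq = { .pidx = 5 };

	sq.sw_sq[1].opcode = OP_READ_REQ;
	sq.sw_sq[4].opcode = OP_READ_REQ;
	sq.oldest_read = &sq.sw_sq[1];
	advance_oldest_read(&sq);
	printf("oldest read now idx %ld\n", sq.oldest_read
	       ? (long)(sq.oldest_read - sq.sw_sq) : -1L);  /* -> 4 */
	return 0;
}
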
362 if (qhp->wq.flushed == 1) in c4iw_flush_hw_cq()
386 if (!qhp->wq.sq.oldest_read->signaled) { in c4iw_flush_hw_cq()
387 advance_oldest_read(&qhp->wq); in c4iw_flush_hw_cq()
395 create_read_req_cqe(&qhp->wq, hw_cqe, &read_cqe); in c4iw_flush_hw_cq()
397 advance_oldest_read(&qhp->wq); in c4iw_flush_hw_cq()
404 swsqe = &qhp->wq.sq.sw_sq[CQE_WRID_SQ_IDX(hw_cqe)]; in c4iw_flush_hw_cq()
407 flush_completed_wrs(&qhp->wq, &chp->cq); in c4iw_flush_hw_cq()
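
c4iw_flush_hw_cq() ties these pieces together for each hardware CQE of a flushing QP: unsignaled READ responses are swallowed (only advancing oldest_read), signaled ones are rewritten into read-request CQEs, the matching sw_sq entry is marked complete, and flush_completed_wrs() then reports whatever is now in order. The condensed mock below keeps only that decision flow; the real function's SRQ handling, generation bits, and cidx bookkeeping are all omitted.

#include <stdio.h>
#include <stdbool.h>

enum cqe_kind { KIND_SEND, KIND_READ_RESP };

struct mock_cqe { enum cqe_kind kind; unsigned sq_idx; };
struct mock_swsqe { bool signaled, complete; };

struct mock_qp {
	bool flushed;                 /* QP already fully flushed? */
	bool oldest_read_signaled;
	struct mock_swsqe sw_sq[8];
};

/* Returns true if the CQE will be reported, false if swallowed. */
static bool flush_one_hw_cqe(struct mock_qp *qp, const struct mock_cqe *cqe)
{
	if (qp->flushed)
		return false;                  /* nothing left to report */

	if (cqe->kind == KIND_READ_RESP) {
		if (!qp->oldest_read_signaled)
			return false;  /* advance_oldest_read(), drop CQE */
		/* Otherwise create_read_req_cqe() would re-target the
		 * completion at the READ request WR here. */
	}
	qp->sw_sq[cqe->sq_idx].complete = true;
	/* flush_completed_wrs() then drains in-order completions. */
	return true;
}

int main(void)
{
	struct mock_qp qp = { .oldest_read_signaled = true };
	struct mock_cqe cqe = { .kind = KIND_READ_RESP, .sq_idx = 3 };

	printf("reported: %d\n", flush_one_hw_cqe(&qp, &cqe));  /* -> 1 */
	return 0;
}
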
422 static int cqe_completes_wr(struct t4_cqe *cqe, struct t4_wq *wq) in cqe_completes_wr() argument
425 WARN_ONCE(1, "Unexpected DRAIN CQE qp id %u!\n", wq->sq.qid); in cqe_completes_wr()
438 if (CQE_SEND_OPCODE(cqe) && RQ_TYPE(cqe) && t4_rq_empty(wq)) in cqe_completes_wr()
443 void c4iw_count_rcqes(struct t4_cq *cq, struct t4_wq *wq, int *count) in c4iw_count_rcqes() argument
454 (CQE_QPID(cqe) == wq->sq.qid) && cqe_completes_wr(cqe, wq)) in c4iw_count_rcqes()
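
cqe_completes_wr() is the predicate behind c4iw_count_rcqes(): it rejects drain CQEs outright (the WARN_ONCE at 425) and, as line 438 shows, a SEND-opcode RQ CQE against an already-empty RQ. The counting loop itself just scans the software CQ between sw_cidx and sw_pidx for RQ-type entries belonging to this QP. A simplified sketch with a mock predicate (the real one also filters READ responses and drain CQEs):

#include <stdio.h>
#include <stdbool.h>

#define CQ_SIZE 16

struct mock_cqe { bool rq_type; unsigned qpid; };

struct mock_cq {
	struct mock_cqe sw_queue[CQ_SIZE];
	unsigned sw_cidx, sw_pidx;
};

static bool cqe_completes_wr(const struct mock_cqe *cqe, unsigned qid)
{
	return cqe->rq_type && cqe->qpid == qid;
}

static int count_rcqes(struct mock_cq *cq, unsigned qid)
{
	unsigned ptr = cq->sw_cidx;
	int count = 0;

	while (ptr != cq->sw_pidx) {
		if (cqe_completes_wr(&cq->sw_queue[ptr], qid))
			count++;
		if (++ptr == CQ_SIZE)
			ptr = 0;
	}
	return count;
}

int main(void)
{
	struct mock_cq cq = { .sw_cidx = 0, .sw_pidx = 3 };

	cq.sw_queue[0] = (struct mock_cqe){ .rq_type = true,  .qpid = 7 };
	cq.sw_queue[1] = (struct mock_cqe){ .rq_type = false, .qpid = 7 };
	cq.sw_queue[2] = (struct mock_cqe){ .rq_type = true,  .qpid = 9 };
	printf("%d recv cqes for qpid 7\n", count_rcqes(&cq, 7));  /* -> 1 */
	return 0;
}
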
533 * supply the wq associated with the qpid.
544 static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe, in poll_cq() argument
566 if (wq == NULL) { in poll_cq()
572 * skip hw cqes if the wq is flushed. in poll_cq()
574 if (wq->flushed && !SW_CQE(hw_cqe)) { in poll_cq()
611 t4_set_wq_in_error(wq, 0); in poll_cq()
622 t4_set_wq_in_error(wq, 0); in poll_cq()
630 if (!wq->sq.oldest_read->signaled) { in poll_cq()
631 advance_oldest_read(wq); in poll_cq()
640 create_read_req_cqe(wq, hw_cqe, &read_cqe); in poll_cq()
642 advance_oldest_read(wq); in poll_cq()
645 if (CQE_STATUS(hw_cqe) || t4_wq_in_error(wq)) { in poll_cq()
647 t4_set_wq_in_error(wq, 0); in poll_cq()
658 * then we complete this with T4_ERR_MSN and mark the wq in error. in poll_cq()
662 CQE_WRID_MSN(hw_cqe) != wq->rq.msn)) { in poll_cq()
663 t4_set_wq_in_error(wq, 0); in poll_cq()
680 if (!SW_CQE(hw_cqe) && (CQE_WRID_SQ_IDX(hw_cqe) != wq->sq.cidx)) { in poll_cq()
685 swsqe = &wq->sq.sw_sq[CQE_WRID_SQ_IDX(hw_cqe)]; in poll_cq()
710 if (idx < wq->sq.cidx) in poll_cq()
711 wq->sq.in_use -= wq->sq.size + idx - wq->sq.cidx; in poll_cq()
713 wq->sq.in_use -= idx - wq->sq.cidx; in poll_cq()
715 wq->sq.cidx = (uint16_t)idx; in poll_cq()
716 pr_debug("completing sq idx %u\n", wq->sq.cidx); in poll_cq()
717 *cookie = wq->sq.sw_sq[wq->sq.cidx].wr_id; in poll_cq()
719 c4iw_log_wr_stats(wq, hw_cqe); in poll_cq()
720 t4_sq_consume(wq); in poll_cq()
723 pr_debug("completing rq idx %u\n", wq->rq.cidx); in poll_cq()
724 *cookie = wq->rq.sw_rq[wq->rq.cidx].wr_id; in poll_cq()
726 c4iw_log_wr_stats(wq, hw_cqe); in poll_cq()
727 t4_rq_consume(wq); in poll_cq()
731 wq->rq.msn++; in poll_cq()
739 flush_completed_wrs(wq, cq); in poll_cq()
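
poll_cq() is the main consumption path; the fragments above show its error handling (t4_set_wq_in_error() on bad status or an MSN mismatch) and the SQ/RQ consume steps. One detail worth isolating is the in_use adjustment at 710-715: a single completion can retire several slots at once, because intervening unsignaled WRs produce no CQE of their own, so the retired count is the ring distance from cidx to the reported idx, with wraparound, before the wr_id cookie at the new cidx is handed back. A sketch of just that arithmetic, with mock types:

#include <stdio.h>

#define SQ_SIZE 8

struct mock_sq { unsigned in_use, cidx; };

static void consume_up_to(struct mock_sq *sq, unsigned idx)
{
	if (idx < sq->cidx)
		sq->in_use -= SQ_SIZE + idx - sq->cidx;  /* wrapped distance */
	else
		sq->in_use -= idx - sq->cidx;
	sq->cidx = idx;
}

int main(void)
{
	struct mock_sq sq = { .in_use = 5, .cidx = 6 };

	consume_up_to(&sq, 2);  /* slots 6,7,0,1 retired: in_use 5 -> 1 */
	printf("in_use %u cidx %u\n", sq.in_use, sq.cidx);
	return 0;
}
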
758 struct t4_wq *wq = qhp ? &qhp->wq : NULL; in __c4iw_poll_cq_one() local
764 ret = poll_cq(wq, &(chp->cq), &cqe, &cqe_flushed, &cookie, &credit, in __c4iw_poll_cq_one()
765 srq ? &srq->wq : NULL); in __c4iw_poll_cq_one()
778 srq->wq.in_use < srq->srq_limit) in __c4iw_poll_cq_one()
1193 insert_recv_cqe(&qhp->wq, &rchp->cq, srqidx); in c4iw_flush_srqidx()
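
Finally, __c4iw_poll_cq_one() hands poll_cq() the right wq (the QP's, or the SRQ's shadow wq for SRQ completions) and, after reaping, checks whether the SRQ has dropped below its armed limit, while c4iw_flush_srqidx() reuses insert_recv_cqe() with a nonzero srqidx to flush one specific SRQ entry. A one-shot sketch of the limit check, with mock types and a printf standing in for the driver's async limit-reached event:

#include <stdio.h>
#include <stdbool.h>

struct mock_srq { bool armed; unsigned in_use, srq_limit; };

static void srq_check_limit(struct mock_srq *srq)
{
	if (srq->armed && srq->in_use < srq->srq_limit) {
		srq->armed = false;              /* one-shot until re-armed */
		printf("SRQ limit reached (%u < %u)\n",
		       srq->in_use, srq->srq_limit);
	}
}

int main(void)
{
	struct mock_srq srq = { .armed = true, .in_use = 3, .srq_limit = 4 };

	srq_check_limit(&srq);  /* fires: 3 < 4 */
	srq_check_limit(&srq);  /* disarmed: silent */
	return 0;
}
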