Lines matching "qp" in drivers/infiniband/sw/rxe/rxe_comp.c

117 struct rxe_qp *qp = from_timer(qp, t, retrans_timer); in retransmit_timer() local
120 rxe_dbg_qp(qp, "retransmit timer fired\n"); in retransmit_timer()
122 spin_lock_irqsave(&qp->state_lock, flags); in retransmit_timer()
123 if (qp->valid) { in retransmit_timer()
124 qp->comp.timeout = 1; in retransmit_timer()
125 rxe_sched_task(&qp->send_task); in retransmit_timer()
127 spin_unlock_irqrestore(&qp->state_lock, flags); in retransmit_timer()
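The retransmit timer above follows the usual kernel timer pattern: the timer_list is embedded in the QP and the callback recovers the containing object with from_timer() before taking the state lock. A minimal sketch of that pattern (demo_qp and demo_retransmit_timer are illustrative names, not the driver's):

#include <linux/timer.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct demo_qp {
	spinlock_t state_lock;
	bool valid;
	struct timer_list retrans_timer;
};

/* Timer callback: recover the enclosing QP from the embedded timer_list. */
static void demo_retransmit_timer(struct timer_list *t)
{
	struct demo_qp *qp = from_timer(qp, t, retrans_timer);
	unsigned long flags;

	spin_lock_irqsave(&qp->state_lock, flags);
	if (qp->valid) {
		/* mark the timeout and reschedule the completer task here */
	}
	spin_unlock_irqrestore(&qp->state_lock, flags);
}

/* Bind the timer to its callback once, at QP init time. */
static void demo_qp_timer_init(struct demo_qp *qp)
{
	spin_lock_init(&qp->state_lock);
	timer_setup(&qp->retrans_timer, demo_retransmit_timer, 0);
}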
130 void rxe_comp_queue_pkt(struct rxe_qp *qp, struct sk_buff *skb) in rxe_comp_queue_pkt() argument
133 skb_queue_tail(&qp->resp_pkts, skb); in rxe_comp_queue_pkt()
134 rxe_sched_task(&qp->send_task); in rxe_comp_queue_pkt()
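rxe_comp_queue_pkt() is the producer half of a plain sk_buff queue handoff: responder packets are appended with skb_queue_tail() and the completer task drains them with skb_dequeue() (line 678 below). Both helpers take the queue's internal lock, so producer and consumer can run in different contexts. A self-contained sketch with hypothetical names:

#include <linux/skbuff.h>

static struct sk_buff_head demo_resp_pkts;	/* per-QP in the real driver */

static void demo_queue_init(void)
{
	skb_queue_head_init(&demo_resp_pkts);
}

/* Producer (packet-receive path): queue the packet, then wake the consumer. */
static void demo_queue_pkt(struct sk_buff *skb)
{
	skb_queue_tail(&demo_resp_pkts, skb);
	/* rxe_sched_task(&qp->send_task) would run the completer here */
}

/* Consumer (completer task): pull packets off one at a time. */
static void demo_drain_pkts(void)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&demo_resp_pkts)))
		kfree_skb(skb);	/* the real code processes the ack first */
}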
137 static inline enum comp_state get_wqe(struct rxe_qp *qp, in get_wqe() argument
146 wqe = queue_head(qp->sq.queue, QUEUE_TYPE_FROM_CLIENT); in get_wqe()
165 static inline void reset_retry_counters(struct rxe_qp *qp) in reset_retry_counters() argument
167 qp->comp.retry_cnt = qp->attr.retry_cnt; in reset_retry_counters()
168 qp->comp.rnr_retry = qp->attr.rnr_retry; in reset_retry_counters()
169 qp->comp.started_retry = 0; in reset_retry_counters()
172 static inline enum comp_state check_psn(struct rxe_qp *qp, in check_psn() argument
187 reset_retry_counters(qp); in check_psn()
195 diff = psn_compare(pkt->psn, qp->comp.psn); in check_psn()
203 (qp->comp.opcode == IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST || in check_psn()
204 qp->comp.opcode == IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE)) in check_psn()
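check_psn() compares packet and completer PSNs with psn_compare(), a wrap-aware comparison of 24-bit BTH sequence numbers. The helper's body is not part of this listing; one common way to write such a comparison (a sketch, not necessarily the driver's exact code) is to subtract and shift the 24-bit difference into the sign bit:

#include <linux/types.h>

/* PSNs are 24-bit serial numbers. Returns <0, 0 or >0, treating a forward
 * distance of less than 2^23 as "ahead" even when the counter has wrapped. */
static inline int demo_psn_compare(u32 psn_a, u32 psn_b)
{
	s32 diff = (psn_a - psn_b) << 8;	/* bit 23 becomes the sign bit */

	return diff;
}

With this encoding demo_psn_compare(0x000000, 0xffffff) is positive, so a PSN that has just wrapped still compares as newer than the PSN right before the wrap.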
215 static inline enum comp_state check_ack(struct rxe_qp *qp, in check_ack() argument
221 struct rxe_dev *rxe = to_rdev(qp->ibqp.device); in check_ack()
224 switch (qp->comp.opcode) { in check_ack()
281 reset_retry_counters(qp); in check_ack()
293 reset_retry_counters(qp); in check_ack()
300 reset_retry_counters(qp); in check_ack()
313 if (psn_compare(pkt->psn, qp->comp.psn) > 0) { in check_ack()
316 qp->comp.psn = pkt->psn; in check_ack()
317 if (qp->req.wait_psn) { in check_ack()
318 qp->req.wait_psn = 0; in check_ack()
319 qp->req.again = 1; in check_ack()
337 rxe_dbg_qp(qp, "unexpected nak %x\n", syn); in check_ack()
348 rxe_dbg_qp(qp, "unexpected opcode\n"); in check_ack()
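The NAK handling in check_ack() (line 337) keys off the AETH syndrome byte carried in acknowledge packets. Per the IBA spec the top three bits select the class (ACK, RNR NAK, NAK) and the low five bits carry the credit count, RNR timer value or NAK code. A small decoding sketch (names are illustrative; constants follow the spec rather than the driver's headers):

/* Classify an 8-bit AETH syndrome: bits 7..5 select the type,
 * bits 4..0 hold credits, the RNR timer field or the NAK code. */
enum demo_ack_kind { DEMO_ACK, DEMO_RNR_NAK, DEMO_NAK, DEMO_RESERVED };

static enum demo_ack_kind demo_decode_syndrome(unsigned char syn)
{
	switch (syn >> 5) {
	case 0:
		return DEMO_ACK;
	case 1:
		return DEMO_RNR_NAK;
	case 3:
		return DEMO_NAK;
	default:
		return DEMO_RESERVED;
	}
}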
354 static inline enum comp_state do_read(struct rxe_qp *qp, in do_read() argument
360 ret = copy_data(qp->pd, IB_ACCESS_LOCAL_WRITE, in do_read()
374 static inline enum comp_state do_atomic(struct rxe_qp *qp, in do_atomic() argument
382 ret = copy_data(qp->pd, IB_ACCESS_LOCAL_WRITE, in do_atomic()
393 static void make_send_cqe(struct rxe_qp *qp, struct rxe_send_wqe *wqe, in make_send_cqe() argument
401 if (!qp->is_user) { in make_send_cqe()
404 wc->qp = &qp->ibqp; in make_send_cqe()
408 uwc->qp_num = qp->ibqp.qp_num; in make_send_cqe()
412 if (!qp->is_user) { in make_send_cqe()
427 rxe_err_qp(qp, "non-flush error status = %d\n", in make_send_cqe()
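make_send_cqe() fills either a kernel-format or a user-format work completion depending on qp->is_user, which is why line 404 stores an ib_qp pointer (wc->qp) while line 408 stores only the QP number (uwc->qp_num). A sketch of the kind of completion container this implies, assuming it is a union of the two representations:

#include <rdma/ib_verbs.h>
#include <rdma/ib_user_verbs.h>

/* Illustrative only: a single CQE that can hold either the in-kernel
 * ib_wc or the ABI-stable ib_uverbs_wc layout. */
struct demo_cqe {
	union {
		struct ib_wc		ibwc;	/* consumed by kernel ULPs */
		struct ib_uverbs_wc	uibwc;	/* copied out to user space */
	};
};

The split matters because kernel consumers can follow the ib_qp pointer directly, while user-space completions must be expressed purely in terms of numbers and handles.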
440 static void do_complete(struct rxe_qp *qp, struct rxe_send_wqe *wqe) in do_complete() argument
442 struct rxe_dev *rxe = to_rdev(qp->ibqp.device); in do_complete()
447 post = ((qp->sq_sig_type == IB_SIGNAL_ALL_WR) || in do_complete()
452 make_send_cqe(qp, wqe, &cqe); in do_complete()
454 queue_advance_consumer(qp->sq.queue, QUEUE_TYPE_FROM_CLIENT); in do_complete()
457 rxe_cq_post(qp->scq, &cqe, 0); in do_complete()
468 if (qp->req.wait_fence) { in do_complete()
469 qp->req.wait_fence = 0; in do_complete()
470 qp->req.again = 1; in do_complete()
474 static void comp_check_sq_drain_done(struct rxe_qp *qp) in comp_check_sq_drain_done() argument
478 spin_lock_irqsave(&qp->state_lock, flags); in comp_check_sq_drain_done()
479 if (unlikely(qp_state(qp) == IB_QPS_SQD)) { in comp_check_sq_drain_done()
480 if (qp->attr.sq_draining && qp->comp.psn == qp->req.psn) { in comp_check_sq_drain_done()
481 qp->attr.sq_draining = 0; in comp_check_sq_drain_done()
482 spin_unlock_irqrestore(&qp->state_lock, flags); in comp_check_sq_drain_done()
484 if (qp->ibqp.event_handler) { in comp_check_sq_drain_done()
487 ev.device = qp->ibqp.device; in comp_check_sq_drain_done()
488 ev.element.qp = &qp->ibqp; in comp_check_sq_drain_done()
490 qp->ibqp.event_handler(&ev, in comp_check_sq_drain_done()
491 qp->ibqp.qp_context); in comp_check_sq_drain_done()
496 spin_unlock_irqrestore(&qp->state_lock, flags); in comp_check_sq_drain_done()
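comp_check_sq_drain_done() raises an affiliated asynchronous event once the last outstanding PSN has been acknowledged in the SQD state. The line that sets the event type does not match "qp" and so is absent from this listing; the dispatch looks roughly like the sketch below (assuming the event raised is IB_EVENT_SQ_DRAINED):

#include <rdma/ib_verbs.h>

/* Sketch: tell the consumer that the send queue has finished draining. */
static void demo_raise_sq_drained(struct ib_qp *ibqp)
{
	struct ib_event ev;

	if (!ibqp->event_handler)
		return;

	ev.device = ibqp->device;
	ev.element.qp = ibqp;
	ev.event = IB_EVENT_SQ_DRAINED;

	ibqp->event_handler(&ev, ibqp->qp_context);
}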
499 static inline enum comp_state complete_ack(struct rxe_qp *qp, in complete_ack() argument
505 atomic_inc(&qp->req.rd_atomic); in complete_ack()
506 if (qp->req.need_rd_atomic) { in complete_ack()
507 qp->comp.timeout_retry = 0; in complete_ack()
508 qp->req.need_rd_atomic = 0; in complete_ack()
509 qp->req.again = 1; in complete_ack()
513 comp_check_sq_drain_done(qp); in complete_ack()
515 do_complete(qp, wqe); in complete_ack()
517 if (psn_compare(pkt->psn, qp->comp.psn) >= 0) in complete_ack()
523 static inline enum comp_state complete_wqe(struct rxe_qp *qp, in complete_wqe() argument
528 if (psn_compare(wqe->last_psn, qp->comp.psn) >= 0) { in complete_wqe()
529 qp->comp.psn = (wqe->last_psn + 1) & BTH_PSN_MASK; in complete_wqe()
530 qp->comp.opcode = -1; in complete_wqe()
533 if (qp->req.wait_psn) { in complete_wqe()
534 qp->req.wait_psn = 0; in complete_wqe()
535 qp->req.again = 1; in complete_wqe()
539 do_complete(qp, wqe); in complete_wqe()
545 static void drain_resp_pkts(struct rxe_qp *qp) in drain_resp_pkts() argument
549 while ((skb = skb_dequeue(&qp->resp_pkts))) { in drain_resp_pkts()
550 rxe_put(qp); in drain_resp_pkts()
552 ib_device_put(qp->ibqp.device); in drain_resp_pkts()
557 static int flush_send_wqe(struct rxe_qp *qp, struct rxe_send_wqe *wqe) in flush_send_wqe() argument
564 if (qp->is_user) { in flush_send_wqe()
567 uwc->qp_num = qp->ibqp.qp_num; in flush_send_wqe()
571 wc->qp = &qp->ibqp; in flush_send_wqe()
574 err = rxe_cq_post(qp->scq, &cqe, 0); in flush_send_wqe()
576 rxe_dbg_cq(qp->scq, "post cq failed, err = %d\n", err); in flush_send_wqe()
585 static void flush_send_queue(struct rxe_qp *qp, bool notify) in flush_send_queue() argument
588 struct rxe_queue *q = qp->sq.queue; in flush_send_queue()
592 if (!qp->sq.queue) in flush_send_queue()
597 err = flush_send_wqe(qp, wqe); in flush_send_queue()
608 struct rxe_qp *qp = pkt->qp; in free_pkt() local
609 struct ib_device *dev = qp->ibqp.device; in free_pkt()
612 rxe_put(qp); in free_pkt()
617 * - QP is type RC
622 * - the QP is alive
624 static void reset_retry_timer(struct rxe_qp *qp) in reset_retry_timer() argument
628 if (qp_type(qp) == IB_QPT_RC && qp->qp_timeout_jiffies) { in reset_retry_timer()
629 spin_lock_irqsave(&qp->state_lock, flags); in reset_retry_timer()
630 if (qp_state(qp) >= IB_QPS_RTS && in reset_retry_timer()
631 psn_compare(qp->req.psn, qp->comp.psn) > 0) in reset_retry_timer()
632 mod_timer(&qp->retrans_timer, in reset_retry_timer()
633 jiffies + qp->qp_timeout_jiffies); in reset_retry_timer()
634 spin_unlock_irqrestore(&qp->state_lock, flags); in reset_retry_timer()
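reset_retry_timer() arms the timer only when qp->qp_timeout_jiffies is non-zero (line 628). That value comes from the IB Local ACK Timeout attribute, which the spec encodes as 4.096 us * 2^timeout, with 0 meaning no timeout at all. A conversion sketch with a hypothetical helper name:

#include <linux/jiffies.h>
#include <linux/types.h>

/* Convert the 5-bit IB timeout exponent into jiffies; 0 disables the timer. */
static unsigned long demo_timeout_to_jiffies(u8 timeout)
{
	unsigned long j;

	if (!timeout)
		return 0;

	/* 4.096 us * 2^timeout, expressed in nanoseconds */
	j = nsecs_to_jiffies(4096ULL << timeout);

	return j ? j : 1;	/* never round an enabled timeout down to zero */
}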
638 int rxe_completer(struct rxe_qp *qp) in rxe_completer() argument
640 struct rxe_dev *rxe = to_rdev(qp->ibqp.device); in rxe_completer()
648 qp->req.again = 0; in rxe_completer()
650 spin_lock_irqsave(&qp->state_lock, flags); in rxe_completer()
651 if (!qp->valid || qp_state(qp) == IB_QPS_ERR || in rxe_completer()
652 qp_state(qp) == IB_QPS_RESET) { in rxe_completer()
653 bool notify = qp->valid && (qp_state(qp) == IB_QPS_ERR); in rxe_completer()
655 drain_resp_pkts(qp); in rxe_completer()
656 flush_send_queue(qp, notify); in rxe_completer()
657 spin_unlock_irqrestore(&qp->state_lock, flags); in rxe_completer()
660 spin_unlock_irqrestore(&qp->state_lock, flags); in rxe_completer()
662 if (qp->comp.timeout) { in rxe_completer()
663 qp->comp.timeout_retry = 1; in rxe_completer()
664 qp->comp.timeout = 0; in rxe_completer()
666 qp->comp.timeout_retry = 0; in rxe_completer()
669 if (qp->req.need_retry) in rxe_completer()
675 rxe_dbg_qp(qp, "state = %s\n", comp_state_name[state]); in rxe_completer()
678 skb = skb_dequeue(&qp->resp_pkts); in rxe_completer()
681 qp->comp.timeout_retry = 0; in rxe_completer()
687 state = get_wqe(qp, pkt, &wqe); in rxe_completer()
691 state = check_psn(qp, pkt, wqe); in rxe_completer()
695 state = check_ack(qp, pkt, wqe); in rxe_completer()
699 state = do_read(qp, pkt, wqe); in rxe_completer()
703 state = do_atomic(qp, pkt, wqe); in rxe_completer()
715 state = complete_ack(qp, pkt, wqe); in rxe_completer()
719 state = complete_wqe(qp, pkt, wqe); in rxe_completer()
724 qp->comp.opcode = -1; in rxe_completer()
726 qp->comp.opcode = pkt->opcode; in rxe_completer()
728 if (psn_compare(pkt->psn, qp->comp.psn) >= 0) in rxe_completer()
729 qp->comp.psn = (pkt->psn + 1) & BTH_PSN_MASK; in rxe_completer()
731 if (qp->req.wait_psn) { in rxe_completer()
732 qp->req.wait_psn = 0; in rxe_completer()
733 qp->req.again = 1; in rxe_completer()
743 if (qp->comp.timeout_retry && wqe) { in rxe_completer()
748 reset_retry_timer(qp); in rxe_completer()
767 if (qp->comp.started_retry && in rxe_completer()
768 !qp->comp.timeout_retry) in rxe_completer()
771 if (qp->comp.retry_cnt > 0) { in rxe_completer()
772 if (qp->comp.retry_cnt != 7) in rxe_completer()
773 qp->comp.retry_cnt--; in rxe_completer()
779 if (psn_compare(qp->req.psn, in rxe_completer()
780 qp->comp.psn) > 0) { in rxe_completer()
786 qp->req.need_retry = 1; in rxe_completer()
787 qp->comp.started_retry = 1; in rxe_completer()
788 qp->req.again = 1; in rxe_completer()
801 if (qp->comp.rnr_retry > 0) { in rxe_completer()
802 if (qp->comp.rnr_retry != 7) in rxe_completer()
803 qp->comp.rnr_retry--; in rxe_completer()
808 qp->req.wait_for_rnr_timer = 1; in rxe_completer()
809 rxe_dbg_qp(qp, "set rnr nak timer\n"); in rxe_completer()
811 mod_timer(&qp->rnr_nak_timer, in rxe_completer()
825 do_complete(qp, wqe); in rxe_completer()
826 rxe_qp_error(qp); in rxe_completer()
839 ret = (qp->req.again) ? 0 : -EAGAIN; in rxe_completer()
841 qp->req.again = 0; in rxe_completer()
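Lines 529 and 729 above both advance the completer PSN with "(psn + 1) & BTH_PSN_MASK". Assuming BTH_PSN_MASK is the 24-bit mask 0x00ffffff (the width of the PSN field in the base transport header), the masking is what makes the sequence wrap cleanly, as this stand-alone example shows:

#include <stdio.h>
#include <stdint.h>

#define DEMO_BTH_PSN_MASK 0x00ffffff	/* 24-bit PSN field */

int main(void)
{
	uint32_t psn = 0x00ffffff;			/* highest valid PSN */
	uint32_t next = (psn + 1) & DEMO_BTH_PSN_MASK;	/* wraps back to 0 */

	printf("next psn after 0x%06x is 0x%06x\n", psn, next);
	return 0;
}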