Lines Matching full:wr
686 /* Complete SQ WR's without processing */
687 static int siw_sq_flush_wr(struct siw_qp *qp, const struct ib_send_wr *wr, in siw_sq_flush_wr() argument
692 while (wr) { in siw_sq_flush_wr()
695 switch (wr->opcode) { in siw_sq_flush_wr()
725 sqe.id = wr->wr_id; in siw_sq_flush_wr()
731 *bad_wr = wr; in siw_sq_flush_wr()
734 wr = wr->next; in siw_sq_flush_wr()
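The helper above walks the NULL-terminated WR chain and completes each entry with a flush status instead of executing it. A minimal sketch of that pattern, written with userspace verbs types for readability; complete_flushed() is a hypothetical callback standing in for the driver's CQE helper, not part of any real API:

	#include <infiniband/verbs.h>
	#include <stdint.h>

	/* Sketch of the flush loop: complete every WR in the chain as
	 * flushed; on failure, *bad_wr names the first WR left
	 * uncompleted, matching the *bad_wr assignment above.
	 */
	static int flush_wr_list(const struct ibv_send_wr *wr,
				 const struct ibv_send_wr **bad_wr,
				 int (*complete_flushed)(uint64_t wr_id))
	{
		int rv = 0;

		while (wr) {
			rv = complete_flushed(wr->wr_id); /* CQE w/ flush status */
			if (rv) {
				*bad_wr = wr;
				break;
			}
			wr = wr->next;
		}
		return rv;
	}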
739 /* Complete RQ WR's without processing */
740 static int siw_rq_flush_wr(struct siw_qp *qp, const struct ib_recv_wr *wr, in siw_rq_flush_wr() argument
746 while (wr) { in siw_rq_flush_wr()
747 rqe.id = wr->wr_id; in siw_rq_flush_wr()
751 *bad_wr = wr; in siw_rq_flush_wr()
754 wr = wr->next; in siw_rq_flush_wr()
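siw_rq_flush_wr() applies the same pattern to receive WRs. From the consumer's side, WRs flushed this way surface as completions with a flush error status. A small libibverbs illustration, assuming an existing CQ and not specific to siw:

	#include <infiniband/verbs.h>
	#include <stdio.h>

	/* Drain a CQ; flushed WRs complete with IBV_WC_WR_FLUSH_ERR
	 * but still carry the wr_id they were posted with.
	 */
	static void drain_cq(struct ibv_cq *cq)
	{
		struct ibv_wc wc;

		while (ibv_poll_cq(cq, 1, &wc) > 0) {
			if (wc.status == IBV_WC_WR_FLUSH_ERR)
				printf("wr_id %llu flushed\n",
				       (unsigned long long)wc.wr_id);
		}
	}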
762	 * Post a list of S-WR's to an SQ.
765	 * @wr: NULL-terminated list of user WR's
766 * @bad_wr: Points to failing WR in case of synchronous failure.
768 int siw_post_send(struct ib_qp *base_qp, const struct ib_send_wr *wr, in siw_post_send() argument
777 if (wr && !rdma_is_kernel_res(&qp->base_qp.res)) { in siw_post_send()
778 siw_dbg_qp(qp, "wr must be empty for user mapped sq\n"); in siw_post_send()
779 *bad_wr = wr; in siw_post_send()
798 rv = siw_sq_flush_wr(qp, wr, bad_wr); in siw_post_send()
802 *bad_wr = wr; in siw_post_send()
810 * Immediately flush this WR to CQ, if QP in siw_post_send()
812	 * be empty, so WR completes in-order.	in siw_post_send()
816 rv = siw_sq_flush_wr(qp, wr, bad_wr); in siw_post_send()
820 *bad_wr = wr; in siw_post_send()
828 while (wr) { in siw_post_send()
837 if (wr->num_sge > qp->attrs.sq_max_sges) { in siw_post_send()
838 siw_dbg_qp(qp, "too many sge's: %d\n", wr->num_sge); in siw_post_send()
842 sqe->id = wr->wr_id; in siw_post_send()
844 if ((wr->send_flags & IB_SEND_SIGNALED) || in siw_post_send()
848 if (wr->send_flags & IB_SEND_FENCE) in siw_post_send()
851 switch (wr->opcode) { in siw_post_send()
854 if (wr->send_flags & IB_SEND_SOLICITED) in siw_post_send()
857 if (!(wr->send_flags & IB_SEND_INLINE)) { in siw_post_send()
858 siw_copy_sgl(wr->sg_list, sqe->sge, in siw_post_send()
859 wr->num_sge); in siw_post_send()
860 sqe->num_sge = wr->num_sge; in siw_post_send()
862 rv = siw_copy_inline_sgl(wr, sqe); in siw_post_send()
870 if (wr->opcode == IB_WR_SEND) in siw_post_send()
874 sqe->rkey = wr->ex.invalidate_rkey; in siw_post_send()
887 if (unlikely(wr->num_sge != 1)) { in siw_post_send()
891 siw_copy_sgl(wr->sg_list, &sqe->sge[0], 1); in siw_post_send()
895 sqe->raddr = rdma_wr(wr)->remote_addr; in siw_post_send()
896 sqe->rkey = rdma_wr(wr)->rkey; in siw_post_send()
899 if (wr->opcode == IB_WR_RDMA_READ) in siw_post_send()
906 if (!(wr->send_flags & IB_SEND_INLINE)) { in siw_post_send()
907 siw_copy_sgl(wr->sg_list, &sqe->sge[0], in siw_post_send()
908 wr->num_sge); in siw_post_send()
909 sqe->num_sge = wr->num_sge; in siw_post_send()
911 rv = siw_copy_inline_sgl(wr, sqe); in siw_post_send()
919 sqe->raddr = rdma_wr(wr)->remote_addr; in siw_post_send()
920 sqe->rkey = rdma_wr(wr)->rkey; in siw_post_send()
925 sqe->base_mr = (uintptr_t)reg_wr(wr)->mr; in siw_post_send()
926 sqe->rkey = reg_wr(wr)->key; in siw_post_send()
927 sqe->access = reg_wr(wr)->access & IWARP_ACCESS_MASK; in siw_post_send()
932 sqe->rkey = wr->ex.invalidate_rkey; in siw_post_send()
937 siw_dbg_qp(qp, "ib wr type %d unsupported\n", in siw_post_send()
938 wr->opcode); in siw_post_send()
954 wr = wr->next; in siw_post_send()
995 *bad_wr = wr; in siw_post_send()
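The per-opcode handling above (SEND, RDMA READ/WRITE, MR registration, invalidate) copies the relevant fields of each ib_send_wr into an SQE. The userspace mirror of this contract is ibv_post_send(); a minimal sketch, assuming qp, mr and buf were set up elsewhere and using an arbitrary wr_id:

	#include <infiniband/verbs.h>
	#include <stdint.h>

	/* Post one signaled SEND.  On error, *bad_wr points at the
	 * first failing WR and no later WR in the chain is posted.
	 */
	static int post_one_send(struct ibv_qp *qp, struct ibv_mr *mr,
				 void *buf, uint32_t len)
	{
		struct ibv_sge sge = {
			.addr   = (uintptr_t)buf,
			.length = len,
			.lkey   = mr->lkey,
		};
		struct ibv_send_wr wr = {
			.wr_id      = 1,  /* arbitrary; echoed in the CQE */
			.sg_list    = &sge,
			.num_sge    = 1,  /* bounded by the QP's max_send_sge */
			.opcode     = IBV_WR_SEND,
			.send_flags = IBV_SEND_SIGNALED,
		};
		struct ibv_send_wr *bad_wr;

		return ibv_post_send(qp, &wr, &bad_wr);
	}

For RDMA operations, the remote buffer travels in wr.wr.rdma.remote_addr and wr.wr.rdma.rkey, which is what the rdma_wr(wr)->remote_addr and rdma_wr(wr)->rkey accesses above copy into the SQE.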
1002	 * Post a list of R-WR's to an RQ.
1005	 * @wr: NULL-terminated list of user WR's
1006 * @bad_wr: Points to failing WR in case of synchronous failure.
1008 int siw_post_receive(struct ib_qp *base_qp, const struct ib_recv_wr *wr, in siw_post_receive() argument
1016 *bad_wr = wr; in siw_post_receive()
1021 *bad_wr = wr; in siw_post_receive()
1040 rv = siw_rq_flush_wr(qp, wr, bad_wr); in siw_post_receive()
1044 *bad_wr = wr; in siw_post_receive()
1052 * Immediately flush this WR to CQ, if QP in siw_post_receive()
1054	 * be empty, so WR completes in-order.	in siw_post_receive()
1058 rv = siw_rq_flush_wr(qp, wr, bad_wr); in siw_post_receive()
1062 *bad_wr = wr; in siw_post_receive()
1074 while (wr) { in siw_post_receive()
1083 if (wr->num_sge > qp->attrs.rq_max_sges) { in siw_post_receive()
1084 siw_dbg_qp(qp, "too many sge's: %d\n", wr->num_sge); in siw_post_receive()
1088 rqe->id = wr->wr_id; in siw_post_receive()
1089 rqe->num_sge = wr->num_sge; in siw_post_receive()
1090 siw_copy_sgl(wr->sg_list, rqe->sge, wr->num_sge); in siw_post_receive()
1098 wr = wr->next; in siw_post_receive()
1106 *bad_wr = wr; in siw_post_receive()
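siw_post_receive() only copies wr_id, num_sge and the SGL into an RQE; there are no opcodes on the receive side. A matching userspace sketch with ibv_post_recv(), under the same assumptions as the send example:

	#include <infiniband/verbs.h>
	#include <stdint.h>

	/* Post one receive buffer; mirrors the id/num_sge/sg_list
	 * copy shown above.
	 */
	static int post_one_recv(struct ibv_qp *qp, struct ibv_mr *mr,
				 void *buf, uint32_t len)
	{
		struct ibv_sge sge = {
			.addr   = (uintptr_t)buf,
			.length = len,
			.lkey   = mr->lkey,
		};
		struct ibv_recv_wr wr = {
			.wr_id   = 2,  /* arbitrary */
			.sg_list = &sge,
			.num_sge = 1,  /* bounded by the QP's max_recv_sge */
		};
		struct ibv_recv_wr *bad_wr;

		return ibv_post_recv(qp, &wr, &bad_wr);
	}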
1763 * @wr: List of R-WR's
1764 * @bad_wr: Updated to failing WR if posting fails.
1766 int siw_post_srq_recv(struct ib_srq *base_srq, const struct ib_recv_wr *wr, in siw_post_srq_recv() argument
1786 while (wr) { in siw_post_srq_recv()
1795 if (unlikely(wr->num_sge > srq->max_sge)) { in siw_post_srq_recv()
1797 "[SRQ]: too many sge's: %d\n", wr->num_sge); in siw_post_srq_recv()
1801 rqe->id = wr->wr_id; in siw_post_srq_recv()
1802 rqe->num_sge = wr->num_sge; in siw_post_srq_recv()
1803 siw_copy_sgl(wr->sg_list, rqe->sge, wr->num_sge); in siw_post_srq_recv()
1811 wr = wr->next; in siw_post_srq_recv()
1817 *bad_wr = wr; in siw_post_srq_recv()
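The SRQ path keeps the same WR-chain and bad_wr contract, but the posted buffers are shared by every QP attached to the SRQ. A hedged userspace sketch with ibv_post_srq_recv(), again assuming srq, mr and buf already exist:

	#include <infiniband/verbs.h>
	#include <stdint.h>

	/* Post one buffer to a shared receive queue. */
	static int post_one_srq_recv(struct ibv_srq *srq, struct ibv_mr *mr,
				     void *buf, uint32_t len)
	{
		struct ibv_sge sge = {
			.addr   = (uintptr_t)buf,
			.length = len,
			.lkey   = mr->lkey,
		};
		struct ibv_recv_wr wr = {
			.wr_id   = 3,  /* arbitrary */
			.sg_list = &sge,
			.num_sge = 1,  /* bounded by the SRQ's max_sge */
		};
		struct ibv_recv_wr *bad_wr;

		return ibv_post_srq_recv(srq, &wr, &bad_wr);
	}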