Lines matching full:qp: occurrences of the qp identifier in the SoftRoCE (rxe) responder, drivers/infiniband/sw/rxe/rxe_resp.c (the leading numbers are line numbers in that file).

50 void rxe_resp_queue_pkt(struct rxe_qp *qp, struct sk_buff *skb)  in rxe_resp_queue_pkt()  argument
52 skb_queue_tail(&qp->req_pkts, skb); in rxe_resp_queue_pkt()
53 rxe_sched_task(&qp->recv_task); in rxe_resp_queue_pkt()
56 static inline enum resp_states get_req(struct rxe_qp *qp, in get_req() argument
61 skb = skb_peek(&qp->req_pkts); in get_req()
67 return (qp->resp.res) ? RESPST_READ_REPLY : RESPST_CHK_PSN; in get_req()
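
rxe_resp_queue_pkt() above is the entry point from the receive path: it tail-queues the packet on qp->req_pkts and schedules the responder task, and get_req() later only peeks the head, so the skb stays queued until cleanup() dequeues it (line 1242 below). A minimal user-space sketch of that queue-then-peek pattern follows; the pkt/pkt_fifo types and helpers are illustrative stand-ins, not the kernel's skb_queue API.

/* Minimal sketch of the responder's packet intake, assuming a plain
 * singly linked FIFO in place of the kernel's skb_queue_*() helpers.
 * All names here (pkt, pkt_fifo, fifo_push, fifo_peek) are illustrative.
 */
#include <stdio.h>
#include <stdlib.h>

struct pkt {
	unsigned int psn;		/* packet sequence number */
	struct pkt *next;
};

struct pkt_fifo {
	struct pkt *head, *tail;
};

/* producer side: queue a request packet; the driver then schedules the
 * responder with rxe_sched_task(&qp->recv_task)
 */
static void fifo_push(struct pkt_fifo *q, struct pkt *p)
{
	p->next = NULL;
	if (q->tail)
		q->tail->next = p;
	else
		q->head = p;
	q->tail = p;
}

/* consumer side: peek (do not dequeue) the head, as get_req() does */
static struct pkt *fifo_peek(struct pkt_fifo *q)
{
	return q->head;
}

int main(void)
{
	struct pkt_fifo q = { 0 };
	struct pkt *p = calloc(1, sizeof(*p));

	if (!p)
		return 1;
	p->psn = 42;
	fifo_push(&q, p);

	/* the packet stays queued until the cleanup state consumes it */
	if (fifo_peek(&q))
		printf("pending request, psn = %u\n", fifo_peek(&q)->psn);

	free(p);
	return 0;
}
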
70 static enum resp_states check_psn(struct rxe_qp *qp, in check_psn() argument
73 int diff = psn_compare(pkt->psn, qp->resp.psn); in check_psn()
74 struct rxe_dev *rxe = to_rdev(qp->ibqp.device); in check_psn()
76 switch (qp_type(qp)) { in check_psn()
79 if (qp->resp.sent_psn_nak) in check_psn()
82 qp->resp.sent_psn_nak = 1; in check_psn()
91 if (qp->resp.sent_psn_nak) in check_psn()
92 qp->resp.sent_psn_nak = 0; in check_psn()
97 if (qp->resp.drop_msg || diff != 0) { in check_psn()
99 qp->resp.drop_msg = 0; in check_psn()
103 qp->resp.drop_msg = 1; in check_psn()
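
check_psn() compares the incoming PSN with the expected qp->resp.psn and, for RC, sends at most one NAK per sequence error via the sent_psn_nak flag. The diff on line 73 comes from psn_compare(), whose body is not part of this listing; the sketch below shows a wraparound-safe comparison of 24-bit PSNs, assuming the usual shift-and-sign-extend idiom.

/* Wraparound-safe comparison of 24-bit PSNs (a sketch; check the driver's
 * real psn_compare() helper, which this listing does not include).
 */
#include <stdint.h>
#include <stdio.h>

#define BTH_PSN_MASK 0xffffff		/* PSNs are 24 bits wide */

static int psn_compare(uint32_t psn_a, uint32_t psn_b)
{
	/* shift the 24-bit difference into the top of a signed 32-bit value
	 * so that wrapped PSNs (0x000001 vs 0xffffff) still compare correctly
	 */
	return (int32_t)((psn_a - psn_b) << 8);
}

int main(void)
{
	uint32_t expected = 0xffffff;	/* responder's qp->resp.psn */
	uint32_t got = 0x000001;	/* pkt->psn, two packets later */

	/* diff > 0: the packet is ahead of the expected PSN (a gap) */
	printf("ahead: %d\n", psn_compare(got, expected) > 0);
	return 0;
}
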
114 static enum resp_states check_op_seq(struct rxe_qp *qp, in check_op_seq() argument
117 switch (qp_type(qp)) { in check_op_seq()
119 switch (qp->resp.opcode) { in check_op_seq()
160 switch (qp->resp.opcode) { in check_op_seq()
191 qp->resp.drop_msg = 1; in check_op_seq()
204 static bool check_qp_attr_access(struct rxe_qp *qp, in check_qp_attr_access() argument
208 !(qp->attr.qp_access_flags & IB_ACCESS_REMOTE_READ)) || in check_qp_attr_access()
210 !(qp->attr.qp_access_flags & IB_ACCESS_REMOTE_WRITE)) || in check_qp_attr_access()
212 !(qp->attr.qp_access_flags & IB_ACCESS_REMOTE_ATOMIC))) in check_qp_attr_access()
219 !(qp->attr.qp_access_flags & IB_ACCESS_FLUSH_GLOBAL)) || in check_qp_attr_access()
221 !(qp->attr.qp_access_flags & IB_ACCESS_FLUSH_PERSISTENT))) in check_qp_attr_access()
228 static enum resp_states check_op_valid(struct rxe_qp *qp, in check_op_valid() argument
231 switch (qp_type(qp)) { in check_op_valid()
233 if (!check_qp_attr_access(qp, pkt)) in check_op_valid()
240 !(qp->attr.qp_access_flags & IB_ACCESS_REMOTE_WRITE)) { in check_op_valid()
241 qp->resp.drop_msg = 1; in check_op_valid()
259 static enum resp_states get_srq_wqe(struct rxe_qp *qp) in get_srq_wqe() argument
261 struct rxe_srq *srq = qp->srq; in get_srq_wqe()
283 rxe_dbg_qp(qp, "invalid num_sge in SRQ entry\n"); in get_srq_wqe()
287 memcpy(&qp->resp.srq_wqe, wqe, size); in get_srq_wqe()
289 qp->resp.wqe = &qp->resp.srq_wqe.wqe; in get_srq_wqe()
303 ev.device = qp->ibqp.device; in get_srq_wqe()
304 ev.element.srq = qp->ibqp.srq; in get_srq_wqe()
310 static enum resp_states check_resource(struct rxe_qp *qp, in check_resource() argument
313 struct rxe_srq *srq = qp->srq; in check_resource()
320 if (likely(qp->attr.max_dest_rd_atomic > 0)) in check_resource()
328 return get_srq_wqe(qp); in check_resource()
330 qp->resp.wqe = queue_head(qp->rq.queue, in check_resource()
332 return (qp->resp.wqe) ? RESPST_CHK_LENGTH : RESPST_ERR_RNR; in check_resource()
338 static enum resp_states rxe_resp_check_length(struct rxe_qp *qp, in rxe_resp_check_length() argument
347 if ((qp_type(qp) == IB_QPT_GSI) || (qp_type(qp) == IB_QPT_UD)) { in rxe_resp_check_length()
352 for (i = 0; i < qp->resp.wqe->dma.num_sge; i++) in rxe_resp_check_length()
353 recv_buffer_len += qp->resp.wqe->dma.sge[i].length; in rxe_resp_check_length()
355 rxe_dbg_qp(qp, "The receive buffer is too small for this UD packet.\n"); in rxe_resp_check_length()
360 if (pkt->mask & RXE_PAYLOAD_MASK && ((qp_type(qp) == IB_QPT_RC) || in rxe_resp_check_length()
361 (qp_type(qp) == IB_QPT_UC))) { in rxe_resp_check_length()
362 unsigned int mtu = qp->mtu; in rxe_resp_check_length()
368 rxe_dbg_qp(qp, "only packet too long\n"); in rxe_resp_check_length()
374 rxe_dbg_qp(qp, "first or middle packet not mtu\n"); in rxe_resp_check_length()
379 rxe_dbg_qp(qp, "last packet zero or too long\n"); in rxe_resp_check_length()
388 rxe_dbg_qp(qp, "dma length too long\n"); in rxe_resp_check_length()
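
rxe_resp_check_length() enforces two separate rules: for UD/GSI the payload must fit in the receive WQE's scatter list (lines 352-355), and for RC/UC each packet's payload must match its position in the message. The sketch below reconstructs the RC/UC rules from the debug messages above; the pkt_pos enum and the final residual-length check are simplifying assumptions.

/* Per-packet payload rules for RC/UC, reconstructed from the debug strings
 * in rxe_resp_check_length(); the pkt_pos names mirror the RoCE opcode
 * groups (FIRST/MIDDLE/LAST/ONLY) but are local to this sketch.
 */
#include <stdbool.h>
#include <stdio.h>

enum pkt_pos { POS_FIRST, POS_MIDDLE, POS_LAST, POS_ONLY };

static bool payload_len_ok(enum pkt_pos pos, unsigned int payload,
			   unsigned int mtu, unsigned int resid)
{
	switch (pos) {
	case POS_ONLY:			/* "only packet too long" */
		if (payload > mtu)
			return false;
		break;
	case POS_FIRST:
	case POS_MIDDLE:		/* "first or middle packet not mtu" */
		if (payload != mtu)
			return false;
		break;
	case POS_LAST:			/* "last packet zero or too long" */
		if (payload == 0 || payload > mtu)
			return false;
		break;
	}
	/* roughly: the payload may not overrun what the request still
	 * expects ("dma length too long")
	 */
	return payload <= resid;
}

int main(void)
{
	printf("%d\n", payload_len_ok(POS_MIDDLE, 1024, 1024, 4096)); /* 1 */
	printf("%d\n", payload_len_ok(POS_LAST, 0, 1024, 4096));      /* 0 */
	return 0;
}
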
401 * Instead set qp->resp.rkey to 0 which is an invalid rkey
404 static void qp_resp_from_reth(struct rxe_qp *qp, struct rxe_pkt_info *pkt) in qp_resp_from_reth() argument
408 qp->resp.va = reth_va(pkt); in qp_resp_from_reth()
409 qp->resp.offset = 0; in qp_resp_from_reth()
410 qp->resp.resid = length; in qp_resp_from_reth()
411 qp->resp.length = length; in qp_resp_from_reth()
413 qp->resp.rkey = 0; in qp_resp_from_reth()
415 qp->resp.rkey = reth_rkey(pkt); in qp_resp_from_reth()
418 static void qp_resp_from_atmeth(struct rxe_qp *qp, struct rxe_pkt_info *pkt) in qp_resp_from_atmeth() argument
420 qp->resp.va = atmeth_va(pkt); in qp_resp_from_atmeth()
421 qp->resp.offset = 0; in qp_resp_from_atmeth()
422 qp->resp.rkey = atmeth_rkey(pkt); in qp_resp_from_atmeth()
423 qp->resp.resid = sizeof(u64); in qp_resp_from_atmeth()
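
qp_resp_from_reth() and qp_resp_from_atmeth() seed the per-request response state from the RETH and AtomicETH headers; per the comment at line 401, a zero-length operation stores rkey 0 (never a valid key) so a stale key cannot be dereferenced later. A compact model of that seeding follows; struct resp_state and the helper names are assumptions made for the sketch.

/* Sketch of how the RETH / AtomicETH fields seed the response state
 * (lines 404-423). The types and helpers are simplified stand-ins.
 */
#include <stdint.h>
#include <stdio.h>

struct resp_state {
	uint64_t va;		/* target virtual address from the header */
	uint64_t offset;	/* extra offset, used when an MW is bound */
	uint32_t rkey;		/* remote key to resolve to an MR or MW */
	uint32_t resid;		/* bytes still to transfer */
	uint32_t length;	/* total length of the operation */
};

static void resp_from_reth(struct resp_state *resp, uint64_t va,
			   uint32_t rkey, uint32_t len)
{
	resp->va = va;
	resp->offset = 0;
	resp->resid = len;
	resp->length = len;
	/* zero-length operations carry no usable rkey: store 0, an
	 * invalid key, rather than whatever the header happened to hold
	 */
	resp->rkey = len ? rkey : 0;
}

static void resp_from_atmeth(struct resp_state *resp, uint64_t va,
			     uint32_t rkey)
{
	resp->va = va;
	resp->offset = 0;
	resp->rkey = rkey;
	resp->resid = sizeof(uint64_t);	/* atomics always touch 8 bytes */
}

int main(void)
{
	struct resp_state resp;

	resp_from_reth(&resp, 0x1000, 0x1234, 0);	/* zero-length write */
	printf("rkey kept: %#x\n", (unsigned)resp.rkey);	/* 0 */

	resp_from_atmeth(&resp, 0x2000, 0x5678);
	printf("atomic resid: %u\n", (unsigned)resp.resid);	/* 8 */
	return 0;
}
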
426 /* resolve the packet rkey to qp->resp.mr or set qp->resp.mr to NULL
430 static enum resp_states check_rkey(struct rxe_qp *qp, in check_rkey() argument
439 int mtu = qp->mtu; in check_rkey()
449 qp_resp_from_reth(qp, pkt); in check_rkey()
457 qp_resp_from_reth(qp, pkt); in check_rkey()
464 qp_resp_from_atmeth(qp, pkt); in check_rkey()
476 qp->resp.mr = NULL; in check_rkey()
480 va = qp->resp.va; in check_rkey()
481 rkey = qp->resp.rkey; in check_rkey()
482 resid = qp->resp.resid; in check_rkey()
486 mw = rxe_lookup_mw(qp, access, rkey); in check_rkey()
488 rxe_dbg_qp(qp, "no MW matches rkey %#x\n", rkey); in check_rkey()
495 rxe_dbg_qp(qp, "MW doesn't have an MR\n"); in check_rkey()
501 qp->resp.offset = mw->addr; in check_rkey()
507 mr = lookup_mr(qp->pd, access, rkey, RXE_LOOKUP_REMOTE); in check_rkey()
509 rxe_dbg_qp(qp, "no MR matches rkey %#x\n", rkey); in check_rkey()
523 if (mr_check_range(mr, va + qp->resp.offset, resid)) { in check_rkey()
550 WARN_ON_ONCE(qp->resp.mr); in check_rkey()
552 qp->resp.mr = mr; in check_rkey()
556 qp->resp.mr = NULL; in check_rkey()
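
check_rkey() resolves the rkey either to a memory window (rxe_lookup_mw) or directly to a memory region (lookup_mr), then range-checks the access against the region at line 523. The sketch below shows such a bounds check; struct mr_region is a simplified stand-in for the driver's MR, and the wraparound guard is an assumption.

/* Bounds check in the spirit of mr_check_range(): the requested window
 * [addr, addr + len) must lie inside the registered range. Simplified.
 */
#include <stdbool.h>
#include <stdint.h>

struct mr_region {
	uint64_t iova;		/* start of the registered range */
	uint64_t length;	/* size of the registered range */
};

bool mr_range_ok(const struct mr_region *mr, uint64_t addr, uint64_t len)
{
	if (addr < mr->iova)
		return false;
	if (addr + len < addr)		/* arithmetic wraparound */
		return false;
	return addr + len <= mr->iova + mr->length;
}
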
565 static enum resp_states send_data_in(struct rxe_qp *qp, void *data_addr, in send_data_in() argument
570 err = copy_data(qp->pd, IB_ACCESS_LOCAL_WRITE, &qp->resp.wqe->dma, in send_data_in()
579 static enum resp_states write_data_in(struct rxe_qp *qp, in write_data_in() argument
586 err = rxe_mr_copy(qp->resp.mr, qp->resp.va + qp->resp.offset, in write_data_in()
593 qp->resp.va += data_len; in write_data_in()
594 qp->resp.resid -= data_len; in write_data_in()
600 static struct resp_res *rxe_prepare_res(struct rxe_qp *qp, in rxe_prepare_res() argument
607 res = &qp->resp.resources[qp->resp.res_head]; in rxe_prepare_res()
608 rxe_advance_resp_resource(qp); in rxe_prepare_res()
616 res->read.va = qp->resp.va + qp->resp.offset; in rxe_prepare_res()
617 res->read.va_org = qp->resp.va + qp->resp.offset; in rxe_prepare_res()
618 res->read.resid = qp->resp.resid; in rxe_prepare_res()
619 res->read.length = qp->resp.resid; in rxe_prepare_res()
620 res->read.rkey = qp->resp.rkey; in rxe_prepare_res()
622 pkts = max_t(u32, (reth_len(pkt) + qp->mtu - 1)/qp->mtu, 1); in rxe_prepare_res()
636 res->flush.va = qp->resp.va + qp->resp.offset; in rxe_prepare_res()
637 res->flush.length = qp->resp.length; in rxe_prepare_res()
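
rxe_prepare_res() hands out the next slot of the responder-resource ring (resources[res_head], advanced by rxe_advance_resp_resource) and, for a read, records va/resid/rkey plus the PSN span so the reply can be regenerated if the request is duplicated; line 622 shows the reply size as ceil(reth_len / mtu) packets with a minimum of one. The sketch below models the ring and that arithmetic; the fixed depth and the first/last PSN bookkeeping are illustrative assumptions.

/* Sketch of the responder-resource ring used to replay reads/atomics.
 * The depth would really come from qp->attr.max_dest_rd_atomic; the
 * struct layout here is trimmed down for the example.
 */
#include <stdio.h>

#define RING_DEPTH 4		/* stands in for max_dest_rd_atomic */

struct resp_res {
	unsigned int first_psn;	/* PSN range this resource can answer */
	unsigned int last_psn;
};

struct resp_ring {
	struct resp_res slot[RING_DEPTH];
	unsigned int head;
};

/* take the next slot; the oldest saved response is silently recycled */
static struct resp_res *ring_take(struct resp_ring *r)
{
	struct resp_res *res = &r->slot[r->head];

	r->head = (r->head + 1) % RING_DEPTH;
	return res;
}

int main(void)
{
	struct resp_ring ring = { 0 };
	unsigned int mtu = 1024, reth_len = 3000, first_psn = 100;
	/* number of read-response packets: at least 1, even for len == 0 */
	unsigned int pkts = reth_len ? (reth_len + mtu - 1) / mtu : 1;
	struct resp_res *res = ring_take(&ring);

	res->first_psn = first_psn;
	res->last_psn = first_psn + pkts - 1;
	printf("read reply covers PSNs %u..%u\n",
	       res->first_psn, res->last_psn);	/* 100..102 */
	return 0;
}
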
645 static enum resp_states process_flush(struct rxe_qp *qp, in process_flush() argument
649 struct rxe_mr *mr = qp->resp.mr; in process_flush()
650 struct resp_res *res = qp->resp.res; in process_flush()
656 res = rxe_prepare_res(qp, pkt, RXE_FLUSH_MASK); in process_flush()
657 qp->resp.res = res; in process_flush()
678 qp->resp.msn++; in process_flush()
681 qp->resp.psn = (pkt->psn + 1) & BTH_PSN_MASK; in process_flush()
682 qp->resp.ack_psn = qp->resp.psn; in process_flush()
684 qp->resp.opcode = pkt->opcode; in process_flush()
685 qp->resp.status = IB_WC_SUCCESS; in process_flush()
690 static enum resp_states atomic_reply(struct rxe_qp *qp, in atomic_reply() argument
693 struct rxe_mr *mr = qp->resp.mr; in atomic_reply()
694 struct resp_res *res = qp->resp.res; in atomic_reply()
698 res = rxe_prepare_res(qp, pkt, RXE_ATOMIC_MASK); in atomic_reply()
699 qp->resp.res = res; in atomic_reply()
703 u64 iova = qp->resp.va + qp->resp.offset; in atomic_reply()
712 qp->resp.msn++; in atomic_reply()
715 qp->resp.psn = (pkt->psn + 1) & BTH_PSN_MASK; in atomic_reply()
716 qp->resp.ack_psn = qp->resp.psn; in atomic_reply()
718 qp->resp.opcode = pkt->opcode; in atomic_reply()
719 qp->resp.status = IB_WC_SUCCESS; in atomic_reply()
725 static enum resp_states atomic_write_reply(struct rxe_qp *qp, in atomic_write_reply() argument
728 struct resp_res *res = qp->resp.res; in atomic_write_reply()
735 res = rxe_prepare_res(qp, pkt, RXE_ATOMIC_WRITE_MASK); in atomic_write_reply()
736 qp->resp.res = res; in atomic_write_reply()
742 mr = qp->resp.mr; in atomic_write_reply()
744 iova = qp->resp.va + qp->resp.offset; in atomic_write_reply()
750 qp->resp.resid = 0; in atomic_write_reply()
751 qp->resp.msn++; in atomic_write_reply()
754 qp->resp.psn = (pkt->psn + 1) & BTH_PSN_MASK; in atomic_write_reply()
755 qp->resp.ack_psn = qp->resp.psn; in atomic_write_reply()
757 qp->resp.opcode = pkt->opcode; in atomic_write_reply()
758 qp->resp.status = IB_WC_SUCCESS; in atomic_write_reply()
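
process_flush(), atomic_reply(), atomic_write_reply() and the send/write path in execute() all finish with the same bookkeeping: advance the MSN, set the next expected PSN to (pkt->psn + 1) & BTH_PSN_MASK, mirror it into ack_psn, and record the opcode and IB_WC_SUCCESS. A small helper capturing that pattern follows; struct resp_tracking is an illustrative stand-in, and in the driver the MSN appears to advance only once per completed message.

/* Common end-of-request bookkeeping (see lines 678-685, 712-719, 750-758,
 * 1037-1044). Types and the helper name are local to this sketch.
 */
#include <stdint.h>

#define BTH_PSN_MASK 0xffffff	/* PSNs wrap at 24 bits */

struct resp_tracking {
	uint32_t msn;		/* message sequence number, reported in AETH */
	uint32_t psn;		/* next expected PSN */
	uint32_t ack_psn;	/* highest PSN we may acknowledge */
	int opcode;		/* last opcode handled, for sequence checks */
};

void resp_advance(struct resp_tracking *t, uint32_t pkt_psn, int pkt_opcode)
{
	t->msn++;
	t->psn = (pkt_psn + 1) & BTH_PSN_MASK;
	t->ack_psn = t->psn;
	t->opcode = pkt_opcode;
}
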
763 static struct sk_buff *prepare_ack_packet(struct rxe_qp *qp, in prepare_ack_packet() argument
770 struct rxe_dev *rxe = to_rdev(qp->ibqp.device); in prepare_ack_packet()
782 skb = rxe_init_packet(rxe, &qp->pri_av, paylen, ack); in prepare_ack_packet()
786 ack->qp = qp; in prepare_ack_packet()
793 qp->attr.dest_qp_num, 0, psn); in prepare_ack_packet()
797 aeth_set_msn(ack, qp->resp.msn); in prepare_ack_packet()
801 atmack_set_orig(ack, qp->resp.res->atomic.orig_val); in prepare_ack_packet()
803 err = rxe_prepare(&qp->pri_av, ack, skb); in prepare_ack_packet()
814 * @qp: the qp
829 static struct rxe_mr *rxe_recheck_mr(struct rxe_qp *qp, u32 rkey) in rxe_recheck_mr() argument
831 struct rxe_dev *rxe = to_rdev(qp->ibqp.device); in rxe_recheck_mr()
868 static enum resp_states read_reply(struct rxe_qp *qp, in read_reply() argument
873 int mtu = qp->mtu; in read_reply()
878 struct resp_res *res = qp->resp.res; in read_reply()
882 res = rxe_prepare_res(qp, req_pkt, RXE_READ_MASK); in read_reply()
883 qp->resp.res = res; in read_reply()
887 if (!res->replay || qp->resp.length == 0) { in read_reply()
889 * otherwise qp->resp.mr holds a ref on mr in read_reply()
892 mr = qp->resp.mr; in read_reply()
893 qp->resp.mr = NULL; in read_reply()
895 mr = rxe_recheck_mr(qp, res->read.rkey); in read_reply()
909 mr = rxe_recheck_mr(qp, res->read.rkey); in read_reply()
923 skb = prepare_ack_packet(qp, &ack_pkt, opcode, payload, in read_reply()
945 err = rxe_xmit_packet(qp, &ack_pkt, skb); in read_reply()
958 qp->resp.res = NULL; in read_reply()
960 qp->resp.opcode = -1; in read_reply()
961 if (psn_compare(res->cur_psn, qp->resp.psn) >= 0) in read_reply()
962 qp->resp.psn = res->cur_psn; in read_reply()
972 static int invalidate_rkey(struct rxe_qp *qp, u32 rkey) in invalidate_rkey() argument
975 return rxe_invalidate_mw(qp, rkey); in invalidate_rkey()
977 return rxe_invalidate_mr(qp, rkey); in invalidate_rkey()
983 static enum resp_states execute(struct rxe_qp *qp, struct rxe_pkt_info *pkt) in execute() argument
990 if (qp_type(qp) == IB_QPT_UD || in execute()
991 qp_type(qp) == IB_QPT_GSI) { in execute()
997 err = send_data_in(qp, &hdr, sizeof(hdr)); in execute()
999 err = send_data_in(qp, ipv6_hdr(skb), in execute()
1005 err = send_data_in(qp, payload_addr(pkt), payload_size(pkt)); in execute()
1009 err = write_data_in(qp, pkt); in execute()
1014 qp->resp.msn++; in execute()
1030 err = invalidate_rkey(qp, rkey); in execute()
1037 qp->resp.msn++; in execute()
1040 qp->resp.psn = (pkt->psn + 1) & BTH_PSN_MASK; in execute()
1041 qp->resp.ack_psn = qp->resp.psn; in execute()
1043 qp->resp.opcode = pkt->opcode; in execute()
1044 qp->resp.status = IB_WC_SUCCESS; in execute()
1048 else if (qp_type(qp) == IB_QPT_RC) in execute()
1054 static enum resp_states do_complete(struct rxe_qp *qp, in do_complete() argument
1060 struct rxe_recv_wqe *wqe = qp->resp.wqe; in do_complete()
1061 struct rxe_dev *rxe = to_rdev(qp->ibqp.device); in do_complete()
1069 if (qp->rcq->is_user) { in do_complete()
1070 uwc->status = qp->resp.status; in do_complete()
1071 uwc->qp_num = qp->ibqp.qp_num; in do_complete()
1074 wc->status = qp->resp.status; in do_complete()
1075 wc->qp = &qp->ibqp; in do_complete()
1086 qp->resp.length : wqe->dma.length - wqe->dma.resid; in do_complete()
1091 if (qp->rcq->is_user) { in do_complete()
1107 uwc->port_num = qp->attr.port_num; in do_complete()
1135 wc->port_num = qp->attr.port_num; in do_complete()
1139 rxe_err_qp(qp, "non-flush error status = %d\n", in do_complete()
1144 if (!qp->srq) in do_complete()
1145 queue_advance_consumer(qp->rq.queue, QUEUE_TYPE_FROM_CLIENT); in do_complete()
1147 qp->resp.wqe = NULL; in do_complete()
1149 if (rxe_cq_post(qp->rcq, &cqe, pkt ? bth_se(pkt) : 1)) in do_complete()
1153 spin_lock_irqsave(&qp->state_lock, flags); in do_complete()
1154 if (unlikely(qp_state(qp) == IB_QPS_ERR)) { in do_complete()
1155 spin_unlock_irqrestore(&qp->state_lock, flags); in do_complete()
1158 spin_unlock_irqrestore(&qp->state_lock, flags); in do_complete()
1162 if (qp_type(qp) == IB_QPT_RC) in do_complete()
1169 static int send_common_ack(struct rxe_qp *qp, u8 syndrome, u32 psn, in send_common_ack() argument
1176 skb = prepare_ack_packet(qp, &ack_pkt, opcode, 0, psn, syndrome); in send_common_ack()
1180 err = rxe_xmit_packet(qp, &ack_pkt, skb); in send_common_ack()
1182 rxe_dbg_qp(qp, "Failed sending %s\n", msg); in send_common_ack()
1187 static int send_ack(struct rxe_qp *qp, u8 syndrome, u32 psn) in send_ack() argument
1189 return send_common_ack(qp, syndrome, psn, in send_ack()
1193 static int send_atomic_ack(struct rxe_qp *qp, u8 syndrome, u32 psn) in send_atomic_ack() argument
1195 int ret = send_common_ack(qp, syndrome, psn, in send_atomic_ack()
1201 qp->resp.res = NULL; in send_atomic_ack()
1205 static int send_read_response_ack(struct rxe_qp *qp, u8 syndrome, u32 psn) in send_read_response_ack() argument
1207 int ret = send_common_ack(qp, syndrome, psn, in send_read_response_ack()
1214 qp->resp.res = NULL; in send_read_response_ack()
1218 static enum resp_states acknowledge(struct rxe_qp *qp, in acknowledge() argument
1221 if (qp_type(qp) != IB_QPT_RC) in acknowledge()
1224 if (qp->resp.aeth_syndrome != AETH_ACK_UNLIMITED) in acknowledge()
1225 send_ack(qp, qp->resp.aeth_syndrome, pkt->psn); in acknowledge()
1227 send_atomic_ack(qp, AETH_ACK_UNLIMITED, pkt->psn); in acknowledge()
1229 send_read_response_ack(qp, AETH_ACK_UNLIMITED, pkt->psn); in acknowledge()
1231 send_ack(qp, AETH_ACK_UNLIMITED, pkt->psn); in acknowledge()
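
acknowledge() only acts for RC QPs: a previously recorded NAK syndrome is sent as-is, completed atomics get an atomic ack carrying the original value (line 801), a separate path uses send_read_response_ack() (in current code this appears to cover flush and atomic write), and everything else gets a plain positive ack. The exact conditions are not part of this listing, so the mask bits and ordering in the sketch below are assumptions; only the four outcomes are grounded in the calls above.

/* Sketch of the ack selection made by acknowledge(). The mask bits and
 * the ordering of checks are assumptions for illustration.
 */
enum ack_kind { ACK_NONE, ACK_NAK, ACK_ATOMIC, ACK_READ_RESPONSE, ACK_PLAIN };

#define MSK_ATOMIC		(1u << 0)	/* illustrative mask bits */
#define MSK_FLUSH_OR_ATOMIC_WR	(1u << 1)

enum ack_kind pick_ack(int is_rc, int have_nak_syndrome, unsigned int mask)
{
	if (!is_rc)
		return ACK_NONE;		/* only RC QPs send acks */
	if (have_nak_syndrome)
		return ACK_NAK;			/* send_ack() with the syndrome */
	if (mask & MSK_ATOMIC)
		return ACK_ATOMIC;		/* send_atomic_ack() */
	if (mask & MSK_FLUSH_OR_ATOMIC_WR)
		return ACK_READ_RESPONSE;	/* send_read_response_ack() */
	return ACK_PLAIN;			/* send_ack(), positive */
}
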
1236 static enum resp_states cleanup(struct rxe_qp *qp, in cleanup() argument
1242 skb = skb_dequeue(&qp->req_pkts); in cleanup()
1243 rxe_put(qp); in cleanup()
1245 ib_device_put(qp->ibqp.device); in cleanup()
1248 if (qp->resp.mr) { in cleanup()
1249 rxe_put(qp->resp.mr); in cleanup()
1250 qp->resp.mr = NULL; in cleanup()
1256 static struct resp_res *find_resource(struct rxe_qp *qp, u32 psn) in find_resource() argument
1260 for (i = 0; i < qp->attr.max_dest_rd_atomic; i++) { in find_resource()
1261 struct resp_res *res = &qp->resp.resources[i]; in find_resource()
1275 static enum resp_states duplicate_request(struct rxe_qp *qp, in duplicate_request() argument
1279 u32 prev_psn = (qp->resp.ack_psn - 1) & BTH_PSN_MASK; in duplicate_request()
1284 send_ack(qp, AETH_ACK_UNLIMITED, prev_psn); in duplicate_request()
1290 res = find_resource(qp, pkt->psn); in duplicate_request()
1294 qp->resp.res = res; in duplicate_request()
1305 res = find_resource(qp, pkt->psn); in duplicate_request()
1344 qp->resp.res = res; in duplicate_request()
1352 res = find_resource(qp, pkt->psn); in duplicate_request()
1356 qp->resp.res = res; in duplicate_request()
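
duplicate_request() re-acknowledges duplicate sends and writes with the previous PSN and, for duplicate reads and atomics, looks up the saved responder resource by PSN so the original reply can be replayed from it. The sketch below shows a PSN-range match over a small resource table, reusing the wraparound-safe compare from the earlier sketch; the field names are illustrative.

/* Match a duplicate request's PSN to a saved responder resource,
 * in the spirit of find_resource() (lines 1256-1261). Simplified types.
 */
#include <stdint.h>
#include <stddef.h>

#define RING_DEPTH 4		/* stands in for max_dest_rd_atomic */

struct saved_res {
	int in_use;
	uint32_t first_psn;
	uint32_t last_psn;
};

static int psn_cmp(uint32_t a, uint32_t b)
{
	return (int32_t)((a - b) << 8);	/* wraparound-safe, 24-bit PSNs */
}

struct saved_res *find_saved_res(struct saved_res *tbl, uint32_t psn)
{
	for (unsigned int i = 0; i < RING_DEPTH; i++) {
		struct saved_res *res = &tbl[i];

		/* hit if first_psn <= psn <= last_psn, modulo 2^24 */
		if (res->in_use &&
		    psn_cmp(psn, res->first_psn) >= 0 &&
		    psn_cmp(psn, res->last_psn) <= 0)
			return res;
	}
	return NULL;
}
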
1372 static void do_class_ac_error(struct rxe_qp *qp, u8 syndrome, in do_class_ac_error() argument
1375 qp->resp.aeth_syndrome = syndrome; in do_class_ac_error()
1376 qp->resp.status = status; in do_class_ac_error()
1379 qp->resp.goto_error = 1; in do_class_ac_error()
1382 static enum resp_states do_class_d1e_error(struct rxe_qp *qp) in do_class_d1e_error() argument
1385 if (qp->srq) { in do_class_d1e_error()
1387 qp->resp.drop_msg = 1; in do_class_d1e_error()
1388 if (qp->resp.wqe) { in do_class_d1e_error()
1389 qp->resp.status = IB_WC_REM_INV_REQ_ERR; in do_class_d1e_error()
1400 if (qp->resp.wqe) { in do_class_d1e_error()
1401 qp->resp.wqe->dma.resid = qp->resp.wqe->dma.length; in do_class_d1e_error()
1402 qp->resp.wqe->dma.cur_sge = 0; in do_class_d1e_error()
1403 qp->resp.wqe->dma.sge_offset = 0; in do_class_d1e_error()
1404 qp->resp.opcode = -1; in do_class_d1e_error()
1407 if (qp->resp.mr) { in do_class_d1e_error()
1408 rxe_put(qp->resp.mr); in do_class_d1e_error()
1409 qp->resp.mr = NULL; in do_class_d1e_error()
1417 static void drain_req_pkts(struct rxe_qp *qp) in drain_req_pkts() argument
1421 while ((skb = skb_dequeue(&qp->req_pkts))) { in drain_req_pkts()
1422 rxe_put(qp); in drain_req_pkts()
1424 ib_device_put(qp->ibqp.device); in drain_req_pkts()
1429 static int flush_recv_wqe(struct rxe_qp *qp, struct rxe_recv_wqe *wqe) in flush_recv_wqe() argument
1436 if (qp->rcq->is_user) { in flush_recv_wqe()
1439 uwc->qp_num = qp_num(qp); in flush_recv_wqe()
1443 wc->qp = &qp->ibqp; in flush_recv_wqe()
1446 err = rxe_cq_post(qp->rcq, &cqe, 0); in flush_recv_wqe()
1448 rxe_dbg_cq(qp->rcq, "post cq failed err = %d\n", err); in flush_recv_wqe()
1457 static void flush_recv_queue(struct rxe_qp *qp, bool notify) in flush_recv_queue() argument
1459 struct rxe_queue *q = qp->rq.queue; in flush_recv_queue()
1463 if (qp->srq) { in flush_recv_queue()
1464 if (notify && qp->ibqp.event_handler) { in flush_recv_queue()
1467 ev.device = qp->ibqp.device; in flush_recv_queue()
1468 ev.element.qp = &qp->ibqp; in flush_recv_queue()
1470 qp->ibqp.event_handler(&ev, qp->ibqp.qp_context); in flush_recv_queue()
1476 if (!qp->rq.queue) in flush_recv_queue()
1481 err = flush_recv_wqe(qp, wqe); in flush_recv_queue()
1488 qp->resp.wqe = NULL; in flush_recv_queue()
1491 int rxe_receiver(struct rxe_qp *qp) in rxe_receiver() argument
1493 struct rxe_dev *rxe = to_rdev(qp->ibqp.device); in rxe_receiver()
1499 spin_lock_irqsave(&qp->state_lock, flags); in rxe_receiver()
1500 if (!qp->valid || qp_state(qp) == IB_QPS_ERR || in rxe_receiver()
1501 qp_state(qp) == IB_QPS_RESET) { in rxe_receiver()
1502 bool notify = qp->valid && (qp_state(qp) == IB_QPS_ERR); in rxe_receiver()
1504 drain_req_pkts(qp); in rxe_receiver()
1505 flush_recv_queue(qp, notify); in rxe_receiver()
1506 spin_unlock_irqrestore(&qp->state_lock, flags); in rxe_receiver()
1509 spin_unlock_irqrestore(&qp->state_lock, flags); in rxe_receiver()
1511 qp->resp.aeth_syndrome = AETH_ACK_UNLIMITED; in rxe_receiver()
1516 rxe_dbg_qp(qp, "state = %s\n", resp_state_name[state]); in rxe_receiver()
1519 state = get_req(qp, &pkt); in rxe_receiver()
1522 state = check_psn(qp, pkt); in rxe_receiver()
1525 state = check_op_seq(qp, pkt); in rxe_receiver()
1528 state = check_op_valid(qp, pkt); in rxe_receiver()
1531 state = check_resource(qp, pkt); in rxe_receiver()
1534 state = rxe_resp_check_length(qp, pkt); in rxe_receiver()
1537 state = check_rkey(qp, pkt); in rxe_receiver()
1540 state = execute(qp, pkt); in rxe_receiver()
1543 state = do_complete(qp, pkt); in rxe_receiver()
1546 state = read_reply(qp, pkt); in rxe_receiver()
1549 state = atomic_reply(qp, pkt); in rxe_receiver()
1552 state = atomic_write_reply(qp, pkt); in rxe_receiver()
1555 state = process_flush(qp, pkt); in rxe_receiver()
1558 state = acknowledge(qp, pkt); in rxe_receiver()
1561 state = cleanup(qp, pkt); in rxe_receiver()
1564 state = duplicate_request(qp, pkt); in rxe_receiver()
1568 send_ack(qp, AETH_NAK_PSN_SEQ_ERROR, qp->resp.psn); in rxe_receiver()
1578 do_class_ac_error(qp, AETH_NAK_INVALID_REQ, in rxe_receiver()
1584 state = do_class_d1e_error(qp); in rxe_receiver()
1587 if (qp_type(qp) == IB_QPT_RC) { in rxe_receiver()
1590 send_ack(qp, AETH_RNR_NAK | in rxe_receiver()
1592 qp->attr.min_rnr_timer), in rxe_receiver()
1596 qp->resp.drop_msg = 1; in rxe_receiver()
1602 if (qp_type(qp) == IB_QPT_RC) { in rxe_receiver()
1604 do_class_ac_error(qp, AETH_NAK_REM_ACC_ERR, in rxe_receiver()
1608 qp->resp.drop_msg = 1; in rxe_receiver()
1609 if (qp->srq) { in rxe_receiver()
1611 qp->resp.status = IB_WC_REM_ACCESS_ERR; in rxe_receiver()
1622 qp->resp.goto_error = 1; in rxe_receiver()
1623 qp->resp.status = IB_WC_REM_INV_REQ_ERR; in rxe_receiver()
1628 if (qp_type(qp) == IB_QPT_RC) { in rxe_receiver()
1630 do_class_ac_error(qp, AETH_NAK_INVALID_REQ, in rxe_receiver()
1633 } else if (qp->srq) { in rxe_receiver()
1635 qp->resp.status = IB_WC_REM_INV_REQ_ERR; in rxe_receiver()
1639 qp->resp.drop_msg = 1; in rxe_receiver()
1646 do_class_ac_error(qp, AETH_NAK_REM_OP_ERR, in rxe_receiver()
1657 if (qp->resp.goto_error) { in rxe_receiver()
1665 if (qp->resp.goto_error) { in rxe_receiver()
1673 qp->resp.goto_error = 0; in rxe_receiver()
1674 rxe_dbg_qp(qp, "moved to error state\n"); in rxe_receiver()
1675 rxe_qp_error(qp); in rxe_receiver()
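
rxe_receiver() is the driving loop: every handler returns the next resp_state, and the switch keeps dispatching until the packet is consumed or an error state (with qp->resp.goto_error set) moves the QP to the error state via rxe_qp_error(). The sketch below models only the happy-path chain as a plain loop; the state names mirror the checks dispatched above and every handler trivially succeeds here.

/* Happy-path model of the responder state machine in rxe_receiver().
 * Real handlers can return error/NAK states instead of the next check.
 */
#include <stdio.h>

enum resp_state {
	ST_GET_REQ, ST_CHK_PSN, ST_CHK_OP_SEQ, ST_CHK_OP_VALID,
	ST_CHK_RESOURCE, ST_CHK_LENGTH, ST_CHK_RKEY, ST_EXECUTE,
	ST_COMPLETE, ST_ACKNOWLEDGE, ST_CLEANUP, ST_DONE,
};

static const char * const state_name[] = {
	"get_req", "chk_psn", "chk_op_seq", "chk_op_valid",
	"chk_resource", "chk_length", "chk_rkey", "execute",
	"complete", "acknowledge", "cleanup", "done",
};

int main(void)
{
	enum resp_state state = ST_GET_REQ;

	/* each iteration plays the role of one case in the big switch */
	while (state != ST_DONE) {
		printf("state = %s\n", state_name[state]);
		state++;	/* every check succeeds in this model */
	}
	return 0;
}
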