Lines matching "qp" (full-text search) in the SoftRoCE queue pair code,
drivers/infiniband/sw/rxe/rxe_qp.c. Only the lines that matched are shown
for each function; elided source is marked with "...".

/* in rxe_qp_chk_init() */
	rxe_dbg_dev(rxe, "GSI QP exists for port %d\n", port_num);

static int alloc_rd_atomic_resources(struct rxe_qp *qp, unsigned int n)
{
	qp->resp.res_head = 0;
	qp->resp.res_tail = 0;
	qp->resp.resources = kcalloc(n, sizeof(struct resp_res), GFP_KERNEL);

	if (!qp->resp.resources)
		...
}

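Only the tail of this helper is elided; a minimal completion, assuming the
usual kernel idiom for a failed kcalloc():

	static int alloc_rd_atomic_resources(struct rxe_qp *qp, unsigned int n)
	{
		qp->resp.res_head = 0;
		qp->resp.res_tail = 0;
		qp->resp.resources = kcalloc(n, sizeof(struct resp_res), GFP_KERNEL);

		if (!qp->resp.resources)
			return -ENOMEM;	/* assumed: allocation failure path */

		return 0;
	}
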
static void free_rd_atomic_resources(struct rxe_qp *qp)
{
	if (qp->resp.resources) {
		...
		for (i = 0; i < qp->attr.max_dest_rd_atomic; i++) {
			struct resp_res *res = &qp->resp.resources[i];
			...
		}
		kfree(qp->resp.resources);
		qp->resp.resources = NULL;
	}
}

static void cleanup_rd_atomic_resources(struct rxe_qp *qp)
{
	...
	if (qp->resp.resources) {
		for (i = 0; i < qp->attr.max_dest_rd_atomic; i++) {
			res = &qp->resp.resources[i];
			...
		}
	}
}

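These three helpers manage a small ring of resp_res slots, indexed by
res_head/res_tail, one per outstanding inbound RDMA read or atomic. A sketch
of how the responder side plausibly claims the next slot, assuming a simple
wrap-around advance (the helper name is illustrative, not confirmed by the
hits above):

	/* illustrative sketch: claim the next rd/atomic resource slot */
	static struct resp_res *get_next_resource(struct rxe_qp *qp)
	{
		struct resp_res *res = &qp->resp.resources[qp->resp.res_head];

		/* wrap res_head around the ring of max_dest_rd_atomic slots */
		if (++qp->resp.res_head == qp->attr.max_dest_rd_atomic)
			qp->resp.res_head = 0;

		return res;
	}
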
static void rxe_qp_init_misc(struct rxe_dev *rxe, struct rxe_qp *qp,
			     struct ib_qp_init_attr *init)
{
	...
	qp->sq_sig_type = init->sq_sig_type;
	qp->attr.path_mtu = 1;
	qp->mtu = ib_mtu_enum_to_int(qp->attr.path_mtu);

	qpn = qp->elem.index;
	...
	qp->ibqp.qp_num = 1;
	...
	qp->attr.port_num = init->port_num;
	...
	qp->ibqp.qp_num = qpn;
	...
	spin_lock_init(&qp->state_lock);

	spin_lock_init(&qp->sq.sq_lock);
	spin_lock_init(&qp->rq.producer_lock);
	spin_lock_init(&qp->rq.consumer_lock);

	skb_queue_head_init(&qp->req_pkts);
	skb_queue_head_init(&qp->resp_pkts);

	atomic_set(&qp->ssn, 0);
	atomic_set(&qp->skb_out, 0);
}

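The two qp_num assignments above sit in a qp_type switch that the search
filter hid; a sketch of how that branch plausibly reads (GSI QPs are fixed
at QP number 1 by the IB spec, everything else uses the pool index):

	switch (init->qp_type) {
	case IB_QPT_GSI:
		qp->ibqp.qp_num = 1;		/* GSI is always QP 1 */
		qp->attr.port_num = init->port_num;
		break;
	default:
		qp->ibqp.qp_num = qpn;		/* regular QPs use the pool index */
		break;
	}
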
static int rxe_init_sq(struct rxe_qp *qp, struct ib_qp_init_attr *init,
		       struct ib_udata *udata,
		       struct rxe_create_qp_resp __user *uresp)
{
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
	...
	qp->sq.max_wr = init->cap.max_send_wr;
	...
	qp->sq.max_sge = wqe_size / sizeof(struct ib_sge);
	qp->sq.max_inline = wqe_size;
	...
	qp->sq.queue = rxe_queue_init(rxe, &qp->sq.max_wr, wqe_size,
				      QUEUE_TYPE_FROM_CLIENT);
	if (!qp->sq.queue) {
		rxe_err_qp(qp, "Unable to allocate send queue\n");
		...
	}

	/* prepare info for caller to mmap send queue if user space qp */
	err = do_mmap_info(rxe, uresp ? &uresp->sq_mi : NULL, udata,
			   qp->sq.queue->buf, qp->sq.queue->buf_size,
			   &qp->sq.queue->ip);
	if (err) {
		rxe_err_qp(qp, "do_mmap_info failed, err = %d\n", err);
		...
	}
	...
	init->cap.max_send_wr = qp->sq.max_wr;
	init->cap.max_send_sge = qp->sq.max_sge;
	init->cap.max_inline_data = qp->sq.max_inline;
	...
err_free:
	vfree(qp->sq.queue->buf);
	kfree(qp->sq.queue);
	qp->sq.queue = NULL;
	...
}

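The elided wqe_size math determines max_sge and max_inline together; a
sketch of the likely computation (each send WQE reserves one payload area
that serves either the gather list or inline data, whichever is larger):

	/* size the per-WQE payload area for whichever is larger:
	 * the SGE array or the requested inline data
	 */
	wqe_size = max_t(int, init->cap.max_send_sge * sizeof(struct ib_sge),
			 init->cap.max_inline_data);
	qp->sq.max_sge = wqe_size / sizeof(struct ib_sge);
	qp->sq.max_inline = wqe_size;
	wqe_size += sizeof(struct rxe_send_wqe);	/* add the WQE header */
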
static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp,
			   struct ib_qp_init_attr *init, struct ib_udata *udata,
			   struct rxe_create_qp_resp __user *uresp)
{
	...
	/* if we don't finish qp create make sure queue is valid */
	skb_queue_head_init(&qp->req_pkts);

	err = sock_create_kern(&init_net, AF_INET, SOCK_DGRAM, 0, &qp->sk);
	...
	qp->sk->sk->sk_user_data = (void *)(uintptr_t)qp->elem.index;

	/* pick a source UDP port number for this QP based on
	 * the source QPN. this spreads traffic for different QPs
	 * across different NIC RX queues (while using a single
	 * flow for a given QP to maintain packet order).
	 */
	qp->src_port = RXE_ROCE_V2_SPORT + (hash_32(qp_num(qp), 14) & 0x3fff);

	err = rxe_init_sq(qp, init, udata, uresp);
	...
	qp->req.wqe_index = queue_get_producer(qp->sq.queue,
					       QUEUE_TYPE_FROM_CLIENT);

	qp->req.opcode = -1;
	qp->comp.opcode = -1;

	rxe_init_task(&qp->send_task, qp, rxe_sender);

	qp->qp_timeout_jiffies = 0; /* Can't be set for UD/UC in modify_qp */
	...
	timer_setup(&qp->rnr_nak_timer, rnr_nak_timer, 0);
	timer_setup(&qp->retrans_timer, retransmit_timer, 0);
	...
}

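The hash keeps the source port inside the IANA dynamic/private range; a
worked sketch of the arithmetic (RXE_ROCE_V2_SPORT is 0xc000, the base of
that range):

	/* hash_32(qpn, 14) yields a value in [0, 0x3fff], so
	 *
	 *   src_port = 0xc000 + hash  =>  src_port in [0xc000, 0xffff]
	 *
	 * every QP gets a stable port (one flow, ordered packets) while
	 * different QPs spread across NIC RX queues
	 */
	u16 src_port = RXE_ROCE_V2_SPORT + (hash_32(qp_num(qp), 14) & 0x3fff);
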
static int rxe_init_rq(struct rxe_qp *qp, struct ib_qp_init_attr *init,
		       struct ib_udata *udata,
		       struct rxe_create_qp_resp __user *uresp)
{
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
	...
	qp->rq.max_wr = init->cap.max_recv_wr;
	qp->rq.max_sge = init->cap.max_recv_sge;
	wqe_size = sizeof(struct rxe_recv_wqe) +
		   qp->rq.max_sge * sizeof(struct ib_sge);

	qp->rq.queue = rxe_queue_init(rxe, &qp->rq.max_wr, wqe_size,
				      QUEUE_TYPE_FROM_CLIENT);
	if (!qp->rq.queue) {
		rxe_err_qp(qp, "Unable to allocate recv queue\n");
		...
	}

	/* prepare info for caller to mmap recv queue if user space qp */
	err = do_mmap_info(rxe, uresp ? &uresp->rq_mi : NULL, udata,
			   qp->rq.queue->buf, qp->rq.queue->buf_size,
			   &qp->rq.queue->ip);
	if (err) {
		rxe_err_qp(qp, "do_mmap_info failed, err = %d\n", err);
		...
	}
	...
	init->cap.max_recv_wr = qp->rq.max_wr;
	...
err_free:
	vfree(qp->rq.queue->buf);
	kfree(qp->rq.queue);
	qp->rq.queue = NULL;
	...
}

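Both queue setups copy the limits back into init->cap after allocation
because rxe_queue_init() takes the requested depth by pointer and may adjust
it. A hedged illustration; the exact rounding behavior is an assumption
inferred from that write-back pattern:

	struct rxe_queue *q;
	int depth = init->cap.max_recv_wr;	/* e.g. 100 */

	/* rxe_queue_init() receives &depth and may raise it (assumption:
	 * rounded up to fit a power-of-two ring), so the granted value is
	 * reported back to the caller afterwards
	 */
	q = rxe_queue_init(rxe, &depth, wqe_size, QUEUE_TYPE_FROM_CLIENT);
	init->cap.max_recv_wr = depth;
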
static int rxe_qp_init_resp(struct rxe_dev *rxe, struct rxe_qp *qp,
			    struct ib_qp_init_attr *init, struct ib_udata *udata,
			    struct rxe_create_qp_resp __user *uresp)
{
	...
	/* if we don't finish qp create make sure queue is valid */
	skb_queue_head_init(&qp->resp_pkts);

	if (!qp->srq) {
		err = rxe_init_rq(qp, init, udata, uresp);
		...
	}

	rxe_init_task(&qp->recv_task, qp, rxe_receiver);

	qp->resp.opcode = OPCODE_NONE;
	qp->resp.msn = 0;
	...
}

/* called by the create qp verb */
int rxe_qp_from_init(struct rxe_dev *rxe, struct rxe_qp *qp, struct rxe_pd *pd,
		     struct ib_qp_init_attr *init,
		     struct rxe_create_qp_resp __user *uresp,
		     struct ib_pd *ibpd, struct ib_udata *udata)
{
	...
	qp->pd = pd;
	qp->rcq = rcq;
	qp->scq = scq;
	qp->srq = srq;
	...
	rxe_qp_init_misc(rxe, qp, init);

	err = rxe_qp_init_req(rxe, qp, init, udata, uresp);
	...
	err = rxe_qp_init_resp(rxe, qp, init, udata, uresp);
	...
	spin_lock_irqsave(&qp->state_lock, flags);
	qp->attr.qp_state = IB_QPS_RESET;
	qp->valid = 1;
	spin_unlock_irqrestore(&qp->state_lock, flags);
	...
	rxe_queue_cleanup(qp->sq.queue);
	qp->sq.queue = NULL;
	...
	qp->pd = NULL;
	qp->rcq = NULL;
	qp->scq = NULL;
	qp->srq = NULL;
	...
}

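The trailing hits are the unwind path; a sketch of the likely label
structure (label names and the rxe_put() calls for references taken at
entry are assumptions, inferred from the NULL-out pattern above):

	err2:	/* resp init failed: undo req init's send queue */
		rxe_queue_cleanup(qp->sq.queue);
		qp->sq.queue = NULL;
	err1:	/* drop the pointers and references taken at entry */
		qp->pd = NULL;
		qp->rcq = NULL;
		qp->scq = NULL;
		qp->srq = NULL;

		if (srq)
			rxe_put(srq);
		rxe_put(scq);
		rxe_put(rcq);
		rxe_put(pd);

		return err;
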
/* called by the query qp verb */
int rxe_qp_to_init(struct rxe_qp *qp, struct ib_qp_init_attr *init)
{
	init->event_handler = qp->ibqp.event_handler;
	init->qp_context = qp->ibqp.qp_context;
	init->send_cq = qp->ibqp.send_cq;
	init->recv_cq = qp->ibqp.recv_cq;
	init->srq = qp->ibqp.srq;

	init->cap.max_send_wr = qp->sq.max_wr;
	init->cap.max_send_sge = qp->sq.max_sge;
	init->cap.max_inline_data = qp->sq.max_inline;

	if (!qp->srq) {
		init->cap.max_recv_wr = qp->rq.max_wr;
		init->cap.max_recv_sge = qp->rq.max_sge;
	}

	init->sq_sig_type = qp->sq_sig_type;

	init->qp_type = qp->ibqp.qp_type;
	...
}

int rxe_qp_chk_attr(struct rxe_dev *rxe, struct rxe_qp *qp,
		    struct ib_qp_attr *attr, int mask)
{
	...
	rxe_dbg_qp(qp, "invalid port %d\n", attr->port_num);
	...
	if (mask & IB_QP_CAP && rxe_qp_chk_cap(rxe, &attr->cap, !!qp->srq))
		...
	if (!(qp_type(qp) == IB_QPT_RC || qp_type(qp) == IB_QPT_UC))
		...
	if (mask & IB_QP_AV && rxe_av_chk_attr(qp, &attr->ah_attr))
		...
	if (rxe_av_chk_attr(qp, &attr->alt_ah_attr))
		...
	rxe_dbg_qp(qp, "invalid alt port %d\n", attr->alt_port_num);
	...
	rxe_dbg_qp(qp, "invalid alt timeout %d > 31\n", ...);
	...
	rxe_dbg_qp(qp, "invalid mtu (%d) > (%d)\n", ...);
	...
	rxe_dbg_qp(qp, "invalid max_rd_atomic %d > %d\n", ...);
	...
	rxe_dbg_qp(qp, "invalid timeout %d > 31\n", ...);
	...
}

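Each of the checks above fires only when its bit is set in the modify mask;
a sketch of the guard pattern for the last one (the error label is an
assumption):

	if (mask & IB_QP_TIMEOUT) {
		if (attr->timeout > 31) {
			rxe_dbg_qp(qp, "invalid timeout %d > 31\n",
				   attr->timeout);
			goto err1;	/* assumed error label */
		}
	}
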
/* move the qp to the reset state */
static void rxe_qp_reset(struct rxe_qp *qp)
{
	rxe_disable_task(&qp->recv_task);
	rxe_disable_task(&qp->send_task);
	...
	rxe_sender(qp);
	rxe_receiver(qp);

	if (qp->rq.queue)
		rxe_queue_reset(qp->rq.queue);
	if (qp->sq.queue)
		rxe_queue_reset(qp->sq.queue);
	...
	atomic_set(&qp->ssn, 0);
	qp->req.opcode = -1;
	qp->req.need_retry = 0;
	qp->req.wait_for_rnr_timer = 0;
	qp->req.noack_pkts = 0;
	qp->resp.msn = 0;
	qp->resp.opcode = -1;
	qp->resp.drop_msg = 0;
	qp->resp.goto_error = 0;
	qp->resp.sent_psn_nak = 0;

	if (qp->resp.mr) {
		rxe_put(qp->resp.mr);
		qp->resp.mr = NULL;
	}

	cleanup_rd_atomic_resources(qp);
	...
	rxe_enable_task(&qp->recv_task);
	rxe_enable_task(&qp->send_task);
}

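For reference, a consumer reaches this path through the standard verbs API;
a minimal sketch, assuming a kernel ULP holding an ib_qp pointer (ibqp):

	struct ib_qp_attr attr = {
		.qp_state = IB_QPS_RESET,
	};
	int err;

	/* transition the QP to RESET through the verbs layer; for rxe
	 * this path reaches rxe_qp_reset() via the modify_qp hook
	 */
	err = ib_modify_qp(ibqp, &attr, IB_QP_STATE);
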
/* move the qp to the error state */
void rxe_qp_error(struct rxe_qp *qp)
{
	...
	spin_lock_irqsave(&qp->state_lock, flags);
	qp->attr.qp_state = IB_QPS_ERR;
	...
	rxe_sched_task(&qp->recv_task);
	rxe_sched_task(&qp->send_task);
	spin_unlock_irqrestore(&qp->state_lock, flags);
}

static void rxe_qp_sqd(struct rxe_qp *qp, struct ib_qp_attr *attr,
		       int mask)
{
	...
	spin_lock_irqsave(&qp->state_lock, flags);
	qp->attr.sq_draining = 1;
	rxe_sched_task(&qp->send_task);
	spin_unlock_irqrestore(&qp->state_lock, flags);
}

/* caller should hold qp->state_lock */
static int __qp_chk_state(struct rxe_qp *qp, struct ib_qp_attr *attr,
			  int mask)
{
	...
	cur_state = (mask & IB_QP_CUR_STATE) ?
			attr->cur_qp_state : qp->attr.qp_state;
	...
	if (!ib_modify_qp_is_ok(cur_state, new_state, qp_type(qp), mask))
		...
	if (qp->attr.sq_draining && new_state != IB_QPS_ERR)
		...
}

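ib_modify_qp_is_ok() is the ib_core state-machine check; a sketch of
resolving both states before the call (the new_state line mirrors the
cur_state line above and is assumed here):

	enum ib_qp_state cur_state, new_state;

	cur_state = (mask & IB_QP_CUR_STATE) ?
			attr->cur_qp_state : qp->attr.qp_state;
	new_state = (mask & IB_QP_STATE) ?
			attr->qp_state : cur_state;

	/* validates the cur_state -> new_state transition and the
	 * attribute mask for this QP type against the IBA spec tables
	 */
	if (!ib_modify_qp_is_ok(cur_state, new_state, qp_type(qp), mask))
		return -EINVAL;
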
/* called by the modify qp verb */
int rxe_qp_from_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask,
		     struct ib_udata *udata)
{
	...
	qp->attr.cur_qp_state = attr->qp_state;
	...
	spin_lock_irqsave(&qp->state_lock, flags);
	err = __qp_chk_state(qp, attr, mask);
	if (!err) {
		qp->attr.qp_state = attr->qp_state;
		rxe_dbg_qp(qp, "state -> %s\n", ...);
	}
	spin_unlock_irqrestore(&qp->state_lock, flags);
	...
	rxe_qp_reset(qp);
	...
	rxe_qp_sqd(qp, attr, mask);
	...
	rxe_qp_error(qp);
	...
	qp->attr.max_rd_atomic = max_rd_atomic;
	atomic_set(&qp->req.rd_atomic, max_rd_atomic);
	...
	qp->attr.max_dest_rd_atomic = max_dest_rd_atomic;

	free_rd_atomic_resources(qp);

	err = alloc_rd_atomic_resources(qp, max_dest_rd_atomic);
	...
	qp->attr.en_sqd_async_notify = attr->en_sqd_async_notify;
	...
	qp->attr.qp_access_flags = attr->qp_access_flags;
	...
	qp->attr.pkey_index = attr->pkey_index;
	...
	qp->attr.port_num = attr->port_num;
	...
	qp->attr.qkey = attr->qkey;
	...
	rxe_init_av(&attr->ah_attr, &qp->pri_av);
	...
	rxe_init_av(&attr->alt_ah_attr, &qp->alt_av);
	qp->attr.alt_port_num = attr->alt_port_num;
	qp->attr.alt_pkey_index = attr->alt_pkey_index;
	qp->attr.alt_timeout = attr->alt_timeout;
	...
	qp->attr.path_mtu = attr->path_mtu;
	qp->mtu = ib_mtu_enum_to_int(attr->path_mtu);
	...
	qp->attr.timeout = attr->timeout;
	...
	qp->qp_timeout_jiffies = 0;
	...
	qp->qp_timeout_jiffies = j ? j : 1;
	...
	qp->attr.retry_cnt = attr->retry_cnt;
	qp->comp.retry_cnt = attr->retry_cnt;
	rxe_dbg_qp(qp, "set retry count = %d\n", attr->retry_cnt);
	...
	qp->attr.rnr_retry = attr->rnr_retry;
	qp->comp.rnr_retry = attr->rnr_retry;
	rxe_dbg_qp(qp, "set rnr retry count = %d\n", attr->rnr_retry);
	...
	qp->attr.rq_psn = (attr->rq_psn & BTH_PSN_MASK);
	qp->resp.psn = qp->attr.rq_psn;
	rxe_dbg_qp(qp, "set resp psn = 0x%x\n", qp->resp.psn);
	...
	qp->attr.min_rnr_timer = attr->min_rnr_timer;
	rxe_dbg_qp(qp, "set min rnr timer = 0x%x\n", ...);
	...
	qp->attr.sq_psn = (attr->sq_psn & BTH_PSN_MASK);
	qp->req.psn = qp->attr.sq_psn;
	qp->comp.psn = qp->attr.sq_psn;
	rxe_dbg_qp(qp, "set req psn = 0x%x\n", qp->req.psn);
	...
	qp->attr.path_mig_state = attr->path_mig_state;
	...
	qp->attr.dest_qp_num = attr->dest_qp_num;
	...
}

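Every cluster above sits behind a mask test that the filter hid; a sketch of
the guard pattern for two of the attributes (the masks are the standard
IB_QP_* attribute bits from the verbs API):

	if (mask & IB_QP_RETRY_CNT) {
		qp->attr.retry_cnt = attr->retry_cnt;
		qp->comp.retry_cnt = attr->retry_cnt;
		rxe_dbg_qp(qp, "set retry count = %d\n", attr->retry_cnt);
	}

	if (mask & IB_QP_SQ_PSN) {
		/* BTH_PSN_MASK (0x00ffffff) keeps the 24-bit PSN in range */
		qp->attr.sq_psn = (attr->sq_psn & BTH_PSN_MASK);
		qp->req.psn = qp->attr.sq_psn;
		qp->comp.psn = qp->attr.sq_psn;
	}
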
/* called by the query qp verb */
int rxe_qp_to_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask)
{
	...
	*attr = qp->attr;

	attr->rq_psn = qp->resp.psn;
	attr->sq_psn = qp->req.psn;

	attr->cap.max_send_wr = qp->sq.max_wr;
	attr->cap.max_send_sge = qp->sq.max_sge;
	attr->cap.max_inline_data = qp->sq.max_inline;

	if (!qp->srq) {
		attr->cap.max_recv_wr = qp->rq.max_wr;
		attr->cap.max_recv_sge = qp->rq.max_sge;
	}

	rxe_av_to_attr(&qp->pri_av, &attr->ah_attr);
	rxe_av_to_attr(&qp->alt_av, &attr->alt_ah_attr);
	...
	spin_lock_irqsave(&qp->state_lock, flags);
	attr->cur_qp_state = qp_state(qp);
	if (qp->attr.sq_draining) {
		spin_unlock_irqrestore(&qp->state_lock, flags);
		...
	} else {
		spin_unlock_irqrestore(&qp->state_lock, flags);
	}
	...
}

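The hidden statement in the sq_draining branch is likely a scheduler yield,
since applications typically poll this state through ib_query_qp(). A
sketch assuming that behavior:

	if (qp->attr.sq_draining) {
		spin_unlock_irqrestore(&qp->state_lock, flags);
		/* callers spin on query until the drain finishes, so
		 * give up the processor between polls
		 */
		cond_resched();
	} else {
		spin_unlock_irqrestore(&qp->state_lock, flags);
	}
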
int rxe_qp_chk_destroy(struct rxe_qp *qp)
{
	/* An attempt to destroy a QP while attached to a mcast group
	 * will fail immediately.
	 */
	if (atomic_read(&qp->mcg_num)) {
		rxe_dbg_qp(qp, "Attempt to destroy while attached to multicast group\n");
		...
	}
	...
}

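A plausible completion, assuming the conventional errno for refusing to tear
down a busy object:

	if (atomic_read(&qp->mcg_num)) {
		rxe_dbg_qp(qp, "Attempt to destroy while attached to multicast group\n");
		return -EBUSY;	/* assumed: destroy refused while attached */
	}

	return 0;
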
/* called when the last reference to the qp is dropped */
static void rxe_qp_do_cleanup(struct work_struct *work)
{
	struct rxe_qp *qp = container_of(work, typeof(*qp), cleanup_work.work);
	...
	spin_lock_irqsave(&qp->state_lock, flags);
	qp->valid = 0;
	spin_unlock_irqrestore(&qp->state_lock, flags);
	qp->qp_timeout_jiffies = 0;

	if (qp_type(qp) == IB_QPT_RC) {
		del_timer_sync(&qp->retrans_timer);
		del_timer_sync(&qp->rnr_nak_timer);
	}

	if (qp->recv_task.func)
		rxe_cleanup_task(&qp->recv_task);

	if (qp->send_task.func)
		rxe_cleanup_task(&qp->send_task);
	...
	rxe_sender(qp);
	rxe_receiver(qp);

	if (qp->sq.queue)
		rxe_queue_cleanup(qp->sq.queue);

	if (qp->srq)
		rxe_put(qp->srq);

	if (qp->rq.queue)
		rxe_queue_cleanup(qp->rq.queue);

	if (qp->scq) {
		atomic_dec(&qp->scq->num_wq);
		rxe_put(qp->scq);
	}

	if (qp->rcq) {
		atomic_dec(&qp->rcq->num_wq);
		rxe_put(qp->rcq);
	}

	if (qp->pd)
		rxe_put(qp->pd);

	if (qp->resp.mr)
		rxe_put(qp->resp.mr);

	free_rd_atomic_resources(qp);

	if (qp->sk) {
		if (qp_type(qp) == IB_QPT_RC)
			sk_dst_reset(qp->sk->sk);

		kernel_sock_shutdown(qp->sk, SHUT_RDWR);
		sock_release(qp->sk);
	}
}

/* called when the last reference to the qp is dropped */
void rxe_qp_cleanup(struct rxe_pool_elem *elem)
{
	struct rxe_qp *qp = container_of(elem, typeof(*qp), elem);

	execute_in_process_context(rxe_qp_do_cleanup, &qp->cleanup_work);
}

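execute_in_process_context() runs the callback immediately when the caller
is already in process context and otherwise defers it to a workqueue, which
is why cleanup_work embeds a struct execute_work. A sketch of the pattern in
isolation (the container type and names are illustrative; the API calls are
the real kernel ones):

	#include <linux/workqueue.h>

	struct my_obj {					/* illustrative container */
		struct execute_work cleanup_work;	/* embeds a work_struct */
	};

	static void my_cleanup(struct work_struct *work)
	{
		struct my_obj *obj = container_of(work, struct my_obj,
						  cleanup_work.work);
		/* ... release resources that may sleep ... */
	}

	static void my_obj_put_last(struct my_obj *obj)
	{
		/* runs my_cleanup() inline if we are in process context,
		 * otherwise schedules it on the system workqueue
		 */
		execute_in_process_context(my_cleanup, &obj->cleanup_work);
	}
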