Lines matching full:ep (identifier search over the cxgb4 iWARP connection manager, drivers/infiniband/hw/cxgb4/cm.c)
144 static void connect_reply_upcall(struct c4iw_ep *ep, int status);
163 static void deref_qp(struct c4iw_ep *ep) in deref_qp() argument
165 c4iw_qp_rem_ref(&ep->com.qp->ibqp); in deref_qp()
166 clear_bit(QP_REFERENCED, &ep->com.flags); in deref_qp()
167 set_bit(QP_DEREFED, &ep->com.history); in deref_qp()
170 static void ref_qp(struct c4iw_ep *ep) in ref_qp() argument
172 set_bit(QP_REFERENCED, &ep->com.flags); in ref_qp()
173 set_bit(QP_REFED, &ep->com.history); in ref_qp()
174 c4iw_qp_add_ref(&ep->com.qp->ibqp); in ref_qp()
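ref_qp() and deref_qp() pair a hard reference on the QP with a QP_REFERENCED flag and a history bit, so the release path (_c4iw_free_ep, below) can tell whether a QP reference is still outstanding and the debug history shows when it was taken and dropped. A minimal userspace sketch of the same idiom; the names and layout here are hypothetical, not the driver's:

    #include <stdatomic.h>

    #define QP_REFERENCED (1u << 0)   /* "a QP ref is held" flag */

    struct qp   { atomic_int refcnt; };
    struct conn { struct qp *qp; atomic_uint flags; };

    static void conn_ref_qp(struct conn *c)
    {
        atomic_fetch_or(&c->flags, QP_REFERENCED);  /* record intent first */
        atomic_fetch_add(&c->qp->refcnt, 1);        /* then take the ref   */
    }

    static void conn_deref_qp(struct conn *c)
    {
        atomic_fetch_sub(&c->qp->refcnt, 1);        /* drop the ref        */
        atomic_fetch_and(&c->flags, ~QP_REFERENCED);
    }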
177 static void start_ep_timer(struct c4iw_ep *ep) in start_ep_timer() argument
179 pr_debug("ep %p\n", ep); in start_ep_timer()
180 if (timer_pending(&ep->timer)) { in start_ep_timer()
181 pr_err("%s timer already started! ep %p\n", in start_ep_timer()
182 __func__, ep); in start_ep_timer()
185 clear_bit(TIMEOUT, &ep->com.flags); in start_ep_timer()
186 c4iw_get_ep(&ep->com); in start_ep_timer()
187 ep->timer.expires = jiffies + ep_timeout_secs * HZ; in start_ep_timer()
188 add_timer(&ep->timer); in start_ep_timer()
191 static int stop_ep_timer(struct c4iw_ep *ep) in stop_ep_timer() argument
193 pr_debug("ep %p stopping\n", ep); in stop_ep_timer()
194 del_timer_sync(&ep->timer); in stop_ep_timer()
195 if (!test_and_set_bit(TIMEOUT, &ep->com.flags)) { in stop_ep_timer()
196 c4iw_put_ep(&ep->com); in stop_ep_timer()
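start_ep_timer() takes an extra endpoint reference owned by the pending timer; stop_ep_timer() cancels the timer and uses test_and_set_bit(TIMEOUT) so that exactly one of the canceller and the expired-timer handler drops that reference, whichever gets there first. A sketch of that drop-exactly-once idiom (hypothetical names, C11 atomics):

    #include <stdatomic.h>
    #include <stdbool.h>

    #define TIMEOUT_BIT 1u

    struct ep { atomic_uint flags; };

    /* true iff this caller won the race and must put the timer's ref */
    static bool consume_timer_ref(struct ep *ep)
    {
        unsigned old = atomic_fetch_or(&ep->flags, TIMEOUT_BIT);
        return (old & TIMEOUT_BIT) == 0;
    }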
248 static void set_emss(struct c4iw_ep *ep, u16 opt) in set_emss() argument
250 ep->emss = ep->com.dev->rdev.lldi.mtus[TCPOPT_MSS_G(opt)] - in set_emss()
251 ((AF_INET == ep->com.remote_addr.ss_family) ? in set_emss()
254 ep->mss = ep->emss; in set_emss()
256 ep->emss -= round_up(TCPOLEN_TIMESTAMP, 4); in set_emss()
257 if (ep->emss < 128) in set_emss()
258 ep->emss = 128; in set_emss()
259 if (ep->emss & 7) in set_emss()
261 TCPOPT_MSS_G(opt), ep->mss, ep->emss); in set_emss()
262 pr_debug("mss_idx %u mss %u emss=%u\n", TCPOPT_MSS_G(opt), ep->mss, in set_emss()
263 ep->emss); in set_emss()
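set_emss() derives the effective MSS from the adapter MTU-table entry selected by the peer's TCP MSS option, subtracting IP and TCP header overhead (the AF_INET test picks the IPv4 or IPv6 header size), then the rounded-up timestamp option, with a 128-byte floor. A worked example with assumed numbers, IPv4 with timestamps:

    /* hypothetical values, illustrating the arithmetic in set_emss() */
    static int effective_mss(int mtu)   /* mtu from the MSS-indexed table */
    {
        int emss = mtu - (20 + 20);     /* IPv4 (20) + TCP (20) headers   */
        emss -= 12;                     /* round_up(TCPOLEN_TIMESTAMP, 4):
                                           option is 10 bytes, rounds to 12 */
        if (emss < 128)                 /* floor, as in the listing       */
            emss = 128;
        return emss;                    /* 1500 -> 1448                   */
    }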
325 pr_debug("alloc ep %p\n", epc); in alloc_ep()
330 static void remove_ep_tid(struct c4iw_ep *ep) in remove_ep_tid() argument
334 xa_lock_irqsave(&ep->com.dev->hwtids, flags); in remove_ep_tid()
335 __xa_erase(&ep->com.dev->hwtids, ep->hwtid); in remove_ep_tid()
336 if (xa_empty(&ep->com.dev->hwtids)) in remove_ep_tid()
337 wake_up(&ep->com.dev->wait); in remove_ep_tid()
338 xa_unlock_irqrestore(&ep->com.dev->hwtids, flags); in remove_ep_tid()
341 static int insert_ep_tid(struct c4iw_ep *ep) in insert_ep_tid() argument
346 xa_lock_irqsave(&ep->com.dev->hwtids, flags); in insert_ep_tid()
347 err = __xa_insert(&ep->com.dev->hwtids, ep->hwtid, ep, GFP_KERNEL); in insert_ep_tid()
348 xa_unlock_irqrestore(&ep->com.dev->hwtids, flags); in insert_ep_tid()
354 * Atomically lookup the ep ptr given the tid and grab a reference on the ep.
358 struct c4iw_ep *ep; in get_ep_from_tid() local
362 ep = xa_load(&dev->hwtids, tid); in get_ep_from_tid()
363 if (ep) in get_ep_from_tid()
364 c4iw_get_ep(&ep->com); in get_ep_from_tid()
366 return ep; in get_ep_from_tid()
370 * Atomically lookup the ep ptr given the stid and grab a reference on the ep.
375 struct c4iw_listen_ep *ep; in get_ep_from_stid() local
379 ep = xa_load(&dev->stids, stid); in get_ep_from_stid()
380 if (ep) in get_ep_from_stid()
381 c4iw_get_ep(&ep->com); in get_ep_from_stid()
383 return ep; in get_ep_from_stid()
388 struct c4iw_ep *ep; in _c4iw_free_ep() local
390 ep = container_of(kref, struct c4iw_ep, com.kref); in _c4iw_free_ep()
391 pr_debug("ep %p state %s\n", ep, states[ep->com.state]); in _c4iw_free_ep()
392 if (test_bit(QP_REFERENCED, &ep->com.flags)) in _c4iw_free_ep()
393 deref_qp(ep); in _c4iw_free_ep()
394 if (test_bit(RELEASE_RESOURCES, &ep->com.flags)) { in _c4iw_free_ep()
395 if (ep->com.remote_addr.ss_family == AF_INET6) { in _c4iw_free_ep()
398 &ep->com.local_addr; in _c4iw_free_ep()
401 ep->com.dev->rdev.lldi.ports[0], in _c4iw_free_ep()
405 cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, ep->hwtid, in _c4iw_free_ep()
406 ep->com.local_addr.ss_family); in _c4iw_free_ep()
407 dst_release(ep->dst); in _c4iw_free_ep()
408 cxgb4_l2t_release(ep->l2t); in _c4iw_free_ep()
409 kfree_skb(ep->mpa_skb); in _c4iw_free_ep()
411 if (!skb_queue_empty(&ep->com.ep_skb_list)) in _c4iw_free_ep()
412 skb_queue_purge(&ep->com.ep_skb_list); in _c4iw_free_ep()
413 c4iw_put_wr_wait(ep->com.wr_waitp); in _c4iw_free_ep()
414 kfree(ep); in _c4iw_free_ep()
417 static void release_ep_resources(struct c4iw_ep *ep) in release_ep_resources() argument
419 set_bit(RELEASE_RESOURCES, &ep->com.flags); in release_ep_resources()
424 * we have a race where one thread finds the ep ptr just in release_ep_resources()
425 * before the other thread is freeing the ep memory. in release_ep_resources()
427 if (ep->hwtid != -1) in release_ep_resources()
428 remove_ep_tid(ep); in release_ep_resources()
429 c4iw_put_ep(&ep->com); in release_ep_resources()
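The lookup helpers above and this release path cooperate to close the race the comment describes: get_ep_from_tid()/get_ep_from_stid() only return an ep after pinning it under the xarray lock, and release_ep_resources() erases the tid mapping before dropping what may be the last reference, so a lookup can never hand out a pointer into memory that is being freed. A compilable sketch of the pattern with a hypothetical mutex-protected map:

    #include <pthread.h>
    #include <stdlib.h>

    struct ep { int refcnt; /* protected by map_lock in this toy model */ };

    static pthread_mutex_t map_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct ep *tid_map[64];          /* stand-in for the xarray */

    static struct ep *lookup_and_get(unsigned tid)
    {
        struct ep *ep;

        pthread_mutex_lock(&map_lock);
        ep = tid_map[tid];
        if (ep)
            ep->refcnt++;                   /* pin before unlocking */
        pthread_mutex_unlock(&map_lock);
        return ep;                          /* caller must put() */
    }

    static void put(struct ep *ep)
    {
        pthread_mutex_lock(&map_lock);
        int last = (--ep->refcnt == 0);
        pthread_mutex_unlock(&map_lock);
        if (last)
            free(ep);
    }

    static void release(struct ep *ep, unsigned tid)
    {
        pthread_mutex_lock(&map_lock);
        tid_map[tid] = NULL;                /* 1: no new lookups succeed */
        pthread_mutex_unlock(&map_lock);
        put(ep);                            /* 2: drop possibly-last ref */
    }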
494 struct c4iw_ep *ep; in _put_ep_safe() local
496 ep = *((struct c4iw_ep **)(skb->cb + 2 * sizeof(void *))); in _put_ep_safe()
497 release_ep_resources(ep); in _put_ep_safe()
503 struct c4iw_ep *ep; in _put_pass_ep_safe() local
505 ep = *((struct c4iw_ep **)(skb->cb + 2 * sizeof(void *))); in _put_pass_ep_safe()
506 c4iw_put_ep(&ep->parent_ep->com); in _put_pass_ep_safe()
507 release_ep_resources(ep); in _put_pass_ep_safe()
513 * _put_ep_safe() in a safe context to free the ep resources. This is needed
517 static void queue_arp_failure_cpl(struct c4iw_ep *ep, struct sk_buff *skb, in queue_arp_failure_cpl() argument
526 * Save ep in the skb->cb area, after where sched() will save the dev in queue_arp_failure_cpl()
529 *((struct c4iw_ep **)(skb->cb + 2 * sizeof(void *))) = ep; in queue_arp_failure_cpl()
530 sched(ep->com.dev, skb); in queue_arp_failure_cpl()
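queue_arp_failure_cpl() smuggles the ep pointer through the skb control-buffer scratch area, two pointer slots in (the earlier slots are reserved for what sched() stores there), so the deferred handlers _put_ep_safe()/_put_pass_ep_safe() above can recover it once the fake CPL is replayed in process context. A portable sketch of stashing a pointer in a fixed scratch area (memcpy instead of the driver's aligned cast; names hypothetical):

    #include <string.h>

    struct pkt { unsigned char cb[48]; };   /* scratch, like skb->cb */

    static void stash_ep(struct pkt *p, void *ep)
    {
        memcpy(p->cb + 2 * sizeof(void *), &ep, sizeof(ep));
    }

    static void *unstash_ep(const struct pkt *p)
    {
        void *ep;
        memcpy(&ep, p->cb + 2 * sizeof(void *), sizeof(ep));
        return ep;
    }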
536 struct c4iw_ep *ep = handle; in pass_accept_rpl_arp_failure() local
539 ep->hwtid); in pass_accept_rpl_arp_failure()
541 __state_set(&ep->com, DEAD); in pass_accept_rpl_arp_failure()
542 queue_arp_failure_cpl(ep, skb, FAKE_CPL_PASS_PUT_EP_SAFE); in pass_accept_rpl_arp_failure()
550 struct c4iw_ep *ep = handle; in act_open_req_arp_failure() local
553 connect_reply_upcall(ep, -EHOSTUNREACH); in act_open_req_arp_failure()
554 __state_set(&ep->com, DEAD); in act_open_req_arp_failure()
555 if (ep->com.remote_addr.ss_family == AF_INET6) { in act_open_req_arp_failure()
557 (struct sockaddr_in6 *)&ep->com.local_addr; in act_open_req_arp_failure()
558 cxgb4_clip_release(ep->com.dev->rdev.lldi.ports[0], in act_open_req_arp_failure()
561 xa_erase_irq(&ep->com.dev->atids, ep->atid); in act_open_req_arp_failure()
562 cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid); in act_open_req_arp_failure()
563 queue_arp_failure_cpl(ep, skb, FAKE_CPL_PUT_EP_SAFE); in act_open_req_arp_failure()
573 struct c4iw_ep *ep = handle; in abort_arp_failure() local
574 struct c4iw_rdev *rdev = &ep->com.dev->rdev; in abort_arp_failure()
582 __state_set(&ep->com, DEAD); in abort_arp_failure()
583 queue_arp_failure_cpl(ep, skb, FAKE_CPL_PUT_EP_SAFE); in abort_arp_failure()
588 static int send_flowc(struct c4iw_ep *ep) in send_flowc() argument
591 struct sk_buff *skb = skb_dequeue(&ep->com.ep_skb_list); in send_flowc()
592 u16 vlan = ep->l2t->vlan; in send_flowc()
614 FW_WR_FLOWID_V(ep->hwtid)); in send_flowc()
618 (ep->com.dev->rdev.lldi.pf)); in send_flowc()
620 flowc->mnemval[1].val = cpu_to_be32(ep->tx_chan); in send_flowc()
622 flowc->mnemval[2].val = cpu_to_be32(ep->tx_chan); in send_flowc()
624 flowc->mnemval[3].val = cpu_to_be32(ep->rss_qid); in send_flowc()
626 flowc->mnemval[4].val = cpu_to_be32(ep->snd_seq); in send_flowc()
628 flowc->mnemval[5].val = cpu_to_be32(ep->rcv_seq); in send_flowc()
630 flowc->mnemval[6].val = cpu_to_be32(ep->snd_win); in send_flowc()
632 flowc->mnemval[7].val = cpu_to_be32(ep->emss); in send_flowc()
634 flowc->mnemval[8].val = cpu_to_be32(ep->snd_wscale); in send_flowc()
642 set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx); in send_flowc()
643 return c4iw_ofld_send(&ep->com.dev->rdev, skb); in send_flowc()
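send_flowc() programs per-flow state into the hardware as a FLOWC work request whose body is a list of (mnemonic, value) pairs: PF/VF, TX channel, port, ingress queue, send/receive sequence numbers, send buffer, MSS, and window scale. A sketch of that TLV-style fill; the mnemonic constants and struct layout here are assumptions, not the firmware ABI:

    #include <stdint.h>

    enum { MNEM_PFNVFN, MNEM_CH, MNEM_PORT, MNEM_IQID,
           MNEM_SNDNXT, MNEM_RCVNXT, MNEM_SNDBUF, MNEM_MSS };

    struct mnemval { uint8_t mnemonic; uint32_t val; };

    static int fill_flowc(struct mnemval *mv,
                          uint32_t chan, uint32_t iqid,
                          uint32_t snd_nxt, uint32_t rcv_nxt,
                          uint32_t sndbuf, uint32_t mss)
    {
        int n = 0;
        mv[n].mnemonic = MNEM_CH;     mv[n++].val = chan;
        mv[n].mnemonic = MNEM_IQID;   mv[n++].val = iqid;
        mv[n].mnemonic = MNEM_SNDNXT; mv[n++].val = snd_nxt;
        mv[n].mnemonic = MNEM_RCVNXT; mv[n++].val = rcv_nxt;
        mv[n].mnemonic = MNEM_SNDBUF; mv[n++].val = sndbuf;
        mv[n].mnemonic = MNEM_MSS;    mv[n++].val = mss;
        return n;    /* number of pairs placed in the WR */
    }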
646 static int send_halfclose(struct c4iw_ep *ep) in send_halfclose() argument
648 struct sk_buff *skb = skb_dequeue(&ep->com.ep_skb_list); in send_halfclose()
651 pr_debug("ep %p tid %u\n", ep, ep->hwtid); in send_halfclose()
655 cxgb_mk_close_con_req(skb, wrlen, ep->hwtid, ep->txq_idx, in send_halfclose()
658 return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); in send_halfclose()
661 static void read_tcb(struct c4iw_ep *ep) in read_tcb() argument
671 set_wr_txq(skb, CPL_PRIORITY_CONTROL, ep->ctrlq_idx); in read_tcb()
674 INIT_TP_WR(req, ep->hwtid); in read_tcb()
675 OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_GET_TCB, ep->hwtid)); in read_tcb()
676 req->reply_ctrl = htons(REPLY_CHAN_V(0) | QUEUENO_V(ep->rss_qid)); in read_tcb()
679 * keep a ref on the ep so the tcb is not unlocked before this in read_tcb()
682 c4iw_get_ep(&ep->com); in read_tcb()
683 if (WARN_ON(c4iw_ofld_send(&ep->com.dev->rdev, skb))) in read_tcb()
684 c4iw_put_ep(&ep->com); in read_tcb()
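read_tcb() issues an asynchronous CPL_GET_TCB and must keep the endpoint alive until the reply arrives, so it takes a reference before sending and drops it again only if the send itself fails; otherwise the reply handler owns the put. A sketch of holding a ref across an async submit (hypothetical helpers):

    struct ep;
    void ep_get(struct ep *);
    void ep_put(struct ep *);
    int  hw_send(struct ep *);     /* completes later via a reply handler */

    static int submit_async(struct ep *ep)
    {
        ep_get(ep);                /* reply handler will ep_put()         */
        if (hw_send(ep) != 0) {
            ep_put(ep);            /* send failed: reply will never run   */
            return -1;
        }
        return 0;
    }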
687 static int send_abort_req(struct c4iw_ep *ep) in send_abort_req() argument
690 struct sk_buff *req_skb = skb_dequeue(&ep->com.ep_skb_list); in send_abort_req()
692 pr_debug("ep %p tid %u\n", ep, ep->hwtid); in send_abort_req()
696 cxgb_mk_abort_req(req_skb, wrlen, ep->hwtid, ep->txq_idx, in send_abort_req()
697 ep, abort_arp_failure); in send_abort_req()
699 return c4iw_l2t_send(&ep->com.dev->rdev, req_skb, ep->l2t); in send_abort_req()
702 static int send_abort(struct c4iw_ep *ep) in send_abort() argument
704 if (!ep->com.qp || !ep->com.qp->srq) { in send_abort()
705 send_abort_req(ep); in send_abort()
708 set_bit(ABORT_REQ_IN_PROGRESS, &ep->com.flags); in send_abort()
709 read_tcb(ep); in send_abort()
713 static int send_connect(struct c4iw_ep *ep) in send_connect() argument
728 &ep->com.local_addr; in send_connect()
730 &ep->com.remote_addr; in send_connect()
732 &ep->com.local_addr; in send_connect()
734 &ep->com.remote_addr; in send_connect()
736 enum chip_type adapter_type = ep->com.dev->rdev.lldi.adapter_type; in send_connect()
741 netdev = ep->com.dev->rdev.lldi.ports[0]; in send_connect()
762 wrlen = (ep->com.remote_addr.ss_family == AF_INET) ? in send_connect()
766 pr_debug("ep %p atid %u\n", ep, ep->atid); in send_connect()
773 set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->ctrlq_idx); in send_connect()
775 cxgb_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx, in send_connect()
777 (ep->com.remote_addr.ss_family == AF_INET) ? 0 : 1); in send_connect()
784 win = ep->rcv_win >> 10; in send_connect()
793 L2T_IDX_V(ep->l2t->idx) | in send_connect()
794 TX_CHAN_V(ep->tx_chan) | in send_connect()
795 SMAC_SEL_V(ep->smac_idx) | in send_connect()
796 DSCP_V(ep->tos >> 2) | in send_connect()
801 RSS_QUEUE_VALID_F | RSS_QUEUE_V(ep->rss_qid); in send_connect()
817 params = cxgb4_select_ntuple(netdev, ep->l2t); in send_connect()
819 if (ep->com.remote_addr.ss_family == AF_INET6) in send_connect()
820 cxgb4_clip_get(ep->com.dev->rdev.lldi.ports[0], in send_connect()
823 t4_set_arp_err_handler(skb, ep, act_open_req_arp_failure); in send_connect()
825 if (ep->com.remote_addr.ss_family == AF_INET) { in send_connect()
850 ((ep->rss_qid<<14) | ep->atid))); in send_connect()
857 if (is_t4(ep->com.dev->rdev.lldi.adapter_type)) { in send_connect()
861 if (is_t5(ep->com.dev->rdev.lldi.adapter_type)) { in send_connect()
900 ((ep->rss_qid<<14)|ep->atid))); in send_connect()
909 if (is_t4(ep->com.dev->rdev.lldi.adapter_type)) { in send_connect()
911 ep->l2t)); in send_connect()
914 if (is_t5(ep->com.dev->rdev.lldi.adapter_type)) { in send_connect()
931 set_bit(ACT_OPEN_REQ, &ep->com.history); in send_connect()
932 ret = c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); in send_connect()
934 if (ret && ep->com.remote_addr.ss_family == AF_INET6) in send_connect()
935 cxgb4_clip_release(ep->com.dev->rdev.lldi.ports[0], in send_connect()
940 static int send_mpa_req(struct c4iw_ep *ep, struct sk_buff *skb, in send_mpa_req() argument
948 pr_debug("ep %p tid %u pd_len %d\n", in send_mpa_req()
949 ep, ep->hwtid, ep->plen); in send_mpa_req()
951 mpalen = sizeof(*mpa) + ep->plen; in send_mpa_req()
957 connect_reply_upcall(ep, -ENOMEM); in send_mpa_req()
960 set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx); in send_mpa_req()
968 FW_WR_FLOWID_V(ep->hwtid) | in send_mpa_req()
983 ep->mpa_attr.recv_marker_enabled = 1; in send_mpa_req()
985 ep->mpa_attr.recv_marker_enabled = 0; in send_mpa_req()
990 mpa->private_data_size = htons(ep->plen); in send_mpa_req()
993 ep->tried_with_mpa_v1 = 1; in send_mpa_req()
994 ep->retry_with_mpa_v1 = 0; in send_mpa_req()
1001 pr_debug("initiator ird %u ord %u\n", ep->ird, in send_mpa_req()
1002 ep->ord); in send_mpa_req()
1003 mpa_v2_params.ird = htons((u16)ep->ird); in send_mpa_req()
1004 mpa_v2_params.ord = htons((u16)ep->ord); in send_mpa_req()
1018 if (ep->plen) in send_mpa_req()
1021 ep->mpa_pkt + sizeof(*mpa), ep->plen); in send_mpa_req()
1023 if (ep->plen) in send_mpa_req()
1025 ep->mpa_pkt + sizeof(*mpa), ep->plen); in send_mpa_req()
1034 ep->mpa_skb = skb; in send_mpa_req()
1035 ret = c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); in send_mpa_req()
1038 start_ep_timer(ep); in send_mpa_req()
1039 __state_set(&ep->com, MPA_REQ_SENT); in send_mpa_req()
1040 ep->mpa_attr.initiator = 1; in send_mpa_req()
1041 ep->snd_seq += mpalen; in send_mpa_req()
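send_mpa_req() builds a streaming-mode TX work request whose payload is the MPA start frame: a fixed header, then (for MPA v2) the ird/ord connection parameters, then the caller's private data of ep->plen bytes; ep->snd_seq advances by exactly the bytes queued so later credits and acks line up. A sketch of the wire layout with field names taken from this listing; exact sizes and ordering are assumptions, the driver's header is authoritative:

    #include <stdint.h>

    struct mpa_frame_sketch {
        uint8_t  key[16];            /* MPA ID Req/Rep frame key         */
        uint8_t  flags;              /* CRC, MARKERS, (v2) enhanced bits */
        uint8_t  revision;           /* 1, or 2 for enhanced RDMA conn   */
        uint16_t private_data_size;  /* big-endian, htons(ep->plen)      */
        /* MPA v2 only, counted inside private_data_size:
         *   uint16_t ird;           big-endian, flag bits or'ed in
         *   uint16_t ord;           big-endian, flag bits or'ed in
         * then ep->plen bytes of caller private data                    */
    };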
1045 static int send_mpa_reject(struct c4iw_ep *ep, const void *pdata, u8 plen) in send_mpa_reject() argument
1053 pr_debug("ep %p tid %u pd_len %d\n", in send_mpa_reject()
1054 ep, ep->hwtid, ep->plen); in send_mpa_reject()
1057 if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) in send_mpa_reject()
1066 set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx); in send_mpa_reject()
1074 FW_WR_FLOWID_V(ep->hwtid) | in send_mpa_reject()
1085 mpa->revision = ep->mpa_attr.version; in send_mpa_reject()
1088 if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) { in send_mpa_reject()
1093 mpa_v2_params.ird = htons(((u16)ep->ird) | in send_mpa_reject()
1096 mpa_v2_params.ord = htons(((u16)ep->ord) | (peer2peer ? in send_mpa_reject()
1105 if (ep->plen) in send_mpa_reject()
1118 set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx); in send_mpa_reject()
1120 ep->mpa_skb = skb; in send_mpa_reject()
1121 ep->snd_seq += mpalen; in send_mpa_reject()
1122 return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); in send_mpa_reject()
1125 static int send_mpa_reply(struct c4iw_ep *ep, const void *pdata, u8 plen) in send_mpa_reply() argument
1133 pr_debug("ep %p tid %u pd_len %d\n", in send_mpa_reply()
1134 ep, ep->hwtid, ep->plen); in send_mpa_reply()
1137 if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) in send_mpa_reply()
1146 set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx); in send_mpa_reply()
1154 FW_WR_FLOWID_V(ep->hwtid) | in send_mpa_reply()
1165 if (ep->mpa_attr.crc_enabled) in send_mpa_reply()
1167 if (ep->mpa_attr.recv_marker_enabled) in send_mpa_reply()
1169 mpa->revision = ep->mpa_attr.version; in send_mpa_reply()
1172 if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) { in send_mpa_reply()
1177 mpa_v2_params.ird = htons((u16)ep->ird); in send_mpa_reply()
1178 mpa_v2_params.ord = htons((u16)ep->ord); in send_mpa_reply()
1179 if (peer2peer && (ep->mpa_attr.p2p_type != in send_mpa_reply()
1194 if (ep->plen) in send_mpa_reply()
1208 ep->mpa_skb = skb; in send_mpa_reply()
1209 __state_set(&ep->com, MPA_REP_SENT); in send_mpa_reply()
1210 ep->snd_seq += mpalen; in send_mpa_reply()
1211 return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); in send_mpa_reply()
1216 struct c4iw_ep *ep; in act_establish() local
1224 ep = lookup_atid(t, atid); in act_establish()
1225 if (!ep) in act_establish()
1228 pr_debug("ep %p tid %u snd_isn %u rcv_isn %u\n", ep, tid, in act_establish()
1231 mutex_lock(&ep->com.mutex); in act_establish()
1232 dst_confirm(ep->dst); in act_establish()
1235 ep->hwtid = tid; in act_establish()
1236 cxgb4_insert_tid(t, ep, tid, ep->com.local_addr.ss_family); in act_establish()
1237 insert_ep_tid(ep); in act_establish()
1239 ep->snd_seq = be32_to_cpu(req->snd_isn); in act_establish()
1240 ep->rcv_seq = be32_to_cpu(req->rcv_isn); in act_establish()
1241 ep->snd_wscale = TCPOPT_SND_WSCALE_G(tcp_opt); in act_establish()
1243 set_emss(ep, tcp_opt); in act_establish()
1246 xa_erase_irq(&ep->com.dev->atids, atid); in act_establish()
1248 set_bit(ACT_ESTAB, &ep->com.history); in act_establish()
1251 ret = send_flowc(ep); in act_establish()
1254 if (ep->retry_with_mpa_v1) in act_establish()
1255 ret = send_mpa_req(ep, skb, 1); in act_establish()
1257 ret = send_mpa_req(ep, skb, mpa_rev); in act_establish()
1260 mutex_unlock(&ep->com.mutex); in act_establish()
1263 mutex_unlock(&ep->com.mutex); in act_establish()
1264 connect_reply_upcall(ep, -ENOMEM); in act_establish()
1265 c4iw_ep_disconnect(ep, 0, GFP_KERNEL); in act_establish()
1269 static void close_complete_upcall(struct c4iw_ep *ep, int status) in close_complete_upcall() argument
1273 pr_debug("ep %p tid %u\n", ep, ep->hwtid); in close_complete_upcall()
1277 if (ep->com.cm_id) { in close_complete_upcall()
1278 pr_debug("close complete delivered ep %p cm_id %p tid %u\n", in close_complete_upcall()
1279 ep, ep->com.cm_id, ep->hwtid); in close_complete_upcall()
1280 ep->com.cm_id->event_handler(ep->com.cm_id, &event); in close_complete_upcall()
1281 deref_cm_id(&ep->com); in close_complete_upcall()
1282 set_bit(CLOSE_UPCALL, &ep->com.history); in close_complete_upcall()
1286 static void peer_close_upcall(struct c4iw_ep *ep) in peer_close_upcall() argument
1290 pr_debug("ep %p tid %u\n", ep, ep->hwtid); in peer_close_upcall()
1293 if (ep->com.cm_id) { in peer_close_upcall()
1294 pr_debug("peer close delivered ep %p cm_id %p tid %u\n", in peer_close_upcall()
1295 ep, ep->com.cm_id, ep->hwtid); in peer_close_upcall()
1296 ep->com.cm_id->event_handler(ep->com.cm_id, &event); in peer_close_upcall()
1297 set_bit(DISCONN_UPCALL, &ep->com.history); in peer_close_upcall()
1301 static void peer_abort_upcall(struct c4iw_ep *ep) in peer_abort_upcall() argument
1305 pr_debug("ep %p tid %u\n", ep, ep->hwtid); in peer_abort_upcall()
1309 if (ep->com.cm_id) { in peer_abort_upcall()
1310 pr_debug("abort delivered ep %p cm_id %p tid %u\n", ep, in peer_abort_upcall()
1311 ep->com.cm_id, ep->hwtid); in peer_abort_upcall()
1312 ep->com.cm_id->event_handler(ep->com.cm_id, &event); in peer_abort_upcall()
1313 deref_cm_id(&ep->com); in peer_abort_upcall()
1314 set_bit(ABORT_UPCALL, &ep->com.history); in peer_abort_upcall()
1318 static void connect_reply_upcall(struct c4iw_ep *ep, int status) in connect_reply_upcall() argument
1322 pr_debug("ep %p tid %u status %d\n", in connect_reply_upcall()
1323 ep, ep->hwtid, status); in connect_reply_upcall()
1327 memcpy(&event.local_addr, &ep->com.local_addr, in connect_reply_upcall()
1328 sizeof(ep->com.local_addr)); in connect_reply_upcall()
1329 memcpy(&event.remote_addr, &ep->com.remote_addr, in connect_reply_upcall()
1330 sizeof(ep->com.remote_addr)); in connect_reply_upcall()
1333 if (!ep->tried_with_mpa_v1) { in connect_reply_upcall()
1335 event.ord = ep->ird; in connect_reply_upcall()
1336 event.ird = ep->ord; in connect_reply_upcall()
1337 event.private_data_len = ep->plen - in connect_reply_upcall()
1339 event.private_data = ep->mpa_pkt + in connect_reply_upcall()
1344 event.ord = cur_max_read_depth(ep->com.dev); in connect_reply_upcall()
1345 event.ird = cur_max_read_depth(ep->com.dev); in connect_reply_upcall()
1346 event.private_data_len = ep->plen; in connect_reply_upcall()
1347 event.private_data = ep->mpa_pkt + in connect_reply_upcall()
1352 pr_debug("ep %p tid %u status %d\n", ep, in connect_reply_upcall()
1353 ep->hwtid, status); in connect_reply_upcall()
1354 set_bit(CONN_RPL_UPCALL, &ep->com.history); in connect_reply_upcall()
1355 ep->com.cm_id->event_handler(ep->com.cm_id, &event); in connect_reply_upcall()
1358 deref_cm_id(&ep->com); in connect_reply_upcall()
1361 static int connect_request_upcall(struct c4iw_ep *ep) in connect_request_upcall() argument
1366 pr_debug("ep %p tid %u\n", ep, ep->hwtid); in connect_request_upcall()
1369 memcpy(&event.local_addr, &ep->com.local_addr, in connect_request_upcall()
1370 sizeof(ep->com.local_addr)); in connect_request_upcall()
1371 memcpy(&event.remote_addr, &ep->com.remote_addr, in connect_request_upcall()
1372 sizeof(ep->com.remote_addr)); in connect_request_upcall()
1373 event.provider_data = ep; in connect_request_upcall()
1374 if (!ep->tried_with_mpa_v1) { in connect_request_upcall()
1376 event.ord = ep->ord; in connect_request_upcall()
1377 event.ird = ep->ird; in connect_request_upcall()
1378 event.private_data_len = ep->plen - in connect_request_upcall()
1380 event.private_data = ep->mpa_pkt + sizeof(struct mpa_message) + in connect_request_upcall()
1384 event.ord = cur_max_read_depth(ep->com.dev); in connect_request_upcall()
1385 event.ird = cur_max_read_depth(ep->com.dev); in connect_request_upcall()
1386 event.private_data_len = ep->plen; in connect_request_upcall()
1387 event.private_data = ep->mpa_pkt + sizeof(struct mpa_message); in connect_request_upcall()
1389 c4iw_get_ep(&ep->com); in connect_request_upcall()
1390 ret = ep->parent_ep->com.cm_id->event_handler(ep->parent_ep->com.cm_id, in connect_request_upcall()
1393 c4iw_put_ep(&ep->com); in connect_request_upcall()
1394 set_bit(CONNREQ_UPCALL, &ep->com.history); in connect_request_upcall()
1395 c4iw_put_ep(&ep->parent_ep->com); in connect_request_upcall()
1399 static void established_upcall(struct c4iw_ep *ep) in established_upcall() argument
1403 pr_debug("ep %p tid %u\n", ep, ep->hwtid); in established_upcall()
1406 event.ird = ep->ord; in established_upcall()
1407 event.ord = ep->ird; in established_upcall()
1408 if (ep->com.cm_id) { in established_upcall()
1409 pr_debug("ep %p tid %u\n", ep, ep->hwtid); in established_upcall()
1410 ep->com.cm_id->event_handler(ep->com.cm_id, &event); in established_upcall()
1411 set_bit(ESTAB_UPCALL, &ep->com.history); in established_upcall()
1415 static int update_rx_credits(struct c4iw_ep *ep, u32 credits) in update_rx_credits() argument
1421 pr_debug("ep %p tid %u credits %u\n", in update_rx_credits()
1422 ep, ep->hwtid, credits); in update_rx_credits()
1434 if (ep->rcv_win > RCV_BUFSIZ_M * 1024) in update_rx_credits()
1435 credits += ep->rcv_win - RCV_BUFSIZ_M * 1024; in update_rx_credits()
1440 cxgb_mk_rx_data_ack(skb, wrlen, ep->hwtid, ep->ctrlq_idx, in update_rx_credits()
1443 c4iw_ofld_send(&ep->com.dev->rdev, skb); in update_rx_credits()
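update_rx_credits() returns freed receive-buffer space to the hardware; because the RCV_BUFSIZ field programmed at connection setup can only encode up to RCV_BUFSIZ_M * 1024 bytes, any configured rcv_win beyond that is handed back as extra credits here. A worked example with assumed numbers (a 10-bit field, RCV_BUFSIZ_M == 1023, is an assumption):

    /* hypothetical numbers for the credit top-up */
    static unsigned rx_credits(unsigned rcv_win, unsigned consumed)
    {
        unsigned max_field = 1023u * 1024;   /* what RCV_BUFSIZ could encode */
        unsigned credits = consumed;         /* bytes freed by this PDU      */

        if (rcv_win > max_field)
            credits += rcv_win - max_field;  /* return un-advertised slack   */
        return credits;
    }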
1461 static int process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb) in process_mpa_reply() argument
1473 pr_debug("ep %p tid %u\n", ep, ep->hwtid); in process_mpa_reply()
1479 if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) { in process_mpa_reply()
1487 skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]), in process_mpa_reply()
1489 ep->mpa_pkt_len += skb->len; in process_mpa_reply()
1494 if (ep->mpa_pkt_len < sizeof(*mpa)) in process_mpa_reply()
1496 mpa = (struct mpa_message *) ep->mpa_pkt; in process_mpa_reply()
1523 if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) { in process_mpa_reply()
1528 ep->plen = (u8) plen; in process_mpa_reply()
1534 if (ep->mpa_pkt_len < (sizeof(*mpa) + plen)) in process_mpa_reply()
1547 if (stop_ep_timer(ep)) in process_mpa_reply()
1555 __state_set(&ep->com, FPDU_MODE); in process_mpa_reply()
1556 ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0; in process_mpa_reply()
1557 ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0; in process_mpa_reply()
1558 ep->mpa_attr.version = mpa->revision; in process_mpa_reply()
1559 ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED; in process_mpa_reply()
1562 ep->mpa_attr.enhanced_rdma_conn = in process_mpa_reply()
1564 if (ep->mpa_attr.enhanced_rdma_conn) { in process_mpa_reply()
1566 (ep->mpa_pkt + sizeof(*mpa)); in process_mpa_reply()
1571 pr_debug("responder ird %u ord %u ep ird %u ord %u\n", in process_mpa_reply()
1572 resp_ird, resp_ord, ep->ird, ep->ord); in process_mpa_reply()
1579 if (ep->ird < resp_ord) { in process_mpa_reply()
1581 ep->com.dev->rdev.lldi.max_ordird_qp) in process_mpa_reply()
1582 ep->ird = resp_ord; in process_mpa_reply()
1585 } else if (ep->ird > resp_ord) { in process_mpa_reply()
1586 ep->ird = resp_ord; in process_mpa_reply()
1588 if (ep->ord > resp_ird) { in process_mpa_reply()
1590 ep->ord = resp_ird; in process_mpa_reply()
1596 ep->ird = resp_ord; in process_mpa_reply()
1597 ep->ord = resp_ird; in process_mpa_reply()
1604 ep->mpa_attr.p2p_type = in process_mpa_reply()
1608 ep->mpa_attr.p2p_type = in process_mpa_reply()
1614 ep->mpa_attr.p2p_type = p2p_type; in process_mpa_reply()
1617 ep->mpa_attr.crc_enabled, in process_mpa_reply()
1618 ep->mpa_attr.recv_marker_enabled, in process_mpa_reply()
1619 ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version, in process_mpa_reply()
1620 ep->mpa_attr.p2p_type, p2p_type); in process_mpa_reply()
1628 if ((ep->mpa_attr.version == 2) && peer2peer && in process_mpa_reply()
1629 (ep->mpa_attr.p2p_type != p2p_type)) { in process_mpa_reply()
1630 ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED; in process_mpa_reply()
1634 attrs.mpa_attr = ep->mpa_attr; in process_mpa_reply()
1635 attrs.max_ird = ep->ird; in process_mpa_reply()
1636 attrs.max_ord = ep->ord; in process_mpa_reply()
1637 attrs.llp_stream_handle = ep; in process_mpa_reply()
1645 err = c4iw_modify_qp(ep->com.qp->rhp, in process_mpa_reply()
1646 ep->com.qp, mask, &attrs, 1); in process_mpa_reply()
1660 err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, in process_mpa_reply()
1679 err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, in process_mpa_reply()
1687 stop_ep_timer(ep); in process_mpa_reply()
1691 connect_reply_upcall(ep, err); in process_mpa_reply()
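On the initiator side, process_mpa_reply() reconciles the responder's advertised ird/ord with its own: the local ird must cover the responder's ord (growing it when relaxed negotiation is allowed and the device supports that depth, otherwise failing), and the local ord shrinks to what the responder's ird can absorb. A compilable sketch of that reconciliation; names are hypothetical and RELAXED is an assumed stand-in for the relaxed-IRD policy:

    #include <errno.h>

    #define RELAXED 1   /* assumed policy switch */

    static int reconcile_ird_ord(unsigned *ird, unsigned *ord,
                                 unsigned resp_ird, unsigned resp_ord,
                                 unsigned max_ordird)
    {
        if (*ird < resp_ord) {
            if (RELAXED && resp_ord <= max_ordird)
                *ird = resp_ord;     /* grow to absorb the peer's reads */
            else
                return -ENOMEM;      /* insufficient IRD resources      */
        } else if (*ird > resp_ord) {
            *ird = resp_ord;         /* don't reserve more than needed  */
        }
        if (*ord > resp_ird)
            *ord = resp_ird;         /* peer can't take that many reads */
        return 0;
    }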
1707 static int process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb) in process_mpa_request() argument
1713 pr_debug("ep %p tid %u\n", ep, ep->hwtid); in process_mpa_request()
1719 if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) in process_mpa_request()
1727 skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]), in process_mpa_request()
1729 ep->mpa_pkt_len += skb->len; in process_mpa_request()
1735 if (ep->mpa_pkt_len < sizeof(*mpa)) in process_mpa_request()
1739 mpa = (struct mpa_message *) ep->mpa_pkt; in process_mpa_request()
1764 if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) in process_mpa_request()
1766 ep->plen = (u8) plen; in process_mpa_request()
1771 if (ep->mpa_pkt_len < (sizeof(*mpa) + plen)) in process_mpa_request()
1778 ep->mpa_attr.initiator = 0; in process_mpa_request()
1779 ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0; in process_mpa_request()
1780 ep->mpa_attr.recv_marker_enabled = markers_enabled; in process_mpa_request()
1781 ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0; in process_mpa_request()
1782 ep->mpa_attr.version = mpa->revision; in process_mpa_request()
1784 ep->tried_with_mpa_v1 = 1; in process_mpa_request()
1785 ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED; in process_mpa_request()
1788 ep->mpa_attr.enhanced_rdma_conn = in process_mpa_request()
1790 if (ep->mpa_attr.enhanced_rdma_conn) { in process_mpa_request()
1792 (ep->mpa_pkt + sizeof(*mpa)); in process_mpa_request()
1793 ep->ird = ntohs(mpa_v2_params->ird) & in process_mpa_request()
1795 ep->ird = min_t(u32, ep->ird, in process_mpa_request()
1796 cur_max_read_depth(ep->com.dev)); in process_mpa_request()
1797 ep->ord = ntohs(mpa_v2_params->ord) & in process_mpa_request()
1799 ep->ord = min_t(u32, ep->ord, in process_mpa_request()
1800 cur_max_read_depth(ep->com.dev)); in process_mpa_request()
1802 ep->ird, ep->ord); in process_mpa_request()
1807 ep->mpa_attr.p2p_type = in process_mpa_request()
1811 ep->mpa_attr.p2p_type = in process_mpa_request()
1817 ep->mpa_attr.p2p_type = p2p_type; in process_mpa_request()
1820 ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled, in process_mpa_request()
1821 ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version, in process_mpa_request()
1822 ep->mpa_attr.p2p_type); in process_mpa_request()
1824 __state_set(&ep->com, MPA_REQ_RCVD); in process_mpa_request()
1827 mutex_lock_nested(&ep->parent_ep->com.mutex, SINGLE_DEPTH_NESTING); in process_mpa_request()
1828 if (ep->parent_ep->com.state != DEAD) { in process_mpa_request()
1829 if (connect_request_upcall(ep)) in process_mpa_request()
1834 mutex_unlock(&ep->parent_ep->com.mutex); in process_mpa_request()
1838 mutex_unlock(&ep->parent_ep->com.mutex); in process_mpa_request()
1841 (void)stop_ep_timer(ep); in process_mpa_request()
1848 struct c4iw_ep *ep; in rx_data() local
1855 ep = get_ep_from_tid(dev, tid); in rx_data()
1856 if (!ep) in rx_data()
1858 pr_debug("ep %p tid %u dlen %u\n", ep, ep->hwtid, dlen); in rx_data()
1861 mutex_lock(&ep->com.mutex); in rx_data()
1863 switch (ep->com.state) { in rx_data()
1865 update_rx_credits(ep, dlen); in rx_data()
1866 ep->rcv_seq += dlen; in rx_data()
1867 disconnect = process_mpa_reply(ep, skb); in rx_data()
1870 update_rx_credits(ep, dlen); in rx_data()
1871 ep->rcv_seq += dlen; in rx_data()
1872 disconnect = process_mpa_request(ep, skb); in rx_data()
1877 update_rx_credits(ep, dlen); in rx_data()
1880 " qpid %u ep %p state %d tid %u status %d\n", in rx_data()
1881 __func__, ep->com.qp->wq.sq.qid, ep, in rx_data()
1882 ep->com.state, ep->hwtid, status); in rx_data()
1884 c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, in rx_data()
1892 mutex_unlock(&ep->com.mutex); in rx_data()
1894 c4iw_ep_disconnect(ep, disconnect == 2, GFP_KERNEL); in rx_data()
1895 c4iw_put_ep(&ep->com); in rx_data()
1899 static void complete_cached_srq_buffers(struct c4iw_ep *ep, u32 srqidx) in complete_cached_srq_buffers() argument
1903 adapter_type = ep->com.dev->rdev.lldi.adapter_type; in complete_cached_srq_buffers()
1912 if (ep->com.qp->ibqp.uobject) in complete_cached_srq_buffers()
1913 t4_set_wq_in_error(&ep->com.qp->wq, srqidx); in complete_cached_srq_buffers()
1915 c4iw_flush_srqidx(ep->com.qp, srqidx); in complete_cached_srq_buffers()
1922 struct c4iw_ep *ep; in abort_rpl() local
1927 ep = get_ep_from_tid(dev, tid); in abort_rpl()
1928 if (!ep) { in abort_rpl()
1933 if (ep->com.qp && ep->com.qp->srq) { in abort_rpl()
1935 complete_cached_srq_buffers(ep, srqidx ? srqidx : ep->srqe_idx); in abort_rpl()
1938 pr_debug("ep %p tid %u\n", ep, ep->hwtid); in abort_rpl()
1939 mutex_lock(&ep->com.mutex); in abort_rpl()
1940 switch (ep->com.state) { in abort_rpl()
1942 c4iw_wake_up_noref(ep->com.wr_waitp, -ECONNRESET); in abort_rpl()
1943 __state_set(&ep->com, DEAD); in abort_rpl()
1947 pr_err("%s ep %p state %d\n", __func__, ep, ep->com.state); in abort_rpl()
1950 mutex_unlock(&ep->com.mutex); in abort_rpl()
1953 close_complete_upcall(ep, -ECONNRESET); in abort_rpl()
1954 release_ep_resources(ep); in abort_rpl()
1956 c4iw_put_ep(&ep->com); in abort_rpl()
1960 static int send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid) in send_fw_act_open_req() argument
1977 ep->com.dev->rdev.lldi.ports[0], in send_fw_act_open_req()
1978 ep->l2t)); in send_fw_act_open_req()
1979 sin = (struct sockaddr_in *)&ep->com.local_addr; in send_fw_act_open_req()
1982 sin = (struct sockaddr_in *)&ep->com.remote_addr; in send_fw_act_open_req()
1992 cxgb_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx, in send_fw_act_open_req()
1994 (ep->com.remote_addr.ss_family == AF_INET) ? 0 : 1); in send_fw_act_open_req()
2001 win = ep->rcv_win >> 10; in send_fw_act_open_req()
2011 L2T_IDX_V(ep->l2t->idx) | in send_fw_act_open_req()
2012 TX_CHAN_V(ep->tx_chan) | in send_fw_act_open_req()
2013 SMAC_SEL_V(ep->smac_idx) | in send_fw_act_open_req()
2014 DSCP_V(ep->tos >> 2) | in send_fw_act_open_req()
2018 TX_QUEUE_V(ep->com.dev->rdev.lldi.tx_modq[ep->tx_chan]) | in send_fw_act_open_req()
2021 RSS_QUEUE_VALID_F | RSS_QUEUE_V(ep->rss_qid)); in send_fw_act_open_req()
2030 set_wr_txq(skb, CPL_PRIORITY_CONTROL, ep->ctrlq_idx); in send_fw_act_open_req()
2031 set_bit(ACT_OFLD_CONN, &ep->com.history); in send_fw_act_open_req()
2032 return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); in send_fw_act_open_req()
2063 static void set_tcp_window(struct c4iw_ep *ep, struct port_info *pi) in set_tcp_window() argument
2065 ep->snd_win = snd_win; in set_tcp_window()
2066 ep->rcv_win = rcv_win; in set_tcp_window()
2068 ep->snd_win, ep->rcv_win); in set_tcp_window()
2073 static int import_ep(struct c4iw_ep *ep, int iptype, __u8 *peer_ip, in import_ep() argument
2106 ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t, in import_ep()
2108 if (!ep->l2t) in import_ep()
2110 ep->mtu = pdev->mtu; in import_ep()
2111 ep->tx_chan = cxgb4_port_chan(pdev); in import_ep()
2112 ep->smac_idx = ((struct port_info *)netdev_priv(pdev))->smt_idx; in import_ep()
2115 ep->txq_idx = cxgb4_port_idx(pdev) * step; in import_ep()
2118 ep->ctrlq_idx = cxgb4_port_idx(pdev); in import_ep()
2119 ep->rss_qid = cdev->rdev.lldi.rxq_ids[ in import_ep()
2121 set_tcp_window(ep, (struct port_info *)netdev_priv(pdev)); in import_ep()
2124 ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t, in import_ep()
2126 if (!ep->l2t) in import_ep()
2128 ep->mtu = dst_mtu(dst); in import_ep()
2129 ep->tx_chan = cxgb4_port_chan(pdev); in import_ep()
2130 ep->smac_idx = ((struct port_info *)netdev_priv(pdev))->smt_idx; in import_ep()
2133 ep->txq_idx = cxgb4_port_idx(pdev) * step; in import_ep()
2134 ep->ctrlq_idx = cxgb4_port_idx(pdev); in import_ep()
2137 ep->rss_qid = cdev->rdev.lldi.rxq_ids[ in import_ep()
2139 set_tcp_window(ep, (struct port_info *)netdev_priv(pdev)); in import_ep()
2142 ep->retry_with_mpa_v1 = 0; in import_ep()
2143 ep->tried_with_mpa_v1 = 0; in import_ep()
2155 static int c4iw_reconnect(struct c4iw_ep *ep) in c4iw_reconnect() argument
2160 &ep->com.cm_id->m_local_addr; in c4iw_reconnect()
2162 &ep->com.cm_id->m_remote_addr; in c4iw_reconnect()
2164 &ep->com.cm_id->m_local_addr; in c4iw_reconnect()
2166 &ep->com.cm_id->m_remote_addr; in c4iw_reconnect()
2170 pr_debug("qp %p cm_id %p\n", ep->com.qp, ep->com.cm_id); in c4iw_reconnect()
2171 c4iw_init_wr_wait(ep->com.wr_waitp); in c4iw_reconnect()
2174 * tries to reconnect with MPA_rev 1 for the same EP through in c4iw_reconnect()
2175 * c4iw_reconnect(), where the same EP is assigned with new tid for in c4iw_reconnect()
2176 * further connection establishment. As we are using the same EP pointer in c4iw_reconnect()
2178 * which leaves the EP with inadequate skbs for further in c4iw_reconnect()
2182 size = (CN_MAX_CON_BUF - skb_queue_len(&ep->com.ep_skb_list)); in c4iw_reconnect()
2183 if (alloc_ep_skb_list(&ep->com.ep_skb_list, size)) { in c4iw_reconnect()
2191 ep->atid = cxgb4_alloc_atid(ep->com.dev->rdev.lldi.tids, ep); in c4iw_reconnect()
2192 if (ep->atid == -1) { in c4iw_reconnect()
2197 err = xa_insert_irq(&ep->com.dev->atids, ep->atid, ep, GFP_KERNEL); in c4iw_reconnect()
2202 if (ep->com.cm_id->m_local_addr.ss_family == AF_INET) { in c4iw_reconnect()
2203 ep->dst = cxgb_find_route(&ep->com.dev->rdev.lldi, get_real_dev, in c4iw_reconnect()
2207 raddr->sin_port, ep->com.cm_id->tos); in c4iw_reconnect()
2211 ep->dst = cxgb_find_route6(&ep->com.dev->rdev.lldi, in c4iw_reconnect()
2217 ep->com.cm_id->tos, in c4iw_reconnect()
2222 if (!ep->dst) { in c4iw_reconnect()
2227 err = import_ep(ep, iptype, ra, ep->dst, ep->com.dev, false, in c4iw_reconnect()
2228 ep->com.dev->rdev.lldi.adapter_type, in c4iw_reconnect()
2229 ep->com.cm_id->tos); in c4iw_reconnect()
2236 ep->txq_idx, ep->tx_chan, ep->smac_idx, ep->rss_qid, in c4iw_reconnect()
2237 ep->l2t->idx); in c4iw_reconnect()
2239 state_set(&ep->com, CONNECTING); in c4iw_reconnect()
2240 ep->tos = ep->com.cm_id->tos; in c4iw_reconnect()
2243 err = send_connect(ep); in c4iw_reconnect()
2247 cxgb4_l2t_release(ep->l2t); in c4iw_reconnect()
2249 dst_release(ep->dst); in c4iw_reconnect()
2251 xa_erase_irq(&ep->com.dev->atids, ep->atid); in c4iw_reconnect()
2253 cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid); in c4iw_reconnect()
2261 connect_reply_upcall(ep, -ECONNRESET); in c4iw_reconnect()
2263 c4iw_put_ep(&ep->com); in c4iw_reconnect()
2270 struct c4iw_ep *ep; in act_open_rpl() local
2282 ep = lookup_atid(t, atid); in act_open_rpl()
2283 if (!ep) in act_open_rpl()
2286 la = (struct sockaddr_in *)&ep->com.local_addr; in act_open_rpl()
2287 ra = (struct sockaddr_in *)&ep->com.remote_addr; in act_open_rpl()
2288 la6 = (struct sockaddr_in6 *)&ep->com.local_addr; in act_open_rpl()
2289 ra6 = (struct sockaddr_in6 *)&ep->com.remote_addr; in act_open_rpl()
2291 pr_debug("ep %p atid %u status %u errno %d\n", ep, atid, in act_open_rpl()
2297 ep->stats.connect_neg_adv++; in act_open_rpl()
2304 set_bit(ACT_OPEN_RPL, &ep->com.history); in act_open_rpl()
2317 if (ep->com.local_addr.ss_family == AF_INET && in act_open_rpl()
2319 ret = send_fw_act_open_req(ep, TID_TID_G(AOPEN_ATID_G( in act_open_rpl()
2327 if (ep->retry_count++ < ACT_OPEN_RETRY_COUNT) { in act_open_rpl()
2328 set_bit(ACT_RETRY_INUSE, &ep->com.history); in act_open_rpl()
2329 if (ep->com.remote_addr.ss_family == AF_INET6) { in act_open_rpl()
2332 &ep->com.local_addr; in act_open_rpl()
2334 ep->com.dev->rdev.lldi.ports[0], in act_open_rpl()
2338 xa_erase_irq(&ep->com.dev->atids, atid); in act_open_rpl()
2340 dst_release(ep->dst); in act_open_rpl()
2341 cxgb4_l2t_release(ep->l2t); in act_open_rpl()
2342 c4iw_reconnect(ep); in act_open_rpl()
2347 if (ep->com.local_addr.ss_family == AF_INET) { in act_open_rpl()
2362 connect_reply_upcall(ep, status2errno(status)); in act_open_rpl()
2363 state_set(&ep->com, DEAD); in act_open_rpl()
2365 if (ep->com.remote_addr.ss_family == AF_INET6) { in act_open_rpl()
2367 (struct sockaddr_in6 *)&ep->com.local_addr; in act_open_rpl()
2368 cxgb4_clip_release(ep->com.dev->rdev.lldi.ports[0], in act_open_rpl()
2372 cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, GET_TID(rpl), in act_open_rpl()
2373 ep->com.local_addr.ss_family); in act_open_rpl()
2375 xa_erase_irq(&ep->com.dev->atids, atid); in act_open_rpl()
2377 dst_release(ep->dst); in act_open_rpl()
2378 cxgb4_l2t_release(ep->l2t); in act_open_rpl()
2379 c4iw_put_ep(&ep->com); in act_open_rpl()
2388 struct c4iw_listen_ep *ep = get_ep_from_stid(dev, stid); in pass_open_rpl() local
2390 if (!ep) { in pass_open_rpl()
2394 pr_debug("ep %p status %d error %d\n", ep, in pass_open_rpl()
2396 c4iw_wake_up_noref(ep->com.wr_waitp, status2errno(rpl->status)); in pass_open_rpl()
2397 c4iw_put_ep(&ep->com); in pass_open_rpl()
2406 struct c4iw_listen_ep *ep = get_ep_from_stid(dev, stid); in close_listsrv_rpl() local
2408 if (!ep) { in close_listsrv_rpl()
2412 pr_debug("ep %p\n", ep); in close_listsrv_rpl()
2413 c4iw_wake_up_noref(ep->com.wr_waitp, status2errno(rpl->status)); in close_listsrv_rpl()
2414 c4iw_put_ep(&ep->com); in close_listsrv_rpl()
2419 static int accept_cr(struct c4iw_ep *ep, struct sk_buff *skb, in accept_cr() argument
2429 enum chip_type adapter_type = ep->com.dev->rdev.lldi.adapter_type; in accept_cr()
2431 pr_debug("ep %p tid %u\n", ep, ep->hwtid); in accept_cr()
2432 cxgb_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx, in accept_cr()
2434 (ep->com.remote_addr.ss_family == AF_INET) ? 0 : 1); in accept_cr()
2441 win = ep->rcv_win >> 10; in accept_cr()
2449 L2T_IDX_V(ep->l2t->idx) | in accept_cr()
2450 TX_CHAN_V(ep->tx_chan) | in accept_cr()
2451 SMAC_SEL_V(ep->smac_idx) | in accept_cr()
2452 DSCP_V(ep->tos >> 2) | in accept_cr()
2456 RSS_QUEUE_VALID_F | RSS_QUEUE_V(ep->rss_qid); in accept_cr()
2484 INIT_TP_WR_CPL(rpl5, CPL_PASS_ACCEPT_RPL, ep->hwtid); in accept_cr()
2495 INIT_TP_WR_CPL(rpl, CPL_PASS_ACCEPT_RPL, ep->hwtid); in accept_cr()
2500 set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->ctrlq_idx); in accept_cr()
2501 t4_set_arp_err_handler(skb, ep, pass_accept_rpl_arp_failure); in accept_cr()
2503 return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); in accept_cr()
2539 pr_err("%s - listening ep not in LISTEN\n", __func__); in pass_accept_req()
2553 pr_debug("parent ep %p hwtid %u laddr %pI4 raddr %pI4 lport %d rport %d peer_mss %d\n" in pass_accept_req()
2561 pr_debug("parent ep %p hwtid %u laddr %pI6 raddr %pI6 lport %d rport %d peer_mss %d\n" in pass_accept_req()
2578 pr_err("%s - failed to allocate ep entry!\n", __func__); in pass_accept_req()
2679 struct c4iw_ep *ep; in pass_establish() local
2685 ep = get_ep_from_tid(dev, tid); in pass_establish()
2686 if (!ep) in pass_establish()
2689 pr_debug("ep %p tid %u\n", ep, ep->hwtid); in pass_establish()
2690 ep->snd_seq = be32_to_cpu(req->snd_isn); in pass_establish()
2691 ep->rcv_seq = be32_to_cpu(req->rcv_isn); in pass_establish()
2692 ep->snd_wscale = TCPOPT_SND_WSCALE_G(tcp_opt); in pass_establish()
2694 pr_debug("ep %p hwtid %u tcp_opt 0x%02x\n", ep, tid, tcp_opt); in pass_establish()
2696 set_emss(ep, tcp_opt); in pass_establish()
2698 dst_confirm(ep->dst); in pass_establish()
2699 mutex_lock(&ep->com.mutex); in pass_establish()
2700 ep->com.state = MPA_REQ_WAIT; in pass_establish()
2701 start_ep_timer(ep); in pass_establish()
2702 set_bit(PASS_ESTAB, &ep->com.history); in pass_establish()
2703 ret = send_flowc(ep); in pass_establish()
2704 mutex_unlock(&ep->com.mutex); in pass_establish()
2706 c4iw_ep_disconnect(ep, 1, GFP_KERNEL); in pass_establish()
2707 c4iw_put_ep(&ep->com); in pass_establish()
2715 struct c4iw_ep *ep; in peer_close() local
2722 ep = get_ep_from_tid(dev, tid); in peer_close()
2723 if (!ep) in peer_close()
2726 pr_debug("ep %p tid %u\n", ep, ep->hwtid); in peer_close()
2727 dst_confirm(ep->dst); in peer_close()
2729 set_bit(PEER_CLOSE, &ep->com.history); in peer_close()
2730 mutex_lock(&ep->com.mutex); in peer_close()
2731 switch (ep->com.state) { in peer_close()
2733 __state_set(&ep->com, CLOSING); in peer_close()
2736 __state_set(&ep->com, CLOSING); in peer_close()
2737 connect_reply_upcall(ep, -ECONNRESET); in peer_close()
2747 __state_set(&ep->com, CLOSING); in peer_close()
2748 pr_debug("waking up ep %p tid %u\n", ep, ep->hwtid); in peer_close()
2749 c4iw_wake_up_noref(ep->com.wr_waitp, -ECONNRESET); in peer_close()
2752 __state_set(&ep->com, CLOSING); in peer_close()
2753 pr_debug("waking up ep %p tid %u\n", ep, ep->hwtid); in peer_close()
2754 c4iw_wake_up_noref(ep->com.wr_waitp, -ECONNRESET); in peer_close()
2757 start_ep_timer(ep); in peer_close()
2758 __state_set(&ep->com, CLOSING); in peer_close()
2760 ret = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, in peer_close()
2763 peer_close_upcall(ep); in peer_close()
2771 __state_set(&ep->com, MORIBUND); in peer_close()
2775 (void)stop_ep_timer(ep); in peer_close()
2776 if (ep->com.cm_id && ep->com.qp) { in peer_close()
2778 c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, in peer_close()
2781 close_complete_upcall(ep, 0); in peer_close()
2782 __state_set(&ep->com, DEAD); in peer_close()
2790 WARN_ONCE(1, "Bad endpoint state %u\n", ep->com.state); in peer_close()
2792 mutex_unlock(&ep->com.mutex); in peer_close()
2794 c4iw_ep_disconnect(ep, 0, GFP_KERNEL); in peer_close()
2796 release_ep_resources(ep); in peer_close()
2797 c4iw_put_ep(&ep->com); in peer_close()
2801 static void finish_peer_abort(struct c4iw_dev *dev, struct c4iw_ep *ep) in finish_peer_abort() argument
2803 complete_cached_srq_buffers(ep, ep->srqe_idx); in finish_peer_abort()
2804 if (ep->com.cm_id && ep->com.qp) { in finish_peer_abort()
2808 c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, in finish_peer_abort()
2811 peer_abort_upcall(ep); in finish_peer_abort()
2812 release_ep_resources(ep); in finish_peer_abort()
2813 c4iw_put_ep(&ep->com); in finish_peer_abort()
2819 struct c4iw_ep *ep; in peer_abort() local
2830 ep = get_ep_from_tid(dev, tid); in peer_abort()
2831 if (!ep) in peer_abort()
2838 ep->hwtid, status, neg_adv_str(status)); in peer_abort()
2839 ep->stats.abort_neg_adv++; in peer_abort()
2846 pr_debug("ep %p tid %u state %u\n", ep, ep->hwtid, in peer_abort()
2847 ep->com.state); in peer_abort()
2848 set_bit(PEER_ABORT, &ep->com.history); in peer_abort()
2855 if (ep->com.state != MPA_REQ_SENT) in peer_abort()
2856 c4iw_wake_up_noref(ep->com.wr_waitp, -ECONNRESET); in peer_abort()
2858 mutex_lock(&ep->com.mutex); in peer_abort()
2859 switch (ep->com.state) { in peer_abort()
2861 c4iw_put_ep(&ep->parent_ep->com); in peer_abort()
2864 (void)stop_ep_timer(ep); in peer_abort()
2867 (void)stop_ep_timer(ep); in peer_abort()
2869 (mpa_rev == 2 && ep->tried_with_mpa_v1)) in peer_abort()
2870 connect_reply_upcall(ep, -ECONNRESET); in peer_abort()
2882 ep->retry_with_mpa_v1 = 1; in peer_abort()
2891 stop_ep_timer(ep); in peer_abort()
2894 if (ep->com.qp && ep->com.qp->srq) { in peer_abort()
2898 complete_cached_srq_buffers(ep, srqidx); in peer_abort()
2900 /* Hold ep ref until finish_peer_abort() */ in peer_abort()
2901 c4iw_get_ep(&ep->com); in peer_abort()
2902 __state_set(&ep->com, ABORTING); in peer_abort()
2903 set_bit(PEER_ABORT_IN_PROGRESS, &ep->com.flags); in peer_abort()
2904 read_tcb(ep); in peer_abort()
2910 if (ep->com.cm_id && ep->com.qp) { in peer_abort()
2912 ret = c4iw_modify_qp(ep->com.qp->rhp, in peer_abort()
2913 ep->com.qp, C4IW_QP_ATTR_NEXT_STATE, in peer_abort()
2918 peer_abort_upcall(ep); in peer_abort()
2924 mutex_unlock(&ep->com.mutex); in peer_abort()
2927 WARN_ONCE(1, "Bad endpoint state %u\n", ep->com.state); in peer_abort()
2930 dst_confirm(ep->dst); in peer_abort()
2931 if (ep->com.state != ABORTING) { in peer_abort()
2932 __state_set(&ep->com, DEAD); in peer_abort()
2934 if (!ep->retry_with_mpa_v1) in peer_abort()
2937 mutex_unlock(&ep->com.mutex); in peer_abort()
2939 rpl_skb = skb_dequeue(&ep->com.ep_skb_list); in peer_abort()
2945 cxgb_mk_abort_rpl(rpl_skb, len, ep->hwtid, ep->txq_idx); in peer_abort()
2947 c4iw_ofld_send(&ep->com.dev->rdev, rpl_skb); in peer_abort()
2950 release_ep_resources(ep); in peer_abort()
2951 else if (ep->retry_with_mpa_v1) { in peer_abort()
2952 if (ep->com.remote_addr.ss_family == AF_INET6) { in peer_abort()
2955 &ep->com.local_addr; in peer_abort()
2957 ep->com.dev->rdev.lldi.ports[0], in peer_abort()
2961 xa_erase_irq(&ep->com.dev->hwtids, ep->hwtid); in peer_abort()
2962 cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, ep->hwtid, in peer_abort()
2963 ep->com.local_addr.ss_family); in peer_abort()
2964 dst_release(ep->dst); in peer_abort()
2965 cxgb4_l2t_release(ep->l2t); in peer_abort()
2966 c4iw_reconnect(ep); in peer_abort()
2970 c4iw_put_ep(&ep->com); in peer_abort()
2971 /* Dereferencing ep, referenced in peer_abort_intr() */ in peer_abort()
2972 c4iw_put_ep(&ep->com); in peer_abort()
2978 struct c4iw_ep *ep; in close_con_rpl() local
2984 ep = get_ep_from_tid(dev, tid); in close_con_rpl()
2985 if (!ep) in close_con_rpl()
2988 pr_debug("ep %p tid %u\n", ep, ep->hwtid); in close_con_rpl()
2991 mutex_lock(&ep->com.mutex); in close_con_rpl()
2992 set_bit(CLOSE_CON_RPL, &ep->com.history); in close_con_rpl()
2993 switch (ep->com.state) { in close_con_rpl()
2995 __state_set(&ep->com, MORIBUND); in close_con_rpl()
2998 (void)stop_ep_timer(ep); in close_con_rpl()
2999 if ((ep->com.cm_id) && (ep->com.qp)) { in close_con_rpl()
3001 c4iw_modify_qp(ep->com.qp->rhp, in close_con_rpl()
3002 ep->com.qp, in close_con_rpl()
3006 close_complete_upcall(ep, 0); in close_con_rpl()
3007 __state_set(&ep->com, DEAD); in close_con_rpl()
3014 WARN_ONCE(1, "Bad endpoint state %u\n", ep->com.state); in close_con_rpl()
3017 mutex_unlock(&ep->com.mutex); in close_con_rpl()
3019 release_ep_resources(ep); in close_con_rpl()
3020 c4iw_put_ep(&ep->com); in close_con_rpl()
3028 struct c4iw_ep *ep; in terminate() local
3031 ep = get_ep_from_tid(dev, tid); in terminate()
3033 if (ep) { in terminate()
3034 if (ep->com.qp) { in terminate()
3036 ep->com.qp->wq.sq.qid); in terminate()
3038 c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, in terminate()
3045 c4iw_ep_disconnect(ep, 1, GFP_KERNEL); in terminate()
3046 c4iw_put_ep(&ep->com); in terminate()
3048 pr_warn("TERM received tid %u no ep/qp\n", tid); in terminate()
3060 struct c4iw_ep *ep; in fw4_ack() local
3066 ep = get_ep_from_tid(dev, tid); in fw4_ack()
3067 if (!ep) in fw4_ack()
3069 pr_debug("ep %p tid %u credits %u\n", in fw4_ack()
3070 ep, ep->hwtid, credits); in fw4_ack()
3072 pr_debug("0 credit ack ep %p tid %u state %u\n", in fw4_ack()
3073 ep, ep->hwtid, state_read(&ep->com)); in fw4_ack()
3077 dst_confirm(ep->dst); in fw4_ack()
3078 if (ep->mpa_skb) { in fw4_ack()
3079 pr_debug("last streaming msg ack ep %p tid %u state %u initiator %u freeing skb\n", in fw4_ack()
3080 ep, ep->hwtid, state_read(&ep->com), in fw4_ack()
3081 ep->mpa_attr.initiator ? 1 : 0); in fw4_ack()
3082 mutex_lock(&ep->com.mutex); in fw4_ack()
3083 kfree_skb(ep->mpa_skb); in fw4_ack()
3084 ep->mpa_skb = NULL; in fw4_ack()
3085 if (test_bit(STOP_MPA_TIMER, &ep->com.flags)) in fw4_ack()
3086 stop_ep_timer(ep); in fw4_ack()
3087 mutex_unlock(&ep->com.mutex); in fw4_ack()
3090 c4iw_put_ep(&ep->com); in fw4_ack()
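fw4_ack() frees the saved MPA skb once the hardware acknowledges the last streaming-mode bytes; if the accept path set STOP_MPA_TIMER, the endpoint timer is stopped here too, deferring the stop until the MPA reply is actually on the wire rather than when it was merely queued. A sketch of flag-deferred cleanup in an ack handler (hypothetical names):

    struct ep;
    void free_saved_mpa_skb(struct ep *);     /* kfree_skb(ep->mpa_skb)  */
    int  flag_test(struct ep *, int);
    void stop_conn_timer(struct ep *);
    #define STOP_MPA_TIMER_F 1

    static void on_tx_ack(struct ep *ep)
    {
        free_saved_mpa_skb(ep);
        if (flag_test(ep, STOP_MPA_TIMER_F))  /* set by the accept path  */
            stop_conn_timer(ep);              /* stop only once acked    */
    }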
3097 struct c4iw_ep *ep = to_ep(cm_id); in c4iw_reject_cr() local
3099 pr_debug("ep %p tid %u\n", ep, ep->hwtid); in c4iw_reject_cr()
3101 mutex_lock(&ep->com.mutex); in c4iw_reject_cr()
3102 if (ep->com.state != MPA_REQ_RCVD) { in c4iw_reject_cr()
3103 mutex_unlock(&ep->com.mutex); in c4iw_reject_cr()
3104 c4iw_put_ep(&ep->com); in c4iw_reject_cr()
3107 set_bit(ULP_REJECT, &ep->com.history); in c4iw_reject_cr()
3111 abort = send_mpa_reject(ep, pdata, pdata_len); in c4iw_reject_cr()
3112 mutex_unlock(&ep->com.mutex); in c4iw_reject_cr()
3114 stop_ep_timer(ep); in c4iw_reject_cr()
3115 c4iw_ep_disconnect(ep, abort != 0, GFP_KERNEL); in c4iw_reject_cr()
3116 c4iw_put_ep(&ep->com); in c4iw_reject_cr()
3125 struct c4iw_ep *ep = to_ep(cm_id); in c4iw_accept_cr() local
3130 pr_debug("ep %p tid %u\n", ep, ep->hwtid); in c4iw_accept_cr()
3132 mutex_lock(&ep->com.mutex); in c4iw_accept_cr()
3133 if (ep->com.state != MPA_REQ_RCVD) { in c4iw_accept_cr()
3143 set_bit(ULP_ACCEPT, &ep->com.history); in c4iw_accept_cr()
3144 if ((conn_param->ord > cur_max_read_depth(ep->com.dev)) || in c4iw_accept_cr()
3145 (conn_param->ird > cur_max_read_depth(ep->com.dev))) { in c4iw_accept_cr()
3150 if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) { in c4iw_accept_cr()
3151 if (conn_param->ord > ep->ird) { in c4iw_accept_cr()
3153 conn_param->ord = ep->ird; in c4iw_accept_cr()
3155 ep->ird = conn_param->ird; in c4iw_accept_cr()
3156 ep->ord = conn_param->ord; in c4iw_accept_cr()
3157 send_mpa_reject(ep, conn_param->private_data, in c4iw_accept_cr()
3163 if (conn_param->ird < ep->ord) { in c4iw_accept_cr()
3165 ep->ord <= h->rdev.lldi.max_ordird_qp) { in c4iw_accept_cr()
3166 conn_param->ird = ep->ord; in c4iw_accept_cr()
3173 ep->ird = conn_param->ird; in c4iw_accept_cr()
3174 ep->ord = conn_param->ord; in c4iw_accept_cr()
3176 if (ep->mpa_attr.version == 1) { in c4iw_accept_cr()
3177 if (peer2peer && ep->ird == 0) in c4iw_accept_cr()
3178 ep->ird = 1; in c4iw_accept_cr()
3181 (ep->mpa_attr.p2p_type != FW_RI_INIT_P2PTYPE_DISABLED) && in c4iw_accept_cr()
3182 (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ) && ep->ird == 0) in c4iw_accept_cr()
3183 ep->ird = 1; in c4iw_accept_cr()
3186 pr_debug("ird %d ord %d\n", ep->ird, ep->ord); in c4iw_accept_cr()
3188 ep->com.cm_id = cm_id; in c4iw_accept_cr()
3189 ref_cm_id(&ep->com); in c4iw_accept_cr()
3190 ep->com.qp = qp; in c4iw_accept_cr()
3191 ref_qp(ep); in c4iw_accept_cr()
3193 /* bind QP to EP and move to RTS */ in c4iw_accept_cr()
3194 attrs.mpa_attr = ep->mpa_attr; in c4iw_accept_cr()
3195 attrs.max_ird = ep->ird; in c4iw_accept_cr()
3196 attrs.max_ord = ep->ord; in c4iw_accept_cr()
3197 attrs.llp_stream_handle = ep; in c4iw_accept_cr()
3207 err = c4iw_modify_qp(ep->com.qp->rhp, in c4iw_accept_cr()
3208 ep->com.qp, mask, &attrs, 1); in c4iw_accept_cr()
3212 set_bit(STOP_MPA_TIMER, &ep->com.flags); in c4iw_accept_cr()
3213 err = send_mpa_reply(ep, conn_param->private_data, in c4iw_accept_cr()
3218 __state_set(&ep->com, FPDU_MODE); in c4iw_accept_cr()
3219 established_upcall(ep); in c4iw_accept_cr()
3220 mutex_unlock(&ep->com.mutex); in c4iw_accept_cr()
3221 c4iw_put_ep(&ep->com); in c4iw_accept_cr()
3224 deref_cm_id(&ep->com); in c4iw_accept_cr()
3228 mutex_unlock(&ep->com.mutex); in c4iw_accept_cr()
3230 c4iw_ep_disconnect(ep, 1, GFP_KERNEL); in c4iw_accept_cr()
3231 c4iw_put_ep(&ep->com); in c4iw_accept_cr()
3304 struct c4iw_ep *ep; in c4iw_connect() local
3318 ep = alloc_ep(sizeof(*ep), GFP_KERNEL); in c4iw_connect()
3319 if (!ep) { in c4iw_connect()
3320 pr_err("%s - cannot alloc ep\n", __func__); in c4iw_connect()
3325 skb_queue_head_init(&ep->com.ep_skb_list); in c4iw_connect()
3326 if (alloc_ep_skb_list(&ep->com.ep_skb_list, CN_MAX_CON_BUF)) { in c4iw_connect()
3331 timer_setup(&ep->timer, ep_timeout, 0); in c4iw_connect()
3332 ep->plen = conn_param->private_data_len; in c4iw_connect()
3333 if (ep->plen) in c4iw_connect()
3334 memcpy(ep->mpa_pkt + sizeof(struct mpa_message), in c4iw_connect()
3335 conn_param->private_data, ep->plen); in c4iw_connect()
3336 ep->ird = conn_param->ird; in c4iw_connect()
3337 ep->ord = conn_param->ord; in c4iw_connect()
3339 if (peer2peer && ep->ord == 0) in c4iw_connect()
3340 ep->ord = 1; in c4iw_connect()
3342 ep->com.cm_id = cm_id; in c4iw_connect()
3343 ref_cm_id(&ep->com); in c4iw_connect()
3344 cm_id->provider_data = ep; in c4iw_connect()
3345 ep->com.dev = dev; in c4iw_connect()
3346 ep->com.qp = get_qhp(dev, conn_param->qpn); in c4iw_connect()
3347 if (!ep->com.qp) { in c4iw_connect()
3352 ref_qp(ep); in c4iw_connect()
3354 ep->com.qp, cm_id); in c4iw_connect()
3359 ep->atid = cxgb4_alloc_atid(dev->rdev.lldi.tids, ep); in c4iw_connect()
3360 if (ep->atid == -1) { in c4iw_connect()
3365 err = xa_insert_irq(&dev->atids, ep->atid, ep, GFP_KERNEL); in c4iw_connect()
3369 memcpy(&ep->com.local_addr, &cm_id->m_local_addr, in c4iw_connect()
3370 sizeof(ep->com.local_addr)); in c4iw_connect()
3371 memcpy(&ep->com.remote_addr, &cm_id->m_remote_addr, in c4iw_connect()
3372 sizeof(ep->com.remote_addr)); in c4iw_connect()
3374 laddr = (struct sockaddr_in *)&ep->com.local_addr; in c4iw_connect()
3375 raddr = (struct sockaddr_in *)&ep->com.remote_addr; in c4iw_connect()
3376 laddr6 = (struct sockaddr_in6 *)&ep->com.local_addr; in c4iw_connect()
3377 raddr6 = (struct sockaddr_in6 *) &ep->com.remote_addr; in c4iw_connect()
3396 ep->dst = cxgb_find_route(&dev->rdev.lldi, get_real_dev, in c4iw_connect()
3419 ep->dst = cxgb_find_route6(&dev->rdev.lldi, get_real_dev, in c4iw_connect()
3426 if (!ep->dst) { in c4iw_connect()
3432 err = import_ep(ep, iptype, ra, ep->dst, ep->com.dev, true, in c4iw_connect()
3433 ep->com.dev->rdev.lldi.adapter_type, cm_id->tos); in c4iw_connect()
3440 ep->txq_idx, ep->tx_chan, ep->smac_idx, ep->rss_qid, in c4iw_connect()
3441 ep->l2t->idx); in c4iw_connect()
3443 state_set(&ep->com, CONNECTING); in c4iw_connect()
3444 ep->tos = cm_id->tos; in c4iw_connect()
3447 err = send_connect(ep); in c4iw_connect()
3451 cxgb4_l2t_release(ep->l2t); in c4iw_connect()
3453 dst_release(ep->dst); in c4iw_connect()
3455 xa_erase_irq(&ep->com.dev->atids, ep->atid); in c4iw_connect()
3457 cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid); in c4iw_connect()
3459 skb_queue_purge(&ep->com.ep_skb_list); in c4iw_connect()
3460 deref_cm_id(&ep->com); in c4iw_connect()
3462 c4iw_put_ep(&ep->com); in c4iw_connect()
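c4iw_connect() acquires its resources in order (skb list, QP ref, atid, xarray entry, route, L2T entry), and the fail paths visible above release them in exactly the reverse order before the final put. A skeleton of that goto-unwind idiom with hypothetical helpers:

    /* hypothetical helpers; each fail label undoes one earlier step */
    static int connect_setup(struct ep *ep)
    {
        int err;

        if ((err = alloc_atid(ep)))       goto fail_atid;
        if ((err = map_insert_atid(ep)))  goto fail_map;
        if ((err = find_route(ep)))       goto fail_route;
        if ((err = get_l2t_entry(ep)))    goto fail_l2t;
        if ((err = send_syn(ep)))         goto fail_send;
        return 0;

    fail_send:  put_l2t_entry(ep);
    fail_l2t:   release_route(ep);
    fail_route: map_erase_atid(ep);
    fail_map:   free_atid(ep);
    fail_atid:  return err;
    }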
3467 static int create_server6(struct c4iw_dev *dev, struct c4iw_listen_ep *ep) in create_server6() argument
3471 &ep->com.local_addr; in create_server6()
3474 err = cxgb4_clip_get(ep->com.dev->rdev.lldi.ports[0], in create_server6()
3479 c4iw_init_wr_wait(ep->com.wr_waitp); in create_server6()
3480 err = cxgb4_create_server6(ep->com.dev->rdev.lldi.ports[0], in create_server6()
3481 ep->stid, &sin6->sin6_addr, in create_server6()
3483 ep->com.dev->rdev.lldi.rxq_ids[0]); in create_server6()
3485 err = c4iw_wait_for_reply(&ep->com.dev->rdev, in create_server6()
3486 ep->com.wr_waitp, in create_server6()
3491 cxgb4_clip_release(ep->com.dev->rdev.lldi.ports[0], in create_server6()
3494 err, ep->stid, in create_server6()
3500 static int create_server4(struct c4iw_dev *dev, struct c4iw_listen_ep *ep) in create_server4() argument
3504 &ep->com.local_addr; in create_server4()
3509 ep->com.dev->rdev.lldi.ports[0], ep->stid, in create_server4()
3511 ep->com.dev->rdev.lldi.rxq_ids[0], 0, 0); in create_server4()
3513 if (c4iw_fatal_error(&ep->com.dev->rdev)) { in create_server4()
3522 c4iw_init_wr_wait(ep->com.wr_waitp); in create_server4()
3523 err = cxgb4_create_server(ep->com.dev->rdev.lldi.ports[0], in create_server4()
3524 ep->stid, sin->sin_addr.s_addr, sin->sin_port, in create_server4()
3525 0, ep->com.dev->rdev.lldi.rxq_ids[0]); in create_server4()
3527 err = c4iw_wait_for_reply(&ep->com.dev->rdev, in create_server4()
3528 ep->com.wr_waitp, in create_server4()
3535 , err, ep->stid, in create_server4()
3544 struct c4iw_listen_ep *ep; in c4iw_create_listen() local
3548 ep = alloc_ep(sizeof(*ep), GFP_KERNEL); in c4iw_create_listen()
3549 if (!ep) { in c4iw_create_listen()
3550 pr_err("%s - cannot alloc ep\n", __func__); in c4iw_create_listen()
3554 skb_queue_head_init(&ep->com.ep_skb_list); in c4iw_create_listen()
3555 pr_debug("ep %p\n", ep); in c4iw_create_listen()
3556 ep->com.cm_id = cm_id; in c4iw_create_listen()
3557 ref_cm_id(&ep->com); in c4iw_create_listen()
3558 ep->com.dev = dev; in c4iw_create_listen()
3559 ep->backlog = backlog; in c4iw_create_listen()
3560 memcpy(&ep->com.local_addr, &cm_id->m_local_addr, in c4iw_create_listen()
3561 sizeof(ep->com.local_addr)); in c4iw_create_listen()
3567 ep->com.local_addr.ss_family == AF_INET) in c4iw_create_listen()
3568 ep->stid = cxgb4_alloc_sftid(dev->rdev.lldi.tids, in c4iw_create_listen()
3569 cm_id->m_local_addr.ss_family, ep); in c4iw_create_listen()
3571 ep->stid = cxgb4_alloc_stid(dev->rdev.lldi.tids, in c4iw_create_listen()
3572 cm_id->m_local_addr.ss_family, ep); in c4iw_create_listen()
3574 if (ep->stid == -1) { in c4iw_create_listen()
3579 err = xa_insert_irq(&dev->stids, ep->stid, ep, GFP_KERNEL); in c4iw_create_listen()
3583 state_set(&ep->com, LISTEN); in c4iw_create_listen()
3584 if (ep->com.local_addr.ss_family == AF_INET) in c4iw_create_listen()
3585 err = create_server4(dev, ep); in c4iw_create_listen()
3587 err = create_server6(dev, ep); in c4iw_create_listen()
3589 cm_id->provider_data = ep; in c4iw_create_listen()
3592 xa_erase_irq(&ep->com.dev->stids, ep->stid); in c4iw_create_listen()
3594 cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid, in c4iw_create_listen()
3595 ep->com.local_addr.ss_family); in c4iw_create_listen()
3597 deref_cm_id(&ep->com); in c4iw_create_listen()
3598 c4iw_put_ep(&ep->com); in c4iw_create_listen()
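c4iw_create_listen() builds the listening endpoint stepwise: allocate the ep, reference the cm_id, copy the local address, allocate a server TID (cxgb4_alloc_sftid() for IPv4 when, as the destroy path's enable_fw_ofld_conn check suggests, firmware offloaded connections are enabled; cxgb4_alloc_stid() otherwise), publish it in the stids xarray, set LISTEN, and only then program the hardware server; each failure branch unwinds exactly the steps already taken. A compact model of the family-dependent TID choice, with stand-in names:

/* Model of the stid-vs-sftid choice in c4iw_create_listen(): IPv4
 * listeners use a server-filter TID when firmware offloaded
 * connections are enabled, everything else a plain server TID.
 * All names here are stand-ins. */
#include <stdio.h>

enum { MODEL_AF_INET, MODEL_AF_INET6 };

static int use_sftid(int family, int fw_ofld_conn)
{
	if (fw_ofld_conn && family == MODEL_AF_INET)
		return 1;	/* cxgb4_alloc_sftid() path */
	return 0;		/* cxgb4_alloc_stid() path */
}

int main(void)
{
	printf("v4+ofld -> %s\n",
	       use_sftid(MODEL_AF_INET, 1) ? "sftid" : "stid");
	printf("v6+ofld -> %s\n",
	       use_sftid(MODEL_AF_INET6, 1) ? "sftid" : "stid");
	return 0;
}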
3607 struct c4iw_listen_ep *ep = to_listen_ep(cm_id); in c4iw_destroy_listen() local
3609 pr_debug("ep %p\n", ep); in c4iw_destroy_listen()
3612 state_set(&ep->com, DEAD); in c4iw_destroy_listen()
3613 if (ep->com.dev->rdev.lldi.enable_fw_ofld_conn && in c4iw_destroy_listen()
3614 ep->com.local_addr.ss_family == AF_INET) { in c4iw_destroy_listen()
3616 ep->com.dev->rdev.lldi.ports[0], ep->stid, in c4iw_destroy_listen()
3617 ep->com.dev->rdev.lldi.rxq_ids[0], false); in c4iw_destroy_listen()
3620 c4iw_init_wr_wait(ep->com.wr_waitp); in c4iw_destroy_listen()
3622 ep->com.dev->rdev.lldi.ports[0], ep->stid, in c4iw_destroy_listen()
3623 ep->com.dev->rdev.lldi.rxq_ids[0], in c4iw_destroy_listen()
3624 ep->com.local_addr.ss_family == AF_INET6); in c4iw_destroy_listen()
3627 err = c4iw_wait_for_reply(&ep->com.dev->rdev, ep->com.wr_waitp, in c4iw_destroy_listen()
3629 sin6 = (struct sockaddr_in6 *)&ep->com.local_addr; in c4iw_destroy_listen()
3630 cxgb4_clip_release(ep->com.dev->rdev.lldi.ports[0], in c4iw_destroy_listen()
3633 xa_erase_irq(&ep->com.dev->stids, ep->stid); in c4iw_destroy_listen()
3634 cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid, in c4iw_destroy_listen()
3635 ep->com.local_addr.ss_family); in c4iw_destroy_listen()
3637 deref_cm_id(&ep->com); in c4iw_destroy_listen()
3638 c4iw_put_ep(&ep->com); in c4iw_destroy_listen()
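c4iw_destroy_listen() mirrors setup in reverse: the state flips to DEAD first so concurrent events see a dying listener, then the hardware server is removed (a separate path for offloaded IPv4 listeners, otherwise a posted remove followed by c4iw_wait_for_reply()), the CLIP entry is released for IPv6, the stid is erased from the xarray and freed, and the cm_id and ep references are dropped. A tiny model of that mark-dead-then-unwind ordering, with a stand-in listener struct:

/* Model of the teardown ordering in c4iw_destroy_listen(): flip the
 * state to DEAD before touching hardware so concurrent events see a
 * dying listener.  The listener struct is a stand-in. */
#include <stdio.h>

enum lstate { L_LISTEN, L_DEAD };

struct listener {
	enum lstate state;
	int hw_server_up;
	int stid_allocated;
};

static void destroy_listener(struct listener *l)
{
	l->state = L_DEAD;		/* state_set(&ep->com, DEAD) */
	if (l->hw_server_up) {		/* remove server + wait for reply */
		printf("hw server removed\n");
		l->hw_server_up = 0;
	}
	if (l->stid_allocated) {	/* xa_erase_irq + cxgb4_free_stid */
		printf("stid freed\n");
		l->stid_allocated = 0;
	}
	printf("refs dropped\n");	/* deref_cm_id + c4iw_put_ep */
}

int main(void)
{
	struct listener l = { L_LISTEN, 1, 1 };

	destroy_listener(&l);
	return 0;
}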
3642 int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp) in c4iw_ep_disconnect() argument
3649 mutex_lock(&ep->com.mutex); in c4iw_ep_disconnect()
3651 pr_debug("ep %p state %s, abrupt %d\n", ep, in c4iw_ep_disconnect()
3652 states[ep->com.state], abrupt); in c4iw_ep_disconnect()
3655 * Ref the ep here in case we have fatal errors causing the in c4iw_ep_disconnect()
3656 * ep to be released and freed. in c4iw_ep_disconnect()
3658 c4iw_get_ep(&ep->com); in c4iw_ep_disconnect()
3660 rdev = &ep->com.dev->rdev; in c4iw_ep_disconnect()
3663 close_complete_upcall(ep, -EIO); in c4iw_ep_disconnect()
3664 ep->com.state = DEAD; in c4iw_ep_disconnect()
3666 switch (ep->com.state) { in c4iw_ep_disconnect()
3675 ep->com.state = ABORTING; in c4iw_ep_disconnect()
3677 ep->com.state = CLOSING; in c4iw_ep_disconnect()
3683 if (ep->mpa_skb && in c4iw_ep_disconnect()
3684 test_bit(STOP_MPA_TIMER, &ep->com.flags)) { in c4iw_ep_disconnect()
3685 clear_bit(STOP_MPA_TIMER, &ep->com.flags); in c4iw_ep_disconnect()
3686 stop_ep_timer(ep); in c4iw_ep_disconnect()
3688 start_ep_timer(ep); in c4iw_ep_disconnect()
3690 set_bit(CLOSE_SENT, &ep->com.flags); in c4iw_ep_disconnect()
3693 if (!test_and_set_bit(CLOSE_SENT, &ep->com.flags)) { in c4iw_ep_disconnect()
3696 (void)stop_ep_timer(ep); in c4iw_ep_disconnect()
3697 ep->com.state = ABORTING; in c4iw_ep_disconnect()
3699 ep->com.state = MORIBUND; in c4iw_ep_disconnect()
3705 pr_debug("ignoring disconnect ep %p state %u\n", in c4iw_ep_disconnect()
3706 ep, ep->com.state); in c4iw_ep_disconnect()
3709 WARN_ONCE(1, "Bad endpoint state %u\n", ep->com.state); in c4iw_ep_disconnect()
3715 set_bit(EP_DISC_ABORT, &ep->com.history); in c4iw_ep_disconnect()
3716 ret = send_abort(ep); in c4iw_ep_disconnect()
3718 set_bit(EP_DISC_CLOSE, &ep->com.history); in c4iw_ep_disconnect()
3719 ret = send_halfclose(ep); in c4iw_ep_disconnect()
3722 set_bit(EP_DISC_FAIL, &ep->com.history); in c4iw_ep_disconnect()
3724 stop_ep_timer(ep); in c4iw_ep_disconnect()
3725 close_complete_upcall(ep, -EIO); in c4iw_ep_disconnect()
3727 if (ep->com.qp) { in c4iw_ep_disconnect()
3731 ret = c4iw_modify_qp(ep->com.qp->rhp, in c4iw_ep_disconnect()
3732 ep->com.qp, in c4iw_ep_disconnect()
3742 mutex_unlock(&ep->com.mutex); in c4iw_ep_disconnect()
3743 c4iw_put_ep(&ep->com); in c4iw_ep_disconnect()
3745 release_ep_resources(ep); in c4iw_ep_disconnect()
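c4iw_ep_disconnect() is the core of connection teardown. Under the ep mutex it takes an extra reference so a fatal error cannot free the ep mid-function, then switches on the state: connection-carrying states move to CLOSING (graceful, with the MPA/ep timer juggling shown above) or ABORTING (abrupt); a connection already CLOSING escalates to MORIBUND or ABORTING, gated by test_and_set_bit(CLOSE_SENT) so the half-close is issued only once; the chosen action is send_halfclose() or send_abort(), and if even that send fails the timer is stopped, the close upcall completes with -EIO, and the QP is driven to an error state. A reduced model of the abrupt-versus-graceful decision, with stand-in states and helpers:

/* Reduced model of the close-vs-abort decision in c4iw_ep_disconnect().
 * States and helper names are stand-ins for the driver's ep state
 * machine; the CLOSE_SENT gating and timer handling are omitted. */
#include <stdio.h>

enum ep_state { CONNECTING, MPA_REQ_SENT, FPDU_MODE, CLOSING,
		MORIBUND, ABORTING, EP_DEAD };

static const char *names[] = { "CONNECTING", "MPA_REQ_SENT", "FPDU_MODE",
			       "CLOSING", "MORIBUND", "ABORTING", "DEAD" };

static void disconnect(enum ep_state *state, int abrupt)
{
	int close = 0;

	switch (*state) {
	case CONNECTING:
	case MPA_REQ_SENT:
	case FPDU_MODE:
		close = 1;
		*state = abrupt ? ABORTING : CLOSING;
		break;
	case CLOSING:			/* half-close already sent */
		close = 1;
		*state = abrupt ? ABORTING : MORIBUND;
		break;
	case MORIBUND:
	case ABORTING:
	case EP_DEAD:			/* nothing left to do: ignore */
		break;
	}
	if (close)
		printf("%s -> %s\n",
		       abrupt ? "send_abort" : "send_halfclose",
		       names[*state]);
}

int main(void)
{
	enum ep_state s = FPDU_MODE;

	disconnect(&s, 0);	/* graceful: half-close, CLOSING */
	disconnect(&s, 1);	/* abrupt while closing: abort */
	return 0;
}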
3752 struct c4iw_ep *ep; in active_ofld_conn_reply() local
3755 ep = (struct c4iw_ep *)lookup_atid(dev->rdev.lldi.tids, in active_ofld_conn_reply()
3757 if (!ep) in active_ofld_conn_reply()
3762 set_bit(ACT_RETRY_NOMEM, &ep->com.history); in active_ofld_conn_reply()
3763 if (ep->retry_count++ < ACT_OPEN_RETRY_COUNT) { in active_ofld_conn_reply()
3764 send_fw_act_open_req(ep, atid); in active_ofld_conn_reply()
3769 set_bit(ACT_RETRY_INUSE, &ep->com.history); in active_ofld_conn_reply()
3770 if (ep->retry_count++ < ACT_OPEN_RETRY_COUNT) { in active_ofld_conn_reply()
3771 send_fw_act_open_req(ep, atid); in active_ofld_conn_reply()
3785 connect_reply_upcall(ep, status2errno(req->retval)); in active_ofld_conn_reply()
3786 state_set(&ep->com, DEAD); in active_ofld_conn_reply()
3787 if (ep->com.remote_addr.ss_family == AF_INET6) { in active_ofld_conn_reply()
3789 (struct sockaddr_in6 *)&ep->com.local_addr; in active_ofld_conn_reply()
3790 cxgb4_clip_release(ep->com.dev->rdev.lldi.ports[0], in active_ofld_conn_reply()
3795 dst_release(ep->dst); in active_ofld_conn_reply()
3796 cxgb4_l2t_release(ep->l2t); in active_ofld_conn_reply()
3797 c4iw_put_ep(&ep->com); in active_ofld_conn_reply()
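active_ofld_conn_reply() handles firmware rejections of an offloaded active open. For retryable causes (the ACT_RETRY_NOMEM and ACT_RETRY_INUSE history bits above) it re-posts the open via send_fw_act_open_req() while retry_count stays below ACT_OPEN_RETRY_COUNT; once exhausted it reports status2errno() upstream via connect_reply_upcall(), marks the ep DEAD, releases the IPv6 CLIP entry if needed, and drops the route, L2T entry, and ep reference. The bounded-retry core, sketched with illustrative names:

/* Bounded-retry sketch matching the retry_count++ < ACT_OPEN_RETRY_COUNT
 * checks in active_ofld_conn_reply().  Names are illustrative. */
#include <stdio.h>

#define OPEN_RETRY_LIMIT 3	/* stand-in for ACT_OPEN_RETRY_COUNT */

struct conn {
	int retry_count;
};

/* returns 1 if another attempt was issued, 0 if we give up */
static int maybe_retry_open(struct conn *c)
{
	if (c->retry_count++ < OPEN_RETRY_LIMIT) {
		printf("retrying open, attempt %d\n", c->retry_count);
		return 1;	/* send_fw_act_open_req() again */
	}
	printf("retries exhausted, failing connect\n");
	return 0;		/* connect_reply_upcall() + teardown */
}

int main(void)
{
	struct conn c = { 0 };

	while (maybe_retry_open(&c))
		;	/* pretend each reply is another retryable error */
	return 0;
}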
3854 struct c4iw_ep *ep; in read_tcb_rpl() local
3858 ep = get_ep_from_tid(dev, tid); in read_tcb_rpl()
3859 if (!ep) in read_tcb_rpl()
3872 c4iw_put_ep(&ep->com); /* from get_ep_from_tid() */ in read_tcb_rpl()
3873 c4iw_put_ep(&ep->com); /* from read_tcb() */ in read_tcb_rpl()
3877 if (++ep->rx_pdu_out_cnt >= 2) { in read_tcb_rpl()
3881 read_tcb(ep); in read_tcb_rpl()
3885 ep->srqe_idx = t4_tcb_get_field32(tcb, TCB_RQ_START_W, TCB_RQ_START_M, in read_tcb_rpl()
3888 pr_debug("ep %p tid %u %016x\n", ep, ep->hwtid, ep->srqe_idx); in read_tcb_rpl()
3890 if (test_bit(PEER_ABORT_IN_PROGRESS, &ep->com.flags)) in read_tcb_rpl()
3891 finish_peer_abort(dev, ep); in read_tcb_rpl()
3892 else if (test_bit(ABORT_REQ_IN_PROGRESS, &ep->com.flags)) in read_tcb_rpl()
3893 send_abort_req(ep); in read_tcb_rpl()
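read_tcb_rpl() completes an asynchronous TCB read: the double c4iw_put_ep() balances the references taken by get_ep_from_tid() and read_tcb(), the ++ep->rx_pdu_out_cnt >= 2 test caps how often the TCB is re-read, and the SRQ index is then extracted from the raw TCB words via t4_tcb_get_field32() — pick a 32-bit word, mask the field, shift it down — before dispatching to finish_peer_abort() or send_abort_req() depending on which abort flag is pending. A hedged sketch of that word/mask/shift extraction; the word index, mask, and shift below are invented for illustration, not the real TCB layout:

/* Sketch of the word/mask/shift extraction done by t4_tcb_get_field32()
 * in read_tcb_rpl().  The field placement here is invented. */
#include <stdint.h>
#include <stdio.h>

static uint32_t tcb_get_field32(const uint32_t *tcb, unsigned int word,
				uint32_t mask, unsigned int shift)
{
	return (tcb[word] & mask) >> shift;
}

int main(void)
{
	uint32_t tcb[4] = { 0, 0, 0xabcd1234u, 0 };

	/* pretend the field lives in word 2, bits 15..8 */
	printf("field = 0x%x\n", tcb_get_field32(tcb, 2, 0x0000ff00u, 8));
	return 0;
}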
4231 static void process_timeout(struct c4iw_ep *ep) in process_timeout() argument
4236 mutex_lock(&ep->com.mutex); in process_timeout()
4237 pr_debug("ep %p tid %u state %d\n", ep, ep->hwtid, ep->com.state); in process_timeout()
4238 set_bit(TIMEDOUT, &ep->com.history); in process_timeout()
4239 switch (ep->com.state) { in process_timeout()
4241 connect_reply_upcall(ep, -ETIMEDOUT); in process_timeout()
4250 if (ep->com.cm_id && ep->com.qp) { in process_timeout()
4252 c4iw_modify_qp(ep->com.qp->rhp, in process_timeout()
4253 ep->com.qp, C4IW_QP_ATTR_NEXT_STATE, in process_timeout()
4256 close_complete_upcall(ep, -ETIMEDOUT); in process_timeout()
4262 * These states are expected if the ep timed out at the same in process_timeout()
4269 WARN(1, "%s unexpected state ep %p tid %u state %u\n", in process_timeout()
4270 __func__, ep, ep->hwtid, ep->com.state); in process_timeout()
4273 mutex_unlock(&ep->com.mutex); in process_timeout()
4275 c4iw_ep_disconnect(ep, 1, GFP_KERNEL); in process_timeout()
4276 c4iw_put_ep(&ep->com); in process_timeout()
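process_timeout() runs in process context for each ep whose timer fired: it records TIMEDOUT in the history, then acts on the state — a connect that never completed gets connect_reply_upcall(-ETIMEDOUT); an established connection has its QP driven to an error state and gets close_complete_upcall(-ETIMEDOUT); states that legitimately race with stop_ep_timer() are ignored; anything else WARNs — and afterwards the ep is disconnected abruptly and the timer's reference is dropped. A condensed map of that per-state dispatch, with stand-in state and action names:

/* Condensed model of the per-state dispatch in process_timeout().
 * States and action strings are stand-ins. */
#include <stdio.h>

enum tstate { T_MPA_REQ_SENT, T_FPDU_MODE, T_ABORTING, T_OTHER };

static const char *timeout_action(enum tstate s)
{
	switch (s) {
	case T_MPA_REQ_SENT:
		return "connect_reply_upcall(-ETIMEDOUT)";
	case T_FPDU_MODE:
		return "QP -> error, close_complete_upcall(-ETIMEDOUT)";
	case T_ABORTING:
		return "ignore (raced with stop_ep_timer)";
	default:
		return "WARN: unexpected state";
	}
}

int main(void)
{
	enum tstate s;

	for (s = T_MPA_REQ_SENT; s <= T_OTHER; s++)
		printf("%d: %s\n", s, timeout_action(s));
	return 0;
}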
4281 struct c4iw_ep *ep; in process_timedout_eps() local
4292 ep = list_entry(tmp, struct c4iw_ep, entry); in process_timedout_eps()
4293 process_timeout(ep); in process_timedout_eps()
4330 struct c4iw_ep *ep = from_timer(ep, t, timer); in ep_timeout() local
4334 if (!test_and_set_bit(TIMEOUT, &ep->com.flags)) { in ep_timeout()
4338 if (!ep->entry.next) { in ep_timeout()
4339 list_add_tail(&ep->entry, &timeout_list); in ep_timeout()
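ep_timeout() itself runs in timer context, so it does almost nothing: test_and_set_bit(TIMEOUT) guarantees the timeout is handled at most once even when it races with stop_ep_timer(), the !ep->entry.next check avoids double-queueing, and the ep is merely appended to the global timeout_list that process_timedout_eps() drains later in process context. A userspace model of that defer-to-worklist handoff built on a once-only atomic flag (all names are stand-ins; the real list is lock-protected, which this single-threaded model omits):

/* Model of the ep_timeout() handoff: an atomic test-and-set makes the
 * timeout fire at most once, and the work is deferred to a list that
 * is drained later.  All names are stand-ins. */
#include <stdatomic.h>
#include <stdio.h>

struct timed_ep {
	atomic_flag timed_out;	/* models test_and_set_bit(TIMEOUT) */
	struct timed_ep *next;	/* models list_add_tail(&ep->entry, ...) */
};

static struct timed_ep *timeout_list;

static void ep_timeout_model(struct timed_ep *ep)
{
	if (atomic_flag_test_and_set(&ep->timed_out))
		return;		/* stop_ep_timer() got there first */
	ep->next = timeout_list;
	timeout_list = ep;	/* defer; no real work in timer context */
}

static void process_timedout(void)	/* models process_timedout_eps() */
{
	while (timeout_list) {
		struct timed_ep *ep = timeout_list;

		timeout_list = ep->next;
		printf("processing timed-out ep %p\n", (void *)ep);
	}
}

int main(void)
{
	struct timed_ep a;

	atomic_flag_clear(&a.timed_out);
	a.next = NULL;

	ep_timeout_model(&a);
	ep_timeout_model(&a);	/* second fire is a no-op */
	process_timedout();
	return 0;
}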
4412 struct c4iw_ep *ep; in peer_abort_intr() local
4415 ep = get_ep_from_tid(dev, tid); in peer_abort_intr()
4416 /* This EP will be dereferenced in peer_abort() */ in peer_abort_intr()
4417 if (!ep) { in peer_abort_intr()
4424 ep->hwtid, req->status, in peer_abort_intr()
4428 pr_debug("ep %p tid %u state %u\n", ep, ep->hwtid, ep->com.state); in peer_abort_intr()
4430 c4iw_wake_up_noref(ep->com.wr_waitp, -ECONNRESET); in peer_abort_intr()
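peer_abort_intr() is the interrupt-level half of peer-abort handling: it looks up the ep by tid (the reference taken here is dropped later in peer_abort(), per the comment above) and immediately wakes any thread blocked in c4iw_wait_for_reply() with -ECONNRESET, so a connect or server operation waiting on firmware cannot hang when the peer aborts. On top of the wr_wait model sketched after create_server6(), the error wakeup reduces to:

/* The abort path wakes a waiter with an error instead of a success
 * status; in the earlier wr_wait model this is just a wake-up call
 * carrying -ECONNRESET (names remain stand-ins). */
#include <errno.h>
#include <stdio.h>

struct wr_wait_model {
	int done;
	int ret;
};

static void wake_up_noref(struct wr_wait_model *w, int ret)
{
	w->ret = ret;
	w->done = 1;	/* a real version signals the sleeping waiter here */
}

int main(void)
{
	struct wr_wait_model w = { 0, 0 };

	wake_up_noref(&w, -ECONNRESET);	/* peer abort interrupts the wait */
	printf("waiter resumes with %d\n", w.ret);
	return 0;
}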