Lines Matching full:qp
47 #include <linux/mlx4/qp.h>
107 struct mlx4_qp *qp; member
113 static int is_tunnel_qp(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp) in is_tunnel_qp() argument
118 return qp->mqp.qpn >= dev->dev->phys_caps.base_tunnel_sqpn && in is_tunnel_qp()
119 qp->mqp.qpn < dev->dev->phys_caps.base_tunnel_sqpn + in is_tunnel_qp()
123 static int is_sqp(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp) in is_sqp() argument
130 qp->mqp.qpn >= dev->dev->phys_caps.base_sqpn && in is_sqp()
131 qp->mqp.qpn <= dev->dev->phys_caps.base_sqpn + 3); in is_sqp()
137 if (qp->mqp.qpn == dev->dev->caps.spec_qps[i].qp0_proxy || in is_sqp()
138 qp->mqp.qpn == dev->dev->caps.spec_qps[i].qp1_proxy) { in is_sqp()
147 return !!(qp->flags & MLX4_IB_ROCE_V2_GSI_QP); in is_sqp()
151 static int is_qp0(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp) in is_qp0() argument
158 qp->mqp.qpn >= dev->dev->phys_caps.base_sqpn && in is_qp0()
159 qp->mqp.qpn <= dev->dev->phys_caps.base_sqpn + 1); in is_qp0()
165 if (qp->mqp.qpn == dev->dev->caps.spec_qps[i].qp0_proxy) { in is_qp0()
174 static void *get_wqe(struct mlx4_ib_qp *qp, int offset) in get_wqe() argument
176 return mlx4_buf_offset(&qp->buf, offset); in get_wqe()
179 static void *get_recv_wqe(struct mlx4_ib_qp *qp, int n) in get_recv_wqe() argument
181 return get_wqe(qp, qp->rq.offset + (n << qp->rq.wqe_shift)); in get_recv_wqe()
184 static void *get_send_wqe(struct mlx4_ib_qp *qp, int n) in get_send_wqe() argument
186 return get_wqe(qp, qp->sq.offset + (n << qp->sq.wqe_shift)); in get_send_wqe()
194 static void stamp_send_wqe(struct mlx4_ib_qp *qp, int n) in stamp_send_wqe() argument
202 buf = get_send_wqe(qp, n & (qp->sq.wqe_cnt - 1)); in stamp_send_wqe()
215 struct ib_qp *ibqp = &to_mibqp(qpe_work->qp)->ibqp; in mlx4_ib_handle_qp_event()
219 event.element.qp = ibqp; in mlx4_ib_handle_qp_event()
247 pr_warn("Unexpected event type %d on QP %06x\n", in mlx4_ib_handle_qp_event()
248 qpe_work->type, qpe_work->qp->qpn); in mlx4_ib_handle_qp_event()
255 mlx4_put_qp(qpe_work->qp); in mlx4_ib_handle_qp_event()
259 static void mlx4_ib_qp_event(struct mlx4_qp *qp, enum mlx4_event type) in mlx4_ib_qp_event() argument
261 struct ib_qp *ibqp = &to_mibqp(qp)->ibqp; in mlx4_ib_qp_event()
265 to_mibqp(qp)->port = to_mibqp(qp)->alt_port; in mlx4_ib_qp_event()
274 qpe_work->qp = qp; in mlx4_ib_qp_event()
281 mlx4_put_qp(qp); in mlx4_ib_qp_event()
284 static void mlx4_ib_wq_event(struct mlx4_qp *qp, enum mlx4_event type) in mlx4_ib_wq_event() argument
287 type, qp->qpn); in mlx4_ib_wq_event()
337 bool is_user, bool has_rq, struct mlx4_ib_qp *qp, in set_rq_size() argument
349 qp->rq.wqe_cnt = qp->rq.max_gs = 0; in set_rq_size()
360 qp->rq.wqe_cnt = roundup_pow_of_two(max(1U, cap->max_recv_wr)); in set_rq_size()
361 qp->rq.max_gs = roundup_pow_of_two(max(1U, cap->max_recv_sge)); in set_rq_size()
362 wqe_size = qp->rq.max_gs * sizeof(struct mlx4_wqe_data_seg); in set_rq_size()
363 qp->rq.wqe_shift = ilog2(max_t(u32, wqe_size, inl_recv_sz)); in set_rq_size()
368 cap->max_recv_wr = qp->rq.max_post = qp->rq.wqe_cnt; in set_rq_size()
369 cap->max_recv_sge = qp->rq.max_gs; in set_rq_size()
371 cap->max_recv_wr = qp->rq.max_post = in set_rq_size()
372 min(dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE, qp->rq.wqe_cnt); in set_rq_size()
373 cap->max_recv_sge = min(qp->rq.max_gs, in set_rq_size()
382 enum mlx4_ib_qp_type type, struct mlx4_ib_qp *qp) in set_kernel_sq_size() argument
389 cap->max_inline_data + send_wqe_overhead(type, qp->flags) + in set_kernel_sq_size()
404 send_wqe_overhead(type, qp->flags); in set_kernel_sq_size()
409 qp->sq.wqe_shift = ilog2(roundup_pow_of_two(s)); in set_kernel_sq_size()
415 qp->sq_spare_wqes = MLX4_IB_SQ_HEADROOM(qp->sq.wqe_shift); in set_kernel_sq_size()
416 qp->sq.wqe_cnt = roundup_pow_of_two(cap->max_send_wr + in set_kernel_sq_size()
417 qp->sq_spare_wqes); in set_kernel_sq_size()
419 qp->sq.max_gs = in set_kernel_sq_size()
421 (1 << qp->sq.wqe_shift)) - in set_kernel_sq_size()
422 send_wqe_overhead(type, qp->flags)) / in set_kernel_sq_size()
425 qp->buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) + in set_kernel_sq_size()
426 (qp->sq.wqe_cnt << qp->sq.wqe_shift); in set_kernel_sq_size()
427 if (qp->rq.wqe_shift > qp->sq.wqe_shift) { in set_kernel_sq_size()
428 qp->rq.offset = 0; in set_kernel_sq_size()
429 qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift; in set_kernel_sq_size()
431 qp->rq.offset = qp->sq.wqe_cnt << qp->sq.wqe_shift; in set_kernel_sq_size()
432 qp->sq.offset = 0; in set_kernel_sq_size()
435 cap->max_send_wr = qp->sq.max_post = in set_kernel_sq_size()
436 qp->sq.wqe_cnt - qp->sq_spare_wqes; in set_kernel_sq_size()
437 cap->max_send_sge = min(qp->sq.max_gs, in set_kernel_sq_size()
447 struct mlx4_ib_qp *qp, in set_user_sq_size() argument
461 qp->sq.wqe_cnt = 1 << ucmd->log_sq_bb_count; in set_user_sq_size()
462 qp->sq.wqe_shift = ucmd->log_sq_stride; in set_user_sq_size()
464 qp->buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) + in set_user_sq_size()
465 (qp->sq.wqe_cnt << qp->sq.wqe_shift); in set_user_sq_size()
470 static int alloc_proxy_bufs(struct ib_device *dev, struct mlx4_ib_qp *qp) in alloc_proxy_bufs() argument
474 qp->sqp_proxy_rcv = in alloc_proxy_bufs()
475 kmalloc_array(qp->rq.wqe_cnt, sizeof(struct mlx4_ib_buf), in alloc_proxy_bufs()
477 if (!qp->sqp_proxy_rcv) in alloc_proxy_bufs()
479 for (i = 0; i < qp->rq.wqe_cnt; i++) { in alloc_proxy_bufs()
480 qp->sqp_proxy_rcv[i].addr = in alloc_proxy_bufs()
483 if (!qp->sqp_proxy_rcv[i].addr) in alloc_proxy_bufs()
485 qp->sqp_proxy_rcv[i].map = in alloc_proxy_bufs()
486 ib_dma_map_single(dev, qp->sqp_proxy_rcv[i].addr, in alloc_proxy_bufs()
489 if (ib_dma_mapping_error(dev, qp->sqp_proxy_rcv[i].map)) { in alloc_proxy_bufs()
490 kfree(qp->sqp_proxy_rcv[i].addr); in alloc_proxy_bufs()
499 ib_dma_unmap_single(dev, qp->sqp_proxy_rcv[i].map, in alloc_proxy_bufs()
502 kfree(qp->sqp_proxy_rcv[i].addr); in alloc_proxy_bufs()
504 kfree(qp->sqp_proxy_rcv); in alloc_proxy_bufs()
505 qp->sqp_proxy_rcv = NULL; in alloc_proxy_bufs()
509 static void free_proxy_bufs(struct ib_device *dev, struct mlx4_ib_qp *qp) in free_proxy_bufs() argument
513 for (i = 0; i < qp->rq.wqe_cnt; i++) { in free_proxy_bufs()
514 ib_dma_unmap_single(dev, qp->sqp_proxy_rcv[i].map, in free_proxy_bufs()
517 kfree(qp->sqp_proxy_rcv[i].addr); in free_proxy_bufs()
519 kfree(qp->sqp_proxy_rcv); in free_proxy_bufs()
541 struct mlx4_ib_qp *qp) in mlx4_ib_free_qp_counter() argument
543 mutex_lock(&dev->counters_table[qp->port - 1].mutex); in mlx4_ib_free_qp_counter()
544 mlx4_counter_free(dev->dev, qp->counter_index->index); in mlx4_ib_free_qp_counter()
545 list_del(&qp->counter_index->list); in mlx4_ib_free_qp_counter()
546 mutex_unlock(&dev->counters_table[qp->port - 1].mutex); in mlx4_ib_free_qp_counter()
548 kfree(qp->counter_index); in mlx4_ib_free_qp_counter()
549 qp->counter_index = NULL; in mlx4_ib_free_qp_counter()
657 struct mlx4_ib_qp *qp) in create_qp_rss() argument
662 qp->mqp.usage = MLX4_RES_USAGE_USER_VERBS; in create_qp_rss()
664 err = mlx4_qp_reserve_range(dev->dev, 1, 1, &qpn, 0, qp->mqp.usage); in create_qp_rss()
668 err = mlx4_qp_alloc(dev->dev, qpn, &qp->mqp); in create_qp_rss()
672 INIT_LIST_HEAD(&qp->gid_list); in create_qp_rss()
673 INIT_LIST_HEAD(&qp->steering_rules); in create_qp_rss()
675 qp->mlx4_ib_qp_type = MLX4_IB_QPT_RAW_PACKET; in create_qp_rss()
676 qp->state = IB_QPS_RESET; in create_qp_rss()
679 qp->sq_no_prefetch = 1; in create_qp_rss()
680 qp->sq.wqe_cnt = 1; in create_qp_rss()
681 qp->sq.wqe_shift = MLX4_IB_MIN_SQ_STRIDE; in create_qp_rss()
682 qp->buf_size = qp->sq.wqe_cnt << MLX4_IB_MIN_SQ_STRIDE; in create_qp_rss()
683 qp->mtt = (to_mqp( in create_qp_rss()
686 qp->rss_ctx = kzalloc(sizeof(*qp->rss_ctx), GFP_KERNEL); in create_qp_rss()
687 if (!qp->rss_ctx) { in create_qp_rss()
692 err = set_qp_rss(dev, qp->rss_ctx, init_attr, ucmd); in create_qp_rss()
699 kfree(qp->rss_ctx); in create_qp_rss()
702 mlx4_qp_remove(dev->dev, &qp->mqp); in create_qp_rss()
703 mlx4_qp_free(dev->dev, &qp->mqp); in create_qp_rss()
710 static int _mlx4_ib_create_qp_rss(struct ib_pd *pd, struct mlx4_ib_qp *qp, in _mlx4_ib_create_qp_rss() argument
719 pr_debug("RSS QP with NULL udata\n"); in _mlx4_ib_create_qp_rss()
752 pr_debug("RSS QP with unsupported QP type %d\n", in _mlx4_ib_create_qp_rss()
758 pr_debug("RSS QP doesn't support create flags\n"); in _mlx4_ib_create_qp_rss()
763 pr_debug("RSS QP with unsupported send attributes\n"); in _mlx4_ib_create_qp_rss()
767 qp->pri.vid = 0xFFFF; in _mlx4_ib_create_qp_rss()
768 qp->alt.vid = 0xFFFF; in _mlx4_ib_create_qp_rss()
770 err = create_qp_rss(to_mdev(pd->device), init_attr, &ucmd, qp); in _mlx4_ib_create_qp_rss()
774 qp->ibqp.qp_num = qp->mqp.qpn; in _mlx4_ib_create_qp_rss()
784 struct mlx4_ib_qp *qp, int range_size, int *wqn) in mlx4_ib_alloc_wqn() argument
804 qp->mqp.usage); in mlx4_ib_alloc_wqn()
821 qp->wqn_range = range; in mlx4_ib_alloc_wqn()
834 struct mlx4_ib_qp *qp, bool dirty_release) in mlx4_ib_release_wqn() argument
841 range = qp->wqn_range; in mlx4_ib_release_wqn()
862 struct ib_udata *udata, struct mlx4_ib_qp *qp) in create_rq() argument
877 qp->mlx4_ib_qp_type = MLX4_IB_QPT_RAW_PACKET; in create_rq()
879 spin_lock_init(&qp->sq.lock); in create_rq()
880 spin_lock_init(&qp->rq.lock); in create_rq()
881 INIT_LIST_HEAD(&qp->gid_list); in create_rq()
882 INIT_LIST_HEAD(&qp->steering_rules); in create_rq()
884 qp->state = IB_QPS_RESET; in create_rq()
909 qp->flags |= MLX4_IB_QP_SCATTER_FCS; in create_rq()
911 err = set_rq_size(dev, &init_attr->cap, true, true, qp, qp->inl_recv_sz); in create_rq()
915 qp->sq_no_prefetch = 1; in create_rq()
916 qp->sq.wqe_cnt = 1; in create_rq()
917 qp->sq.wqe_shift = MLX4_IB_MIN_SQ_STRIDE; in create_rq()
918 qp->buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) + in create_rq()
919 (qp->sq.wqe_cnt << qp->sq.wqe_shift); in create_rq()
921 qp->umem = ib_umem_get(pd->device, wq.buf_addr, qp->buf_size, 0); in create_rq()
922 if (IS_ERR(qp->umem)) { in create_rq()
923 err = PTR_ERR(qp->umem); in create_rq()
927 shift = mlx4_ib_umem_calc_optimal_mtt_size(qp->umem, 0, &n); in create_rq()
933 err = mlx4_mtt_init(dev->dev, n, shift, &qp->mtt); in create_rq()
937 err = mlx4_ib_umem_write_mtt(dev, &qp->mtt, qp->umem); in create_rq()
941 err = mlx4_ib_db_map_user(udata, wq.db_addr, &qp->db); in create_rq()
944 qp->mqp.usage = MLX4_RES_USAGE_USER_VERBS; in create_rq()
946 err = mlx4_ib_alloc_wqn(context, qp, range_size, &qpn); in create_rq()
950 err = mlx4_qp_alloc(dev->dev, qpn, &qp->mqp); in create_rq()
959 qp->doorbell_qpn = swab32(qp->mqp.qpn << 8); in create_rq()
961 qp->mqp.event = mlx4_ib_wq_event; in create_rq()
969 list_add_tail(&qp->qps_list, &dev->qp_list); in create_rq()
974 list_add_tail(&qp->cq_send_list, &mcq->send_qp_list); in create_rq()
976 list_add_tail(&qp->cq_recv_list, &mcq->recv_qp_list); in create_rq()
983 mlx4_ib_release_wqn(context, qp, 0); in create_rq()
985 mlx4_ib_db_unmap_user(context, &qp->db); in create_rq()
988 mlx4_mtt_cleanup(dev->dev, &qp->mtt); in create_rq()
990 ib_umem_release(qp->umem); in create_rq()
997 struct mlx4_ib_qp *qp) in create_qp_common() argument
1008 /* When tunneling special qps, we use a plain UD qp */ in create_qp_common()
1054 qp->sqp = kzalloc(sizeof(struct mlx4_ib_sqp), GFP_KERNEL); in create_qp_common()
1055 if (!qp->sqp) in create_qp_common()
1059 qp->mlx4_ib_qp_type = qp_type; in create_qp_common()
1061 spin_lock_init(&qp->sq.lock); in create_qp_common()
1062 spin_lock_init(&qp->rq.lock); in create_qp_common()
1063 INIT_LIST_HEAD(&qp->gid_list); in create_qp_common()
1064 INIT_LIST_HEAD(&qp->steering_rules); in create_qp_common()
1066 qp->state = IB_QPS_RESET; in create_qp_common()
1068 qp->sq_signal_bits = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE); in create_qp_common()
1083 qp->inl_recv_sz = ucmd.inl_recv_sz; in create_qp_common()
1093 qp->flags |= MLX4_IB_QP_SCATTER_FCS; in create_qp_common()
1097 qp_has_rq(init_attr), qp, qp->inl_recv_sz); in create_qp_common()
1101 qp->sq_no_prefetch = ucmd.sq_no_prefetch; in create_qp_common()
1103 err = set_user_sq_size(dev, qp, &ucmd); in create_qp_common()
1107 qp->umem = in create_qp_common()
1108 ib_umem_get(pd->device, ucmd.buf_addr, qp->buf_size, 0); in create_qp_common()
1109 if (IS_ERR(qp->umem)) { in create_qp_common()
1110 err = PTR_ERR(qp->umem); in create_qp_common()
1114 shift = mlx4_ib_umem_calc_optimal_mtt_size(qp->umem, 0, &n); in create_qp_common()
1120 err = mlx4_mtt_init(dev->dev, n, shift, &qp->mtt); in create_qp_common()
1124 err = mlx4_ib_umem_write_mtt(dev, &qp->mtt, qp->umem); in create_qp_common()
1129 err = mlx4_ib_db_map_user(udata, ucmd.db_addr, &qp->db); in create_qp_common()
1133 qp->mqp.usage = MLX4_RES_USAGE_USER_VERBS; in create_qp_common()
1136 qp_has_rq(init_attr), qp, 0); in create_qp_common()
1140 qp->sq_no_prefetch = 0; in create_qp_common()
1143 qp->flags |= MLX4_IB_QP_LSO; in create_qp_common()
1148 qp->flags |= MLX4_IB_QP_NETIF; in create_qp_common()
1155 err = set_kernel_sq_size(dev, &init_attr->cap, qp_type, qp); in create_qp_common()
1160 err = mlx4_db_alloc(dev->dev, &qp->db, 0); in create_qp_common()
1164 *qp->db.db = 0; in create_qp_common()
1167 if (mlx4_buf_alloc(dev->dev, qp->buf_size, PAGE_SIZE * 2, in create_qp_common()
1168 &qp->buf)) { in create_qp_common()
1173 err = mlx4_mtt_init(dev->dev, qp->buf.npages, qp->buf.page_shift, in create_qp_common()
1174 &qp->mtt); in create_qp_common()
1178 err = mlx4_buf_write_mtt(dev->dev, &qp->mtt, &qp->buf); in create_qp_common()
1182 qp->sq.wrid = kvmalloc_array(qp->sq.wqe_cnt, in create_qp_common()
1184 qp->rq.wrid = kvmalloc_array(qp->rq.wqe_cnt, in create_qp_common()
1186 if (!qp->sq.wrid || !qp->rq.wrid) { in create_qp_common()
1190 qp->mqp.usage = MLX4_RES_USAGE_DRIVER; in create_qp_common()
1194 if (qp->mlx4_ib_qp_type & (MLX4_IB_QPT_PROXY_SMI_OWNER | in create_qp_common()
1196 if (alloc_proxy_bufs(pd->device, qp)) { in create_qp_common()
1211 qp->mqp.usage); in create_qp_common()
1213 if (qp->flags & MLX4_IB_QP_NETIF) in create_qp_common()
1217 &qpn, 0, qp->mqp.usage); in create_qp_common()
1223 qp->flags |= MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK; in create_qp_common()
1225 err = mlx4_qp_alloc(dev->dev, qpn, &qp->mqp); in create_qp_common()
1230 qp->mqp.qpn |= (1 << 23); in create_qp_common()
1237 qp->doorbell_qpn = swab32(qp->mqp.qpn << 8); in create_qp_common()
1239 qp->mqp.event = mlx4_ib_qp_event; in create_qp_common()
1247 list_add_tail(&qp->qps_list, &dev->qp_list); in create_qp_common()
1252 list_add_tail(&qp->cq_send_list, &mcq->send_qp_list); in create_qp_common()
1254 list_add_tail(&qp->cq_recv_list, &mcq->recv_qp_list); in create_qp_common()
1262 if (qp->flags & MLX4_IB_QP_NETIF) in create_qp_common()
1268 if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_GSI) in create_qp_common()
1269 free_proxy_bufs(pd->device, qp); in create_qp_common()
1273 mlx4_ib_db_unmap_user(context, &qp->db); in create_qp_common()
1275 kvfree(qp->sq.wrid); in create_qp_common()
1276 kvfree(qp->rq.wrid); in create_qp_common()
1280 mlx4_mtt_cleanup(dev->dev, &qp->mtt); in create_qp_common()
1283 if (!qp->umem) in create_qp_common()
1284 mlx4_buf_free(dev->dev, qp->buf_size, &qp->buf); in create_qp_common()
1285 ib_umem_release(qp->umem); in create_qp_common()
1289 mlx4_db_free(dev->dev, &qp->db); in create_qp_common()
1292 kfree(qp->sqp); in create_qp_common()
1340 static void del_gid_entries(struct mlx4_ib_qp *qp) in del_gid_entries() argument
1344 list_for_each_entry_safe(ge, tmp, &qp->gid_list, list) { in del_gid_entries()
1350 static struct mlx4_ib_pd *get_pd(struct mlx4_ib_qp *qp) in get_pd() argument
1352 if (qp->ibqp.qp_type == IB_QPT_XRC_TGT) in get_pd()
1353 return to_mpd(to_mxrcd(qp->ibqp.xrcd)->pd); in get_pd()
1355 return to_mpd(qp->ibqp.pd); in get_pd()
1358 static void get_cqs(struct mlx4_ib_qp *qp, enum mlx4_ib_source_type src, in get_cqs() argument
1361 switch (qp->ibqp.qp_type) { in get_cqs()
1363 *send_cq = to_mcq(to_mxrcd(qp->ibqp.xrcd)->cq); in get_cqs()
1367 *send_cq = to_mcq(qp->ibqp.send_cq); in get_cqs()
1371 *recv_cq = (src == MLX4_IB_QP_SRC) ? to_mcq(qp->ibqp.recv_cq) : in get_cqs()
1372 to_mcq(qp->ibwq.cq); in get_cqs()
1373 *send_cq = (src == MLX4_IB_QP_SRC) ? to_mcq(qp->ibqp.send_cq) : in get_cqs()
1379 static void destroy_qp_rss(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp) in destroy_qp_rss() argument
1381 if (qp->state != IB_QPS_RESET) { in destroy_qp_rss()
1384 for (i = 0; i < (1 << qp->ibqp.rwq_ind_tbl->log_ind_tbl_size); in destroy_qp_rss()
1386 struct ib_wq *ibwq = qp->ibqp.rwq_ind_tbl->ind_tbl[i]; in destroy_qp_rss()
1396 if (mlx4_qp_modify(dev->dev, NULL, to_mlx4_state(qp->state), in destroy_qp_rss()
1397 MLX4_QP_STATE_RST, NULL, 0, 0, &qp->mqp)) in destroy_qp_rss()
1398 pr_warn("modify QP %06x to RESET failed.\n", in destroy_qp_rss()
1399 qp->mqp.qpn); in destroy_qp_rss()
1402 mlx4_qp_remove(dev->dev, &qp->mqp); in destroy_qp_rss()
1403 mlx4_qp_free(dev->dev, &qp->mqp); in destroy_qp_rss()
1404 mlx4_qp_release_range(dev->dev, qp->mqp.qpn, 1); in destroy_qp_rss()
1405 del_gid_entries(qp); in destroy_qp_rss()
1408 static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp, in destroy_qp_common() argument
1415 if (qp->state != IB_QPS_RESET) { in destroy_qp_common()
1416 if (mlx4_qp_modify(dev->dev, NULL, to_mlx4_state(qp->state), in destroy_qp_common()
1417 MLX4_QP_STATE_RST, NULL, 0, 0, &qp->mqp)) in destroy_qp_common()
1418 pr_warn("modify QP %06x to RESET failed.\n", in destroy_qp_common()
1419 qp->mqp.qpn); in destroy_qp_common()
1420 if (qp->pri.smac || (!qp->pri.smac && qp->pri.smac_port)) { in destroy_qp_common()
1421 mlx4_unregister_mac(dev->dev, qp->pri.smac_port, qp->pri.smac); in destroy_qp_common()
1422 qp->pri.smac = 0; in destroy_qp_common()
1423 qp->pri.smac_port = 0; in destroy_qp_common()
1425 if (qp->alt.smac) { in destroy_qp_common()
1426 mlx4_unregister_mac(dev->dev, qp->alt.smac_port, qp->alt.smac); in destroy_qp_common()
1427 qp->alt.smac = 0; in destroy_qp_common()
1429 if (qp->pri.vid < 0x1000) { in destroy_qp_common()
1430 mlx4_unregister_vlan(dev->dev, qp->pri.vlan_port, qp->pri.vid); in destroy_qp_common()
1431 qp->pri.vid = 0xFFFF; in destroy_qp_common()
1432 qp->pri.candidate_vid = 0xFFFF; in destroy_qp_common()
1433 qp->pri.update_vid = 0; in destroy_qp_common()
1435 if (qp->alt.vid < 0x1000) { in destroy_qp_common()
1436 mlx4_unregister_vlan(dev->dev, qp->alt.vlan_port, qp->alt.vid); in destroy_qp_common()
1437 qp->alt.vid = 0xFFFF; in destroy_qp_common()
1438 qp->alt.candidate_vid = 0xFFFF; in destroy_qp_common()
1439 qp->alt.update_vid = 0; in destroy_qp_common()
1443 get_cqs(qp, src, &send_cq, &recv_cq); in destroy_qp_common()
1449 list_del(&qp->qps_list); in destroy_qp_common()
1450 list_del(&qp->cq_send_list); in destroy_qp_common()
1451 list_del(&qp->cq_recv_list); in destroy_qp_common()
1453 __mlx4_ib_cq_clean(recv_cq, qp->mqp.qpn, in destroy_qp_common()
1454 qp->ibqp.srq ? to_msrq(qp->ibqp.srq): NULL); in destroy_qp_common()
1456 __mlx4_ib_cq_clean(send_cq, qp->mqp.qpn, NULL); in destroy_qp_common()
1459 mlx4_qp_remove(dev->dev, &qp->mqp); in destroy_qp_common()
1464 mlx4_qp_free(dev->dev, &qp->mqp); in destroy_qp_common()
1466 if (!is_sqp(dev, qp) && !is_tunnel_qp(dev, qp)) { in destroy_qp_common()
1467 if (qp->flags & MLX4_IB_QP_NETIF) in destroy_qp_common()
1468 mlx4_ib_steer_qp_free(dev, qp->mqp.qpn, 1); in destroy_qp_common()
1475 qp, 1); in destroy_qp_common()
1477 mlx4_qp_release_range(dev->dev, qp->mqp.qpn, 1); in destroy_qp_common()
1480 mlx4_mtt_cleanup(dev->dev, &qp->mtt); in destroy_qp_common()
1483 if (qp->rq.wqe_cnt) { in destroy_qp_common()
1490 mlx4_ib_db_unmap_user(mcontext, &qp->db); in destroy_qp_common()
1493 kvfree(qp->sq.wrid); in destroy_qp_common()
1494 kvfree(qp->rq.wrid); in destroy_qp_common()
1495 if (qp->mlx4_ib_qp_type & (MLX4_IB_QPT_PROXY_SMI_OWNER | in destroy_qp_common()
1497 free_proxy_bufs(&dev->ib_dev, qp); in destroy_qp_common()
1498 mlx4_buf_free(dev->dev, qp->buf_size, &qp->buf); in destroy_qp_common()
1499 if (qp->rq.wqe_cnt) in destroy_qp_common()
1500 mlx4_db_free(dev->dev, &qp->db); in destroy_qp_common()
1502 ib_umem_release(qp->umem); in destroy_qp_common()
1504 del_gid_entries(qp); in destroy_qp_common()
1524 static int _mlx4_ib_create_qp(struct ib_pd *pd, struct mlx4_ib_qp *qp, in _mlx4_ib_create_qp() argument
1533 return _mlx4_ib_create_qp_rss(pd, qp, init_attr, udata); in _mlx4_ib_create_qp()
1582 qp->pri.vid = 0xFFFF; in _mlx4_ib_create_qp()
1583 qp->alt.vid = 0xFFFF; in _mlx4_ib_create_qp()
1584 err = create_qp_common(pd, init_attr, udata, 0, qp); in _mlx4_ib_create_qp()
1588 qp->ibqp.qp_num = qp->mqp.qpn; in _mlx4_ib_create_qp()
1589 qp->xrcdn = xrcdn; in _mlx4_ib_create_qp()
1607 qp->pri.vid = 0xFFFF; in _mlx4_ib_create_qp()
1608 qp->alt.vid = 0xFFFF; in _mlx4_ib_create_qp()
1609 err = create_qp_common(pd, init_attr, udata, sqpn, qp); in _mlx4_ib_create_qp()
1615 /* Internal QP created with ib_create_qp */ in _mlx4_ib_create_qp()
1616 rdma_restrack_no_track(&qp->ibqp.res); in _mlx4_ib_create_qp()
1618 qp->port = init_attr->port_num; in _mlx4_ib_create_qp()
1619 qp->ibqp.qp_num = init_attr->qp_type == IB_QPT_SMI ? 0 : in _mlx4_ib_create_qp()
1635 struct mlx4_ib_qp *qp = to_mqp(ibqp); in mlx4_ib_create_qp() local
1639 mutex_init(&qp->mutex); in mlx4_ib_create_qp()
1640 ret = _mlx4_ib_create_qp(pd, qp, init_attr, udata); in mlx4_ib_create_qp()
1646 struct mlx4_ib_sqp *sqp = qp->sqp; in mlx4_ib_create_qp()
1655 pr_err("Failed to create GSI QP for RoCEv2 (%ld)\n", PTR_ERR(sqp->roce_v2_gsi)); in mlx4_ib_create_qp()
1668 static int _mlx4_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata) in _mlx4_ib_destroy_qp() argument
1670 struct mlx4_ib_dev *dev = to_mdev(qp->device); in _mlx4_ib_destroy_qp()
1671 struct mlx4_ib_qp *mqp = to_mqp(qp); in _mlx4_ib_destroy_qp()
1686 if (qp->rwq_ind_tbl) { in _mlx4_ib_destroy_qp()
1696 int mlx4_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata) in mlx4_ib_destroy_qp() argument
1698 struct mlx4_ib_qp *mqp = to_mqp(qp); in mlx4_ib_destroy_qp()
1707 return _mlx4_ib_destroy_qp(qp, udata); in mlx4_ib_destroy_qp()
1734 static __be32 to_mlx4_access_flags(struct mlx4_ib_qp *qp, const struct ib_qp_attr *attr, in to_mlx4_access_flags() argument
1744 dest_rd_atomic = qp->resp_depth; in to_mlx4_access_flags()
1749 access_flags = qp->atomic_rd_en; in to_mlx4_access_flags()
1847 /* no current vlan tag in qp */ in _mlx4_set_path()
1860 /* have current vlan tag. unregister it at modify-qp success */ in _mlx4_set_path()
1898 static int mlx4_set_path(struct mlx4_ib_dev *dev, const struct ib_qp_attr *qp, in mlx4_set_path() argument
1904 return _mlx4_set_path(dev, &qp->ah_attr, in mlx4_set_path()
1911 const struct ib_qp_attr *qp, in mlx4_set_alt_path() argument
1916 return _mlx4_set_path(dev, &qp->alt_ah_attr, in mlx4_set_alt_path()
1922 static void update_mcg_macs(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp) in update_mcg_macs() argument
1926 list_for_each_entry_safe(ge, tmp, &qp->gid_list, list) { in update_mcg_macs()
1927 if (!ge->added && mlx4_ib_add_mc(dev, qp, &ge->gid)) { in update_mcg_macs()
1929 ge->port = qp->port; in update_mcg_macs()
1935 struct mlx4_ib_qp *qp, in handle_eth_ud_smac_index() argument
1941 u64_mac = atomic64_read(&dev->iboe.mac[qp->port - 1]); in handle_eth_ud_smac_index()
1943 context->pri_path.sched_queue = MLX4_IB_DEFAULT_SCHED_QUEUE | ((qp->port - 1) << 6); in handle_eth_ud_smac_index()
1944 if (!qp->pri.smac && !qp->pri.smac_port) { in handle_eth_ud_smac_index()
1945 smac_index = mlx4_register_mac(dev->dev, qp->port, u64_mac); in handle_eth_ud_smac_index()
1947 qp->pri.candidate_smac_index = smac_index; in handle_eth_ud_smac_index()
1948 qp->pri.candidate_smac = u64_mac; in handle_eth_ud_smac_index()
1949 qp->pri.candidate_smac_port = qp->port; in handle_eth_ud_smac_index()
1958 static int create_qp_lb_counter(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp) in create_qp_lb_counter() argument
1964 if (rdma_port_get_link_layer(&dev->ib_dev, qp->port) != in create_qp_lb_counter()
1966 !(qp->flags & MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK) || in create_qp_lb_counter()
1982 qp->counter_index = new_counter_index; in create_qp_lb_counter()
1984 mutex_lock(&dev->counters_table[qp->port - 1].mutex); in create_qp_lb_counter()
1986 &dev->counters_table[qp->port - 1].counters_list); in create_qp_lb_counter()
1987 mutex_unlock(&dev->counters_table[qp->port - 1].mutex); in create_qp_lb_counter()
2011 * Go over all RSS QP's children (WQs) and apply their HW state according to
2012 * their logic state if the RSS QP is the first RSS QP associated with the WQ.
2027 * A WQ is associated to a port according to the RSS QP it is in bringup_rss_rwqs()
2030 * RSS QP, return a failure. in bringup_rss_rwqs()
2096 struct mlx4_ib_qp *qp) in fill_qp_rss_context() argument
2103 rss_context->base_qpn = cpu_to_be32(qp->rss_ctx->base_qpn_tbl_sz); in fill_qp_rss_context()
2105 cpu_to_be32(qp->rss_ctx->base_qpn_tbl_sz & 0xffffff); in fill_qp_rss_context()
2106 if (qp->rss_ctx->flags & (MLX4_RSS_UDP_IPV4 | MLX4_RSS_UDP_IPV6)) in fill_qp_rss_context()
2108 rss_context->flags = qp->rss_ctx->flags; in fill_qp_rss_context()
2112 memcpy(rss_context->rss_key, qp->rss_ctx->rss_key, in fill_qp_rss_context()
2127 struct mlx4_ib_qp *qp; in __mlx4_ib_modify_qp() local
2146 qp = to_mqp((struct ib_qp *)ibwq); in __mlx4_ib_modify_qp()
2156 qp = to_mqp(ibqp); in __mlx4_ib_modify_qp()
2158 pd = get_pd(qp); in __mlx4_ib_modify_qp()
2163 rdma_port_get_link_layer(&dev->ib_dev, qp->port) == in __mlx4_ib_modify_qp()
2172 (to_mlx4_st(dev, qp->mlx4_ib_qp_type) << 16)); in __mlx4_ib_modify_qp()
2191 if (qp->inl_recv_sz) in __mlx4_ib_modify_qp()
2194 if (qp->flags & MLX4_IB_QP_SCATTER_FCS) in __mlx4_ib_modify_qp()
2202 if (qp->flags & MLX4_IB_QP_LSO) in __mlx4_ib_modify_qp()
2218 if (qp->rq.wqe_cnt) in __mlx4_ib_modify_qp()
2219 context->rq_size_stride = ilog2(qp->rq.wqe_cnt) << 3; in __mlx4_ib_modify_qp()
2220 context->rq_size_stride |= qp->rq.wqe_shift - 4; in __mlx4_ib_modify_qp()
2223 if (qp->sq.wqe_cnt) in __mlx4_ib_modify_qp()
2224 context->sq_size_stride = ilog2(qp->sq.wqe_cnt) << 3; in __mlx4_ib_modify_qp()
2225 context->sq_size_stride |= qp->sq.wqe_shift - 4; in __mlx4_ib_modify_qp()
2227 if (new_state == IB_QPS_RESET && qp->counter_index) in __mlx4_ib_modify_qp()
2228 mlx4_ib_free_qp_counter(dev, qp); in __mlx4_ib_modify_qp()
2231 context->sq_size_stride |= !!qp->sq_no_prefetch << 7; in __mlx4_ib_modify_qp()
2232 context->xrcd = cpu_to_be32((u32) qp->xrcdn); in __mlx4_ib_modify_qp()
2256 err = create_qp_lb_counter(dev, qp); in __mlx4_ib_modify_qp()
2261 dev->counters_table[qp->port - 1].default_counter; in __mlx4_ib_modify_qp()
2262 if (qp->counter_index) in __mlx4_ib_modify_qp()
2263 counter_index = qp->counter_index->index; in __mlx4_ib_modify_qp()
2268 if (qp->counter_index) { in __mlx4_ib_modify_qp()
2278 if (qp->flags & MLX4_IB_QP_NETIF) { in __mlx4_ib_modify_qp()
2279 mlx4_ib_steer_qp_reg(dev, qp, 1); in __mlx4_ib_modify_qp()
2284 enum ib_gid_type gid_type = qp->flags & MLX4_IB_ROCE_V2_GSI_QP ? in __mlx4_ib_modify_qp()
2293 if (qp->mlx4_ib_qp_type & MLX4_IB_QPT_ANY_SRIOV) in __mlx4_ib_modify_qp()
2301 attr_mask & IB_QP_PORT ? attr->port_num : qp->port; in __mlx4_ib_modify_qp()
2316 if (mlx4_set_path(dev, attr, attr_mask, qp, &context->pri_path, in __mlx4_ib_modify_qp()
2350 if (mlx4_set_alt_path(dev, attr, attr_mask, qp, in __mlx4_ib_modify_qp()
2364 get_cqs(qp, src_type, &send_cq, &recv_cq); in __mlx4_ib_modify_qp()
2404 context->params2 |= to_mlx4_access_flags(qp, attr, attr_mask); in __mlx4_ib_modify_qp()
2418 /* proxy and tunnel qp qkeys will be changed in modify-qp wrappers */ in __mlx4_ib_modify_qp()
2420 if (qp->mlx4_ib_qp_type & in __mlx4_ib_modify_qp()
2425 !(qp->mlx4_ib_qp_type & MLX4_IB_QPT_ANY_SRIOV) && in __mlx4_ib_modify_qp()
2443 if (qp->rq.wqe_cnt && in __mlx4_ib_modify_qp()
2446 context->db_rec_addr = cpu_to_be64(qp->db.dma); in __mlx4_ib_modify_qp()
2452 context->pri_path.sched_queue = (qp->port - 1) << 6; in __mlx4_ib_modify_qp()
2453 if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_SMI || in __mlx4_ib_modify_qp()
2454 qp->mlx4_ib_qp_type & in __mlx4_ib_modify_qp()
2457 if (qp->mlx4_ib_qp_type != MLX4_IB_QPT_SMI) in __mlx4_ib_modify_qp()
2460 if (qp->mlx4_ib_qp_type & MLX4_IB_QPT_ANY_SRIOV) in __mlx4_ib_modify_qp()
2464 if (rdma_port_get_link_layer(&dev->ib_dev, qp->port) == in __mlx4_ib_modify_qp()
2466 if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_TUN_GSI || in __mlx4_ib_modify_qp()
2467 qp->mlx4_ib_qp_type == MLX4_IB_QPT_GSI) in __mlx4_ib_modify_qp()
2470 if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_UD || in __mlx4_ib_modify_qp()
2471 qp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_GSI || in __mlx4_ib_modify_qp()
2472 qp->mlx4_ib_qp_type == MLX4_IB_QPT_TUN_GSI) { in __mlx4_ib_modify_qp()
2473 err = handle_eth_ud_smac_index(dev, qp, context); in __mlx4_ib_modify_qp()
2478 if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_GSI) in __mlx4_ib_modify_qp()
2479 dev->qp1_proxy[qp->port - 1] = qp; in __mlx4_ib_modify_qp()
2488 /* set QP to receive both tunneled & non-tunneled packets */ in __mlx4_ib_modify_qp()
2496 &dev->ib_dev, qp->port) == in __mlx4_ib_modify_qp()
2516 * Before passing a kernel QP to the HW, make sure that the in __mlx4_ib_modify_qp()
2527 for (i = 0; i < qp->sq.wqe_cnt; ++i) { in __mlx4_ib_modify_qp()
2528 ctrl = get_send_wqe(qp, i); in __mlx4_ib_modify_qp()
2531 1 << (qp->sq.wqe_shift - 4); in __mlx4_ib_modify_qp()
2532 stamp_send_wqe(qp, i); in __mlx4_ib_modify_qp()
2539 fill_qp_rss_context(context, qp); in __mlx4_ib_modify_qp()
2543 err = mlx4_qp_modify(dev->dev, &qp->mtt, to_mlx4_state(cur_state), in __mlx4_ib_modify_qp()
2545 sqd_event, &qp->mqp); in __mlx4_ib_modify_qp()
2549 qp->state = new_state; in __mlx4_ib_modify_qp()
2552 qp->atomic_rd_en = attr->qp_access_flags; in __mlx4_ib_modify_qp()
2554 qp->resp_depth = attr->max_dest_rd_atomic; in __mlx4_ib_modify_qp()
2556 qp->port = attr->port_num; in __mlx4_ib_modify_qp()
2557 update_mcg_macs(dev, qp); in __mlx4_ib_modify_qp()
2560 qp->alt_port = attr->alt_port_num; in __mlx4_ib_modify_qp()
2562 if (is_sqp(dev, qp)) in __mlx4_ib_modify_qp()
2563 store_sqp_attrs(qp->sqp, attr, attr_mask); in __mlx4_ib_modify_qp()
2569 if (is_qp0(dev, qp)) { in __mlx4_ib_modify_qp()
2571 if (mlx4_INIT_PORT(dev->dev, qp->port)) in __mlx4_ib_modify_qp()
2573 qp->port); in __mlx4_ib_modify_qp()
2577 mlx4_CLOSE_PORT(dev->dev, qp->port); in __mlx4_ib_modify_qp()
2581 * If we moved a kernel QP to RESET, clean up all old CQ in __mlx4_ib_modify_qp()
2582 * entries and reinitialize the QP. in __mlx4_ib_modify_qp()
2586 mlx4_ib_cq_clean(recv_cq, qp->mqp.qpn, in __mlx4_ib_modify_qp()
2589 mlx4_ib_cq_clean(send_cq, qp->mqp.qpn, NULL); in __mlx4_ib_modify_qp()
2591 qp->rq.head = 0; in __mlx4_ib_modify_qp()
2592 qp->rq.tail = 0; in __mlx4_ib_modify_qp()
2593 qp->sq.head = 0; in __mlx4_ib_modify_qp()
2594 qp->sq.tail = 0; in __mlx4_ib_modify_qp()
2595 qp->sq_next_wqe = 0; in __mlx4_ib_modify_qp()
2596 if (qp->rq.wqe_cnt) in __mlx4_ib_modify_qp()
2597 *qp->db.db = 0; in __mlx4_ib_modify_qp()
2599 if (qp->flags & MLX4_IB_QP_NETIF) in __mlx4_ib_modify_qp()
2600 mlx4_ib_steer_qp_reg(dev, qp, 0); in __mlx4_ib_modify_qp()
2602 if (qp->pri.smac || (!qp->pri.smac && qp->pri.smac_port)) { in __mlx4_ib_modify_qp()
2603 mlx4_unregister_mac(dev->dev, qp->pri.smac_port, qp->pri.smac); in __mlx4_ib_modify_qp()
2604 qp->pri.smac = 0; in __mlx4_ib_modify_qp()
2605 qp->pri.smac_port = 0; in __mlx4_ib_modify_qp()
2607 if (qp->alt.smac) { in __mlx4_ib_modify_qp()
2608 mlx4_unregister_mac(dev->dev, qp->alt.smac_port, qp->alt.smac); in __mlx4_ib_modify_qp()
2609 qp->alt.smac = 0; in __mlx4_ib_modify_qp()
2611 if (qp->pri.vid < 0x1000) { in __mlx4_ib_modify_qp()
2612 mlx4_unregister_vlan(dev->dev, qp->pri.vlan_port, qp->pri.vid); in __mlx4_ib_modify_qp()
2613 qp->pri.vid = 0xFFFF; in __mlx4_ib_modify_qp()
2614 qp->pri.candidate_vid = 0xFFFF; in __mlx4_ib_modify_qp()
2615 qp->pri.update_vid = 0; in __mlx4_ib_modify_qp()
2618 if (qp->alt.vid < 0x1000) { in __mlx4_ib_modify_qp()
2619 mlx4_unregister_vlan(dev->dev, qp->alt.vlan_port, qp->alt.vid); in __mlx4_ib_modify_qp()
2620 qp->alt.vid = 0xFFFF; in __mlx4_ib_modify_qp()
2621 qp->alt.candidate_vid = 0xFFFF; in __mlx4_ib_modify_qp()
2622 qp->alt.update_vid = 0; in __mlx4_ib_modify_qp()
2626 if (err && qp->counter_index) in __mlx4_ib_modify_qp()
2627 mlx4_ib_free_qp_counter(dev, qp); in __mlx4_ib_modify_qp()
2629 mlx4_ib_steer_qp_reg(dev, qp, 0); in __mlx4_ib_modify_qp()
2631 if (qp->pri.candidate_smac || in __mlx4_ib_modify_qp()
2632 (!qp->pri.candidate_smac && qp->pri.candidate_smac_port)) { in __mlx4_ib_modify_qp()
2634 mlx4_unregister_mac(dev->dev, qp->pri.candidate_smac_port, qp->pri.candidate_smac); in __mlx4_ib_modify_qp()
2636 if (qp->pri.smac || (!qp->pri.smac && qp->pri.smac_port)) in __mlx4_ib_modify_qp()
2637 mlx4_unregister_mac(dev->dev, qp->pri.smac_port, qp->pri.smac); in __mlx4_ib_modify_qp()
2638 qp->pri.smac = qp->pri.candidate_smac; in __mlx4_ib_modify_qp()
2639 qp->pri.smac_index = qp->pri.candidate_smac_index; in __mlx4_ib_modify_qp()
2640 qp->pri.smac_port = qp->pri.candidate_smac_port; in __mlx4_ib_modify_qp()
2642 qp->pri.candidate_smac = 0; in __mlx4_ib_modify_qp()
2643 qp->pri.candidate_smac_index = 0; in __mlx4_ib_modify_qp()
2644 qp->pri.candidate_smac_port = 0; in __mlx4_ib_modify_qp()
2646 if (qp->alt.candidate_smac) { in __mlx4_ib_modify_qp()
2648 mlx4_unregister_mac(dev->dev, qp->alt.candidate_smac_port, qp->alt.candidate_smac); in __mlx4_ib_modify_qp()
2650 if (qp->alt.smac) in __mlx4_ib_modify_qp()
2651 mlx4_unregister_mac(dev->dev, qp->alt.smac_port, qp->alt.smac); in __mlx4_ib_modify_qp()
2652 qp->alt.smac = qp->alt.candidate_smac; in __mlx4_ib_modify_qp()
2653 qp->alt.smac_index = qp->alt.candidate_smac_index; in __mlx4_ib_modify_qp()
2654 qp->alt.smac_port = qp->alt.candidate_smac_port; in __mlx4_ib_modify_qp()
2656 qp->alt.candidate_smac = 0; in __mlx4_ib_modify_qp()
2657 qp->alt.candidate_smac_index = 0; in __mlx4_ib_modify_qp()
2658 qp->alt.candidate_smac_port = 0; in __mlx4_ib_modify_qp()
2661 if (qp->pri.update_vid) { in __mlx4_ib_modify_qp()
2663 if (qp->pri.candidate_vid < 0x1000) in __mlx4_ib_modify_qp()
2664 mlx4_unregister_vlan(dev->dev, qp->pri.candidate_vlan_port, in __mlx4_ib_modify_qp()
2665 qp->pri.candidate_vid); in __mlx4_ib_modify_qp()
2667 if (qp->pri.vid < 0x1000) in __mlx4_ib_modify_qp()
2668 mlx4_unregister_vlan(dev->dev, qp->pri.vlan_port, in __mlx4_ib_modify_qp()
2669 qp->pri.vid); in __mlx4_ib_modify_qp()
2670 qp->pri.vid = qp->pri.candidate_vid; in __mlx4_ib_modify_qp()
2671 qp->pri.vlan_port = qp->pri.candidate_vlan_port; in __mlx4_ib_modify_qp()
2672 qp->pri.vlan_index = qp->pri.candidate_vlan_index; in __mlx4_ib_modify_qp()
2674 qp->pri.candidate_vid = 0xFFFF; in __mlx4_ib_modify_qp()
2675 qp->pri.update_vid = 0; in __mlx4_ib_modify_qp()
2678 if (qp->alt.update_vid) { in __mlx4_ib_modify_qp()
2680 if (qp->alt.candidate_vid < 0x1000) in __mlx4_ib_modify_qp()
2681 mlx4_unregister_vlan(dev->dev, qp->alt.candidate_vlan_port, in __mlx4_ib_modify_qp()
2682 qp->alt.candidate_vid); in __mlx4_ib_modify_qp()
2684 if (qp->alt.vid < 0x1000) in __mlx4_ib_modify_qp()
2685 mlx4_unregister_vlan(dev->dev, qp->alt.vlan_port, in __mlx4_ib_modify_qp()
2686 qp->alt.vid); in __mlx4_ib_modify_qp()
2687 qp->alt.vid = qp->alt.candidate_vid; in __mlx4_ib_modify_qp()
2688 qp->alt.vlan_port = qp->alt.candidate_vlan_port; in __mlx4_ib_modify_qp()
2689 qp->alt.vlan_index = qp->alt.candidate_vlan_index; in __mlx4_ib_modify_qp()
2691 qp->alt.candidate_vid = 0xFFFF; in __mlx4_ib_modify_qp()
2692 qp->alt.update_vid = 0; in __mlx4_ib_modify_qp()
2707 struct mlx4_ib_qp *qp = to_mqp(ibqp); in _mlx4_ib_modify_qp() local
2710 mutex_lock(&qp->mutex); in _mlx4_ib_modify_qp()
2712 cur_state = attr_mask & IB_QP_CUR_STATE ? attr->cur_qp_state : qp->state; in _mlx4_ib_modify_qp()
2730 pr_debug("qpn 0x%x: RSS QP unsupported transition %d to %d\n", in _mlx4_ib_modify_qp()
2738 pr_debug("qpn 0x%x: RSS QP unsupported attribute mask 0x%x for transition %d to %d\n", in _mlx4_ib_modify_qp()
2777 int p = attr_mask & IB_QP_PORT ? attr->port_num : qp->port; in _mlx4_ib_modify_qp()
2827 mutex_unlock(&qp->mutex); in _mlx4_ib_modify_qp()
2849 pr_err("Failed to modify GSI QP for RoCEv2 (%d)\n", in mlx4_ib_modify_qp()
2868 static int build_sriov_qp0_header(struct mlx4_ib_qp *qp, in build_sriov_qp0_header() argument
2872 struct mlx4_ib_dev *mdev = to_mdev(qp->ibqp.device); in build_sriov_qp0_header()
2873 struct mlx4_ib_sqp *sqp = qp->sqp; in build_sriov_qp0_header()
2874 struct ib_device *ib_dev = qp->ibqp.device; in build_sriov_qp0_header()
2896 if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_SMI_OWNER) in build_sriov_qp0_header()
2901 if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_SMI_OWNER) { in build_sriov_qp0_header()
2918 err = ib_get_cached_pkey(ib_dev, qp->port, 0, &pkey); in build_sriov_qp0_header()
2922 if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_TUN_SMI_OWNER) in build_sriov_qp0_header()
2926 cpu_to_be32(mdev->dev->caps.spec_qps[qp->port - 1].qp0_tunnel); in build_sriov_qp0_header()
2930 if (mlx4_get_parav_qkey(mdev->dev, qp->mqp.qpn, &qkey)) in build_sriov_qp0_header()
2933 if (vf_get_qp0_qkey(mdev->dev, qp->mqp.qpn, &qkey)) in build_sriov_qp0_header()
2937 sqp->ud_header.deth.source_qpn = cpu_to_be32(qp->mqp.qpn); in build_sriov_qp0_header()
3021 static int build_mlx_header(struct mlx4_ib_qp *qp, const struct ib_ud_wr *wr, in build_mlx_header() argument
3024 struct mlx4_ib_sqp *sqp = qp->sqp; in build_mlx_header()
3025 struct ib_device *ib_dev = qp->ibqp.device; in build_mlx_header()
3049 is_eth = rdma_port_get_link_layer(qp->ibqp.device, qp->port) == IB_LINK_LAYER_ETHERNET; in build_mlx_header()
3063 err = fill_gid_by_hw_index(ibdev, qp->port, in build_mlx_header()
3115 .demux[qp->port - 1] in build_mlx_header()
3120 ->sriov.demux[qp->port - 1] in build_mlx_header()
3154 cpu_to_be32((!qp->ibqp.qp_num ? MLX4_WQE_MLX_VL15 : 0) | in build_mlx_header()
3203 !qp->ibqp.qp_num ? in build_mlx_header()
3207 qp->port); in build_mlx_header()
3208 if (qp->ibqp.qp_num && sqp->ud_header.lrh.virtual_lane == 15) in build_mlx_header()
3214 if (!qp->ibqp.qp_num) in build_mlx_header()
3215 err = ib_get_cached_pkey(ib_dev, qp->port, sqp->pkey_index, in build_mlx_header()
3218 err = ib_get_cached_pkey(ib_dev, qp->port, wr->pkey_index, in build_mlx_header()
3228 sqp->ud_header.deth.source_qpn = cpu_to_be32(qp->ibqp.qp_num); in build_mlx_header()
3400 /* Use QKEY from the QP context, which is set by master */ in set_tunnel_datagram_seg()
3489 const struct ib_ud_wr *wr, struct mlx4_ib_qp *qp, in build_lso_seg() argument
3497 if (unlikely(!(qp->flags & MLX4_IB_QP_LSO) && in build_lso_seg()
3498 wr->wr.num_sge > qp->sq.max_gs - (halign >> 4))) in build_lso_seg()
3533 struct mlx4_ib_qp *qp = to_mqp(ibqp); in _mlx4_ib_post_send() local
3550 if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_GSI) { in _mlx4_ib_post_send()
3551 struct mlx4_ib_sqp *sqp = qp->sqp; in _mlx4_ib_post_send()
3558 if (!fill_gid_by_hw_index(mdev, qp->port, in _mlx4_ib_post_send()
3561 qp = (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) ? in _mlx4_ib_post_send()
3562 to_mqp(sqp->roce_v2_gsi) : qp; in _mlx4_ib_post_send()
3569 spin_lock_irqsave(&qp->sq.lock, flags); in _mlx4_ib_post_send()
3578 ind = qp->sq_next_wqe; in _mlx4_ib_post_send()
3584 if (mlx4_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) { in _mlx4_ib_post_send()
3590 if (unlikely(wr->num_sge > qp->sq.max_gs)) { in _mlx4_ib_post_send()
3596 ctrl = wqe = get_send_wqe(qp, ind & (qp->sq.wqe_cnt - 1)); in _mlx4_ib_post_send()
3597 qp->sq.wrid[(qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1)] = wr->wr_id; in _mlx4_ib_post_send()
3607 qp->sq_signal_bits; in _mlx4_ib_post_send()
3614 switch (qp->mlx4_ib_qp_type) { in _mlx4_ib_post_send()
3678 err = build_sriov_qp0_header(qp, ud_wr(wr), ctrl, in _mlx4_ib_post_send()
3689 /* this is a UD qp used in MAD responses to slaves. */ in _mlx4_ib_post_send()
3702 err = build_lso_seg(wqe, ud_wr(wr), qp, &seglen, in _mlx4_ib_post_send()
3715 err = build_sriov_qp0_header(qp, ud_wr(wr), ctrl, in _mlx4_ib_post_send()
3733 /* If we are tunneling special qps, this is a UD qp. in _mlx4_ib_post_send()
3735 * the tunnel qp, and then add a header with address in _mlx4_ib_post_send()
3739 qp->mlx4_ib_qp_type); in _mlx4_ib_post_send()
3749 err = build_mlx_header(qp, ud_wr(wr), ctrl, &seglen); in _mlx4_ib_post_send()
3774 if (unlikely(qp->mlx4_ib_qp_type == MLX4_IB_QPT_SMI || in _mlx4_ib_post_send()
3775 qp->mlx4_ib_qp_type == MLX4_IB_QPT_GSI || in _mlx4_ib_post_send()
3776 qp->mlx4_ib_qp_type & in _mlx4_ib_post_send()
3810 (ind & qp->sq.wqe_cnt ? cpu_to_be32(1 << 31) : 0) | blh; in _mlx4_ib_post_send()
3818 stamp_send_wqe(qp, ind + qp->sq_spare_wqes); in _mlx4_ib_post_send()
3824 qp->sq.head += nreq; in _mlx4_ib_post_send()
3832 writel_relaxed(qp->doorbell_qpn, in _mlx4_ib_post_send()
3835 stamp_send_wqe(qp, ind + qp->sq_spare_wqes - 1); in _mlx4_ib_post_send()
3837 qp->sq_next_wqe = ind; in _mlx4_ib_post_send()
3840 spin_unlock_irqrestore(&qp->sq.lock, flags); in _mlx4_ib_post_send()
3854 struct mlx4_ib_qp *qp = to_mqp(ibqp); in _mlx4_ib_post_recv() local
3864 max_gs = qp->rq.max_gs; in _mlx4_ib_post_recv()
3865 spin_lock_irqsave(&qp->rq.lock, flags); in _mlx4_ib_post_recv()
3875 ind = qp->rq.head & (qp->rq.wqe_cnt - 1); in _mlx4_ib_post_recv()
3878 if (mlx4_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) { in _mlx4_ib_post_recv()
3884 if (unlikely(wr->num_sge > qp->rq.max_gs)) { in _mlx4_ib_post_recv()
3890 scat = get_recv_wqe(qp, ind); in _mlx4_ib_post_recv()
3892 if (qp->mlx4_ib_qp_type & (MLX4_IB_QPT_PROXY_SMI_OWNER | in _mlx4_ib_post_recv()
3895 qp->sqp_proxy_rcv[ind].map, in _mlx4_ib_post_recv()
3902 scat->addr = cpu_to_be64(qp->sqp_proxy_rcv[ind].map); in _mlx4_ib_post_recv()
3916 qp->rq.wrid[ind] = wr->wr_id; in _mlx4_ib_post_recv()
3918 ind = (ind + 1) & (qp->rq.wqe_cnt - 1); in _mlx4_ib_post_recv()
3923 qp->rq.head += nreq; in _mlx4_ib_post_recv()
3931 *qp->db.db = cpu_to_be32(qp->rq.head & 0xffff); in _mlx4_ib_post_recv()
3934 spin_unlock_irqrestore(&qp->rq.lock, flags); in _mlx4_ib_post_recv()
4022 struct mlx4_ib_qp *qp = to_mqp(ibqp); in mlx4_ib_query_qp() local
4030 mutex_lock(&qp->mutex); in mlx4_ib_query_qp()
4032 if (qp->state == IB_QPS_RESET) { in mlx4_ib_query_qp()
4037 err = mlx4_qp_query(dev->dev, &qp->mqp, &context); in mlx4_ib_query_qp()
4045 qp->state = to_ib_qp_state(mlx4_state); in mlx4_ib_query_qp()
4046 qp_attr->qp_state = qp->state; in mlx4_ib_query_qp()
4057 if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC || in mlx4_ib_query_qp()
4058 qp->ibqp.qp_type == IB_QPT_XRC_INI || in mlx4_ib_query_qp()
4059 qp->ibqp.qp_type == IB_QPT_XRC_TGT) { in mlx4_ib_query_qp()
4069 qp_attr->port_num = qp->port; in mlx4_ib_query_qp()
4073 /* qp_attr->en_sqd_async_notify is only applicable in modify qp */ in mlx4_ib_query_qp()
4089 qp_attr->cap.max_recv_wr = qp->rq.wqe_cnt; in mlx4_ib_query_qp()
4090 qp_attr->cap.max_recv_sge = qp->rq.max_gs; in mlx4_ib_query_qp()
4093 qp_attr->cap.max_send_wr = qp->sq.wqe_cnt; in mlx4_ib_query_qp()
4094 qp_attr->cap.max_send_sge = qp->sq.max_gs; in mlx4_ib_query_qp()
4109 if (qp->flags & MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK) in mlx4_ib_query_qp()
4112 if (qp->flags & MLX4_IB_QP_LSO) in mlx4_ib_query_qp()
4115 if (qp->flags & MLX4_IB_QP_NETIF) in mlx4_ib_query_qp()
4119 qp->sq_signal_bits == cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE) ? in mlx4_ib_query_qp()
4123 mutex_unlock(&qp->mutex); in mlx4_ib_query_qp()
4133 struct mlx4_ib_qp *qp; in mlx4_ib_create_wq() local
4169 qp = kzalloc(sizeof(*qp), GFP_KERNEL); in mlx4_ib_create_wq()
4170 if (!qp) in mlx4_ib_create_wq()
4173 mutex_init(&qp->mutex); in mlx4_ib_create_wq()
4174 qp->pri.vid = 0xFFFF; in mlx4_ib_create_wq()
4175 qp->alt.vid = 0xFFFF; in mlx4_ib_create_wq()
4187 err = create_rq(pd, &ib_qp_init_attr, udata, qp); in mlx4_ib_create_wq()
4189 kfree(qp); in mlx4_ib_create_wq()
4193 qp->ibwq.event_handler = init_attr->event_handler; in mlx4_ib_create_wq()
4194 qp->ibwq.wq_num = qp->mqp.qpn; in mlx4_ib_create_wq()
4195 qp->ibwq.state = IB_WQS_RESET; in mlx4_ib_create_wq()
4197 return &qp->ibwq; in mlx4_ib_create_wq()
4215 struct mlx4_ib_qp *qp = to_mqp((struct ib_qp *)ibwq); in _mlx4_ib_modify_wq() local
4224 qp_cur_state = qp->state; in _mlx4_ib_modify_wq()
4233 attr.port_num = qp->port; in _mlx4_ib_modify_wq()
4240 pr_debug("WQN=0x%06x failed to apply RST->INIT on the HW QP\n", in _mlx4_ib_modify_wq()
4263 qp->state = qp_new_state; in _mlx4_ib_modify_wq()
4271 struct mlx4_ib_qp *qp = to_mqp((struct ib_qp *)ibwq); in mlx4_ib_modify_wq() local
4308 mutex_lock(&qp->mutex); in mlx4_ib_modify_wq()
4310 /* Can update HW state only if a RSS QP has already been associated to this in mlx4_ib_modify_wq()
4313 if (qp->rss_usecnt) in mlx4_ib_modify_wq()
4319 mutex_unlock(&qp->mutex); in mlx4_ib_modify_wq()
4327 struct mlx4_ib_qp *qp = to_mqp((struct ib_qp *)ibwq); in mlx4_ib_destroy_wq() local
4329 if (qp->counter_index) in mlx4_ib_destroy_wq()
4330 mlx4_ib_free_qp_counter(dev, qp); in mlx4_ib_destroy_wq()
4332 destroy_qp_common(dev, qp, MLX4_IB_RWQ_SRC, udata); in mlx4_ib_destroy_wq()
4334 kfree(qp); in mlx4_ib_destroy_wq()
4454 void mlx4_ib_drain_sq(struct ib_qp *qp) in mlx4_ib_drain_sq() argument
4456 struct ib_cq *cq = qp->send_cq; in mlx4_ib_drain_sq()
4468 struct mlx4_ib_dev *dev = to_mdev(qp->device); in mlx4_ib_drain_sq()
4471 ret = ib_modify_qp(qp, &attr, IB_QP_STATE); in mlx4_ib_drain_sq()
4480 ret = _mlx4_ib_post_send(qp, &swr.wr, &bad_swr, true); in mlx4_ib_drain_sq()
4489 void mlx4_ib_drain_rq(struct ib_qp *qp) in mlx4_ib_drain_rq() argument
4491 struct ib_cq *cq = qp->recv_cq; in mlx4_ib_drain_rq()
4497 struct mlx4_ib_dev *dev = to_mdev(qp->device); in mlx4_ib_drain_rq()
4500 ret = ib_modify_qp(qp, &attr, IB_QP_STATE); in mlx4_ib_drain_rq()
4510 ret = _mlx4_ib_post_recv(qp, &rwr, &bad_rwr, true); in mlx4_ib_drain_rq()