Lines matching +full:cap +full:- +full:get in drivers/infiniband/hw/hns/hns_roce_qp.c (Linux kernel); each entry shows the file line number, the matching source line, and the enclosing function.

45 struct device *dev = hr_dev->dev; in hns_roce_qp_lookup()
49 xa_lock_irqsave(&hr_dev->qp_table_xa, flags); in hns_roce_qp_lookup()
52 refcount_inc(&qp->refcount); in hns_roce_qp_lookup()
53 xa_unlock_irqrestore(&hr_dev->qp_table_xa, flags); in hns_roce_qp_lookup()
67 struct device *dev = flush_work->hr_dev->dev; in flush_work_handle()
75 if (test_and_clear_bit(HNS_ROCE_FLUSH_FLAG, &hr_qp->flush_flag)) { in flush_work_handle()
76 ret = hns_roce_modify_qp(&hr_qp->ibqp, &attr, attr_mask, NULL); in flush_work_handle()
86 if (refcount_dec_and_test(&hr_qp->refcount)) in flush_work_handle()
87 complete(&hr_qp->free); in flush_work_handle()
92 struct hns_roce_work *flush_work = &hr_qp->flush_work; in init_flush_work()
95 spin_lock_irqsave(&hr_qp->flush_lock, flags); in init_flush_work()
97 if (test_bit(HNS_ROCE_STOP_FLUSH_FLAG, &hr_qp->flush_flag)) { in init_flush_work()
98 spin_unlock_irqrestore(&hr_qp->flush_lock, flags); in init_flush_work()
102 refcount_inc(&hr_qp->refcount); in init_flush_work()
103 queue_work(hr_dev->irq_workq, &flush_work->work); in init_flush_work()
104 spin_unlock_irqrestore(&hr_qp->flush_lock, flags); in init_flush_work()
118 if (!test_and_set_bit(HNS_ROCE_FLUSH_FLAG, &qp->flush_flag)) in flush_cqe()
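
The flush_work_handle()/init_flush_work()/flush_cqe() fragments above implement a queue-once pattern: an atomic flag ensures at most one flush is pending per QP, and a reference is taken before the work is queued so the QP cannot be freed while the flush is outstanding. Below is a minimal user-space sketch of that idea using C11 atomics; it collapses the driver's workqueue, flush_lock and stop flag into an inline call, and the names (struct qp_ctx, queue_flush, do_flush) are hypothetical.

#include <stdatomic.h>
#include <stdio.h>

struct qp_ctx {
        atomic_flag flush_pending;   /* stands in for HNS_ROCE_FLUSH_FLAG */
        atomic_int  refcount;        /* stands in for hr_qp->refcount */
};

/* Hypothetical stand-in for the queued work: here it just runs inline. */
static void do_flush(struct qp_ctx *qp)
{
        atomic_flag_clear(&qp->flush_pending); /* allow a new flush request */
        printf("flushing QP\n");
        atomic_fetch_sub(&qp->refcount, 1);    /* drop the reference taken at queue time */
}

static void queue_flush(struct qp_ctx *qp)
{
        /* only the first caller queues the work; later callers see the flag set */
        if (atomic_flag_test_and_set(&qp->flush_pending))
                return;
        atomic_fetch_add(&qp->refcount, 1);    /* pin the QP until the work has run */
        do_flush(qp);
}

int main(void)
{
        struct qp_ctx qp = { .flush_pending = ATOMIC_FLAG_INIT, .refcount = 1 };

        queue_flush(&qp);
        queue_flush(&qp); /* the first flush completed, so this one runs too */
        printf("refcount back to %d\n", atomic_load(&qp.refcount));
        return 0;
}
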
130 qp->event(qp, (enum hns_roce_event)event_type); in hns_roce_qp_event()
132 if (refcount_dec_and_test(&qp->refcount)) in hns_roce_qp_event()
133 complete(&qp->free); in hns_roce_qp_event()
144 qp->state = IB_QPS_ERR; in hns_roce_flush_cqe()
147 if (refcount_dec_and_test(&qp->refcount)) in hns_roce_flush_cqe()
148 complete(&qp->free); in hns_roce_flush_cqe()
154 struct ib_qp *ibqp = &hr_qp->ibqp; in hns_roce_ib_qp_event()
157 if (ibqp->event_handler) { in hns_roce_ib_qp_event()
158 event.device = ibqp->device; in hns_roce_ib_qp_event()
188 dev_dbg(ibqp->device->dev.parent, "roce_ib: Unexpected event type %d on QP %06lx\n", in hns_roce_ib_qp_event()
189 type, hr_qp->qpn); in hns_roce_ib_qp_event()
192 ibqp->event_handler(&event, ibqp->qp_context); in hns_roce_ib_qp_event()
205 struct ib_cq *scq = init_attr->send_cq; in get_least_load_bankid_for_qp()
213 cqn = to_hr_cq(scq)->cqn; in get_least_load_bankid_for_qp()
234 id = ida_alloc_range(&bank->ida, bank->next, bank->max, GFP_KERNEL); in alloc_qpn_with_bankid()
236 id = ida_alloc_range(&bank->ida, bank->min, bank->max, in alloc_qpn_with_bankid()
243 bank->next = (id + 1) > bank->max ? bank->min : id + 1; in alloc_qpn_with_bankid()
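
alloc_qpn_with_bankid() first asks the IDA for an index at or above bank->next and, if that range is exhausted, retries from bank->min; bank->next then advances past the returned index (wrapping at bank->max) so QPNs rotate through the bank instead of immediately reusing the lowest free value. A small user-space sketch of that next-pointer policy, with a plain bitmap standing in for the kernel IDA and a single example bank:

#include <stdio.h>

#define BANK_MIN 1
#define BANK_MAX 7

struct bank {
        unsigned int used; /* bit i set => index i allocated (toy stand-in for an IDA) */
        int next;          /* where the next search starts, like bank->next */
};

/* return the first free index in [from, BANK_MAX], or -1 */
static int alloc_from(struct bank *b, int from)
{
        for (int id = from; id <= BANK_MAX; id++) {
                if (!(b->used & (1u << id))) {
                        b->used |= 1u << id;
                        return id;
                }
        }
        return -1;
}

static int bank_alloc(struct bank *b)
{
        int id = alloc_from(b, b->next);       /* first pass starts at ->next */

        if (id < 0)
                id = alloc_from(b, BANK_MIN);  /* second pass wraps to ->min */
        if (id < 0)
                return -1;
        /* advance ->next, wrapping once the top of the range is passed */
        b->next = (id + 1) > BANK_MAX ? BANK_MIN : id + 1;
        return id;
}

int main(void)
{
        struct bank b = { .used = 0, .next = BANK_MIN };

        for (int i = 0; i < 4; i++)
                printf("allocated index %d\n", bank_alloc(&b));
        return 0;
}
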
253 struct hns_roce_qp_table *qp_table = &hr_dev->qp_table; in alloc_qpn()
258 if (hr_qp->ibqp.qp_type == IB_QPT_GSI) { in alloc_qpn()
261 mutex_lock(&qp_table->bank_mutex); in alloc_qpn()
262 bankid = get_least_load_bankid_for_qp(init_attr, qp_table->bank); in alloc_qpn()
264 ret = alloc_qpn_with_bankid(&qp_table->bank[bankid], bankid, in alloc_qpn()
267 ibdev_err(&hr_dev->ib_dev, in alloc_qpn()
269 mutex_unlock(&qp_table->bank_mutex); in alloc_qpn()
273 qp_table->bank[bankid].inuse++; in alloc_qpn()
274 mutex_unlock(&qp_table->bank_mutex); in alloc_qpn()
277 hr_qp->qpn = num; in alloc_qpn()
292 spin_lock_irqsave(&hr_dev->qp_list_lock, flags); in add_qp_to_list()
295 list_add_tail(&hr_qp->node, &hr_dev->qp_list); in add_qp_to_list()
297 list_add_tail(&hr_qp->sq_node, &hr_send_cq->sq_list); in add_qp_to_list()
299 list_add_tail(&hr_qp->rq_node, &hr_recv_cq->rq_list); in add_qp_to_list()
302 spin_unlock_irqrestore(&hr_dev->qp_list_lock, flags); in add_qp_to_list()
309 struct xarray *xa = &hr_dev->qp_table_xa; in hns_roce_qp_store()
312 if (!hr_qp->qpn) in hns_roce_qp_store()
313 return -EINVAL; in hns_roce_qp_store()
315 ret = xa_err(xa_store_irq(xa, hr_qp->qpn, hr_qp, GFP_KERNEL)); in hns_roce_qp_store()
317 dev_err(hr_dev->dev, "failed to xa store for QPC\n"); in hns_roce_qp_store()
320 add_qp_to_list(hr_dev, hr_qp, init_attr->send_cq, in hns_roce_qp_store()
321 init_attr->recv_cq); in hns_roce_qp_store()
328 struct hns_roce_qp_table *qp_table = &hr_dev->qp_table; in alloc_qpc()
329 struct device *dev = hr_dev->dev; in alloc_qpc()
332 if (!hr_qp->qpn) in alloc_qpc()
333 return -EINVAL; in alloc_qpc()
336 ret = hns_roce_table_get(hr_dev, &qp_table->qp_table, hr_qp->qpn); in alloc_qpc()
338 dev_err(dev, "failed to get QPC table\n"); in alloc_qpc()
343 ret = hns_roce_table_get(hr_dev, &qp_table->irrl_table, hr_qp->qpn); in alloc_qpc()
345 dev_err(dev, "failed to get IRRL table\n"); in alloc_qpc()
349 if (hr_dev->caps.trrl_entry_sz) { in alloc_qpc()
351 ret = hns_roce_table_get(hr_dev, &qp_table->trrl_table, in alloc_qpc()
352 hr_qp->qpn); in alloc_qpc()
354 dev_err(dev, "failed to get TRRL table\n"); in alloc_qpc()
359 if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL) { in alloc_qpc()
361 ret = hns_roce_table_get(hr_dev, &qp_table->sccc_table, in alloc_qpc()
362 hr_qp->qpn); in alloc_qpc()
364 dev_err(dev, "failed to get SCC CTX table\n"); in alloc_qpc()
372 if (hr_dev->caps.trrl_entry_sz) in alloc_qpc()
373 hns_roce_table_put(hr_dev, &qp_table->trrl_table, hr_qp->qpn); in alloc_qpc()
376 hns_roce_table_put(hr_dev, &qp_table->irrl_table, hr_qp->qpn); in alloc_qpc()
379 hns_roce_table_put(hr_dev, &qp_table->qp_table, hr_qp->qpn); in alloc_qpc()
387 rdma_user_mmap_entry_remove(&hr_qp->dwqe_mmap_entry->rdma_entry); in qp_user_mmap_entry_remove()
392 struct xarray *xa = &hr_dev->qp_table_xa; in hns_roce_qp_remove()
395 list_del(&hr_qp->node); in hns_roce_qp_remove()
397 if (hr_qp->ibqp.qp_type != IB_QPT_XRC_TGT) in hns_roce_qp_remove()
398 list_del(&hr_qp->sq_node); in hns_roce_qp_remove()
400 if (hr_qp->ibqp.qp_type != IB_QPT_XRC_INI && in hns_roce_qp_remove()
401 hr_qp->ibqp.qp_type != IB_QPT_XRC_TGT) in hns_roce_qp_remove()
402 list_del(&hr_qp->rq_node); in hns_roce_qp_remove()
405 __xa_erase(xa, hr_qp->qpn); in hns_roce_qp_remove()
411 struct hns_roce_qp_table *qp_table = &hr_dev->qp_table; in free_qpc()
413 if (hr_dev->caps.trrl_entry_sz) in free_qpc()
414 hns_roce_table_put(hr_dev, &qp_table->trrl_table, hr_qp->qpn); in free_qpc()
415 hns_roce_table_put(hr_dev, &qp_table->irrl_table, hr_qp->qpn); in free_qpc()
428 if (hr_qp->ibqp.qp_type == IB_QPT_GSI) in free_qpn()
431 if (hr_qp->qpn < hr_dev->caps.reserved_qps) in free_qpn()
434 bankid = get_qp_bankid(hr_qp->qpn); in free_qpn()
436 ida_free(&hr_dev->qp_table.bank[bankid].ida, in free_qpn()
437 hr_qp->qpn / HNS_ROCE_QP_BANK_NUM); in free_qpn()
439 mutex_lock(&hr_dev->qp_table.bank_mutex); in free_qpn()
440 hr_dev->qp_table.bank[bankid].inuse--; in free_qpn()
441 mutex_unlock(&hr_dev->qp_table.bank_mutex); in free_qpn()
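
free_qpn() reverses the encoding used at allocation time: the bank id is recovered from the QPN's low bits (get_qp_bankid()) and the in-bank IDA index from the remaining bits, i.e. qpn / HNS_ROCE_QP_BANK_NUM. A short sketch of that decomposition, assuming an eight-bank layout:

#include <stdio.h>

#define BANK_NUM 8u  /* stand-in for HNS_ROCE_QP_BANK_NUM */

int main(void)
{
        unsigned long qpn = 42;                /* example QPN */
        unsigned int bankid = qpn % BANK_NUM;  /* low bits select the bank */
        unsigned long id = qpn / BANK_NUM;     /* index handed back to that bank's IDA */

        printf("qpn=%lu -> bank=%u index=%lu\n", qpn, bankid, id);
        return 0;
}
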
447 u32 max_sge = dev->caps.max_rq_sg; in proc_rq_sge()
449 if (dev->pci_dev->revision >= PCI_REVISION_ID_HIP09) in proc_rq_sge()
461 hr_qp->rq.rsv_sge = 1; in proc_rq_sge()
466 static int set_rq_size(struct hns_roce_dev *hr_dev, struct ib_qp_cap *cap, in set_rq_size() argument
474 hr_qp->rq.wqe_cnt = 0; in set_rq_size()
475 hr_qp->rq.max_gs = 0; in set_rq_size()
476 cap->max_recv_wr = 0; in set_rq_size()
477 cap->max_recv_sge = 0; in set_rq_size()
483 if (!cap->max_recv_wr || cap->max_recv_wr > hr_dev->caps.max_wqes || in set_rq_size()
484 cap->max_recv_sge > max_sge) { in set_rq_size()
485 ibdev_err(&hr_dev->ib_dev, in set_rq_size()
487 cap->max_recv_wr, cap->max_recv_sge); in set_rq_size()
488 return -EINVAL; in set_rq_size()
491 cnt = roundup_pow_of_two(max(cap->max_recv_wr, hr_dev->caps.min_wqes)); in set_rq_size()
492 if (cnt > hr_dev->caps.max_wqes) { in set_rq_size()
493 ibdev_err(&hr_dev->ib_dev, "rq depth %u too large\n", in set_rq_size()
494 cap->max_recv_wr); in set_rq_size()
495 return -EINVAL; in set_rq_size()
498 hr_qp->rq.max_gs = roundup_pow_of_two(max(1U, cap->max_recv_sge) + in set_rq_size()
499 hr_qp->rq.rsv_sge); in set_rq_size()
501 hr_qp->rq.wqe_shift = ilog2(hr_dev->caps.max_rq_desc_sz * in set_rq_size()
502 hr_qp->rq.max_gs); in set_rq_size()
504 hr_qp->rq.wqe_cnt = cnt; in set_rq_size()
506 cap->max_recv_wr = cnt; in set_rq_size()
507 cap->max_recv_sge = hr_qp->rq.max_gs - hr_qp->rq.rsv_sge; in set_rq_size()
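
set_rq_size() rounds the requested receive depth up to a power of two, bounded below by the device minimum and rejected if it exceeds caps.max_wqes, and adds a reserved SGE (proc_rq_sge()) before rounding the SGE count; the rounded depth and the SGE count minus the reserved entry are then reported back through the ib_qp_cap. A standalone sketch of that arithmetic with made-up cap values; roundup_pow2() is a local helper, not the kernel macro:

#include <stdio.h>

#define MIN_WQES 64u     /* stand-in for hr_dev->caps.min_wqes */
#define MAX_WQES 32768u  /* stand-in for hr_dev->caps.max_wqes */
#define RSV_SGE  1u      /* reserved SGE, see proc_rq_sge() */

static unsigned int roundup_pow2(unsigned int v)
{
        unsigned int r = 1;

        while (r < v)
                r <<= 1;
        return r;
}

int main(void)
{
        unsigned int max_recv_wr = 100, max_recv_sge = 3;
        unsigned int wqe_cnt, max_gs;

        wqe_cnt = roundup_pow2(max_recv_wr > MIN_WQES ? max_recv_wr : MIN_WQES);
        if (wqe_cnt > MAX_WQES)
                return 1;  /* the driver returns -EINVAL here */

        /* at least one data SGE plus the reserved one, rounded up */
        max_gs = roundup_pow2((max_recv_sge ? max_recv_sge : 1) + RSV_SGE);

        /* values reported back to the caller exclude the reserved SGE */
        printf("rq.wqe_cnt=%u rq.max_gs=%u max_recv_sge=%u\n",
               wqe_cnt, max_gs, max_gs - RSV_SGE);
        return 0;
}
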
513 struct ib_qp_cap *cap) in get_max_inline_data() argument
515 if (cap->max_inline_data) { in get_max_inline_data()
516 cap->max_inline_data = roundup_pow_of_two(cap->max_inline_data); in get_max_inline_data()
517 return min(cap->max_inline_data, in get_max_inline_data()
518 hr_dev->caps.max_sq_inline); in get_max_inline_data()
525 struct ib_qp_cap *cap) in update_inline_data() argument
527 u32 sge_num = hr_qp->sq.ext_sge_cnt; in update_inline_data()
529 if (hr_qp->config & HNS_ROCE_EXSGE_FLAGS) { in update_inline_data()
530 if (!(hr_qp->ibqp.qp_type == IB_QPT_GSI || in update_inline_data()
531 hr_qp->ibqp.qp_type == IB_QPT_UD)) in update_inline_data()
534 cap->max_inline_data = max(cap->max_inline_data, in update_inline_data()
538 hr_qp->max_inline_data = cap->max_inline_data; in update_inline_data()
549 return max_send_sge > std_sge_num ? (max_send_sge - std_sge_num) : in get_sge_num_from_max_send_sge()
574 struct hns_roce_qp *hr_qp, struct ib_qp_cap *cap) in set_ext_sge_param() argument
576 bool is_ud_or_gsi = (hr_qp->ibqp.qp_type == IB_QPT_GSI || in set_ext_sge_param()
577 hr_qp->ibqp.qp_type == IB_QPT_UD); in set_ext_sge_param()
583 cap->max_inline_data = get_max_inline_data(hr_dev, cap); in set_ext_sge_param()
585 hr_qp->sge.sge_shift = HNS_ROCE_SGE_SHIFT; in set_ext_sge_param()
588 cap->max_send_sge); in set_ext_sge_param()
590 if (hr_qp->config & HNS_ROCE_EXSGE_FLAGS) { in set_ext_sge_param()
593 cap->max_inline_data)); in set_ext_sge_param()
594 hr_qp->sq.ext_sge_cnt = inline_ext_sge ? in set_ext_sge_param()
597 hr_qp->sq.max_gs = max(1U, (hr_qp->sq.ext_sge_cnt + std_sge_num)); in set_ext_sge_param()
598 hr_qp->sq.max_gs = min(hr_qp->sq.max_gs, hr_dev->caps.max_sq_sg); in set_ext_sge_param()
600 ext_wqe_sge_cnt = hr_qp->sq.ext_sge_cnt; in set_ext_sge_param()
602 hr_qp->sq.max_gs = max(1U, cap->max_send_sge); in set_ext_sge_param()
603 hr_qp->sq.max_gs = min(hr_qp->sq.max_gs, hr_dev->caps.max_sq_sg); in set_ext_sge_param()
604 hr_qp->sq.ext_sge_cnt = hr_qp->sq.max_gs; in set_ext_sge_param()
612 hr_qp->sge.sge_cnt = max(total_sge_cnt, in set_ext_sge_param()
616 update_inline_data(hr_qp, cap); in set_ext_sge_param()
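
set_ext_sge_param() splits a send WQE's scatter list into the standard SGEs that fit in the base WQE and an extended-SGE overflow area; get_sge_num_from_max_send_sge() above is just max_send_sge minus the standard count, clamped at zero. A sketch of that split, with an example standard-SGE count per WQE:

#include <stdio.h>

#define STD_SGE_PER_WQE 2u  /* example: SGEs carried inside the base WQE */

/* number of SGEs that spill into the extended SGE area, never negative */
static unsigned int ext_sge_num(unsigned int max_send_sge)
{
        return max_send_sge > STD_SGE_PER_WQE ?
               max_send_sge - STD_SGE_PER_WQE : 0;
}

int main(void)
{
        for (unsigned int sge = 1; sge <= 4; sge++)
                printf("max_send_sge=%u -> extended SGEs=%u\n", sge, ext_sge_num(sge));
        return 0;
}
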
620 struct ib_qp_cap *cap, in check_sq_size_with_integrity() argument
623 u32 roundup_sq_stride = roundup_pow_of_two(hr_dev->caps.max_sq_desc_sz); in check_sq_size_with_integrity()
627 if (ucmd->log_sq_stride > max_sq_stride || in check_sq_size_with_integrity()
628 ucmd->log_sq_stride < HNS_ROCE_IB_MIN_SQ_STRIDE) { in check_sq_size_with_integrity()
629 ibdev_err(&hr_dev->ib_dev, "failed to check SQ stride size.\n"); in check_sq_size_with_integrity()
630 return -EINVAL; in check_sq_size_with_integrity()
633 if (cap->max_send_sge > hr_dev->caps.max_sq_sg) { in check_sq_size_with_integrity()
634 ibdev_err(&hr_dev->ib_dev, "failed to check SQ SGE size %u.\n", in check_sq_size_with_integrity()
635 cap->max_send_sge); in check_sq_size_with_integrity()
636 return -EINVAL; in check_sq_size_with_integrity()
643 struct ib_qp_cap *cap, struct hns_roce_qp *hr_qp, in set_user_sq_size() argument
646 struct ib_device *ibdev = &hr_dev->ib_dev; in set_user_sq_size()
650 if (check_shl_overflow(1, ucmd->log_sq_bb_count, &cnt) || in set_user_sq_size()
651 cnt > hr_dev->caps.max_wqes) in set_user_sq_size()
652 return -EINVAL; in set_user_sq_size()
654 ret = check_sq_size_with_integrity(hr_dev, cap, ucmd); in set_user_sq_size()
661 set_ext_sge_param(hr_dev, cnt, hr_qp, cap); in set_user_sq_size()
663 hr_qp->sq.wqe_shift = ucmd->log_sq_stride; in set_user_sq_size()
664 hr_qp->sq.wqe_cnt = cnt; in set_user_sq_size()
665 cap->max_send_sge = hr_qp->sq.max_gs; in set_user_sq_size()
677 hr_qp->buff_size = 0; in set_wqe_buf_attr()
680 hr_qp->sq.offset = 0; in set_wqe_buf_attr()
681 buf_size = to_hr_hem_entries_size(hr_qp->sq.wqe_cnt, in set_wqe_buf_attr()
682 hr_qp->sq.wqe_shift); in set_wqe_buf_attr()
683 if (buf_size > 0 && idx < ARRAY_SIZE(buf_attr->region)) { in set_wqe_buf_attr()
684 buf_attr->region[idx].size = buf_size; in set_wqe_buf_attr()
685 buf_attr->region[idx].hopnum = hr_dev->caps.wqe_sq_hop_num; in set_wqe_buf_attr()
687 hr_qp->buff_size += buf_size; in set_wqe_buf_attr()
691 hr_qp->sge.offset = hr_qp->buff_size; in set_wqe_buf_attr()
692 buf_size = to_hr_hem_entries_size(hr_qp->sge.sge_cnt, in set_wqe_buf_attr()
693 hr_qp->sge.sge_shift); in set_wqe_buf_attr()
694 if (buf_size > 0 && idx < ARRAY_SIZE(buf_attr->region)) { in set_wqe_buf_attr()
695 buf_attr->region[idx].size = buf_size; in set_wqe_buf_attr()
696 buf_attr->region[idx].hopnum = hr_dev->caps.wqe_sge_hop_num; in set_wqe_buf_attr()
698 hr_qp->buff_size += buf_size; in set_wqe_buf_attr()
702 hr_qp->rq.offset = hr_qp->buff_size; in set_wqe_buf_attr()
703 buf_size = to_hr_hem_entries_size(hr_qp->rq.wqe_cnt, in set_wqe_buf_attr()
704 hr_qp->rq.wqe_shift); in set_wqe_buf_attr()
705 if (buf_size > 0 && idx < ARRAY_SIZE(buf_attr->region)) { in set_wqe_buf_attr()
706 buf_attr->region[idx].size = buf_size; in set_wqe_buf_attr()
707 buf_attr->region[idx].hopnum = hr_dev->caps.wqe_rq_hop_num; in set_wqe_buf_attr()
709 hr_qp->buff_size += buf_size; in set_wqe_buf_attr()
712 if (hr_qp->buff_size < 1) in set_wqe_buf_attr()
713 return -EINVAL; in set_wqe_buf_attr()
715 buf_attr->page_shift = HNS_HW_PAGE_SHIFT + hr_dev->caps.mtt_buf_pg_sz; in set_wqe_buf_attr()
716 buf_attr->region_count = idx; in set_wqe_buf_attr()
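
set_wqe_buf_attr() packs the SQ, extended-SGE and RQ areas back to back into a single buffer: each non-empty region starts at the current buff_size, its size is entry_count << entry_shift, and region_count records how many regions were described. A minimal sketch of that layout computation with fixed example counts and shifts (the struct name is illustrative):

#include <stdio.h>

struct region { unsigned long offset, size; };

int main(void)
{
        /* example entry counts and per-entry shifts (log2 of the entry size) */
        unsigned int cnt[3]   = { 128, 64, 128 };  /* SQ, extended SGE, RQ entries */
        unsigned int shift[3] = { 6, 4, 5 };       /* 64 B, 16 B, 32 B entries */

        struct region r[3];
        unsigned long buff_size = 0;
        int idx = 0;

        for (int i = 0; i < 3; i++) {
                unsigned long size = (unsigned long)cnt[i] << shift[i];

                if (!size)
                        continue;              /* empty queues get no region */
                r[idx].offset = buff_size;     /* starts where the previous region ended */
                r[idx].size = size;
                buff_size += size;
                idx++;
        }

        printf("regions=%d total=%lu bytes\n", idx, buff_size);
        for (int i = 0; i < idx; i++)
                printf("  region %d: offset=%lu size=%lu\n", i, r[i].offset, r[i].size);
        return 0;
}
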
722 struct ib_qp_cap *cap, struct hns_roce_qp *hr_qp) in set_kernel_sq_size() argument
724 struct ib_device *ibdev = &hr_dev->ib_dev; in set_kernel_sq_size()
727 if (!cap->max_send_wr || cap->max_send_wr > hr_dev->caps.max_wqes || in set_kernel_sq_size()
728 cap->max_send_sge > hr_dev->caps.max_sq_sg) { in set_kernel_sq_size()
730 return -EINVAL; in set_kernel_sq_size()
733 cnt = roundup_pow_of_two(max(cap->max_send_wr, hr_dev->caps.min_wqes)); in set_kernel_sq_size()
734 if (cnt > hr_dev->caps.max_wqes) { in set_kernel_sq_size()
737 return -EINVAL; in set_kernel_sq_size()
740 hr_qp->sq.wqe_shift = ilog2(hr_dev->caps.max_sq_desc_sz); in set_kernel_sq_size()
741 hr_qp->sq.wqe_cnt = cnt; in set_kernel_sq_size()
743 set_ext_sge_param(hr_dev, cnt, hr_qp, cap); in set_kernel_sq_size()
746 cap->max_send_wr = cnt; in set_kernel_sq_size()
747 cap->max_send_sge = hr_qp->sq.max_gs; in set_kernel_sq_size()
754 if (attr->qp_type == IB_QPT_XRC_TGT || !attr->cap.max_send_wr) in hns_roce_qp_has_sq()
762 if (attr->qp_type == IB_QPT_XRC_INI || in hns_roce_qp_has_rq()
763 attr->qp_type == IB_QPT_XRC_TGT || attr->srq || in hns_roce_qp_has_rq()
764 !attr->cap.max_recv_wr) in hns_roce_qp_has_rq()
774 struct ib_device *ibdev = &hr_dev->ib_dev; in alloc_qp_buf()
783 ret = hns_roce_mtr_create(hr_dev, &hr_qp->mtr, &buf_attr, in alloc_qp_buf()
784 PAGE_SHIFT + hr_dev->caps.mtt_ba_pg_sz, in alloc_qp_buf()
791 if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_DIRECT_WQE) in alloc_qp_buf()
792 hr_qp->en_flags |= HNS_ROCE_QP_CAP_DIRECT_WQE; in alloc_qp_buf()
803 hns_roce_mtr_destroy(hr_dev, &hr_qp->mtr); in free_qp_buf()
812 return ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_RECORD_DB) && in user_qp_has_sdb()
813 udata->outlen >= offsetofend(typeof(*resp), cap_flags) && in user_qp_has_sdb()
815 udata->inlen >= offsetofend(typeof(*ucmd), sdb_addr)); in user_qp_has_sdb()
823 return ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_RECORD_DB) && in user_qp_has_rdb()
824 udata->outlen >= offsetofend(typeof(*resp), cap_flags) && in user_qp_has_rdb()
831 return ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_RECORD_DB) && in kernel_qp_has_rdb()
846 address = hr_dev->dwqe_page + hr_qp->qpn * HNS_ROCE_DWQE_SIZE; in qp_mmap_entry()
848 hr_qp->dwqe_mmap_entry = in qp_mmap_entry()
849 hns_roce_user_mmap_entry_insert(&uctx->ibucontext, address, in qp_mmap_entry()
853 if (!hr_qp->dwqe_mmap_entry) { in qp_mmap_entry()
854 ibdev_err(&hr_dev->ib_dev, "failed to get dwqe mmap entry.\n"); in qp_mmap_entry()
855 return -ENOMEM; in qp_mmap_entry()
858 rdma_entry = &hr_qp->dwqe_mmap_entry->rdma_entry; in qp_mmap_entry()
859 resp->dwqe_mmap_key = rdma_user_mmap_get_offset(rdma_entry); in qp_mmap_entry()
875 struct ib_device *ibdev = &hr_dev->ib_dev; in alloc_user_qp_db()
879 ret = hns_roce_db_map_user(uctx, ucmd->sdb_addr, &hr_qp->sdb); in alloc_user_qp_db()
886 hr_qp->en_flags |= HNS_ROCE_QP_CAP_SQ_RECORD_DB; in alloc_user_qp_db()
890 ret = hns_roce_db_map_user(uctx, ucmd->db_addr, &hr_qp->rdb); in alloc_user_qp_db()
897 hr_qp->en_flags |= HNS_ROCE_QP_CAP_RQ_RECORD_DB; in alloc_user_qp_db()
904 hns_roce_db_unmap_user(uctx, &hr_qp->sdb); in alloc_user_qp_db()
913 struct ib_device *ibdev = &hr_dev->ib_dev; in alloc_kernel_qp_db()
916 if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09) in alloc_kernel_qp_db()
917 hr_qp->sq.db_reg = hr_dev->mem_base + in alloc_kernel_qp_db()
918 HNS_ROCE_DWQE_SIZE * hr_qp->qpn; in alloc_kernel_qp_db()
920 hr_qp->sq.db_reg = hr_dev->reg_base + hr_dev->sdb_offset + in alloc_kernel_qp_db()
921 DB_REG_OFFSET * hr_dev->priv_uar.index; in alloc_kernel_qp_db()
923 hr_qp->rq.db_reg = hr_dev->reg_base + hr_dev->odb_offset + in alloc_kernel_qp_db()
924 DB_REG_OFFSET * hr_dev->priv_uar.index; in alloc_kernel_qp_db()
927 ret = hns_roce_alloc_db(hr_dev, &hr_qp->rdb, 0); in alloc_kernel_qp_db()
934 *hr_qp->rdb.db_record = 0; in alloc_kernel_qp_db()
935 hr_qp->en_flags |= HNS_ROCE_QP_CAP_RQ_RECORD_DB; in alloc_kernel_qp_db()
949 if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SDI_MODE) in alloc_qp_db()
950 hr_qp->en_flags |= HNS_ROCE_QP_CAP_OWNER_DB; in alloc_qp_db()
953 if (hr_qp->en_flags & HNS_ROCE_QP_CAP_DIRECT_WQE) { in alloc_qp_db()
972 if (hr_qp->en_flags & HNS_ROCE_QP_CAP_DIRECT_WQE) in alloc_qp_db()
985 if (hr_qp->en_flags & HNS_ROCE_QP_CAP_RQ_RECORD_DB) in free_qp_db()
986 hns_roce_db_unmap_user(uctx, &hr_qp->rdb); in free_qp_db()
987 if (hr_qp->en_flags & HNS_ROCE_QP_CAP_SQ_RECORD_DB) in free_qp_db()
988 hns_roce_db_unmap_user(uctx, &hr_qp->sdb); in free_qp_db()
989 if (hr_qp->en_flags & HNS_ROCE_QP_CAP_DIRECT_WQE) in free_qp_db()
992 if (hr_qp->en_flags & HNS_ROCE_QP_CAP_RQ_RECORD_DB) in free_qp_db()
993 hns_roce_free_db(hr_dev, &hr_qp->rdb); in free_qp_db()
1000 struct ib_device *ibdev = &hr_dev->ib_dev; in alloc_kernel_wrid()
1005 sq_wrid = kcalloc(hr_qp->sq.wqe_cnt, sizeof(u64), GFP_KERNEL); in alloc_kernel_wrid()
1008 return -ENOMEM; in alloc_kernel_wrid()
1011 if (hr_qp->rq.wqe_cnt) { in alloc_kernel_wrid()
1012 rq_wrid = kcalloc(hr_qp->rq.wqe_cnt, sizeof(u64), GFP_KERNEL); in alloc_kernel_wrid()
1015 ret = -ENOMEM; in alloc_kernel_wrid()
1020 hr_qp->sq.wrid = sq_wrid; in alloc_kernel_wrid()
1021 hr_qp->rq.wrid = rq_wrid; in alloc_kernel_wrid()
1031 kfree(hr_qp->rq.wrid); in free_kernel_wrid()
1032 kfree(hr_qp->sq.wrid); in free_kernel_wrid()
1038 if (hr_qp->ibqp.qp_type == IB_QPT_UD || in default_congest_type()
1039 hr_qp->ibqp.qp_type == IB_QPT_GSI) in default_congest_type()
1040 hr_qp->cong_type = CONG_TYPE_DCQCN; in default_congest_type()
1042 hr_qp->cong_type = hr_dev->caps.default_cong_type; in default_congest_type()
1048 struct hns_roce_dev *hr_dev = to_hr_dev(hr_qp->ibqp.device); in set_congest_type()
1050 switch (ucmd->cong_type_flags) { in set_congest_type()
1052 hr_qp->cong_type = CONG_TYPE_DCQCN; in set_congest_type()
1055 hr_qp->cong_type = CONG_TYPE_LDCP; in set_congest_type()
1058 hr_qp->cong_type = CONG_TYPE_HC3; in set_congest_type()
1061 hr_qp->cong_type = CONG_TYPE_DIP; in set_congest_type()
1064 return -EINVAL; in set_congest_type()
1067 if (!test_bit(hr_qp->cong_type, (unsigned long *)&hr_dev->caps.cong_cap)) in set_congest_type()
1068 return -EOPNOTSUPP; in set_congest_type()
1070 if (hr_qp->ibqp.qp_type == IB_QPT_UD && in set_congest_type()
1071 hr_qp->cong_type != CONG_TYPE_DCQCN) in set_congest_type()
1072 return -EOPNOTSUPP; in set_congest_type()
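
set_congest_type() maps the user's request to an internal congestion-control enum and then rejects anything whose bit is not set in the device's cong_cap bitmap (with UD QPs further limited to DCQCN). A small sketch of validating an enum value against a capability bitmask; the enum values and the cap word here are illustrative:

#include <stdio.h>

enum cong_type { CONG_DCQCN, CONG_LDCP, CONG_HC3, CONG_DIP, CONG_MAX };

/* the device advertises one capability bit per supported algorithm */
static int cong_supported(unsigned long cong_cap, enum cong_type type)
{
        return (cong_cap >> type) & 1;
}

int main(void)
{
        unsigned long cong_cap = (1ul << CONG_DCQCN) | (1ul << CONG_LDCP);

        for (int t = CONG_DCQCN; t < CONG_MAX; t++)
                printf("cong type %d supported: %d\n", t, cong_supported(cong_cap, t));
        return 0;
}
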
1081 if (ucmd->comp_mask & HNS_ROCE_CREATE_QP_MASK_CONGEST_TYPE) in set_congest_param()
1094 struct ib_device *ibdev = &hr_dev->ib_dev; in set_qp_param()
1098 if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) in set_qp_param()
1099 hr_qp->sq_signal_bits = IB_SIGNAL_ALL_WR; in set_qp_param()
1101 hr_qp->sq_signal_bits = IB_SIGNAL_REQ_WR; in set_qp_param()
1103 ret = set_rq_size(hr_dev, &init_attr->cap, hr_qp, in set_qp_param()
1113 min(udata->inlen, sizeof(*ucmd))); in set_qp_param()
1122 hr_qp->config = uctx->config; in set_qp_param()
1123 ret = set_user_sq_size(hr_dev, &init_attr->cap, hr_qp, ucmd); in set_qp_param()
1133 if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09) in set_qp_param()
1134 hr_qp->config = HNS_ROCE_EXSGE_FLAGS; in set_qp_param()
1136 ret = set_kernel_sq_size(hr_dev, &init_attr->cap, hr_qp); in set_qp_param()
1151 struct hns_roce_work *flush_work = &hr_qp->flush_work; in hns_roce_create_qp_common()
1153 struct ib_device *ibdev = &hr_dev->ib_dev; in hns_roce_create_qp_common()
1157 mutex_init(&hr_qp->mutex); in hns_roce_create_qp_common()
1158 spin_lock_init(&hr_qp->sq.lock); in hns_roce_create_qp_common()
1159 spin_lock_init(&hr_qp->rq.lock); in hns_roce_create_qp_common()
1160 spin_lock_init(&hr_qp->flush_lock); in hns_roce_create_qp_common()
1162 hr_qp->state = IB_QPS_RESET; in hns_roce_create_qp_common()
1163 hr_qp->flush_flag = 0; in hns_roce_create_qp_common()
1164 flush_work->hr_dev = hr_dev; in hns_roce_create_qp_common()
1165 INIT_WORK(&flush_work->work, flush_work_handle); in hns_roce_create_qp_common()
1167 if (init_attr->create_flags) in hns_roce_create_qp_common()
1168 return -EOPNOTSUPP; in hns_roce_create_qp_common()
1218 resp.cap_flags = hr_qp->en_flags; in hns_roce_create_qp_common()
1220 min(udata->outlen, sizeof(resp))); in hns_roce_create_qp_common()
1227 if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL) { in hns_roce_create_qp_common()
1228 ret = hr_dev->hw->qp_flow_control_init(hr_dev, hr_qp); in hns_roce_create_qp_common()
1233 hr_qp->ibqp.qp_num = hr_qp->qpn; in hns_roce_create_qp_common()
1234 hr_qp->event = hns_roce_ib_qp_event; in hns_roce_create_qp_common()
1235 refcount_set(&hr_qp->refcount, 1); in hns_roce_create_qp_common()
1236 init_completion(&hr_qp->free); in hns_roce_create_qp_common()
1253 mutex_destroy(&hr_qp->mutex); in hns_roce_create_qp_common()
1260 if (refcount_dec_and_test(&hr_qp->refcount)) in hns_roce_qp_destroy()
1261 complete(&hr_qp->free); in hns_roce_qp_destroy()
1262 wait_for_completion(&hr_qp->free); in hns_roce_qp_destroy()
1269 mutex_destroy(&hr_qp->mutex); in hns_roce_qp_destroy()
1278 if (!(hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_XRC)) in check_qp_type()
1282 if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08 && in check_qp_type()
1296 ibdev_err(&hr_dev->ib_dev, "not support QP type %d\n", type); in check_qp_type()
1298 return -EOPNOTSUPP; in check_qp_type()
1304 struct ib_device *ibdev = qp->device; in hns_roce_create_qp()
1309 ret = check_qp_type(hr_dev, init_attr->qp_type, !!udata); in hns_roce_create_qp()
1313 if (init_attr->qp_type == IB_QPT_XRC_TGT) in hns_roce_create_qp()
1314 hr_qp->xrcdn = to_hr_xrcd(init_attr->xrcd)->xrcdn; in hns_roce_create_qp()
1316 if (init_attr->qp_type == IB_QPT_GSI) { in hns_roce_create_qp()
1317 hr_qp->port = init_attr->port_num - 1; in hns_roce_create_qp()
1318 hr_qp->phy_port = hr_dev->iboe.phy_port[hr_qp->port]; in hns_roce_create_qp()
1324 init_attr->qp_type, ret); in hns_roce_create_qp()
1328 atomic64_inc(&hr_dev->dfx_cnt[HNS_ROCE_DFX_QP_CREATE_ERR_CNT]); in hns_roce_create_qp()
1345 return -1; in to_hr_qp_type()
1356 p = attr_mask & IB_QP_PORT ? (attr->port_num - 1) : hr_qp->port; in check_mtu_validate()
1357 active_mtu = iboe_get_mtu(hr_dev->iboe.netdevs[p]->mtu); in check_mtu_validate()
1359 if ((hr_dev->caps.max_mtu >= IB_MTU_2048 && in check_mtu_validate()
1360 attr->path_mtu > hr_dev->caps.max_mtu) || in check_mtu_validate()
1361 attr->path_mtu < IB_MTU_256 || attr->path_mtu > active_mtu) { in check_mtu_validate()
1362 ibdev_err(&hr_dev->ib_dev, in check_mtu_validate()
1364 attr->path_mtu); in check_mtu_validate()
1365 return -EINVAL; in check_mtu_validate()
1374 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); in hns_roce_check_qp_attr()
1379 (attr->port_num == 0 || attr->port_num > hr_dev->caps.num_ports)) { in hns_roce_check_qp_attr()
1380 ibdev_err(&hr_dev->ib_dev, "invalid attr, port_num = %u.\n", in hns_roce_check_qp_attr()
1381 attr->port_num); in hns_roce_check_qp_attr()
1382 return -EINVAL; in hns_roce_check_qp_attr()
1386 p = attr_mask & IB_QP_PORT ? (attr->port_num - 1) : hr_qp->port; in hns_roce_check_qp_attr()
1387 if (attr->pkey_index >= hr_dev->caps.pkey_table_len[p]) { in hns_roce_check_qp_attr()
1388 ibdev_err(&hr_dev->ib_dev, in hns_roce_check_qp_attr()
1390 attr->pkey_index); in hns_roce_check_qp_attr()
1391 return -EINVAL; in hns_roce_check_qp_attr()
1396 attr->max_rd_atomic > hr_dev->caps.max_qp_init_rdma) { in hns_roce_check_qp_attr()
1397 ibdev_err(&hr_dev->ib_dev, in hns_roce_check_qp_attr()
1399 attr->max_rd_atomic); in hns_roce_check_qp_attr()
1400 return -EINVAL; in hns_roce_check_qp_attr()
1404 attr->max_dest_rd_atomic > hr_dev->caps.max_qp_dest_rdma) { in hns_roce_check_qp_attr()
1405 ibdev_err(&hr_dev->ib_dev, in hns_roce_check_qp_attr()
1407 attr->max_dest_rd_atomic); in hns_roce_check_qp_attr()
1408 return -EINVAL; in hns_roce_check_qp_attr()
1420 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); in hns_roce_modify_qp()
1424 int ret = -EINVAL; in hns_roce_modify_qp()
1426 mutex_lock(&hr_qp->mutex); in hns_roce_modify_qp()
1428 if (attr_mask & IB_QP_CUR_STATE && attr->cur_qp_state != hr_qp->state) in hns_roce_modify_qp()
1431 cur_state = hr_qp->state; in hns_roce_modify_qp()
1432 new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state; in hns_roce_modify_qp()
1434 if (ibqp->uobject && in hns_roce_modify_qp()
1436 if (hr_qp->en_flags & HNS_ROCE_QP_CAP_SQ_RECORD_DB) { in hns_roce_modify_qp()
1437 hr_qp->sq.head = *(int *)(hr_qp->sdb.virt_addr); in hns_roce_modify_qp()
1439 if (hr_qp->en_flags & HNS_ROCE_QP_CAP_RQ_RECORD_DB) in hns_roce_modify_qp()
1440 hr_qp->rq.head = *(int *)(hr_qp->rdb.virt_addr); in hns_roce_modify_qp()
1442 ibdev_warn(&hr_dev->ib_dev, in hns_roce_modify_qp()
1448 if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, in hns_roce_modify_qp()
1450 ibdev_err(&hr_dev->ib_dev, "ib_modify_qp_is_ok failed\n"); in hns_roce_modify_qp()
1461 ret = hr_dev->hw->modify_qp(ibqp, attr, attr_mask, cur_state, in hns_roce_modify_qp()
1466 if (udata && udata->outlen) { in hns_roce_modify_qp()
1467 resp.tc_mode = hr_qp->tc_mode; in hns_roce_modify_qp()
1468 resp.priority = hr_qp->sl; in hns_roce_modify_qp()
1470 min(udata->outlen, sizeof(resp))); in hns_roce_modify_qp()
1472 ibdev_err_ratelimited(&hr_dev->ib_dev, in hns_roce_modify_qp()
1477 mutex_unlock(&hr_qp->mutex); in hns_roce_modify_qp()
1479 atomic64_inc(&hr_dev->dfx_cnt[HNS_ROCE_DFX_QP_MODIFY_ERR_CNT]); in hns_roce_modify_qp()
1485 __acquires(&send_cq->lock) __acquires(&recv_cq->lock) in hns_roce_lock_cqs()
1488 __acquire(&send_cq->lock); in hns_roce_lock_cqs()
1489 __acquire(&recv_cq->lock); in hns_roce_lock_cqs()
1491 spin_lock(&send_cq->lock); in hns_roce_lock_cqs()
1492 __acquire(&recv_cq->lock); in hns_roce_lock_cqs()
1494 spin_lock(&recv_cq->lock); in hns_roce_lock_cqs()
1495 __acquire(&send_cq->lock); in hns_roce_lock_cqs()
1497 spin_lock(&send_cq->lock); in hns_roce_lock_cqs()
1498 __acquire(&recv_cq->lock); in hns_roce_lock_cqs()
1499 } else if (send_cq->cqn < recv_cq->cqn) { in hns_roce_lock_cqs()
1500 spin_lock(&send_cq->lock); in hns_roce_lock_cqs()
1501 spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING); in hns_roce_lock_cqs()
1503 spin_lock(&recv_cq->lock); in hns_roce_lock_cqs()
1504 spin_lock_nested(&send_cq->lock, SINGLE_DEPTH_NESTING); in hns_roce_lock_cqs()
1509 struct hns_roce_cq *recv_cq) __releases(&send_cq->lock) in hns_roce_unlock_cqs()
1510 __releases(&recv_cq->lock) in hns_roce_unlock_cqs()
1513 __release(&recv_cq->lock); in hns_roce_unlock_cqs()
1514 __release(&send_cq->lock); in hns_roce_unlock_cqs()
1516 __release(&recv_cq->lock); in hns_roce_unlock_cqs()
1517 spin_unlock(&send_cq->lock); in hns_roce_unlock_cqs()
1519 __release(&send_cq->lock); in hns_roce_unlock_cqs()
1520 spin_unlock(&recv_cq->lock); in hns_roce_unlock_cqs()
1522 __release(&recv_cq->lock); in hns_roce_unlock_cqs()
1523 spin_unlock(&send_cq->lock); in hns_roce_unlock_cqs()
1524 } else if (send_cq->cqn < recv_cq->cqn) { in hns_roce_unlock_cqs()
1525 spin_unlock(&recv_cq->lock); in hns_roce_unlock_cqs()
1526 spin_unlock(&send_cq->lock); in hns_roce_unlock_cqs()
1528 spin_unlock(&send_cq->lock); in hns_roce_unlock_cqs()
1529 spin_unlock(&recv_cq->lock); in hns_roce_unlock_cqs()
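
hns_roce_lock_cqs()/hns_roce_unlock_cqs() avoid an AB-BA deadlock by always taking the CQ with the smaller CQN first and by locking only once when send_cq and recv_cq are the same object (the __acquire/__release annotations keep sparse's lock-context checking balanced on the branches that skip a real lock). The same ordering rule in a small pthread sketch; struct cq and the lock_two()/unlock_two() helpers are illustrative, not the driver's API:

#include <pthread.h>
#include <stdio.h>

struct cq {
        unsigned long cqn;
        pthread_mutex_t lock;
};

/* always acquire the lower-numbered CQ first so two threads locking the
 * same pair in opposite roles cannot deadlock */
static void lock_two(struct cq *send_cq, struct cq *recv_cq)
{
        if (send_cq == recv_cq) {
                pthread_mutex_lock(&send_cq->lock);
        } else if (send_cq->cqn < recv_cq->cqn) {
                pthread_mutex_lock(&send_cq->lock);
                pthread_mutex_lock(&recv_cq->lock);
        } else {
                pthread_mutex_lock(&recv_cq->lock);
                pthread_mutex_lock(&send_cq->lock);
        }
}

/* release in the reverse order of acquisition */
static void unlock_two(struct cq *send_cq, struct cq *recv_cq)
{
        if (send_cq == recv_cq) {
                pthread_mutex_unlock(&send_cq->lock);
        } else if (send_cq->cqn < recv_cq->cqn) {
                pthread_mutex_unlock(&recv_cq->lock);
                pthread_mutex_unlock(&send_cq->lock);
        } else {
                pthread_mutex_unlock(&send_cq->lock);
                pthread_mutex_unlock(&recv_cq->lock);
        }
}

int main(void)
{
        struct cq a = { 1, PTHREAD_MUTEX_INITIALIZER };
        struct cq b = { 2, PTHREAD_MUTEX_INITIALIZER };

        lock_two(&b, &a);    /* still locks cqn 1 before cqn 2 */
        unlock_two(&b, &a);
        printf("locked and unlocked in CQN order\n");
        return 0;
}
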
1535 return hns_roce_buf_offset(hr_qp->mtr.kmem, offset); in get_wqe()
1540 return get_wqe(hr_qp, hr_qp->rq.offset + (n << hr_qp->rq.wqe_shift)); in hns_roce_get_recv_wqe()
1545 return get_wqe(hr_qp, hr_qp->sq.offset + (n << hr_qp->sq.wqe_shift)); in hns_roce_get_send_wqe()
1550 return get_wqe(hr_qp, hr_qp->sge.offset + (n << hr_qp->sge.sge_shift)); in hns_roce_get_extend_sge()
1559 cur = hr_wq->head - hr_wq->tail; in hns_roce_wq_overflow()
1560 if (likely(cur + nreq < hr_wq->wqe_cnt)) in hns_roce_wq_overflow()
1564 spin_lock(&hr_cq->lock); in hns_roce_wq_overflow()
1565 cur = hr_wq->head - hr_wq->tail; in hns_roce_wq_overflow()
1566 spin_unlock(&hr_cq->lock); in hns_roce_wq_overflow()
1568 return cur + nreq >= hr_wq->wqe_cnt; in hns_roce_wq_overflow()
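
hns_roce_wq_overflow() computes the queue occupancy as head - tail, where both are free-running counters so unsigned wrap-around still yields the right distance, and declares overflow when the occupancy plus the new requests reaches wqe_cnt; the get_*_wqe() helpers above locate entry n at queue offset + (n << wqe_shift). A compact sketch of both calculations with example sizes:

#include <stdio.h>

#define WQE_CNT   8u  /* power-of-two queue depth */
#define WQE_SHIFT 6u  /* 64-byte WQEs */

/* byte offset of WQE n inside the queue buffer, like get_wqe();
 * n is wrapped into the ring and scaled by the per-entry shift */
static unsigned long wqe_offset(unsigned long queue_offset, unsigned int n)
{
        return queue_offset + ((unsigned long)(n & (WQE_CNT - 1)) << WQE_SHIFT);
}

/* head and tail are free-running counters; their difference is the occupancy */
static int wq_overflow(unsigned int head, unsigned int tail, unsigned int nreq)
{
        unsigned int cur = head - tail;  /* wraps correctly for unsigned counters */

        return cur + nreq >= WQE_CNT;
}

int main(void)
{
        printf("WQE 3 lives at offset %lu\n", wqe_offset(0, 3));
        printf("head=10 tail=4 nreq=1 -> overflow=%d\n", wq_overflow(10, 4, 1));
        printf("head=10 tail=4 nreq=2 -> overflow=%d\n", wq_overflow(10, 4, 2));
        return 0;
}
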
1573 struct hns_roce_qp_table *qp_table = &hr_dev->qp_table; in hns_roce_init_qp_table()
1577 mutex_init(&qp_table->scc_mutex); in hns_roce_init_qp_table()
1578 mutex_init(&qp_table->bank_mutex); in hns_roce_init_qp_table()
1579 xa_init(&hr_dev->qp_table_xa); in hns_roce_init_qp_table()
1580 xa_init(&qp_table->dip_xa); in hns_roce_init_qp_table()
1582 reserved_from_bot = hr_dev->caps.reserved_qps; in hns_roce_init_qp_table()
1585 hr_dev->qp_table.bank[get_qp_bankid(i)].inuse++; in hns_roce_init_qp_table()
1586 hr_dev->qp_table.bank[get_qp_bankid(i)].min++; in hns_roce_init_qp_table()
1590 ida_init(&hr_dev->qp_table.bank[i].ida); in hns_roce_init_qp_table()
1591 hr_dev->qp_table.bank[i].max = hr_dev->caps.num_qps / in hns_roce_init_qp_table()
1592 HNS_ROCE_QP_BANK_NUM - 1; in hns_roce_init_qp_table()
1593 hr_dev->qp_table.bank[i].next = hr_dev->qp_table.bank[i].min; in hns_roce_init_qp_table()
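
hns_roce_init_qp_table() partitions the QPN space across HNS_ROCE_QP_BANK_NUM banks: each reserved QPN is charged to its bank's inuse count and raises that bank's minimum index, and every bank's maximum index is num_qps / HNS_ROCE_QP_BANK_NUM - 1. A small sketch of that partitioning, with example values for the bank count, QP count and reserved range:

#include <stdio.h>

#define BANK_NUM     8u     /* stand-in for HNS_ROCE_QP_BANK_NUM */
#define NUM_QPS      1024u  /* stand-in for caps.num_qps */
#define RESERVED_QPS 12u    /* stand-in for caps.reserved_qps */

struct bank { unsigned int min, max, inuse; };

int main(void)
{
        struct bank bank[BANK_NUM] = { { 0 } };

        /* reserved QPNs start at 0; each one bumps its bank's floor and usage */
        for (unsigned int qpn = 0; qpn < RESERVED_QPS; qpn++) {
                unsigned int b = qpn % BANK_NUM;  /* bank id comes from the low bits */

                bank[b].inuse++;
                bank[b].min++;
        }

        for (unsigned int b = 0; b < BANK_NUM; b++) {
                bank[b].max = NUM_QPS / BANK_NUM - 1;
                printf("bank %u: min=%u max=%u inuse=%u\n",
                       b, bank[b].min, bank[b].max, bank[b].inuse);
        }
        return 0;
}
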
1604 ida_destroy(&hr_dev->qp_table.bank[i].ida); in hns_roce_cleanup_qp_table()
1605 xa_destroy(&hr_dev->qp_table.dip_xa); in hns_roce_cleanup_qp_table()
1606 xa_destroy(&hr_dev->qp_table_xa); in hns_roce_cleanup_qp_table()
1607 mutex_destroy(&hr_dev->qp_table.bank_mutex); in hns_roce_cleanup_qp_table()
1608 mutex_destroy(&hr_dev->qp_table.scc_mutex); in hns_roce_cleanup_qp_table()