Lines Matching full:cqe
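(The hits below track the mlx5 InfiniBand completion-queue driver; by the function names this is drivers/infiniband/hw/mlx5/cq.c in the Linux kernel. The leading number on each line is the source line of the match.)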

84 	void *cqe = get_cqe(cq, n & cq->ibcq.cqe);  in get_sw_cqe()  local
87 cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64; in get_sw_cqe()
90 !((cqe64->op_own & MLX5_CQE_OWNER_MASK) ^ !!(n & (cq->ibcq.cqe + 1)))) { in get_sw_cqe()
91 return cqe; in get_sw_cqe()
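
The get_sw_cqe() hits show the three-step validity test: index the ring with n & cq->ibcq.cqe (the ring size is a power of two, so ibcq.cqe doubles as a mask), pick the live 64 bytes out of a 64- or 128-byte stride (a 128-byte CQE keeps its valid half at offset 64), and compare the CQE's owner bit against the wrap parity of the index. A minimal userspace model of the ownership test; the struct and constant here are simplified assumptions, not the kernel's definitions:

    #include <stdbool.h>
    #include <stdint.h>

    #define CQE_OWNER_MASK 1   /* stand-in for MLX5_CQE_OWNER_MASK */

    struct cqe64_model { uint8_t op_own; };  /* only the byte the test reads */

    /* Model of the check on line 90: the CQE belongs to software when
     * its owner bit equals the parity of how many times index n has
     * wrapped the ring; cqe_mask is cq->ibcq.cqe (ring size - 1). */
    static bool cqe_is_sw_owned(const struct cqe64_model *cqe64,
                                uint32_t n, uint32_t cqe_mask)
    {
        unsigned int wrap_parity = !!(n & (cqe_mask + 1));
        unsigned int owner = cqe64->op_own & CQE_OWNER_MASK;

        return !(owner ^ wrap_parity);   /* XOR is zero when they agree */
    }
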
120 static void handle_good_req(struct ib_wc *wc, struct mlx5_cqe64 *cqe, in handle_good_req() argument
124 switch (be32_to_cpu(cqe->sop_drop_qpn) >> 24) { in handle_good_req()
140 wc->byte_len = be32_to_cpu(cqe->byte_cnt); in handle_good_req()
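
In handle_good_req() the send opcode is the top byte of the big-endian sop_drop_qpn word (line 124), and for data-returning operations the completion length comes from byte_cnt (line 140). A trimmed model of that dispatch, using ntohl in place of be32_to_cpu; the opcode names and values are assumptions for the sketch, not the kernel's MLX5_OPCODE_* table:

    #include <arpa/inet.h>   /* ntohl stands in for be32_to_cpu */
    #include <stdint.h>

    /* Assumed opcode values; the kernel switches on MLX5_OPCODE_*. */
    enum { OP_SEND = 0x0a, OP_RDMA_READ = 0x10 };
    enum wc_op { WC_SEND, WC_RDMA_READ, WC_OTHER };

    struct send_wc_model { enum wc_op opcode; uint32_t byte_len; };

    /* Model of handle_good_req(): the opcode is the top byte of the
     * big-endian sop_drop_qpn word (124); byte_cnt fills byte_len for
     * data-returning ops such as RDMA read (140). */
    static void decode_send_cqe(struct send_wc_model *wc,
                                uint32_t sop_drop_qpn_be, uint32_t byte_cnt_be)
    {
        switch (ntohl(sop_drop_qpn_be) >> 24) {
        case OP_SEND:
            wc->opcode = WC_SEND;
            break;
        case OP_RDMA_READ:
            wc->opcode = WC_RDMA_READ;
            wc->byte_len = ntohl(byte_cnt_be);
            break;
        default:
            wc->opcode = WC_OTHER;
        }
    }
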
169 static void handle_responder(struct ib_wc *wc, struct mlx5_cqe64 *cqe, in handle_responder() argument
185 msrq = mlx5_cmd_get_srq(dev, be32_to_cpu(cqe->srqn)); in handle_responder()
192 wqe_ctr = be16_to_cpu(cqe->wqe_counter); in handle_responder()
203 wc->byte_len = be32_to_cpu(cqe->byte_cnt); in handle_responder()
205 switch (get_cqe_opcode(cqe)) { in handle_responder()
209 wc->ex.imm_data = cqe->immediate; in handle_responder()
214 if (unlikely(!((cqe->hds_ip_ext & CQE_L3_OK) && in handle_responder()
215 (cqe->hds_ip_ext & CQE_L4_OK)))) in handle_responder()
221 wc->ex.imm_data = cqe->immediate; in handle_responder()
226 wc->ex.invalidate_rkey = be32_to_cpu(cqe->inval_rkey); in handle_responder()
229 wc->src_qp = be32_to_cpu(cqe->flags_rqpn) & 0xffffff; in handle_responder()
230 wc->dlid_path_bits = cqe->ml_path; in handle_responder()
231 g = (be32_to_cpu(cqe->flags_rqpn) >> 28) & 3; in handle_responder()
234 u16 pkey = be32_to_cpu(cqe->pkey) & 0xffff; in handle_responder()
243 wc->slid = be16_to_cpu(cqe->slid); in handle_responder()
244 wc->sl = (be32_to_cpu(cqe->flags_rqpn) >> 24) & 0xf; in handle_responder()
249 vlan_present = cqe->l4_l3_hdr_type & 0x1; in handle_responder()
250 roce_packet_type = (be32_to_cpu(cqe->flags_rqpn) >> 24) & 0x3; in handle_responder()
252 wc->vlan_id = (be16_to_cpu(cqe->vlan_info)) & 0xfff; in handle_responder()
253 wc->sl = (be16_to_cpu(cqe->vlan_info) >> 13) & 0x7; in handle_responder()
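
handle_responder() unpacks the receive side: wqe_counter picks the (S)RQ entry (192), byte_cnt gives the length (203), the opcode decides between immediate data and an invalidated rkey (205-226), and flags_rqpn packs several fields into one word: the remote QPN in the low 24 bits (229), the IB service level in bits 24-27 (244), and the GRH-present bits in 28-29 (231). On RoCE the SL and VLAN ID come from vlan_info instead (249-253). A compact model of the bit slicing; the structs are simplified assumptions:

    #include <arpa/inet.h>   /* ntohl/ntohs stand in for be32/be16_to_cpu */
    #include <stdint.h>

    struct resp_cqe_model {
        uint32_t flags_rqpn;     /* big-endian on the wire */
        uint16_t vlan_info;      /* big-endian */
        uint8_t  ml_path;
        uint8_t  l4_l3_hdr_type;
    };

    struct resp_wc_model {
        uint32_t src_qp;
        uint8_t  dlid_path_bits, sl;
        int      grh_present, vlan_present;
        uint16_t vlan_id;
    };

    static void decode_responder_cqe(const struct resp_cqe_model *cqe,
                                     struct resp_wc_model *wc, int is_roce)
    {
        uint32_t flags_rqpn = ntohl(cqe->flags_rqpn);

        wc->src_qp = flags_rqpn & 0xffffff;        /* 229: remote QPN */
        wc->dlid_path_bits = cqe->ml_path;         /* 230 */
        wc->grh_present = (flags_rqpn >> 28) & 3;  /* 231: the 'g' bits */

        if (!is_roce) {
            wc->sl = (flags_rqpn >> 24) & 0xf;     /* 244: IB service level */
            return;
        }

        wc->vlan_present = cqe->l4_l3_hdr_type & 0x1;   /* 249 */
        if (wc->vlan_present) {
            uint16_t vlan_info = ntohs(cqe->vlan_info);
            wc->vlan_id = vlan_info & 0xfff;       /* 252: 802.1Q VID */
            wc->sl = (vlan_info >> 13) & 0x7;      /* 253: PCP bits as SL */
        }
    }
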
273 static void dump_cqe(struct mlx5_ib_dev *dev, struct mlx5_err_cqe *cqe, in dump_cqe() argument
279 cqe, sizeof(*cqe), false); in dump_cqe()
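
dump_cqe() logs the raw 64-byte error CQE; the hit at 279 is the tail of a print_hex_dump() call supplying the buffer and length. Reconstructed with plausible arguments (the log level and prefix string are assumptions; the buffer and length arguments are the matched ones):

    print_hex_dump(KERN_WARNING, "cqe_dump: ", DUMP_PREFIX_OFFSET, 16, 1,
                   cqe, sizeof(*cqe), false);
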
283 struct mlx5_err_cqe *cqe, in mlx5_handle_error_cqe() argument
288 switch (cqe->syndrome) { in mlx5_handle_error_cqe()
339 wc->vendor_err = cqe->vendor_err_synd; in mlx5_handle_error_cqe()
341 dump_cqe(dev, cqe, wc, dump); in mlx5_handle_error_cqe()
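
mlx5_handle_error_cqe() is one switch from the hardware syndrome byte (288) to an ib_wc_status, after which the vendor syndrome is copied through verbatim (339) and the CQE is dumped (341). A trimmed model; the two syndrome values are assumptions standing in for the kernel's MLX5_CQE_SYNDROME_* constants:

    #include <stdint.h>

    /* Assumed values for the sketch. */
    enum { SYN_LOCAL_LENGTH_ERR = 0x01, SYN_WR_FLUSH_ERR = 0x05 };
    enum wc_status_model { WC_LOC_LEN_ERR, WC_WR_FLUSH_ERR, WC_GENERAL_ERR };

    struct err_cqe_model { uint8_t syndrome, vendor_err_synd; };
    struct err_wc_model { enum wc_status_model status; uint8_t vendor_err; };

    /* One switch from hardware syndrome to completion status, then the
     * vendor syndrome is passed through (line 339). */
    static void decode_error_cqe(const struct err_cqe_model *cqe,
                                 struct err_wc_model *wc)
    {
        switch (cqe->syndrome) {
        case SYN_LOCAL_LENGTH_ERR: wc->status = WC_LOC_LEN_ERR;  break;
        case SYN_WR_FLUSH_ERR:     wc->status = WC_WR_FLUSH_ERR; break;
        default:                   wc->status = WC_GENERAL_ERR;  break;
        }
        wc->vendor_err = cqe->vendor_err_synd;
    }
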
365 static void get_sig_err_item(struct mlx5_sig_err_cqe *cqe, in get_sig_err_item() argument
368 u16 syndrome = be16_to_cpu(cqe->syndrome); in get_sig_err_item()
376 item->expected = be32_to_cpu(cqe->expected_trans_sig) >> 16; in get_sig_err_item()
377 item->actual = be32_to_cpu(cqe->actual_trans_sig) >> 16; in get_sig_err_item()
381 item->expected = be32_to_cpu(cqe->expected_reftag); in get_sig_err_item()
382 item->actual = be32_to_cpu(cqe->actual_reftag); in get_sig_err_item()
386 item->expected = be32_to_cpu(cqe->expected_trans_sig) & 0xffff; in get_sig_err_item()
387 item->actual = be32_to_cpu(cqe->actual_trans_sig) & 0xffff; in get_sig_err_item()
393 item->sig_err_offset = be64_to_cpu(cqe->err_offset); in get_sig_err_item()
394 item->key = be32_to_cpu(cqe->mkey); in get_sig_err_item()
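
get_sig_err_item() decodes a T10-DIF signature error: bits of the 16-bit syndrome (368) select which check failed, and the expected/actual pair comes from different slices of the same words: guard tag in the high 16 bits of the transport signature (376-377), reference tag as a full 32-bit word (381-382), application tag in the low 16 bits (386-387), with the failing byte offset (393) and the MR key (394) rounding out the item. A model of the slicing; the syndrome bit positions are assumptions for the sketch:

    #include <arpa/inet.h>   /* ntohl/ntohs for be32/be16_to_cpu */
    #include <endian.h>      /* be64toh for be64_to_cpu */
    #include <stdint.h>

    /* Assumed bit positions for the sketch. */
    #define GUARD_ERR  (1 << 13)
    #define APPTAG_ERR (1 << 12)
    #define REFTAG_ERR (1 << 11)

    enum sig_err { SIG_BAD_GUARD, SIG_BAD_REFTAG, SIG_BAD_APPTAG };

    struct sig_cqe_model {               /* big-endian wire fields */
        uint16_t syndrome;
        uint32_t expected_trans_sig, actual_trans_sig;
        uint32_t expected_reftag, actual_reftag;
        uint64_t err_offset;
        uint32_t mkey;
    };

    struct sig_item_model {
        enum sig_err err_type;
        uint32_t expected, actual, key;
        uint64_t sig_err_offset;
    };

    static void decode_sig_err(const struct sig_cqe_model *cqe,
                               struct sig_item_model *item)
    {
        uint16_t syndrome = ntohs(cqe->syndrome);          /* 368 */

        if (syndrome & GUARD_ERR) {           /* guard (CRC) mismatch */
            item->err_type = SIG_BAD_GUARD;
            item->expected = ntohl(cqe->expected_trans_sig) >> 16;
            item->actual   = ntohl(cqe->actual_trans_sig) >> 16;
        } else if (syndrome & REFTAG_ERR) {   /* reference tag mismatch */
            item->err_type = SIG_BAD_REFTAG;
            item->expected = ntohl(cqe->expected_reftag);
            item->actual   = ntohl(cqe->actual_reftag);
        } else if (syndrome & APPTAG_ERR) {   /* application tag mismatch */
            item->err_type = SIG_BAD_APPTAG;
            item->expected = ntohl(cqe->expected_trans_sig) & 0xffff;
            item->actual   = ntohl(cqe->actual_trans_sig) & 0xffff;
        }

        item->sig_err_offset = be64toh(cqe->err_offset);   /* 393 */
        item->key = ntohl(cqe->mkey);                      /* 394 */
    }
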
462 void *cqe; in mlx5_poll_one() local
466 cqe = next_cqe_sw(cq); in mlx5_poll_one()
467 if (!cqe) in mlx5_poll_one()
470 cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64; in mlx5_poll_one()
488 mlx5_ib_warn(dev, "unexpected resize cqe\n"); in mlx5_poll_one()
527 mlx5_ib_dbg(dev, "%s error cqe on cqn 0x%x:\n", in mlx5_poll_one()
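
mlx5_poll_one() ties the pieces together: next_cqe_sw() yields NULL when no software-owned CQE is pending (466-467), the valid 64-byte view is selected exactly as in get_sw_cqe() (470), and the later hits are the diagnostics for an unexpected resize CQE (488) and for error CQEs logged against the CQN (527). A skeletal, compilable model of the entry path; every name here is a stand-in:

    #include <stddef.h>

    /* Stand-in type; the kernel uses struct mlx5_ib_cq / mlx5_cqe64. */
    struct cq_model { int cqe_sz; void *pending; };

    /* Stub for next_cqe_sw(): hand back the queued CQE, if any. */
    static void *next_sw_cqe(struct cq_model *cq)
    {
        void *c = cq->pending;
        cq->pending = NULL;
        return c;
    }

    /* Skeleton of the mlx5_poll_one() entry path. */
    static void *poll_one(struct cq_model *cq)
    {
        void *cqe = next_sw_cqe(cq);

        if (!cqe)
            return NULL;     /* 467: ring empty, nothing to report */

        /* 470: a 128-byte CQE keeps its valid payload in the second half */
        return (cq->cqe_sz == 64) ? cqe : (char *)cqe + 64;
    }
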
816 mlx5_ib_warn(dev, "CQE compression is not supported for size %d!\n", in create_cq_user()
826 mlx5_ib_dbg(dev, "CQE compression res format %d error: %d\n", in create_cq_user()
840 "CQE padding is not supported for CQE size of %dB!\n", in create_cq_user()
877 void *cqe; in init_cq_frag_buf() local
881 cqe = mlx5_frag_buf_get_wqe(&buf->fbc, i); in init_cq_frag_buf()
882 cqe64 = buf->cqe_size == 64 ? cqe : cqe + 64; in init_cq_frag_buf()
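
init_cq_frag_buf() stamps every slot of the freshly allocated fragmented buffer so that hardware owns it: mlx5_frag_buf_get_wqe() returns slot i (881), the same 64/128-byte selection picks where the stamp lands (882), and the kernel then writes an "invalid" opcode into op_own. A model of the loop over a flat 64-byte ring; the opcode value is an assumption standing in for MLX5_CQE_INVALID:

    #include <stdint.h>

    #define CQE_INVALID 0xf   /* assumed stand-in for MLX5_CQE_INVALID */

    struct cqe64_slot { uint8_t data[63]; uint8_t op_own; };

    /* Stamp each entry so the first poll sees nothing software-owned;
     * the opcode nibble sits in the high half of op_own.  With 128-byte
     * strides the kernel stamps the second half of each slot instead. */
    static void init_ring(struct cqe64_slot *ring, int nent)
    {
        for (int i = 0; i < nent; i++)
            ring[i].op_own = CQE_INVALID << 4;
    }
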
957 int entries = attr->cqe; in mlx5_ib_create_cq()
981 cq->ibcq.cqe = entries - 1; in mlx5_ib_create_cq()
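
mlx5_ib_create_cq() takes the requested depth from attr->cqe (957) and reports one less back through ibcq.cqe (981). The power-of-two rounding is not visible in these hits, but it is implied by the mask arithmetic in get_sw_cqe(): the size-minus-one value doubles as the index mask and the wrap-parity bit. A sketch of the sizing rule, open-coding the kernel's roundup_pow_of_two():

    /* ibcq.cqe = ring_size - 1; it doubles as the index mask
     * (n & cq->ibcq.cqe) and the wrap-parity bit (cqe + 1). */
    static unsigned int ring_mask_for(unsigned int requested)
    {
        unsigned int size = 1;

        while (size < requested + 1)   /* roundup_pow_of_two(requested + 1) */
            size <<= 1;
        return size - 1;
    }
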
1083 void *cqe, *dest; in __mlx5_ib_cq_clean() local
1098 if (prod_index == cq->mcq.cons_index + cq->ibcq.cqe) in __mlx5_ib_cq_clean()
1105 cqe = get_cqe(cq, prod_index & cq->ibcq.cqe); in __mlx5_ib_cq_clean()
1106 cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64; in __mlx5_ib_cq_clean()
1112 dest = get_cqe(cq, (prod_index + nfreed) & cq->ibcq.cqe); in __mlx5_ib_cq_clean()
1115 memcpy(dest, cqe, cq->mcq.cqe_sz); in __mlx5_ib_cq_clean()
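
__mlx5_ib_cq_clean() compacts the ring when a QP is destroyed: it scans forward from the consumer index for at most one full ring (the cons_index + cq->ibcq.cqe bound at 1098), then walks back, counting CQEs that belong to the dying QP and memcpy()ing every survivor down over the freed slots (1105-1115) while preserving the destination's owner bit so the parity test in get_sw_cqe() stays valid. A compact model of the compaction:

    #include <stdint.h>
    #include <string.h>

    #define OWNER_MASK 1   /* stand-in for MLX5_CQE_OWNER_MASK */

    /* Simplified CQE: an identifying QP/SRQ number plus op_own. */
    struct cqe_model { uint32_t rsn; uint8_t op_own; };

    /* Model of the backward compaction walk; mask is cq->ibcq.cqe and
     * prod is one past the last software-owned CQE found by the scan. */
    static void clean_cq(struct cqe_model *ring, uint32_t mask,
                         uint32_t cons, uint32_t prod, uint32_t dead_rsn)
    {
        uint32_t nfreed = 0;

        while ((int)--prod - (int)cons >= 0) {
            struct cqe_model *cqe = &ring[prod & mask];

            if (cqe->rsn == dead_rsn) {
                nfreed++;                        /* slot is reclaimed */
            } else if (nfreed) {
                /* slide the survivor up by nfreed slots (1112-1115),
                 * keeping the destination's owner bit intact */
                struct cqe_model *dest = &ring[(prod + nfreed) & mask];
                uint8_t owner = dest->op_own & OWNER_MASK;

                memcpy(dest, cqe, sizeof(*dest));
                dest->op_own = owner | (dest->op_own & ~OWNER_MASK);
            }
        }
    }
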
1232 mlx5_ib_warn(dev, "resize from different cqe size is not supported\n"); in copy_resize_cqes()
1241 mlx5_ib_warn(dev, "expected cqe in sw ownership\n"); in copy_resize_cqes()
1257 mlx5_ib_warn(dev, "expected cqe in sw ownership\n"); in copy_resize_cqes()
1262 pr_warn("resize CQ failed to get resize CQE, CQN 0x%x\n", in copy_resize_cqes()
1303 if (entries == ibcq->cqe + 1) in mlx5_ib_resize_cq()
1377 cq->ibcq.cqe = entries - 1; in mlx5_ib_resize_cq()
1396 cq->ibcq.cqe = entries - 1; in mlx5_ib_resize_cq()
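
The resize path rounds out the picture: copy_resize_cqes() refuses to change the CQE stride (1232) and insists every CQE it migrates is still software-owned (1241, 1257, 1262), while mlx5_ib_resize_cq() returns early when the requested depth already matches. Since ibcq->cqe stores usable entries (ring size minus one), the no-op test compares against cqe + 1 (1303), and the same size-minus-one value is written back on both resize paths (1377, 1396). A one-liner model of the no-op check:

    /* ibcq->cqe holds ring_size - 1, so "same size" is cqe + 1. */
    static int resize_is_noop(unsigned int requested, unsigned int cur_cqe)
    {
        return requested == cur_cqe + 1;
    }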