/*
 * mlx4 SR-IOV resource tracker -- excerpts from
 * drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
 */
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
        /* > 0 --> apply mirror when getting into HA mode      */
        /* = 0 --> un-apply mirror when getting out of HA mode */
        u32                     mirr_mbox_size;
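/*
 * Tracked resources live in one red-black tree per resource type, keyed
 * by res_id; a per-slave list additionally links everything a function
 * owns, so it can all be torn down when that slave resets.
 */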
static struct res_common *res_tracker_lookup(struct rb_root *root, u64 res_id)
{
        struct rb_node *node = root->rb_node;

        while (node) {
                struct res_common *res = rb_entry(node, struct res_common,
                                                  node);

                if (res_id < res->res_id)
                        node = node->rb_left;
                else if (res_id > res->res_id)
                        node = node->rb_right;
                else
                        return res;
        }
        return NULL;
}
static int res_tracker_insert(struct rb_root *root, struct res_common *res)
{
        struct rb_node **new = &(root->rb_node), *parent = NULL;

        /* Figure out where to put the new node */
        while (*new) {
                struct res_common *this = rb_entry(*new, struct res_common,
                                                   node);

                parent = *new;
                if (res->res_id < this->res_id)
                        new = &((*new)->rb_left);
                else if (res->res_id > this->res_id)
                        new = &((*new)->rb_right);
                else
                        return -EEXIST;
        }

        /* Add the new node and rebalance the tree. */
        rb_link_node(&res->node, parent, new);
        rb_insert_color(&res->node, root);

        return 0;
}
static void rem_slave_vlans(struct mlx4_dev *dev, int slave);
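/*
 * Admission control for slave allocations.  Each slave has a hard cap
 * (quota[slave]) and a carve-out (guaranteed[slave]) that was pre-charged
 * to the reserved pool.  A request is served from the guaranteed portion
 * first; whatever remains must come from the shared free pool, and is
 * refused if it would eat into what is still reserved for the other
 * slaves' guarantees.
 */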
static inline int mlx4_grant_resource(struct mlx4_dev *dev, int slave,
                                      enum mlx4_resource res_type, int count,
                                      int port)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct resource_allocator *res_alloc =
                &priv->mfunc.master.res_tracker.res_alloc[res_type];
        int err = -EDQUOT;
        int allocated, free, reserved, guaranteed, from_free;
        int from_rsvd;

        if (slave > dev->persist->num_vfs)
                return -EINVAL;

        spin_lock(&res_alloc->alloc_lock);
        allocated = (port > 0) ?
                res_alloc->allocated[(port - 1) *
                (dev->persist->num_vfs + 1) + slave] :
                res_alloc->allocated[slave];
        free = (port > 0) ? res_alloc->res_port_free[port - 1] :
                res_alloc->res_free;
        reserved = (port > 0) ? res_alloc->res_port_rsvd[port - 1] :
                res_alloc->res_reserved;
        guaranteed = res_alloc->guaranteed[slave];

        if (allocated + count > res_alloc->quota[slave]) {
                mlx4_warn(dev, "VF %d port %d res %s: quota exceeded, count %d alloc %d quota %d\n",
                          slave, port, resource_str(res_type), count,
                          allocated, res_alloc->quota[slave]);
                goto out;
        }

        if (allocated + count <= guaranteed) {
                err = 0;
                from_rsvd = count;
        } else {
                /* portion may need to be obtained from free area */
                if (guaranteed - allocated > 0)
                        from_free = count - (guaranteed - allocated);
                else
                        from_free = count;

                from_rsvd = count - from_free;

                if (free - from_free >= reserved) {
                        err = 0;
                } else {
                        mlx4_warn(dev, "VF %d port %d res %s: free pool empty, free %d from_free %d rsvd %d\n",
                                  slave, port, resource_str(res_type), free,
                                  from_free, reserved);
                        goto out;
                }
        }

        if (!err) {
                /* grant the request */
                if (port > 0) {
                        res_alloc->allocated[(port - 1) *
                        (dev->persist->num_vfs + 1) + slave] += count;
                        res_alloc->res_port_free[port - 1] -= count;
                        res_alloc->res_port_rsvd[port - 1] -= from_rsvd;
                } else {
                        res_alloc->allocated[slave] += count;
                        res_alloc->res_free -= count;
                        res_alloc->res_reserved -= from_rsvd;
                }
        }

out:
        spin_unlock(&res_alloc->alloc_lock);
        return err;
}
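/*
 * Inverse of mlx4_grant_resource(): returns "count" instances to the
 * pool, crediting back to the reserved pool the portion that falls below
 * the slave's guarantee again.
 */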
static inline void mlx4_release_resource(struct mlx4_dev *dev, int slave,
                                         enum mlx4_resource res_type, int count,
                                         int port)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct resource_allocator *res_alloc =
                &priv->mfunc.master.res_tracker.res_alloc[res_type];
        int allocated, guaranteed, from_rsvd;

        if (slave > dev->persist->num_vfs)
                return;

        spin_lock(&res_alloc->alloc_lock);

        allocated = (port > 0) ?
                res_alloc->allocated[(port - 1) *
                (dev->persist->num_vfs + 1) + slave] :
                res_alloc->allocated[slave];
        guaranteed = res_alloc->guaranteed[slave];

        if (allocated - count >= guaranteed) {
                from_rsvd = 0;
        } else {
                /* portion may need to be returned to reserved area */
                if (allocated - guaranteed > 0)
                        from_rsvd = count - (allocated - guaranteed);
                else
                        from_rsvd = count;
        }

        if (port > 0) {
                res_alloc->allocated[(port - 1) *
                (dev->persist->num_vfs + 1) + slave] -= count;
                res_alloc->res_port_free[port - 1] += count;
                res_alloc->res_port_rsvd[port - 1] += from_rsvd;
        } else {
                res_alloc->allocated[slave] -= count;
                res_alloc->res_free += count;
                res_alloc->res_reserved += from_rsvd;
        }

        spin_unlock(&res_alloc->alloc_lock);
}
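/*
 * Illustrative arithmetic for the quota formulas below (numbers are made
 * up, not from any particular HCA): with num_vfs = 7 (eight functions
 * including the PF) and num_instances = 65536, each function gets
 * guaranteed = 65536 / 16 = 4096 and quota = 32768 + 4096 = 36864.  The
 * guarantees sum to half the pool; the other half is shared headroom, so
 * quotas deliberately overcommit while guarantees stay safe.
 */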
static inline void initialize_res_quotas(struct mlx4_dev *dev,
                                         struct resource_allocator *res_alloc,
                                         enum mlx4_resource res_type,
                                         int vf, int num_instances)
{
        res_alloc->guaranteed[vf] = num_instances /
                                    (2 * (dev->persist->num_vfs + 1));
        res_alloc->quota[vf] = (num_instances / 2) + res_alloc->guaranteed[vf];
        if (vf == mlx4_master_func_num(dev)) {
                res_alloc->res_free = num_instances;
                if (res_type == RES_MTT) {
                        /* reserved mtts will be taken out of the PF allocation */
                        res_alloc->res_free += dev->caps.reserved_mtts;
                        res_alloc->guaranteed[vf] += dev->caps.reserved_mtts;
                        res_alloc->quota[vf] += dev->caps.reserved_mtts;
                }
        }
}
void mlx4_init_quotas(struct mlx4_dev *dev)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        int pf;

        /* quotas for VFs are initialized in mlx4_slave_cap */
        if (mlx4_is_slave(dev))
                return;

        if (!mlx4_is_mfunc(dev)) {
                dev->quotas.qp = dev->caps.num_qps - dev->caps.reserved_qps -
                        mlx4_num_reserved_sqps(dev);
                dev->quotas.cq = dev->caps.num_cqs - dev->caps.reserved_cqs;
                dev->quotas.srq = dev->caps.num_srqs - dev->caps.reserved_srqs;
                dev->quotas.mtt = dev->caps.num_mtts - dev->caps.reserved_mtts;
                dev->quotas.mpt = dev->caps.num_mpts - dev->caps.reserved_mrws;
                return;
        }

        pf = mlx4_master_func_num(dev);
        dev->quotas.qp =
                priv->mfunc.master.res_tracker.res_alloc[RES_QP].quota[pf];
        dev->quotas.cq =
                priv->mfunc.master.res_tracker.res_alloc[RES_CQ].quota[pf];
        dev->quotas.srq =
                priv->mfunc.master.res_tracker.res_alloc[RES_SRQ].quota[pf];
        dev->quotas.mtt =
                priv->mfunc.master.res_tracker.res_alloc[RES_MTT].quota[pf];
        dev->quotas.mpt =
                priv->mfunc.master.res_tracker.res_alloc[RES_MPT].quota[pf];
}
static int
mlx4_calc_res_counter_guaranteed(struct mlx4_dev *dev,
                                 struct resource_allocator *res_alloc,
                                 int vf)
{
        struct mlx4_active_ports actv_ports;
        int ports, counters_guaranteed;

        /* For master, only allocate according to the number of phys ports */
        if (vf == mlx4_master_func_num(dev))
                return MLX4_PF_COUNTERS_PER_PORT * dev->caps.num_ports;

        /* calculate real number of ports for the VF */
        actv_ports = mlx4_get_active_ports(dev, vf);
        ports = bitmap_weight(actv_ports.ports, dev->caps.num_ports);
        counters_guaranteed = ports * MLX4_VF_COUNTERS_PER_PORT;

        /* If we do not have enough counters for this VF, do not
         * allocate any for it. '-1' to reduce the sink counter.
         */
        if ((res_alloc->res_reserved + counters_guaranteed) >
            (dev->caps.max_counters - 1))
                return 0;

        return counters_guaranteed;
}
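/*
 * res_alloc->allocated[] is indexed by slave for global resources; for
 * per-port resources (MAC/VLAN) it is a flattened [port][slave] matrix,
 * addressed as (port - 1) * (num_vfs + 1) + slave, exactly as the
 * grant/release helpers above read it.
 */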
int mlx4_init_resource_tracker(struct mlx4_dev *dev)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        int i, j;
        int t;

        priv->mfunc.master.res_tracker.slave_list =
                kcalloc(dev->num_slaves, sizeof(struct slave_list),
                        GFP_KERNEL);
        if (!priv->mfunc.master.res_tracker.slave_list)
                return -ENOMEM;

        for (i = 0 ; i < dev->num_slaves; i++) {
                for (t = 0; t < MLX4_NUM_OF_RESOURCE_TYPE; t++)
                        INIT_LIST_HEAD(&priv->mfunc.master.res_tracker.
                                       slave_list[i].res_list[t]);
                mutex_init(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
        }

        mlx4_dbg(dev, "Started init_resource_tracker: %ld slaves\n",
                 dev->num_slaves);
        for (i = 0 ; i < MLX4_NUM_OF_RESOURCE_TYPE; i++)
                priv->mfunc.master.res_tracker.res_tree[i] = RB_ROOT;

        for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
                struct resource_allocator *res_alloc =
                        &priv->mfunc.master.res_tracker.res_alloc[i];
                res_alloc->quota = kmalloc_array(dev->persist->num_vfs + 1,
                                                 sizeof(int), GFP_KERNEL);
                res_alloc->guaranteed = kmalloc_array(dev->persist->num_vfs + 1,
                                                      sizeof(int), GFP_KERNEL);
                if (i == RES_MAC || i == RES_VLAN)
                        res_alloc->allocated =
                                kcalloc(MLX4_MAX_PORTS *
                                                (dev->persist->num_vfs + 1),
                                        sizeof(int), GFP_KERNEL);
                else
                        res_alloc->allocated =
                                kcalloc(dev->persist->num_vfs + 1,
                                        sizeof(int), GFP_KERNEL);
                /* Reduce the sink counter */
                if (i == RES_COUNTER)
                        res_alloc->res_free = dev->caps.max_counters - 1;

                if (!res_alloc->quota || !res_alloc->guaranteed ||
                    !res_alloc->allocated)
                        goto no_mem_err;

                spin_lock_init(&res_alloc->alloc_lock);
                for (t = 0; t < dev->persist->num_vfs + 1; t++) {
                        struct mlx4_active_ports actv_ports =
                                mlx4_get_active_ports(dev, t);
                        switch (i) {
                        case RES_QP:
                                initialize_res_quotas(dev, res_alloc, RES_QP,
                                                      t, dev->caps.num_qps -
                                                      dev->caps.reserved_qps -
                                                      mlx4_num_reserved_sqps(dev));
                                break;
                        case RES_CQ:
                                initialize_res_quotas(dev, res_alloc, RES_CQ,
                                                      t, dev->caps.num_cqs -
                                                      dev->caps.reserved_cqs);
                                break;
                        case RES_SRQ:
                                initialize_res_quotas(dev, res_alloc, RES_SRQ,
                                                      t, dev->caps.num_srqs -
                                                      dev->caps.reserved_srqs);
                                break;
                        case RES_MPT:
                                initialize_res_quotas(dev, res_alloc, RES_MPT,
                                                      t, dev->caps.num_mpts -
                                                      dev->caps.reserved_mrws);
                                break;
                        case RES_MTT:
                                initialize_res_quotas(dev, res_alloc, RES_MTT,
                                                      t, dev->caps.num_mtts -
                                                      dev->caps.reserved_mtts);
                                break;
                        case RES_MAC:
                                if (t == mlx4_master_func_num(dev)) {
                                        int max_vfs_pport = 0;
                                        /* Calculate the max vfs per port for
                                         * both ports.
                                         */
                                        for (j = 0; j < dev->caps.num_ports;
                                             j++) {
                                                struct mlx4_slaves_pport slaves_pport =
                                                        mlx4_phys_to_slaves_pport(dev, j + 1);
                                                unsigned current_slaves =
                                                        bitmap_weight(slaves_pport.slaves,
                                                                      dev->caps.num_ports) - 1;
                                                if (max_vfs_pport < current_slaves)
                                                        max_vfs_pport =
                                                                current_slaves;
                                        }
                                        res_alloc->quota[t] =
                                                MLX4_MAX_MAC_NUM -
                                                2 * max_vfs_pport;
                                        res_alloc->guaranteed[t] = 2;
                                        for (j = 0; j < MLX4_MAX_PORTS; j++)
                                                res_alloc->res_port_free[j] =
                                                        MLX4_MAX_MAC_NUM;
                                } else {
                                        res_alloc->quota[t] = MLX4_MAX_MAC_NUM;
                                        res_alloc->guaranteed[t] = 2;
                                }
                                break;
                        case RES_VLAN:
                                if (t == mlx4_master_func_num(dev)) {
                                        res_alloc->quota[t] = MLX4_MAX_VLAN_NUM;
                                        res_alloc->guaranteed[t] = MLX4_MAX_VLAN_NUM / 2;
                                        for (j = 0; j < MLX4_MAX_PORTS; j++)
                                                res_alloc->res_port_free[j] =
                                                        res_alloc->quota[t];
                                } else {
                                        res_alloc->quota[t] = MLX4_MAX_VLAN_NUM / 2;
                                        res_alloc->guaranteed[t] = 0;
                                }
                                break;
                        case RES_COUNTER:
                                res_alloc->quota[t] = dev->caps.max_counters;
                                res_alloc->guaranteed[t] =
                                        mlx4_calc_res_counter_guaranteed(dev, res_alloc, t);
                                break;
                        default:
                                break;
                        }
                        if (i == RES_MAC || i == RES_VLAN) {
                                for (j = 0; j < dev->caps.num_ports; j++)
                                        if (test_bit(j, actv_ports.ports))
                                                res_alloc->res_port_rsvd[j] +=
                                                        res_alloc->guaranteed[t];
                        } else {
                                res_alloc->res_reserved += res_alloc->guaranteed[t];
                        }
                }
        }
        spin_lock_init(&priv->mfunc.master.res_tracker.lock);
        return 0;

no_mem_err:
        for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
                kfree(priv->mfunc.master.res_tracker.res_alloc[i].allocated);
                priv->mfunc.master.res_tracker.res_alloc[i].allocated = NULL;
                kfree(priv->mfunc.master.res_tracker.res_alloc[i].guaranteed);
                priv->mfunc.master.res_tracker.res_alloc[i].guaranteed = NULL;
                kfree(priv->mfunc.master.res_tracker.res_alloc[i].quota);
                priv->mfunc.master.res_tracker.res_alloc[i].quota = NULL;
        }
        return -ENOMEM;
}
void mlx4_free_resource_tracker(struct mlx4_dev *dev,
                                enum mlx4_res_tracker_free_type type)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        int i;

        if (priv->mfunc.master.res_tracker.slave_list) {
                if (type != RES_TR_FREE_STRUCTS_ONLY) {
                        for (i = 0; i < dev->num_slaves; i++) {
                                if (type == RES_TR_FREE_ALL ||
                                    dev->caps.function != i)
                                        mlx4_delete_all_resources_for_slave(dev, i);
                        }
                        /* free master's vlans */
                        i = dev->caps.function;
                        mlx4_reset_roce_gids(dev, i);
                        mutex_lock(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
                        rem_slave_vlans(dev, i);
                        mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
                }

                if (type != RES_TR_FREE_SLAVES_ONLY) {
                        for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
                                kfree(priv->mfunc.master.res_tracker.res_alloc[i].allocated);
                                priv->mfunc.master.res_tracker.res_alloc[i].allocated = NULL;
                                kfree(priv->mfunc.master.res_tracker.res_alloc[i].guaranteed);
                                priv->mfunc.master.res_tracker.res_alloc[i].guaranteed = NULL;
                                kfree(priv->mfunc.master.res_tracker.res_alloc[i].quota);
                                priv->mfunc.master.res_tracker.res_alloc[i].quota = NULL;
                        }
                        kfree(priv->mfunc.master.res_tracker.slave_list);
                        priv->mfunc.master.res_tracker.slave_list = NULL;
                }
        }
}
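/*
 * The helpers below rewrite command mailboxes in flight: a slave only
 * ever sees virtualized pkey/GID table indexes, so the master patches in
 * the physical ones before forwarding the command to firmware.
 */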
static void update_pkey_index(struct mlx4_dev *dev, int slave,
                              struct mlx4_cmd_mailbox *inbox)
{
        u8 sched = *(u8 *)(inbox->buf + 64);
        u8 orig_index = *(u8 *)(inbox->buf + 35);
        u8 new_index;
        struct mlx4_priv *priv = mlx4_priv(dev);
        int port;

        port = (sched >> 6 & 1) + 1;

        new_index = priv->virt2phys_pkey[slave][port - 1][orig_index];
        *(u8 *)(inbox->buf + 35) = new_index;
}
static void update_gid(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *inbox,
                       u8 slave)
{
        struct mlx4_qp_context  *qp_ctx = inbox->buf + 8;
        enum mlx4_qp_optpar     optpar = be32_to_cpu(*(__be32 *) inbox->buf);
        u32                     ts = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
        int port;

        if (MLX4_QP_ST_UD == ts) {
                port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
                if (mlx4_is_eth(dev, port))
                        qp_ctx->pri_path.mgid_index =
                                mlx4_get_base_gid_ix(dev, slave, port) | 0x80;
                else
                        qp_ctx->pri_path.mgid_index = slave | 0x80;

        } else if (MLX4_QP_ST_RC == ts || MLX4_QP_ST_XRC == ts || MLX4_QP_ST_UC == ts) {
                if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) {
                        port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
                        if (mlx4_is_eth(dev, port)) {
                                qp_ctx->pri_path.mgid_index +=
                                        mlx4_get_base_gid_ix(dev, slave, port);
                                qp_ctx->pri_path.mgid_index &= 0x7f;
                        } else {
                                qp_ctx->pri_path.mgid_index = slave & 0x7F;
                        }
                }
                if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
                        port = (qp_ctx->alt_path.sched_queue >> 6 & 1) + 1;
                        if (mlx4_is_eth(dev, port)) {
                                qp_ctx->alt_path.mgid_index +=
                                        mlx4_get_base_gid_ix(dev, slave, port);
                                qp_ctx->alt_path.mgid_index &= 0x7f;
                        } else {
                                qp_ctx->alt_path.mgid_index = slave & 0x7F;
                        }
                }
        }
}
static int handle_counter(struct mlx4_dev *dev, struct mlx4_qp_context *qpc,
                          u8 slave, int port);
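/*
 * Applies the PF-administered vport policy (VST default vlan/QoS,
 * spoof-checked source MAC, link state, counter index) to a QP context
 * the VF is about to program, without the VF being aware of it.
 */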
static int update_vport_qp_param(struct mlx4_dev *dev,
                                 struct mlx4_cmd_mailbox *inbox,
                                 u8 slave, u32 qpn)
{
        struct mlx4_qp_context  *qpc = inbox->buf + 8;
        struct mlx4_vport_oper_state *vp_oper;
        struct mlx4_priv *priv;
        u32 qp_type;
        int port, err = 0;

        port = (qpc->pri_path.sched_queue & 0x40) ? 2 : 1;
        priv = mlx4_priv(dev);
        vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
        qp_type = (be32_to_cpu(qpc->flags) >> 16) & 0xff;

        err = handle_counter(dev, qpc, slave, port);
        if (err)
                goto out;

        if (MLX4_VGT != vp_oper->state.default_vlan) {
                /* the reserved QPs (special, proxy, tunnel)
                 * do not operate over vlans
                 */
                if (mlx4_is_qp_reserved(dev, qpn))
                        return 0;

                /* force strip vlan by clear vsd, MLX QP refers to Raw Ethernet */
                if (qp_type == MLX4_QP_ST_UD ||
                    (qp_type == MLX4_QP_ST_MLX && mlx4_is_eth(dev, port))) {
                        if (dev->caps.bmme_flags & MLX4_BMME_FLAG_VSD_INIT2RTR) {
                                *(__be32 *)inbox->buf =
                                        cpu_to_be32(be32_to_cpu(*(__be32 *)inbox->buf) |
                                        MLX4_QP_OPTPAR_VLAN_STRIPPING);
                                qpc->param3 &= ~cpu_to_be32(MLX4_STRIP_VLAN);
                        } else {
                                struct mlx4_update_qp_params params = {.flags = 0};

                                err = mlx4_update_qp(dev, qpn, MLX4_UPDATE_QP_VSD, &params);
                                if (err)
                                        goto out;
                        }
                }

                /* preserve IF_COUNTER flag */
                qpc->pri_path.vlan_control &=
                        MLX4_CTRL_ETH_SRC_CHECK_IF_COUNTER;
                if (vp_oper->state.link_state == IFLA_VF_LINK_STATE_DISABLE &&
                    dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_UPDATE_QP) {
                        qpc->pri_path.vlan_control |=
                                MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
                                MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
                                MLX4_VLAN_CTRL_ETH_TX_BLOCK_UNTAGGED |
                                MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
                                MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED |
                                MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
                } else if (0 != vp_oper->state.default_vlan) {
                        if (vp_oper->state.vlan_proto == htons(ETH_P_8021AD)) {
                                /* vst QinQ should block untagged on TX,
                                 * but cvlan is in payload and phv is set so
                                 * hw see it as untagged. Block tagged instead.
                                 */
                                qpc->pri_path.vlan_control |=
                                        MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
                                        MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
                                        MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
                                        MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
                        } else { /* vst 802.1Q */
                                qpc->pri_path.vlan_control |=
                                        MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
                                        MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
                                        MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
                        }
                } else { /* priority tagged */
                        qpc->pri_path.vlan_control |=
                                MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
                                MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
                }

                qpc->pri_path.fvl_rx |= MLX4_FVL_RX_FORCE_ETH_VLAN;
                qpc->pri_path.vlan_index = vp_oper->vlan_idx;
                qpc->pri_path.fl |= MLX4_FL_ETH_HIDE_CQE_VLAN;
                if (vp_oper->state.vlan_proto == htons(ETH_P_8021AD))
                        qpc->pri_path.fl |= MLX4_FL_SV;
                else
                        qpc->pri_path.fl |= MLX4_FL_CV;
                qpc->pri_path.feup |= MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN;
                qpc->pri_path.sched_queue &= 0xC7;
                qpc->pri_path.sched_queue |= (vp_oper->state.default_qos) << 3;
                qpc->qos_vport = vp_oper->state.qos_vport;
        }
        if (vp_oper->state.spoofchk) {
                qpc->pri_path.feup |= MLX4_FSM_FORCE_ETH_SRC_MAC;
                qpc->pri_path.grh_mylmc = (0x80 & qpc->pri_path.grh_mylmc) + vp_oper->mac_idx;
        }
out:
        return err;
}
static int mpt_mask(struct mlx4_dev *dev)
{
        return dev->caps.num_mpts - 1;
}
static void *find_res(struct mlx4_dev *dev, u64 res_id,
                      enum mlx4_resource type)
{
        struct mlx4_priv *priv = mlx4_priv(dev);

        return res_tracker_lookup(&priv->mfunc.master.res_tracker.res_tree[type],
                                  res_id);
}
static int _get_res(struct mlx4_dev *dev, int slave, u64 res_id,
                    enum mlx4_resource type,
                    void *res, const char *func_name)
{
        struct res_common *r;
        int err = 0;

        spin_lock_irq(mlx4_tlock(dev));
        r = find_res(dev, res_id, type);
        if (!r) {
                err = -ENONET;
                goto exit;
        }

        if (r->state == RES_ANY_BUSY) {
                mlx4_warn(dev,
                          "%s(%d) trying to get resource %llx of type %s, but it's already taken by %s\n",
                          func_name, slave, res_id, mlx4_resource_type_to_str(type),
                          r->func_name);
                err = -EBUSY;
                goto exit;
        }

        if (r->owner != slave) {
                err = -EPERM;
                goto exit;
        }

        r->from_state = r->state;
        r->state = RES_ANY_BUSY;
        r->func_name = func_name;

        if (res)
                *((struct res_common **)res) = r;

exit:
        spin_unlock_irq(mlx4_tlock(dev));
        return err;
}

#define get_res(dev, slave, res_id, type, res) \
        _get_res((dev), (slave), (res_id), (type), (res), __func__)
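/*
 * _get_res() claims a tracked resource: it verifies ownership, parks the
 * entry in RES_ANY_BUSY and records __func__ so a competing claim is
 * rejected with -EBUSY and a warning naming the current holder; put_res()
 * below restores the pre-claim state.  Typical pattern (illustrative
 * sketch only, not a function in this file):
 *
 *      struct res_mpt *mpt;
 *      err = get_res(dev, slave, id, RES_MPT, &mpt);
 *      if (err)
 *              return err;
 *      ... use mpt while it is held RES_ANY_BUSY ...
 *      put_res(dev, slave, id, RES_MPT);
 */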
int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev,
                                    enum mlx4_resource type,
                                    u64 res_id, int *slave)
{
        struct res_common *r;
        int err = -ENOENT;
        int id = res_id;

        if (type == RES_QP)
                id &= 0x7fffff;
        spin_lock(mlx4_tlock(dev));

        r = find_res(dev, id, type);
        if (r) {
                *slave = r->owner;
                err = 0;
        }
        spin_unlock(mlx4_tlock(dev));

        return err;
}
static void put_res(struct mlx4_dev *dev, int slave, u64 res_id,
                    enum mlx4_resource type)
{
        struct res_common *r;

        spin_lock_irq(mlx4_tlock(dev));
        r = find_res(dev, res_id, type);
        if (r) {
                r->state = r->from_state;
                r->func_name = "";
        }
        spin_unlock_irq(mlx4_tlock(dev));
}
static int counter_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
                             u64 in_param, u64 *out_param, int port);

static int handle_existing_counter(struct mlx4_dev *dev, u8 slave, int port,
                                   int counter_index)
{
        struct res_common *r;
        struct res_counter *counter;
        int ret = 0;

        if (counter_index == MLX4_SINK_COUNTER_INDEX(dev))
                return ret;

        spin_lock_irq(mlx4_tlock(dev));
        r = find_res(dev, counter_index, RES_COUNTER);
        if (!r || r->owner != slave) {
                ret = -EINVAL;
        } else {
                counter = container_of(r, struct res_counter, com);
                if (!counter->port)
                        counter->port = port;
        }

        spin_unlock_irq(mlx4_tlock(dev));
        return ret;
}
static int handle_unexisting_counter(struct mlx4_dev *dev,
                                     struct mlx4_qp_context *qpc, u8 slave,
                                     int port)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct res_common *tmp;
        struct res_counter *counter;
        u64 counter_idx = MLX4_SINK_COUNTER_INDEX(dev);
        int err = 0;

        spin_lock_irq(mlx4_tlock(dev));
        list_for_each_entry(tmp,
                            &tracker->slave_list[slave].res_list[RES_COUNTER],
                            list) {
                counter = container_of(tmp, struct res_counter, com);
                if (port == counter->port) {
                        qpc->pri_path.counter_index = counter->com.res_id;
                        spin_unlock_irq(mlx4_tlock(dev));
                        return 0;
                }
        }
        spin_unlock_irq(mlx4_tlock(dev));

        /* No existing counter, need to allocate a new counter */
        err = counter_alloc_res(dev, slave, RES_OP_RESERVE, 0, 0, &counter_idx,
                                port);
        if (err == -ENOENT) {
                err = 0;
        } else if (err && err != -ENOSPC) {
                mlx4_err(dev, "%s: failed to create new counter for slave %d err %d\n",
                         __func__, slave, err);
        } else {
                qpc->pri_path.counter_index = counter_idx;
                mlx4_dbg(dev, "%s: alloc new counter for slave %d index %d\n",
                         __func__, slave, qpc->pri_path.counter_index);
                err = 0;
        }

        return err;
}
static int handle_counter(struct mlx4_dev *dev, struct mlx4_qp_context *qpc,
                          u8 slave, int port)
{
        if (qpc->pri_path.counter_index != MLX4_SINK_COUNTER_INDEX(dev))
                return handle_existing_counter(dev, slave, port,
                                               qpc->pri_path.counter_index);

        return handle_unexisting_counter(dev, qpc, slave, port);
}
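/*
 * One constructor per resource type follows: each kzalloc()s the
 * type-specific wrapper around res_common and seeds the initial state
 * the tracker's state machine starts from.
 */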
static struct res_common *alloc_qp_tr(int id)
{
        struct res_qp *ret;

        ret = kzalloc(sizeof(*ret), GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->com.state = RES_QP_RESERVED;
        ret->local_qpn = id;
        INIT_LIST_HEAD(&ret->mcg_list);
        spin_lock_init(&ret->mcg_spl);
        atomic_set(&ret->ref_count, 0);

        return &ret->com;
}

static struct res_common *alloc_mtt_tr(int id, int order)
{
        struct res_mtt *ret;

        ret = kzalloc(sizeof(*ret), GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->order = order;
        ret->com.state = RES_MTT_ALLOCATED;
        atomic_set(&ret->ref_count, 0);

        return &ret->com;
}

static struct res_common *alloc_mpt_tr(int id, int key)
{
        struct res_mpt *ret;

        ret = kzalloc(sizeof(*ret), GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->com.state = RES_MPT_RESERVED;
        ret->key = key;

        return &ret->com;
}

static struct res_common *alloc_eq_tr(int id)
{
        struct res_eq *ret;

        ret = kzalloc(sizeof(*ret), GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->com.state = RES_EQ_RESERVED;

        return &ret->com;
}

static struct res_common *alloc_cq_tr(int id)
{
        struct res_cq *ret;

        ret = kzalloc(sizeof(*ret), GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->com.state = RES_CQ_ALLOCATED;
        atomic_set(&ret->ref_count, 0);

        return &ret->com;
}

static struct res_common *alloc_srq_tr(int id)
{
        struct res_srq *ret;

        ret = kzalloc(sizeof(*ret), GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->com.state = RES_SRQ_ALLOCATED;
        atomic_set(&ret->ref_count, 0);

        return &ret->com;
}

static struct res_common *alloc_counter_tr(int id, int port)
{
        struct res_counter *ret;

        ret = kzalloc(sizeof(*ret), GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->com.state = RES_COUNTER_ALLOCATED;
        ret->port = port;

        return &ret->com;
}

static struct res_common *alloc_xrcdn_tr(int id)
{
        struct res_xrcdn *ret;

        ret = kzalloc(sizeof(*ret), GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->com.state = RES_XRCD_ALLOCATED;

        return &ret->com;
}

static struct res_common *alloc_fs_rule_tr(u64 id, int qpn)
{
        struct res_fs_rule *ret;

        ret = kzalloc(sizeof(*ret), GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->com.state = RES_FS_RULE_ALLOCATED;
        ret->qpn = qpn;
        return &ret->com;
}
static struct res_common *alloc_tr(u64 id, enum mlx4_resource type, int slave,
                                   int extra)
{
        struct res_common *ret;

        switch (type) {
        case RES_QP:
                ret = alloc_qp_tr(id);
                break;
        /* ... the remaining cases dispatch to the other alloc_*_tr()
         * constructors above, passing "extra" through as the order, key,
         * port or qpn where the type needs it ...
         */
        case RES_FS_RULE:
                ret = alloc_fs_rule_tr(id, extra);
                break;
        default:
                return NULL;
        }
        if (ret)
                ret->owner = slave;

        return ret;
}
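/*
 * mlx4_calc_vf_counters() aggregates the statistics of every counter a
 * VF owns on the given port into one mlx4_counter, snapshotting the id
 * list under the tracker lock before issuing the firmware queries.
 */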
int mlx4_calc_vf_counters(struct mlx4_dev *dev, int slave, int port,
                          struct mlx4_counter *data)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct res_common *tmp;
        struct res_counter *counter;
        int *counters_arr;
        int i = 0, err = 0;

        memset(data, 0, sizeof(*data));

        counters_arr = kmalloc_array(dev->caps.max_counters,
                                     sizeof(*counters_arr), GFP_KERNEL);
        if (!counters_arr)
                return -ENOMEM;

        spin_lock_irq(mlx4_tlock(dev));
        list_for_each_entry(tmp,
                            &tracker->slave_list[slave].res_list[RES_COUNTER],
                            list) {
                counter = container_of(tmp, struct res_counter, com);
                if (counter->port == port) {
                        counters_arr[i] = (int)tmp->res_id;
                        i++;
                }
        }
        spin_unlock_irq(mlx4_tlock(dev));
        counters_arr[i] = -1;

        i = 0;

        while (counters_arr[i] != -1) {
                err = mlx4_get_counter_stats(dev, counters_arr[i], data,
                                             0);
                if (err) {
                        memset(data, 0, sizeof(*data));
                        goto table_changed;
                }
                i++;
        }

table_changed:
        kfree(counters_arr);
        return 0;
}
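/*
 * add_res_range() is transactional: all wrappers are allocated up front,
 * then inserted under a single hold of the tracker lock, and the rb-tree
 * and list insertions are unwound if any id already exists.
 */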
static int add_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
                         enum mlx4_resource type, int extra)
{
        int i;
        int err;
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct res_common **res_arr;
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct rb_root *root = &tracker->res_tree[type];

        res_arr = kcalloc(count, sizeof(*res_arr), GFP_KERNEL);
        if (!res_arr)
                return -ENOMEM;

        for (i = 0; i < count; ++i) {
                res_arr[i] = alloc_tr(base + i, type, slave, extra);
                if (!res_arr[i]) {
                        for (--i; i >= 0; --i)
                                kfree(res_arr[i]);

                        kfree(res_arr);
                        return -ENOMEM;
                }
        }

        spin_lock_irq(mlx4_tlock(dev));
        for (i = 0; i < count; ++i) {
                if (find_res(dev, base + i, type)) {
                        err = -EEXIST;
                        goto undo;
                }
                err = res_tracker_insert(root, res_arr[i]);
                if (err)
                        goto undo;
                list_add_tail(&res_arr[i]->list,
                              &tracker->slave_list[slave].res_list[type]);
        }
        spin_unlock_irq(mlx4_tlock(dev));
        kfree(res_arr);

        return 0;

undo:
        for (--i; i >= 0; --i) {
                rb_erase(&res_arr[i]->node, root);
                list_del_init(&res_arr[i]->list);
        }

        spin_unlock_irq(mlx4_tlock(dev));

        for (i = 0; i < count; ++i)
                kfree(res_arr[i]);

        kfree(res_arr);

        return -ENOMEM;
}
static int remove_qp_ok(struct res_qp *res)
{
        if (res->com.state == RES_QP_BUSY || atomic_read(&res->ref_count) ||
            !list_empty(&res->mcg_list)) {
                pr_err("resource tracker: fail to remove qp, state %d, ref_count %d\n",
                       res->com.state, atomic_read(&res->ref_count));
                return -EBUSY;
        } else if (res->com.state != RES_QP_RESERVED) {
                return -EPERM;
        }

        return 0;
}

static int remove_mtt_ok(struct res_mtt *res, int order)
{
        if (res->com.state == RES_MTT_BUSY ||
            atomic_read(&res->ref_count)) {
                pr_devel("%s-%d: state %s, ref_count %d\n",
                         __func__, __LINE__,
                         mtt_states_str(res->com.state),
                         atomic_read(&res->ref_count));
                return -EBUSY;
        } else if (res->com.state != RES_MTT_ALLOCATED)
                return -EPERM;
        else if (res->order != order)
                return -EINVAL;

        return 0;
}

static int remove_mpt_ok(struct res_mpt *res)
{
        if (res->com.state == RES_MPT_BUSY)
                return -EBUSY;
        else if (res->com.state != RES_MPT_RESERVED)
                return -EPERM;

        return 0;
}

static int remove_eq_ok(struct res_eq *res)
{
        /* RES_EQ_* states alias the RES_MPT_* values tested upstream */
        if (res->com.state == RES_EQ_BUSY)
                return -EBUSY;
        else if (res->com.state != RES_EQ_RESERVED)
                return -EPERM;

        return 0;
}

static int remove_counter_ok(struct res_counter *res)
{
        if (res->com.state == RES_COUNTER_BUSY)
                return -EBUSY;
        else if (res->com.state != RES_COUNTER_ALLOCATED)
                return -EPERM;

        return 0;
}

static int remove_xrcdn_ok(struct res_xrcdn *res)
{
        if (res->com.state == RES_XRCD_BUSY)
                return -EBUSY;
        else if (res->com.state != RES_XRCD_ALLOCATED)
                return -EPERM;

        return 0;
}

static int remove_fs_rule_ok(struct res_fs_rule *res)
{
        if (res->com.state == RES_FS_RULE_BUSY)
                return -EBUSY;
        else if (res->com.state != RES_FS_RULE_ALLOCATED)
                return -EPERM;

        return 0;
}

static int remove_cq_ok(struct res_cq *res)
{
        if (res->com.state == RES_CQ_BUSY)
                return -EBUSY;
        else if (res->com.state != RES_CQ_ALLOCATED)
                return -EPERM;

        return 0;
}

static int remove_srq_ok(struct res_srq *res)
{
        if (res->com.state == RES_SRQ_BUSY)
                return -EBUSY;
        else if (res->com.state != RES_SRQ_ALLOCATED)
                return -EPERM;

        return 0;
}
static int remove_ok(struct res_common *res, enum mlx4_resource type, int extra)
{
        switch (type) {
        case RES_QP:
                return remove_qp_ok((struct res_qp *)res);
        /* ... the other types dispatch to their remove_*_ok() helper
         * above (RES_MTT passes "extra" through as the order) ...
         */
        case RES_MAC:
                return -EOPNOTSUPP;
        default:
                return -EINVAL;
        }
}
static int rem_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
                         enum mlx4_resource type, int extra)
{
        u64 i;
        int err;
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct res_common *r;

        spin_lock_irq(mlx4_tlock(dev));
        for (i = base; i < base + count; ++i) {
                r = res_tracker_lookup(&tracker->res_tree[type], i);
                if (!r) {
                        err = -ENOENT;
                        goto out;
                }
                if (r->owner != slave) {
                        err = -EPERM;
                        goto out;
                }
                err = remove_ok(r, type, extra);
                if (err)
                        goto out;
        }

        for (i = base; i < base + count; ++i) {
                r = res_tracker_lookup(&tracker->res_tree[type], i);
                rb_erase(&r->node, &tracker->res_tree[type]);
                list_del(&r->list);
                kfree(r);
        }
        err = 0;

out:
        spin_unlock_irq(mlx4_tlock(dev));

        return err;
}
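/*
 * The *_res_start_move_to() helpers below implement a small state
 * machine: they validate the requested transition, park the resource in
 * its *_BUSY state and record from_state/to_state; res_end_move()
 * commits the transition and res_abort_move() rolls it back.
 */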
static int qp_res_start_move_to(struct mlx4_dev *dev, int slave, int qpn,
                                enum res_qp_states state, struct res_qp **qp,
                                int alloc)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct res_qp *r;
        int err = 0;

        spin_lock_irq(mlx4_tlock(dev));
        r = res_tracker_lookup(&tracker->res_tree[RES_QP], qpn);
        if (!r)
                err = -ENOENT;
        else if (r->com.owner != slave)
                err = -EPERM;
        else {
                switch (state) {
                case RES_QP_BUSY:
                        mlx4_dbg(dev, "%s: failed RES_QP, 0x%llx\n",
                                 __func__, r->com.res_id);
                        err = -EBUSY;
                        break;

                case RES_QP_RESERVED:
                        if (r->com.state == RES_QP_MAPPED && !alloc)
                                break;

                        mlx4_dbg(dev, "failed RES_QP, 0x%llx\n", r->com.res_id);
                        err = -EINVAL;
                        break;

                case RES_QP_MAPPED:
                        if ((r->com.state == RES_QP_RESERVED && alloc) ||
                            r->com.state == RES_QP_HW)
                                break;
                        else {
                                mlx4_dbg(dev, "failed RES_QP, 0x%llx\n",
                                         r->com.res_id);
                                err = -EINVAL;
                        }

                        break;

                case RES_QP_HW:
                        if (r->com.state != RES_QP_MAPPED)
                                err = -EINVAL;
                        break;
                default:
                        err = -EINVAL;
                }

                if (!err) {
                        r->com.from_state = r->com.state;
                        r->com.to_state = state;
                        r->com.state = RES_QP_BUSY;
                        if (qp)
                                *qp = r;
                }
        }

        spin_unlock_irq(mlx4_tlock(dev));

        return err;
}
static int mr_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
                                enum res_mpt_states state, struct res_mpt **mpt)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct res_mpt *r;
        int err = 0;

        spin_lock_irq(mlx4_tlock(dev));
        r = res_tracker_lookup(&tracker->res_tree[RES_MPT], index);
        if (!r)
                err = -ENOENT;
        else if (r->com.owner != slave)
                err = -EPERM;
        else {
                switch (state) {
                case RES_MPT_BUSY:
                        err = -EINVAL;
                        break;

                case RES_MPT_RESERVED:
                        if (r->com.state != RES_MPT_MAPPED)
                                err = -EINVAL;
                        break;

                case RES_MPT_MAPPED:
                        if (r->com.state != RES_MPT_RESERVED &&
                            r->com.state != RES_MPT_HW)
                                err = -EINVAL;
                        break;

                case RES_MPT_HW:
                        if (r->com.state != RES_MPT_MAPPED)
                                err = -EINVAL;
                        break;
                default:
                        err = -EINVAL;
                }

                if (!err) {
                        r->com.from_state = r->com.state;
                        r->com.to_state = state;
                        r->com.state = RES_MPT_BUSY;
                        if (mpt)
                                *mpt = r;
                }
        }

        spin_unlock_irq(mlx4_tlock(dev));

        return err;
}
static int eq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
                                enum res_eq_states state, struct res_eq **eq)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct res_eq *r;
        int err = 0;

        spin_lock_irq(mlx4_tlock(dev));
        r = res_tracker_lookup(&tracker->res_tree[RES_EQ], index);
        if (!r)
                err = -ENOENT;
        else if (r->com.owner != slave)
                err = -EPERM;
        else {
                switch (state) {
                case RES_EQ_BUSY:
                        err = -EINVAL;
                        break;

                case RES_EQ_RESERVED:
                        if (r->com.state != RES_EQ_HW)
                                err = -EINVAL;
                        break;

                case RES_EQ_HW:
                        if (r->com.state != RES_EQ_RESERVED)
                                err = -EINVAL;
                        break;

                default:
                        err = -EINVAL;
                }

                if (!err) {
                        r->com.from_state = r->com.state;
                        r->com.to_state = state;
                        r->com.state = RES_EQ_BUSY;
                }
        }

        spin_unlock_irq(mlx4_tlock(dev));

        if (!err && eq)
                *eq = r;

        return err;
}
static int cq_res_start_move_to(struct mlx4_dev *dev, int slave, int cqn,
                                enum res_cq_states state, struct res_cq **cq)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct res_cq *r;
        int err;

        spin_lock_irq(mlx4_tlock(dev));
        r = res_tracker_lookup(&tracker->res_tree[RES_CQ], cqn);
        if (!r) {
                err = -ENOENT;
        } else if (r->com.owner != slave) {
                err = -EPERM;
        } else if (state == RES_CQ_ALLOCATED) {
                if (r->com.state != RES_CQ_HW)
                        err = -EINVAL;
                else if (atomic_read(&r->ref_count))
                        err = -EBUSY;
                else
                        err = 0;
        } else if (state != RES_CQ_HW || r->com.state != RES_CQ_ALLOCATED) {
                err = -EINVAL;
        } else {
                err = 0;
        }

        if (!err) {
                r->com.from_state = r->com.state;
                r->com.to_state = state;
                r->com.state = RES_CQ_BUSY;
                if (cq)
                        *cq = r;
        }

        spin_unlock_irq(mlx4_tlock(dev));

        return err;
}
static int srq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
                                 enum res_srq_states state, struct res_srq **srq)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct res_srq *r;
        int err = 0;

        spin_lock_irq(mlx4_tlock(dev));
        r = res_tracker_lookup(&tracker->res_tree[RES_SRQ], index);
        if (!r) {
                err = -ENOENT;
        } else if (r->com.owner != slave) {
                err = -EPERM;
        } else if (state == RES_SRQ_ALLOCATED) {
                if (r->com.state != RES_SRQ_HW)
                        err = -EINVAL;
                else if (atomic_read(&r->ref_count))
                        err = -EBUSY;
        } else if (state != RES_SRQ_HW || r->com.state != RES_SRQ_ALLOCATED) {
                err = -EINVAL;
        }

        if (!err) {
                r->com.from_state = r->com.state;
                r->com.to_state = state;
                r->com.state = RES_SRQ_BUSY;
                if (srq)
                        *srq = r;
        }

        spin_unlock_irq(mlx4_tlock(dev));

        return err;
}
static void res_abort_move(struct mlx4_dev *dev, int slave,
                           enum mlx4_resource type, int id)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct res_common *r;

        spin_lock_irq(mlx4_tlock(dev));
        r = res_tracker_lookup(&tracker->res_tree[type], id);
        if (r && (r->owner == slave))
                r->state = r->from_state;
        spin_unlock_irq(mlx4_tlock(dev));
}
static void res_end_move(struct mlx4_dev *dev, int slave,
                         enum mlx4_resource type, int id)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct res_common *r;

        spin_lock_irq(mlx4_tlock(dev));
        r = res_tracker_lookup(&tracker->res_tree[type], id);
        if (r && (r->owner == slave))
                r->state = r->to_state;
        spin_unlock_irq(mlx4_tlock(dev));
}
static int valid_reserved(struct mlx4_dev *dev, int slave, int qpn)
{
        return mlx4_is_qp_reserved(dev, qpn) &&
                (mlx4_is_master(dev) || mlx4_is_guest_proxy(dev, slave, qpn));
}
static int fw_reserved(struct mlx4_dev *dev, int qpn)
{
        return qpn < dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW];
}
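/*
 * QPs below reserved_qps_cnt[MLX4_QP_REGION_FW] have firmware-owned ICM,
 * so slaves must not allocate or free it.  Allocation is two-phase:
 * RES_OP_RESERVE grabs a range and enters it into the tracker, then
 * RES_OP_MAP_ICM moves RESERVED -> MAPPED and backs the QP with ICM.
 */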
static int qp_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
                        u64 in_param, u64 *out_param)
{
        int err;
        int count;
        int align;
        int base;
        int qpn;
        u8 flags;

        switch (op) {
        case RES_OP_RESERVE:
                count = get_param_l(&in_param) & 0xffffff;
                /* Turn off all unsupported QP allocation flags that the
                 * slave tries to set.
                 */
                flags = (get_param_l(&in_param) >> 24) & dev->caps.alloc_res_qp_mask;
                align = get_param_h(&in_param);
                err = mlx4_grant_resource(dev, slave, RES_QP, count, 0);
                if (err)
                        return err;

                err = __mlx4_qp_reserve_range(dev, count, align, &base, flags);
                if (err) {
                        mlx4_release_resource(dev, slave, RES_QP, count, 0);
                        return err;
                }

                err = add_res_range(dev, slave, base, count, RES_QP, 0);
                if (err) {
                        mlx4_release_resource(dev, slave, RES_QP, count, 0);
                        __mlx4_qp_release_range(dev, base, count);
                        return err;
                }
                set_param_l(out_param, base);
                break;
        case RES_OP_MAP_ICM:
                qpn = get_param_l(&in_param) & 0x7fffff;
                if (valid_reserved(dev, slave, qpn)) {
                        err = add_res_range(dev, slave, qpn, 1, RES_QP, 0);
                        if (err)
                                return err;
                }

                err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED,
                                           NULL, 1);
                if (err)
                        return err;

                if (!fw_reserved(dev, qpn)) {
                        err = __mlx4_qp_alloc_icm(dev, qpn);
                        if (err) {
                                res_abort_move(dev, slave, RES_QP, qpn);
                                return err;
                        }
                }

                res_end_move(dev, slave, RES_QP, qpn);
                break;

        default:
                err = -EINVAL;
                break;
        }
        return err;
}
static int mtt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
                         u64 in_param, u64 *out_param)
{
        int err = -EINVAL;
        int base;
        int order;

        if (op != RES_OP_RESERVE_AND_MAP)
                return err;

        order = get_param_l(&in_param);

        err = mlx4_grant_resource(dev, slave, RES_MTT, 1 << order, 0);
        if (err)
                return err;

        base = __mlx4_alloc_mtt_range(dev, order);
        if (base == -1) {
                mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
                return -ENOMEM;
        }

        err = add_res_range(dev, slave, base, 1, RES_MTT, order);
        if (err) {
                mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
                __mlx4_free_mtt_range(dev, base, order);
        } else {
                set_param_l(out_param, base);
        }

        return err;
}
static int mpt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
                         u64 in_param, u64 *out_param)
{
        int err = -EINVAL;
        int index;
        int id;
        struct res_mpt *mpt;

        switch (op) {
        case RES_OP_RESERVE:
                err = mlx4_grant_resource(dev, slave, RES_MPT, 1, 0);
                if (err)
                        break;

                index = __mlx4_mpt_reserve(dev);
                if (index == -1) {
                        mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
                        break;
                }
                id = index & mpt_mask(dev);

                err = add_res_range(dev, slave, id, 1, RES_MPT, index);
                if (err) {
                        mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
                        __mlx4_mpt_release(dev, index);
                        break;
                }
                set_param_l(out_param, index);
                break;
        case RES_OP_MAP_ICM:
                index = get_param_l(&in_param);
                id = index & mpt_mask(dev);
                err = mr_res_start_move_to(dev, slave, id,
                                           RES_MPT_MAPPED, &mpt);
                if (err)
                        return err;

                err = __mlx4_mpt_alloc_icm(dev, mpt->key);
                if (err) {
                        res_abort_move(dev, slave, RES_MPT, id);
                        return err;
                }

                res_end_move(dev, slave, RES_MPT, id);
                break;
        }
        return err;
}
static int cq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
                        u64 in_param, u64 *out_param)
{
        int cqn;
        int err;

        switch (op) {
        case RES_OP_RESERVE_AND_MAP:
                err = mlx4_grant_resource(dev, slave, RES_CQ, 1, 0);
                if (err)
                        break;

                err = __mlx4_cq_alloc_icm(dev, &cqn);
                if (err) {
                        mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
                        break;
                }

                err = add_res_range(dev, slave, cqn, 1, RES_CQ, 0);
                if (err) {
                        mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
                        __mlx4_cq_free_icm(dev, cqn);
                        break;
                }

                set_param_l(out_param, cqn);
                break;

        default:
                err = -EINVAL;
        }

        return err;
}
static int srq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
                         u64 in_param, u64 *out_param)
{
        int srqn;
        int err;

        switch (op) {
        case RES_OP_RESERVE_AND_MAP:
                err = mlx4_grant_resource(dev, slave, RES_SRQ, 1, 0);
                if (err)
                        break;

                err = __mlx4_srq_alloc_icm(dev, &srqn);
                if (err) {
                        mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
                        break;
                }

                err = add_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
                if (err) {
                        mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
                        __mlx4_srq_free_icm(dev, srqn);
                        break;
                }

                set_param_l(out_param, srqn);
                break;

        default:
                err = -EINVAL;
        }

        return err;
}
static int mac_find_smac_ix_in_slave(struct mlx4_dev *dev, int slave, int port,
                                     u8 smac_index, u64 *mac)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct list_head *mac_list =
                &tracker->slave_list[slave].res_list[RES_MAC];
        struct mac_res *res, *tmp;

        list_for_each_entry_safe(res, tmp, mac_list, list) {
                if (res->smac_index == smac_index && res->port == (u8) port) {
                        *mac = res->mac;
                        return 0;
                }
        }
        return -ENOENT;
}
static int mac_add_to_slave(struct mlx4_dev *dev, int slave, u64 mac, int port, u8 smac_index)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct list_head *mac_list =
                &tracker->slave_list[slave].res_list[RES_MAC];
        struct mac_res *res, *tmp;

        list_for_each_entry_safe(res, tmp, mac_list, list) {
                if (res->mac == mac && res->port == (u8) port) {
                        /* mac found. update ref count */
                        ++res->ref_count;
                        return 0;
                }
        }

        if (mlx4_grant_resource(dev, slave, RES_MAC, 1, port))
                return -EINVAL;
        res = kzalloc(sizeof(*res), GFP_KERNEL);
        if (!res) {
                mlx4_release_resource(dev, slave, RES_MAC, 1, port);
                return -ENOMEM;
        }
        res->mac = mac;
        res->port = (u8) port;
        res->smac_index = smac_index;
        res->ref_count = 1;
        list_add_tail(&res->list,
                      &tracker->slave_list[slave].res_list[RES_MAC]);
        return 0;
}
static void mac_del_from_slave(struct mlx4_dev *dev, int slave, u64 mac,
                               int port)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct list_head *mac_list =
                &tracker->slave_list[slave].res_list[RES_MAC];
        struct mac_res *res, *tmp;

        list_for_each_entry_safe(res, tmp, mac_list, list) {
                if (res->mac == mac && res->port == (u8) port) {
                        if (!--res->ref_count) {
                                list_del(&res->list);
                                mlx4_release_resource(dev, slave, RES_MAC, 1, port);
                                kfree(res);
                        }
                        break;
                }
        }
}
static void rem_slave_macs(struct mlx4_dev *dev, int slave)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct list_head *mac_list =
                &tracker->slave_list[slave].res_list[RES_MAC];
        struct mac_res *res, *tmp;
        int i;

        list_for_each_entry_safe(res, tmp, mac_list, list) {
                list_del(&res->list);
                /* dereference the mac the num times the slave referenced it */
                for (i = 0; i < res->ref_count; i++)
                        __mlx4_unregister_mac(dev, res->port, res->mac);
                mlx4_release_resource(dev, slave, RES_MAC, 1, res->port);
                kfree(res);
        }
}
static int mac_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
                         u64 in_param, u64 *out_param, int in_port)
{
        int err = -EINVAL;
        int port;
        u64 mac;
        u8 smac_index;

        if (op != RES_OP_RESERVE_AND_MAP)
                return err;

        port = !in_port ? get_param_l(out_param) : in_port;
        port = mlx4_slave_convert_port(
                        dev, slave, port);

        if (port < 0)
                return -EINVAL;
        mac = in_param;

        err = __mlx4_register_mac(dev, port, mac);
        if (err >= 0) {
                smac_index = err;
                set_param_l(out_param, err);
                err = 0;
        }

        if (!err) {
                err = mac_add_to_slave(dev, slave, mac, port, smac_index);
                if (err)
                        __mlx4_unregister_mac(dev, port, mac);
        }
        return err;
}
static int vlan_add_to_slave(struct mlx4_dev *dev, int slave, u16 vlan,
                             int port, int vlan_index)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct list_head *vlan_list =
                &tracker->slave_list[slave].res_list[RES_VLAN];
        struct vlan_res *res, *tmp;

        list_for_each_entry_safe(res, tmp, vlan_list, list) {
                if (res->vlan == vlan && res->port == (u8) port) {
                        /* vlan found. update ref count */
                        ++res->ref_count;
                        return 0;
                }
        }

        if (mlx4_grant_resource(dev, slave, RES_VLAN, 1, port))
                return -EINVAL;
        res = kzalloc(sizeof(*res), GFP_KERNEL);
        if (!res) {
                mlx4_release_resource(dev, slave, RES_VLAN, 1, port);
                return -ENOMEM;
        }
        res->vlan = vlan;
        res->port = (u8) port;
        res->vlan_index = vlan_index;
        res->ref_count = 1;
        list_add_tail(&res->list,
                      &tracker->slave_list[slave].res_list[RES_VLAN]);
        return 0;
}
static void vlan_del_from_slave(struct mlx4_dev *dev, int slave, u16 vlan,
                                int port)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct list_head *vlan_list =
                &tracker->slave_list[slave].res_list[RES_VLAN];
        struct vlan_res *res, *tmp;

        list_for_each_entry_safe(res, tmp, vlan_list, list) {
                if (res->vlan == vlan && res->port == (u8) port) {
                        if (!--res->ref_count) {
                                list_del(&res->list);
                                mlx4_release_resource(dev, slave, RES_VLAN,
                                                      1, port);
                                kfree(res);
                        }
                        break;
                }
        }
}
static void rem_slave_vlans(struct mlx4_dev *dev, int slave)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct list_head *vlan_list =
                &tracker->slave_list[slave].res_list[RES_VLAN];
        struct vlan_res *res, *tmp;
        int i;

        list_for_each_entry_safe(res, tmp, vlan_list, list) {
                list_del(&res->list);
                /* dereference the vlan the num times the slave referenced it */
                for (i = 0; i < res->ref_count; i++)
                        __mlx4_unregister_vlan(dev, res->port, res->vlan);
                mlx4_release_resource(dev, slave, RES_VLAN, 1, res->port);
                kfree(res);
        }
}
static int vlan_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
                          u64 in_param, u64 *out_param, int in_port)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
        int err;
        u16 vlan;
        int vlan_index;
        int port;

        port = !in_port ? get_param_l(out_param) : in_port;

        if (!port || op != RES_OP_RESERVE_AND_MAP)
                return -EINVAL;

        port = mlx4_slave_convert_port(
                        dev, slave, port);

        if (port < 0)
                return -EINVAL;
        /* upstream kernels had to allow the old vlan API from slaves */
        if (!in_port && port > 0 && port <= dev->caps.num_ports) {
                slave_state[slave].old_vlan_api = true;
                return 0;
        }

        vlan = (u16) in_param;

        err = __mlx4_register_vlan(dev, port, vlan, &vlan_index);
        if (!err) {
                set_param_l(out_param, (u32) vlan_index);
                err = vlan_add_to_slave(dev, slave, vlan, port, vlan_index);
                if (err)
                        __mlx4_unregister_vlan(dev, port, vlan);
        }
        return err;
}
static int counter_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
                             u64 in_param, u64 *out_param, int port)
{
        u32 index;
        int err;

        if (op != RES_OP_RESERVE)
                return -EINVAL;

        err = mlx4_grant_resource(dev, slave, RES_COUNTER, 1, 0);
        if (err)
                return err;

        err = __mlx4_counter_alloc(dev, &index);
        if (err) {
                mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
                return err;
        }

        err = add_res_range(dev, slave, index, 1, RES_COUNTER, port);
        if (err) {
                __mlx4_counter_free(dev, index);
                mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
        } else {
                set_param_l(out_param, index);
        }

        return err;
}
static int xrcdn_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
                           u64 in_param, u64 *out_param)
{
        u32 xrcdn;
        int err;

        if (op != RES_OP_RESERVE)
                return -EINVAL;

        err = __mlx4_xrcd_alloc(dev, &xrcdn);
        if (err)
                return err;

        err = add_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
        if (err)
                __mlx4_xrcd_free(dev, xrcdn);
        else
                set_param_l(out_param, xrcdn);

        return err;
}
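/*
 * Command dispatcher for MLX4_CMD_ALLOC_RES issued by slaves: the low
 * byte of in_modifier selects the resource type, bits 8-15 carry the
 * port for MAC/VLAN, and op_modifier selects RESERVE/MAP semantics.
 */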
int mlx4_ALLOC_RES_wrapper(struct mlx4_dev *dev, int slave,
                           struct mlx4_vhcr *vhcr,
                           struct mlx4_cmd_mailbox *inbox,
                           struct mlx4_cmd_mailbox *outbox,
                           struct mlx4_cmd_info *cmd)
{
        int err;
        int alop = vhcr->op_modifier;

        switch (vhcr->in_modifier & 0xFF) {
        case RES_QP:
                err = qp_alloc_res(dev, slave, vhcr->op_modifier, alop,
                                   vhcr->in_param, &vhcr->out_param);
                break;

        case RES_MTT:
                err = mtt_alloc_res(dev, slave, vhcr->op_modifier, alop,
                                    vhcr->in_param, &vhcr->out_param);
                break;

        case RES_MPT:
                err = mpt_alloc_res(dev, slave, vhcr->op_modifier, alop,
                                    vhcr->in_param, &vhcr->out_param);
                break;

        case RES_CQ:
                err = cq_alloc_res(dev, slave, vhcr->op_modifier, alop,
                                   vhcr->in_param, &vhcr->out_param);
                break;

        case RES_SRQ:
                err = srq_alloc_res(dev, slave, vhcr->op_modifier, alop,
                                    vhcr->in_param, &vhcr->out_param);
                break;

        case RES_MAC:
                err = mac_alloc_res(dev, slave, vhcr->op_modifier, alop,
                                    vhcr->in_param, &vhcr->out_param,
                                    (vhcr->in_modifier >> 8) & 0xFF);
                break;

        case RES_VLAN:
                err = vlan_alloc_res(dev, slave, vhcr->op_modifier, alop,
                                     vhcr->in_param, &vhcr->out_param,
                                     (vhcr->in_modifier >> 8) & 0xFF);
                break;

        case RES_COUNTER:
                err = counter_alloc_res(dev, slave, vhcr->op_modifier, alop,
                                        vhcr->in_param, &vhcr->out_param, 0);
                break;

        case RES_XRCD:
                err = xrcdn_alloc_res(dev, slave, vhcr->op_modifier, alop,
                                      vhcr->in_param, &vhcr->out_param);
                break;

        default:
                err = -EINVAL;
                break;
        }

        return err;
}
static int qp_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
                       u64 in_param)
{
        int err;
        int count;
        int base;
        int qpn;

        switch (op) {
        case RES_OP_RESERVE:
                base = get_param_l(&in_param) & 0x7fffff;
                count = get_param_h(&in_param);
                err = rem_res_range(dev, slave, base, count, RES_QP, 0);
                if (err)
                        break;
                mlx4_release_resource(dev, slave, RES_QP, count, 0);
                __mlx4_qp_release_range(dev, base, count);
                break;
        case RES_OP_MAP_ICM:
                qpn = get_param_l(&in_param) & 0x7fffff;
                err = qp_res_start_move_to(dev, slave, qpn, RES_QP_RESERVED,
                                           NULL, 0);
                if (err)
                        return err;

                if (!fw_reserved(dev, qpn))
                        __mlx4_qp_free_icm(dev, qpn);

                res_end_move(dev, slave, RES_QP, qpn);

                if (valid_reserved(dev, slave, qpn))
                        err = rem_res_range(dev, slave, qpn, 1, RES_QP, 0);
                break;
        default:
                err = -EINVAL;
                break;
        }
        return err;
}
static int mtt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
                        u64 in_param, u64 *out_param)
{
        int err = -EINVAL;
        int base;
        int order;

        if (op != RES_OP_RESERVE_AND_MAP)
                return err;

        base = get_param_l(&in_param);
        order = get_param_h(&in_param);
        err = rem_res_range(dev, slave, base, 1, RES_MTT, order);
        if (!err) {
                mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
                __mlx4_free_mtt_range(dev, base, order);
        }
        return err;
}
static int mpt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
                        u64 in_param)
{
        int err = -EINVAL;
        int index;
        int id;
        struct res_mpt *mpt;

        switch (op) {
        case RES_OP_RESERVE:
                index = get_param_l(&in_param);
                id = index & mpt_mask(dev);
                err = get_res(dev, slave, id, RES_MPT, &mpt);
                if (err)
                        break;
                index = mpt->key;
                put_res(dev, slave, id, RES_MPT);

                err = rem_res_range(dev, slave, id, 1, RES_MPT, 0);
                if (err)
                        break;
                mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
                __mlx4_mpt_release(dev, index);
                break;
        case RES_OP_MAP_ICM:
                index = get_param_l(&in_param);
                id = index & mpt_mask(dev);
                err = mr_res_start_move_to(dev, slave, id,
                                           RES_MPT_RESERVED, &mpt);
                if (err)
                        return err;

                __mlx4_mpt_free_icm(dev, mpt->key);
                res_end_move(dev, slave, RES_MPT, id);
                break;
        default:
                err = -EINVAL;
                break;
        }
        return err;
}
static int cq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
                       u64 in_param, u64 *out_param)
{
        int cqn;
        int err;

        switch (op) {
        case RES_OP_RESERVE_AND_MAP:
                cqn = get_param_l(&in_param);
                err = rem_res_range(dev, slave, cqn, 1, RES_CQ, 0);
                if (err)
                        break;

                mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
                __mlx4_cq_free_icm(dev, cqn);
                break;

        default:
                err = -EINVAL;
                break;
        }

        return err;
}
static int srq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
                        u64 in_param, u64 *out_param)
{
        int srqn;
        int err;

        switch (op) {
        case RES_OP_RESERVE_AND_MAP:
                srqn = get_param_l(&in_param);
                err = rem_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
                if (err)
                        break;

                mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
                __mlx4_srq_free_icm(dev, srqn);
                break;

        default:
                err = -EINVAL;
                break;
        }

        return err;
}
static int mac_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
                        u64 in_param, u64 *out_param, int in_port)
{
        int err = 0;
        int port;

        switch (op) {
        case RES_OP_RESERVE_AND_MAP:
                port = !in_port ? get_param_l(out_param) : in_port;
                port = mlx4_slave_convert_port(
                                dev, slave, port);

                if (port < 0)
                        return -EINVAL;
                mac_del_from_slave(dev, slave, in_param, port);
                __mlx4_unregister_mac(dev, port, in_param);
                break;
        default:
                err = -EINVAL;
                break;
        }

        return err;
}
static int vlan_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
                         u64 in_param, u64 *out_param, int port)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
        int err = 0;

        port = mlx4_slave_convert_port(
                        dev, slave, port);

        if (port < 0)
                return -EINVAL;
        switch (op) {
        case RES_OP_RESERVE_AND_MAP:
                if (slave_state[slave].old_vlan_api)
                        return 0;
                if (!port)
                        return -EINVAL;
                vlan_del_from_slave(dev, slave, in_param, port);
                __mlx4_unregister_vlan(dev, port, in_param);
                break;
        default:
                err = -EINVAL;
                break;
        }

        return err;
}
2566 static int counter_free_res(struct mlx4_dev *dev, int slave, int op, int cmd, in counter_free_res() argument
2573 return -EINVAL; in counter_free_res()
2576 if (index == MLX4_SINK_COUNTER_INDEX(dev)) in counter_free_res()
2579 err = rem_res_range(dev, slave, index, 1, RES_COUNTER, 0); in counter_free_res()
2583 __mlx4_counter_free(dev, index); in counter_free_res()
2584 mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0); in counter_free_res()
2589 static int xrcdn_free_res(struct mlx4_dev *dev, int slave, int op, int cmd, in xrcdn_free_res() argument
2596 return -EINVAL; in xrcdn_free_res()
2599 err = rem_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0); in xrcdn_free_res()
2603 __mlx4_xrcd_free(dev, xrcdn); in xrcdn_free_res()
2608 int mlx4_FREE_RES_wrapper(struct mlx4_dev *dev, int slave, in mlx4_FREE_RES_wrapper() argument
2614 int err = -EINVAL; in mlx4_FREE_RES_wrapper()
2615 int alop = vhcr->op_modifier; in mlx4_FREE_RES_wrapper()
2617 switch (vhcr->in_modifier & 0xFF) { in mlx4_FREE_RES_wrapper()
2619 err = qp_free_res(dev, slave, vhcr->op_modifier, alop, in mlx4_FREE_RES_wrapper()
2620 vhcr->in_param); in mlx4_FREE_RES_wrapper()
2624 err = mtt_free_res(dev, slave, vhcr->op_modifier, alop, in mlx4_FREE_RES_wrapper()
2625 vhcr->in_param, &vhcr->out_param); in mlx4_FREE_RES_wrapper()
2629 err = mpt_free_res(dev, slave, vhcr->op_modifier, alop, in mlx4_FREE_RES_wrapper()
2630 vhcr->in_param); in mlx4_FREE_RES_wrapper()
2634 err = cq_free_res(dev, slave, vhcr->op_modifier, alop, in mlx4_FREE_RES_wrapper()
2635 vhcr->in_param, &vhcr->out_param); in mlx4_FREE_RES_wrapper()
2639 err = srq_free_res(dev, slave, vhcr->op_modifier, alop, in mlx4_FREE_RES_wrapper()
2640 vhcr->in_param, &vhcr->out_param); in mlx4_FREE_RES_wrapper()
2644 err = mac_free_res(dev, slave, vhcr->op_modifier, alop, in mlx4_FREE_RES_wrapper()
2645 vhcr->in_param, &vhcr->out_param, in mlx4_FREE_RES_wrapper()
2646 (vhcr->in_modifier >> 8) & 0xFF); in mlx4_FREE_RES_wrapper()
2650 err = vlan_free_res(dev, slave, vhcr->op_modifier, alop, in mlx4_FREE_RES_wrapper()
2651 vhcr->in_param, &vhcr->out_param, in mlx4_FREE_RES_wrapper()
2652 (vhcr->in_modifier >> 8) & 0xFF); in mlx4_FREE_RES_wrapper()
2656 err = counter_free_res(dev, slave, vhcr->op_modifier, alop, in mlx4_FREE_RES_wrapper()
2657 vhcr->in_param, &vhcr->out_param); in mlx4_FREE_RES_wrapper()
2661 err = xrcdn_free_res(dev, slave, vhcr->op_modifier, alop, in mlx4_FREE_RES_wrapper()
2662 vhcr->in_param, &vhcr->out_param); in mlx4_FREE_RES_wrapper()
2674 return (be32_to_cpu(mpt->flags) >> 9) & 1; in mr_phys_mpt()
2679 return (int)be64_to_cpu(mpt->mtt_addr) & 0xfffffff8; in mr_get_mtt_addr()
2684 return be32_to_cpu(mpt->mtt_sz); in mr_get_mtt_size()
2689 return be32_to_cpu(mpt->pd_flags) & 0x00ffffff; in mr_get_pd()
2694 return be32_to_cpu(mpt->pd_flags) & MLX4_MPT_PD_FLAG_FAST_REG; in mr_is_fmr()
2699 return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_BIND_ENABLE; in mr_is_bind_enabled()
2704 return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_REGION; in mr_is_region()
2709 return be32_to_cpu(qpc->mtt_base_addr_l) & 0xfffffff8; in qp_get_mtt_addr()
2714 return be32_to_cpu(srqc->mtt_base_addr_l) & 0xfffffff8; in srq_get_mtt_addr()
2719 int page_shift = (qpc->log_page_size & 0x3f) + 12; in qp_get_mtt_size()
2720 int log_sq_size = (qpc->sq_size_stride >> 3) & 0xf; in qp_get_mtt_size()
2721 int log_sq_stride = qpc->sq_size_stride & 7; in qp_get_mtt_size()
2722 int log_rq_size = (qpc->rq_size_stride >> 3) & 0xf; in qp_get_mtt_size()
2723 int log_rq_stride = qpc->rq_size_stride & 7; in qp_get_mtt_size()
2724 int srq = (be32_to_cpu(qpc->srqn) >> 24) & 1; in qp_get_mtt_size()
2725 int rss = (be32_to_cpu(qpc->flags) >> 13) & 1; in qp_get_mtt_size()
2726 u32 ts = (be32_to_cpu(qpc->flags) >> 16) & 0xff; in qp_get_mtt_size()
2732 int page_offset = (be32_to_cpu(qpc->params2) >> 6) & 0x3f; in qp_get_mtt_size()
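/* Sizing convention (illustrative values, not taken from the driver): the
 * stride fields store log2(WQE stride) - 4, i.e. a 16-byte base unit.
 * E.g. 64 SQ WQEs (log_sq_size = 6) of 64 bytes (stride field = 2) occupy
 * 1 << (6 + 2 + 4) = 4096 bytes, a single page when page_shift is 12.
 */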
2744 static int check_mtt_range(struct mlx4_dev *dev, int slave, int start, in check_mtt_range() argument
2747 int res_start = mtt->com.res_id; in check_mtt_range()
2748 int res_size = (1 << mtt->order); in check_mtt_range()
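/* The tracked reservation spans [res_id, res_id + 2^order); a range that
 * falls outside that window is rejected with -EPERM below.
 */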
2751 return -EPERM; in check_mtt_range()
2755 int mlx4_SW2HW_MPT_wrapper(struct mlx4_dev *dev, int slave, in mlx4_SW2HW_MPT_wrapper() argument
2762 int index = vhcr->in_modifier; in mlx4_SW2HW_MPT_wrapper()
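/* mr_get_mtt_addr() masks off the low 3 bits (the MTT base address is
 * 8-byte aligned), and dividing the byte address by mtt_entry_sz turns it
 * into the index of the region's first MTT entry.
 */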
2765 int mtt_base = mr_get_mtt_addr(inbox->buf) / dev->caps.mtt_entry_sz; in mlx4_SW2HW_MPT_wrapper()
2771 id = index & mpt_mask(dev); in mlx4_SW2HW_MPT_wrapper()
2772 err = mr_res_start_move_to(dev, slave, id, RES_MPT_HW, &mpt); in mlx4_SW2HW_MPT_wrapper()
2777 if (!mr_is_region(inbox->buf)) { in mlx4_SW2HW_MPT_wrapper()
2778 err = -EPERM; in mlx4_SW2HW_MPT_wrapper()
2782 /* Make sure that the PD bits related to the slave id are zeros. */ in mlx4_SW2HW_MPT_wrapper()
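/* The upper PD bits are assumed to carry (slave id + 1), with 0 denoting
 * the PF; hence the --pd_slave comparison below.
 */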
2783 pd = mr_get_pd(inbox->buf); in mlx4_SW2HW_MPT_wrapper()
2785 if (pd_slave != 0 && --pd_slave != slave) { in mlx4_SW2HW_MPT_wrapper()
2786 err = -EPERM; in mlx4_SW2HW_MPT_wrapper()
2790 if (mr_is_fmr(inbox->buf)) { in mlx4_SW2HW_MPT_wrapper()
2791 /* FMR and Bind Enable are forbidden in slave devices. */ in mlx4_SW2HW_MPT_wrapper()
2792 if (mr_is_bind_enabled(inbox->buf)) { in mlx4_SW2HW_MPT_wrapper()
2793 err = -EPERM; in mlx4_SW2HW_MPT_wrapper()
2797 if (!mr_is_region(inbox->buf)) { in mlx4_SW2HW_MPT_wrapper()
2798 err = -EPERM; in mlx4_SW2HW_MPT_wrapper()
2803 phys = mr_phys_mpt(inbox->buf); in mlx4_SW2HW_MPT_wrapper()
2805 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt); in mlx4_SW2HW_MPT_wrapper()
2809 err = check_mtt_range(dev, slave, mtt_base, in mlx4_SW2HW_MPT_wrapper()
2810 mr_get_mtt_size(inbox->buf), mtt); in mlx4_SW2HW_MPT_wrapper()
2814 mpt->mtt = mtt; in mlx4_SW2HW_MPT_wrapper()
2817 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); in mlx4_SW2HW_MPT_wrapper()
2822 atomic_inc(&mtt->ref_count); in mlx4_SW2HW_MPT_wrapper()
2823 put_res(dev, slave, mtt->com.res_id, RES_MTT); in mlx4_SW2HW_MPT_wrapper()
2826 res_end_move(dev, slave, RES_MPT, id); in mlx4_SW2HW_MPT_wrapper()
2831 put_res(dev, slave, mtt->com.res_id, RES_MTT); in mlx4_SW2HW_MPT_wrapper()
2833 res_abort_move(dev, slave, RES_MPT, id); in mlx4_SW2HW_MPT_wrapper()
2838 int mlx4_HW2SW_MPT_wrapper(struct mlx4_dev *dev, int slave, in mlx4_HW2SW_MPT_wrapper() argument
2845 int index = vhcr->in_modifier; in mlx4_HW2SW_MPT_wrapper()
2849 id = index & mpt_mask(dev); in mlx4_HW2SW_MPT_wrapper()
2850 err = mr_res_start_move_to(dev, slave, id, RES_MPT_MAPPED, &mpt); in mlx4_HW2SW_MPT_wrapper()
2854 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); in mlx4_HW2SW_MPT_wrapper()
2858 if (mpt->mtt) in mlx4_HW2SW_MPT_wrapper()
2859 atomic_dec(&mpt->mtt->ref_count); in mlx4_HW2SW_MPT_wrapper()
2861 res_end_move(dev, slave, RES_MPT, id); in mlx4_HW2SW_MPT_wrapper()
2865 res_abort_move(dev, slave, RES_MPT, id); in mlx4_HW2SW_MPT_wrapper()
2870 int mlx4_QUERY_MPT_wrapper(struct mlx4_dev *dev, int slave, in mlx4_QUERY_MPT_wrapper() argument
2877 int index = vhcr->in_modifier; in mlx4_QUERY_MPT_wrapper()
2881 id = index & mpt_mask(dev); in mlx4_QUERY_MPT_wrapper()
2882 err = get_res(dev, slave, id, RES_MPT, &mpt); in mlx4_QUERY_MPT_wrapper()
2886 if (mpt->com.from_state == RES_MPT_MAPPED) { in mlx4_QUERY_MPT_wrapper()
2896 &mlx4_priv(dev)->mr_table.dmpt_table, in mlx4_QUERY_MPT_wrapper()
2897 mpt->key, NULL); in mlx4_QUERY_MPT_wrapper()
2899 if (NULL == mpt_entry || NULL == outbox->buf) { in mlx4_QUERY_MPT_wrapper()
2900 err = -EINVAL; in mlx4_QUERY_MPT_wrapper()
2904 memcpy(outbox->buf, mpt_entry, sizeof(*mpt_entry)); in mlx4_QUERY_MPT_wrapper()
2907 } else if (mpt->com.from_state == RES_MPT_HW) { in mlx4_QUERY_MPT_wrapper()
2908 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); in mlx4_QUERY_MPT_wrapper()
2910 err = -EBUSY; in mlx4_QUERY_MPT_wrapper()
2916 put_res(dev, slave, id, RES_MPT); in mlx4_QUERY_MPT_wrapper()
2922 return be32_to_cpu(qpc->cqn_recv) & 0xffffff; in qp_get_rcqn()
2927 return be32_to_cpu(qpc->cqn_send) & 0xffffff; in qp_get_scqn()
2932 return be32_to_cpu(qpc->srqn) & 0x1ffffff; in qp_get_srqn()
2935 static void adjust_proxy_tun_qkey(struct mlx4_dev *dev, struct mlx4_vhcr *vhcr, in adjust_proxy_tun_qkey() argument
2938 u32 qpn = vhcr->in_modifier & 0xffffff; in adjust_proxy_tun_qkey()
2941 if (mlx4_get_parav_qkey(dev, qpn, &qkey)) in adjust_proxy_tun_qkey()
2945 context->qkey = cpu_to_be32(qkey); in adjust_proxy_tun_qkey()
2948 static int adjust_qp_sched_queue(struct mlx4_dev *dev, int slave,
2952 int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave, in mlx4_RST2INIT_QP_wrapper() argument
2959 int qpn = vhcr->in_modifier & 0x7fffff; in mlx4_RST2INIT_QP_wrapper()
2962 struct mlx4_qp_context *qpc = inbox->buf + 8; in mlx4_RST2INIT_QP_wrapper()
2963 int mtt_base = qp_get_mtt_addr(qpc) / dev->caps.mtt_entry_sz; in mlx4_RST2INIT_QP_wrapper()
2972 int local_qpn = vhcr->in_modifier & 0xffffff; in mlx4_RST2INIT_QP_wrapper()
2974 err = adjust_qp_sched_queue(dev, slave, qpc, inbox); in mlx4_RST2INIT_QP_wrapper()
2978 err = qp_res_start_move_to(dev, slave, qpn, RES_QP_HW, &qp, 0); in mlx4_RST2INIT_QP_wrapper()
2981 qp->local_qpn = local_qpn; in mlx4_RST2INIT_QP_wrapper()
2982 qp->sched_queue = 0; in mlx4_RST2INIT_QP_wrapper()
2983 qp->param3 = 0; in mlx4_RST2INIT_QP_wrapper()
2984 qp->vlan_control = 0; in mlx4_RST2INIT_QP_wrapper()
2985 qp->fvl_rx = 0; in mlx4_RST2INIT_QP_wrapper()
2986 qp->pri_path_fl = 0; in mlx4_RST2INIT_QP_wrapper()
2987 qp->vlan_index = 0; in mlx4_RST2INIT_QP_wrapper()
2988 qp->feup = 0; in mlx4_RST2INIT_QP_wrapper()
2989 qp->qpc_flags = be32_to_cpu(qpc->flags); in mlx4_RST2INIT_QP_wrapper()
2991 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt); in mlx4_RST2INIT_QP_wrapper()
2995 err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt); in mlx4_RST2INIT_QP_wrapper()
2999 err = get_res(dev, slave, rcqn, RES_CQ, &rcq); in mlx4_RST2INIT_QP_wrapper()
3004 err = get_res(dev, slave, scqn, RES_CQ, &scq); in mlx4_RST2INIT_QP_wrapper()
3011 err = get_res(dev, slave, srqn, RES_SRQ, &srq); in mlx4_RST2INIT_QP_wrapper()
3016 adjust_proxy_tun_qkey(dev, vhcr, qpc); in mlx4_RST2INIT_QP_wrapper()
3017 update_pkey_index(dev, slave, inbox); in mlx4_RST2INIT_QP_wrapper()
3018 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); in mlx4_RST2INIT_QP_wrapper()
3021 atomic_inc(&mtt->ref_count); in mlx4_RST2INIT_QP_wrapper()
3022 qp->mtt = mtt; in mlx4_RST2INIT_QP_wrapper()
3023 atomic_inc(&rcq->ref_count); in mlx4_RST2INIT_QP_wrapper()
3024 qp->rcq = rcq; in mlx4_RST2INIT_QP_wrapper()
3025 atomic_inc(&scq->ref_count); in mlx4_RST2INIT_QP_wrapper()
3026 qp->scq = scq; in mlx4_RST2INIT_QP_wrapper()
3029 put_res(dev, slave, scqn, RES_CQ); in mlx4_RST2INIT_QP_wrapper()
3032 atomic_inc(&srq->ref_count); in mlx4_RST2INIT_QP_wrapper()
3033 put_res(dev, slave, srqn, RES_SRQ); in mlx4_RST2INIT_QP_wrapper()
3034 qp->srq = srq; in mlx4_RST2INIT_QP_wrapper()
3038 qp->param3 = qpc->param3; in mlx4_RST2INIT_QP_wrapper()
3039 put_res(dev, slave, rcqn, RES_CQ); in mlx4_RST2INIT_QP_wrapper()
3040 put_res(dev, slave, mtt_base, RES_MTT); in mlx4_RST2INIT_QP_wrapper()
3041 res_end_move(dev, slave, RES_QP, qpn); in mlx4_RST2INIT_QP_wrapper()
3047 put_res(dev, slave, srqn, RES_SRQ); in mlx4_RST2INIT_QP_wrapper()
3050 put_res(dev, slave, scqn, RES_CQ); in mlx4_RST2INIT_QP_wrapper()
3052 put_res(dev, slave, rcqn, RES_CQ); in mlx4_RST2INIT_QP_wrapper()
3054 put_res(dev, slave, mtt_base, RES_MTT); in mlx4_RST2INIT_QP_wrapper()
3056 res_abort_move(dev, slave, RES_QP, qpn); in mlx4_RST2INIT_QP_wrapper()
3063 return be32_to_cpu(eqc->mtt_base_addr_l) & 0xfffffff8; in eq_get_mtt_addr()
3068 int log_eq_size = eqc->log_eq_size & 0x1f; in eq_get_mtt_size()
3069 int page_shift = (eqc->log_page_size & 0x3f) + 12; in eq_get_mtt_size()
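/* An EQE is 32 bytes (2^5), hence the "+ 5" below: for example, a
 * 1024-entry EQ (log_eq_size = 10) on 4 KiB pages (page_shift = 12) needs
 * 1 << (10 + 5 - 12) = 8 MTT pages.
 */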
3074 return 1 << (log_eq_size + 5 - page_shift); in eq_get_mtt_size()
3079 return be32_to_cpu(cqc->mtt_base_addr_l) & 0xfffffff8; in cq_get_mtt_addr()
3084 int log_cq_size = (be32_to_cpu(cqc->logsize_usrpage) >> 24) & 0x1f; in cq_get_mtt_size()
3085 int page_shift = (cqc->log_page_size & 0x3f) + 12; in cq_get_mtt_size()
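/* Same 32-byte entry convention as for EQs: a 1024-entry CQ on 4 KiB
 * pages likewise resolves to 1 << (10 + 5 - 12) = 8 MTT pages.
 */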
3090 return 1 << (log_cq_size + 5 - page_shift); in cq_get_mtt_size()
3093 int mlx4_SW2HW_EQ_wrapper(struct mlx4_dev *dev, int slave, in mlx4_SW2HW_EQ_wrapper() argument
3100 int eqn = vhcr->in_modifier; in mlx4_SW2HW_EQ_wrapper()
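/* EQ numbers are scoped per slave, so the global tracker keys them as
 * (slave << 10) | eqn, giving each slave a private 1024-entry EQ
 * namespace.
 */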
3101 int res_id = (slave << 10) | eqn; in mlx4_SW2HW_EQ_wrapper()
3102 struct mlx4_eq_context *eqc = inbox->buf; in mlx4_SW2HW_EQ_wrapper()
3103 int mtt_base = eq_get_mtt_addr(eqc) / dev->caps.mtt_entry_sz; in mlx4_SW2HW_EQ_wrapper()
3108 err = add_res_range(dev, slave, res_id, 1, RES_EQ, 0); in mlx4_SW2HW_EQ_wrapper()
3111 err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_HW, &eq); in mlx4_SW2HW_EQ_wrapper()
3115 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt); in mlx4_SW2HW_EQ_wrapper()
3119 err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt); in mlx4_SW2HW_EQ_wrapper()
3123 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); in mlx4_SW2HW_EQ_wrapper()
3127 atomic_inc(&mtt->ref_count); in mlx4_SW2HW_EQ_wrapper()
3128 eq->mtt = mtt; in mlx4_SW2HW_EQ_wrapper()
3129 put_res(dev, slave, mtt->com.res_id, RES_MTT); in mlx4_SW2HW_EQ_wrapper()
3130 res_end_move(dev, slave, RES_EQ, res_id); in mlx4_SW2HW_EQ_wrapper()
3134 put_res(dev, slave, mtt->com.res_id, RES_MTT); in mlx4_SW2HW_EQ_wrapper()
3136 res_abort_move(dev, slave, RES_EQ, res_id); in mlx4_SW2HW_EQ_wrapper()
3138 rem_res_range(dev, slave, res_id, 1, RES_EQ, 0); in mlx4_SW2HW_EQ_wrapper()
3142 int mlx4_CONFIG_DEV_wrapper(struct mlx4_dev *dev, int slave, in mlx4_CONFIG_DEV_wrapper() argument
3149 u8 get = vhcr->op_modifier; in mlx4_CONFIG_DEV_wrapper()
3152 return -EPERM; in mlx4_CONFIG_DEV_wrapper()
3154 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); in mlx4_CONFIG_DEV_wrapper()
3159 static int get_containing_mtt(struct mlx4_dev *dev, int slave, int start, in get_containing_mtt() argument
3162 struct mlx4_priv *priv = mlx4_priv(dev); in get_containing_mtt()
3163 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; in get_containing_mtt()
3165 int err = -EINVAL; in get_containing_mtt()
3167 spin_lock_irq(mlx4_tlock(dev)); in get_containing_mtt()
3168 list_for_each_entry(mtt, &tracker->slave_list[slave].res_list[RES_MTT], in get_containing_mtt()
3170 if (!check_mtt_range(dev, slave, start, len, mtt)) { in get_containing_mtt()
3172 mtt->com.from_state = mtt->com.state; in get_containing_mtt()
3173 mtt->com.state = RES_MTT_BUSY; in get_containing_mtt()
3178 spin_unlock_irq(mlx4_tlock(dev)); in get_containing_mtt()
3183 static int verify_qp_parameters(struct mlx4_dev *dev, in verify_qp_parameters() argument
3186 enum qp_transition transition, u8 slave) in verify_qp_parameters() argument
3195 qp_ctx = inbox->buf + 8; in verify_qp_parameters()
3196 qp_type = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff; in verify_qp_parameters()
3197 optpar = be32_to_cpu(*(__be32 *) inbox->buf); in verify_qp_parameters()
3199 if (slave != mlx4_master_func_num(dev)) { in verify_qp_parameters()
3200 qp_ctx->params2 &= ~cpu_to_be32(MLX4_QP_BIT_FPP); in verify_qp_parameters()
3201 /* setting QP rate-limit is disallowed for VFs */ in verify_qp_parameters()
3202 if (qp_ctx->rate_limit_params) in verify_qp_parameters()
3203 return -EPERM; in verify_qp_parameters()
3216 if (slave != mlx4_master_func_num(dev)) { in verify_qp_parameters()
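/* Bit 6 of sched_queue selects the port (0-based); the "+ 1" yields the
 * 1-based port number used by the port mask and GID lookups below.
 */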
3218 port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1; in verify_qp_parameters()
3219 if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB) in verify_qp_parameters()
3220 num_gids = mlx4_get_slave_num_gids(dev, slave, port); in verify_qp_parameters()
3223 if (qp_ctx->pri_path.mgid_index >= num_gids) in verify_qp_parameters()
3224 return -EINVAL; in verify_qp_parameters()
3227 port = (qp_ctx->alt_path.sched_queue >> 6 & 1) + 1; in verify_qp_parameters()
3228 if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB) in verify_qp_parameters()
3229 num_gids = mlx4_get_slave_num_gids(dev, slave, port); in verify_qp_parameters()
3232 if (qp_ctx->alt_path.mgid_index >= num_gids) in verify_qp_parameters()
3233 return -EINVAL; in verify_qp_parameters()
3243 qpn = vhcr->in_modifier & 0x7fffff; in verify_qp_parameters()
3244 port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1; in verify_qp_parameters()
3246 slave != mlx4_master_func_num(dev) && in verify_qp_parameters()
3247 mlx4_is_qp_reserved(dev, qpn) && in verify_qp_parameters()
3248 !mlx4_vf_smi_enabled(dev, slave, port)) { in verify_qp_parameters()
3250 mlx4_err(dev, "%s: unprivileged slave %d attempting to create an MLX proxy special QP on port %d\n", in verify_qp_parameters()
3251 __func__, slave, port); in verify_qp_parameters()
3252 return -EPERM; in verify_qp_parameters()
3263 int mlx4_WRITE_MTT_wrapper(struct mlx4_dev *dev, int slave, in mlx4_WRITE_MTT_wrapper() argument
3270 __be64 *page_list = inbox->buf; in mlx4_WRITE_MTT_wrapper()
3275 int npages = vhcr->in_modifier; in mlx4_WRITE_MTT_wrapper()
3278 err = get_containing_mtt(dev, slave, start, npages, &rmtt); in mlx4_WRITE_MTT_wrapper()
3283 * - Prepare a dummy mtt struct in mlx4_WRITE_MTT_wrapper()
3284 * - Translate inbox contents to simple addresses in host endianness */ in mlx4_WRITE_MTT_wrapper()
3292 err = __mlx4_write_mtt(dev, &mtt, be64_to_cpu(page_list[0]), npages, in mlx4_WRITE_MTT_wrapper()
3296 put_res(dev, slave, rmtt->com.res_id, RES_MTT); in mlx4_WRITE_MTT_wrapper()
3301 int mlx4_HW2SW_EQ_wrapper(struct mlx4_dev *dev, int slave, in mlx4_HW2SW_EQ_wrapper() argument
3307 int eqn = vhcr->in_modifier; in mlx4_HW2SW_EQ_wrapper()
3308 int res_id = eqn | (slave << 10); in mlx4_HW2SW_EQ_wrapper()
3312 err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_RESERVED, &eq); in mlx4_HW2SW_EQ_wrapper()
3316 err = get_res(dev, slave, eq->mtt->com.res_id, RES_MTT, NULL); in mlx4_HW2SW_EQ_wrapper()
3320 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); in mlx4_HW2SW_EQ_wrapper()
3324 atomic_dec(&eq->mtt->ref_count); in mlx4_HW2SW_EQ_wrapper()
3325 put_res(dev, slave, eq->mtt->com.res_id, RES_MTT); in mlx4_HW2SW_EQ_wrapper()
3326 res_end_move(dev, slave, RES_EQ, res_id); in mlx4_HW2SW_EQ_wrapper()
3327 rem_res_range(dev, slave, res_id, 1, RES_EQ, 0); in mlx4_HW2SW_EQ_wrapper()
3332 put_res(dev, slave, eq->mtt->com.res_id, RES_MTT); in mlx4_HW2SW_EQ_wrapper()
3334 res_abort_move(dev, slave, RES_EQ, res_id); in mlx4_HW2SW_EQ_wrapper()
3339 int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe) in mlx4_GEN_EQE() argument
3341 struct mlx4_priv *priv = mlx4_priv(dev); in mlx4_GEN_EQE()
3349 if (!priv->mfunc.master.slave_state) in mlx4_GEN_EQE()
3350 return -EINVAL; in mlx4_GEN_EQE()
3352 /* check for slave valid, slave not PF, and slave active */ in mlx4_GEN_EQE()
3353 if (slave < 0 || slave > dev->persist->num_vfs || in mlx4_GEN_EQE()
3354 slave == dev->caps.function || in mlx4_GEN_EQE()
3355 !priv->mfunc.master.slave_state[slave].active) in mlx4_GEN_EQE()
3358 event_eq = &priv->mfunc.master.slave_state[slave].event_eq[eqe->type]; in mlx4_GEN_EQE()
3360 /* Create the event only if the slave is registered */ in mlx4_GEN_EQE()
3361 if (event_eq->eqn < 0) in mlx4_GEN_EQE()
3364 mutex_lock(&priv->mfunc.master.gen_eqe_mutex[slave]); in mlx4_GEN_EQE()
3365 res_id = (slave << 10) | event_eq->eqn; in mlx4_GEN_EQE()
3366 err = get_res(dev, slave, res_id, RES_EQ, &req); in mlx4_GEN_EQE()
3370 if (req->com.from_state != RES_EQ_HW) { in mlx4_GEN_EQE()
3371 err = -EINVAL; in mlx4_GEN_EQE()
3375 mailbox = mlx4_alloc_cmd_mailbox(dev); in mlx4_GEN_EQE()
3381 if (eqe->type == MLX4_EVENT_TYPE_CMD) { in mlx4_GEN_EQE()
3382 ++event_eq->token; in mlx4_GEN_EQE()
3383 eqe->event.cmd.token = cpu_to_be16(event_eq->token); in mlx4_GEN_EQE()
3386 memcpy(mailbox->buf, (u8 *) eqe, 28); in mlx4_GEN_EQE()
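/* GEN_EQE in_modifier layout, as packed below: target slave in the low
 * byte, EQN in bits 16..25.
 */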
3388 in_modifier = (slave & 0xff) | ((event_eq->eqn & 0x3ff) << 16); in mlx4_GEN_EQE()
3390 err = mlx4_cmd(dev, mailbox->dma, in_modifier, 0, in mlx4_GEN_EQE()
3394 put_res(dev, slave, res_id, RES_EQ); in mlx4_GEN_EQE()
3395 mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]); in mlx4_GEN_EQE()
3396 mlx4_free_cmd_mailbox(dev, mailbox); in mlx4_GEN_EQE()
3400 put_res(dev, slave, res_id, RES_EQ); in mlx4_GEN_EQE()
3403 mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]); in mlx4_GEN_EQE()
3407 int mlx4_QUERY_EQ_wrapper(struct mlx4_dev *dev, int slave, in mlx4_QUERY_EQ_wrapper() argument
3413 int eqn = vhcr->in_modifier; in mlx4_QUERY_EQ_wrapper()
3414 int res_id = eqn | (slave << 10); in mlx4_QUERY_EQ_wrapper()
3418 err = get_res(dev, slave, res_id, RES_EQ, &eq); in mlx4_QUERY_EQ_wrapper()
3422 if (eq->com.from_state != RES_EQ_HW) { in mlx4_QUERY_EQ_wrapper()
3423 err = -EINVAL; in mlx4_QUERY_EQ_wrapper()
3427 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); in mlx4_QUERY_EQ_wrapper()
3430 put_res(dev, slave, res_id, RES_EQ); in mlx4_QUERY_EQ_wrapper()
3434 int mlx4_SW2HW_CQ_wrapper(struct mlx4_dev *dev, int slave, in mlx4_SW2HW_CQ_wrapper() argument
3441 int cqn = vhcr->in_modifier; in mlx4_SW2HW_CQ_wrapper()
3442 struct mlx4_cq_context *cqc = inbox->buf; in mlx4_SW2HW_CQ_wrapper()
3443 int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz; in mlx4_SW2HW_CQ_wrapper()
3447 err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_HW, &cq); in mlx4_SW2HW_CQ_wrapper()
3450 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt); in mlx4_SW2HW_CQ_wrapper()
3453 err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt); in mlx4_SW2HW_CQ_wrapper()
3456 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); in mlx4_SW2HW_CQ_wrapper()
3459 atomic_inc(&mtt->ref_count); in mlx4_SW2HW_CQ_wrapper()
3460 cq->mtt = mtt; in mlx4_SW2HW_CQ_wrapper()
3461 put_res(dev, slave, mtt->com.res_id, RES_MTT); in mlx4_SW2HW_CQ_wrapper()
3462 res_end_move(dev, slave, RES_CQ, cqn); in mlx4_SW2HW_CQ_wrapper()
3466 put_res(dev, slave, mtt->com.res_id, RES_MTT); in mlx4_SW2HW_CQ_wrapper()
3468 res_abort_move(dev, slave, RES_CQ, cqn); in mlx4_SW2HW_CQ_wrapper()
3472 int mlx4_HW2SW_CQ_wrapper(struct mlx4_dev *dev, int slave, in mlx4_HW2SW_CQ_wrapper() argument
3479 int cqn = vhcr->in_modifier; in mlx4_HW2SW_CQ_wrapper()
3482 err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_ALLOCATED, &cq); in mlx4_HW2SW_CQ_wrapper()
3485 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); in mlx4_HW2SW_CQ_wrapper()
3488 atomic_dec(&cq->mtt->ref_count); in mlx4_HW2SW_CQ_wrapper()
3489 res_end_move(dev, slave, RES_CQ, cqn); in mlx4_HW2SW_CQ_wrapper()
3493 res_abort_move(dev, slave, RES_CQ, cqn); in mlx4_HW2SW_CQ_wrapper()
3497 int mlx4_QUERY_CQ_wrapper(struct mlx4_dev *dev, int slave, in mlx4_QUERY_CQ_wrapper() argument
3503 int cqn = vhcr->in_modifier; in mlx4_QUERY_CQ_wrapper()
3507 err = get_res(dev, slave, cqn, RES_CQ, &cq); in mlx4_QUERY_CQ_wrapper()
3511 if (cq->com.from_state != RES_CQ_HW) in mlx4_QUERY_CQ_wrapper()
3514 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); in mlx4_QUERY_CQ_wrapper()
3516 put_res(dev, slave, cqn, RES_CQ); in mlx4_QUERY_CQ_wrapper()
3521 static int handle_resize(struct mlx4_dev *dev, int slave, in handle_resize() argument
3531 struct mlx4_cq_context *cqc = inbox->buf; in handle_resize()
3532 int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz; in handle_resize()
3534 err = get_res(dev, slave, cq->mtt->com.res_id, RES_MTT, &orig_mtt); in handle_resize()
3538 if (orig_mtt != cq->mtt) { in handle_resize()
3539 err = -EINVAL; in handle_resize()
3543 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt); in handle_resize()
3547 err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt); in handle_resize()
3550 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); in handle_resize()
3553 atomic_dec(&orig_mtt->ref_count); in handle_resize()
3554 put_res(dev, slave, orig_mtt->com.res_id, RES_MTT); in handle_resize()
3555 atomic_inc(&mtt->ref_count); in handle_resize()
3556 cq->mtt = mtt; in handle_resize()
3557 put_res(dev, slave, mtt->com.res_id, RES_MTT); in handle_resize()
3561 put_res(dev, slave, mtt->com.res_id, RES_MTT); in handle_resize()
3563 put_res(dev, slave, orig_mtt->com.res_id, RES_MTT); in handle_resize()
3569 int mlx4_MODIFY_CQ_wrapper(struct mlx4_dev *dev, int slave, in mlx4_MODIFY_CQ_wrapper() argument
3575 int cqn = vhcr->in_modifier; in mlx4_MODIFY_CQ_wrapper()
3579 err = get_res(dev, slave, cqn, RES_CQ, &cq); in mlx4_MODIFY_CQ_wrapper()
3583 if (cq->com.from_state != RES_CQ_HW) in mlx4_MODIFY_CQ_wrapper()
3586 if (vhcr->op_modifier == 0) { in mlx4_MODIFY_CQ_wrapper()
3587 err = handle_resize(dev, slave, vhcr, inbox, outbox, cmd, cq); in mlx4_MODIFY_CQ_wrapper()
3591 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); in mlx4_MODIFY_CQ_wrapper()
3593 put_res(dev, slave, cqn, RES_CQ); in mlx4_MODIFY_CQ_wrapper()
3600 int log_srq_size = (be32_to_cpu(srqc->state_logsize_srqn) >> 24) & 0xf; in srq_get_mtt_size()
3601 int log_rq_stride = srqc->logstride & 7; in srq_get_mtt_size()
3602 int page_shift = (srqc->log_page_size & 0x3f) + 12; in srq_get_mtt_size()
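/* Worked example: 256 SRQ WQEs (log_srq_size = 8) of 64 bytes
 * (log_rq_stride = 2, 16-byte base unit) on 4 KiB pages come to
 * 1 << (8 + 2 + 4 - 12) = 4 MTT pages.
 */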
3607 return 1 << (log_srq_size + log_rq_stride + 4 - page_shift); in srq_get_mtt_size()
3610 int mlx4_SW2HW_SRQ_wrapper(struct mlx4_dev *dev, int slave, in mlx4_SW2HW_SRQ_wrapper() argument
3617 int srqn = vhcr->in_modifier; in mlx4_SW2HW_SRQ_wrapper()
3620 struct mlx4_srq_context *srqc = inbox->buf; in mlx4_SW2HW_SRQ_wrapper()
3621 int mtt_base = srq_get_mtt_addr(srqc) / dev->caps.mtt_entry_sz; in mlx4_SW2HW_SRQ_wrapper()
3623 if (srqn != (be32_to_cpu(srqc->state_logsize_srqn) & 0xffffff)) in mlx4_SW2HW_SRQ_wrapper()
3624 return -EINVAL; in mlx4_SW2HW_SRQ_wrapper()
3626 err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_HW, &srq); in mlx4_SW2HW_SRQ_wrapper()
3629 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt); in mlx4_SW2HW_SRQ_wrapper()
3632 err = check_mtt_range(dev, slave, mtt_base, srq_get_mtt_size(srqc), in mlx4_SW2HW_SRQ_wrapper()
3637 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); in mlx4_SW2HW_SRQ_wrapper()
3641 atomic_inc(&mtt->ref_count); in mlx4_SW2HW_SRQ_wrapper()
3642 srq->mtt = mtt; in mlx4_SW2HW_SRQ_wrapper()
3643 put_res(dev, slave, mtt->com.res_id, RES_MTT); in mlx4_SW2HW_SRQ_wrapper()
3644 res_end_move(dev, slave, RES_SRQ, srqn); in mlx4_SW2HW_SRQ_wrapper()
3648 put_res(dev, slave, mtt->com.res_id, RES_MTT); in mlx4_SW2HW_SRQ_wrapper()
3650 res_abort_move(dev, slave, RES_SRQ, srqn); in mlx4_SW2HW_SRQ_wrapper()
3655 int mlx4_HW2SW_SRQ_wrapper(struct mlx4_dev *dev, int slave, in mlx4_HW2SW_SRQ_wrapper() argument
3662 int srqn = vhcr->in_modifier; in mlx4_HW2SW_SRQ_wrapper()
3665 err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_ALLOCATED, &srq); in mlx4_HW2SW_SRQ_wrapper()
3668 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); in mlx4_HW2SW_SRQ_wrapper()
3671 atomic_dec(&srq->mtt->ref_count); in mlx4_HW2SW_SRQ_wrapper()
3672 if (srq->cq) in mlx4_HW2SW_SRQ_wrapper()
3673 atomic_dec(&srq->cq->ref_count); in mlx4_HW2SW_SRQ_wrapper()
3674 res_end_move(dev, slave, RES_SRQ, srqn); in mlx4_HW2SW_SRQ_wrapper()
3679 res_abort_move(dev, slave, RES_SRQ, srqn); in mlx4_HW2SW_SRQ_wrapper()
3684 int mlx4_QUERY_SRQ_wrapper(struct mlx4_dev *dev, int slave, in mlx4_QUERY_SRQ_wrapper() argument
3691 int srqn = vhcr->in_modifier; in mlx4_QUERY_SRQ_wrapper()
3694 err = get_res(dev, slave, srqn, RES_SRQ, &srq); in mlx4_QUERY_SRQ_wrapper()
3697 if (srq->com.from_state != RES_SRQ_HW) { in mlx4_QUERY_SRQ_wrapper()
3698 err = -EBUSY; in mlx4_QUERY_SRQ_wrapper()
3701 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); in mlx4_QUERY_SRQ_wrapper()
3703 put_res(dev, slave, srqn, RES_SRQ); in mlx4_QUERY_SRQ_wrapper()
3707 int mlx4_ARM_SRQ_wrapper(struct mlx4_dev *dev, int slave, in mlx4_ARM_SRQ_wrapper() argument
3714 int srqn = vhcr->in_modifier; in mlx4_ARM_SRQ_wrapper()
3717 err = get_res(dev, slave, srqn, RES_SRQ, &srq); in mlx4_ARM_SRQ_wrapper()
3721 if (srq->com.from_state != RES_SRQ_HW) { in mlx4_ARM_SRQ_wrapper()
3722 err = -EBUSY; in mlx4_ARM_SRQ_wrapper()
3726 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); in mlx4_ARM_SRQ_wrapper()
3728 put_res(dev, slave, srqn, RES_SRQ); in mlx4_ARM_SRQ_wrapper()
3732 int mlx4_GEN_QP_wrapper(struct mlx4_dev *dev, int slave, in mlx4_GEN_QP_wrapper() argument
3739 int qpn = vhcr->in_modifier & 0x7fffff; in mlx4_GEN_QP_wrapper()
3742 err = get_res(dev, slave, qpn, RES_QP, &qp); in mlx4_GEN_QP_wrapper()
3745 if (qp->com.from_state != RES_QP_HW) { in mlx4_GEN_QP_wrapper()
3746 err = -EBUSY; in mlx4_GEN_QP_wrapper()
3750 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); in mlx4_GEN_QP_wrapper()
3752 put_res(dev, slave, qpn, RES_QP); in mlx4_GEN_QP_wrapper()
3756 int mlx4_INIT2INIT_QP_wrapper(struct mlx4_dev *dev, int slave, in mlx4_INIT2INIT_QP_wrapper() argument
3762 struct mlx4_qp_context *context = inbox->buf + 8; in mlx4_INIT2INIT_QP_wrapper()
3763 adjust_proxy_tun_qkey(dev, vhcr, context); in mlx4_INIT2INIT_QP_wrapper()
3764 update_pkey_index(dev, slave, inbox); in mlx4_INIT2INIT_QP_wrapper()
3765 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd); in mlx4_INIT2INIT_QP_wrapper()
3768 static int adjust_qp_sched_queue(struct mlx4_dev *dev, int slave, in adjust_qp_sched_queue() argument
3772 enum mlx4_qp_optpar optpar = be32_to_cpu(*(__be32 *)inbox->buf); in adjust_qp_sched_queue()
3775 dev, slave, (qpc->pri_path.sched_queue >> 6 & 1) + 1) - 1; in adjust_qp_sched_queue()
3778 return -EINVAL; in adjust_qp_sched_queue()
3780 pri_sched_queue = (qpc->pri_path.sched_queue & ~(1 << 6)) | in adjust_qp_sched_queue()
3784 qpc->pri_path.sched_queue || mlx4_is_eth(dev, port + 1)) { in adjust_qp_sched_queue()
3785 qpc->pri_path.sched_queue = pri_sched_queue; in adjust_qp_sched_queue()
3790 dev, slave, (qpc->alt_path.sched_queue >> 6 & 1) in adjust_qp_sched_queue()
3791 + 1) - 1; in adjust_qp_sched_queue()
3793 return -EINVAL; in adjust_qp_sched_queue()
3794 qpc->alt_path.sched_queue = in adjust_qp_sched_queue()
3795 (qpc->alt_path.sched_queue & ~(1 << 6)) | in adjust_qp_sched_queue()
3801 static int roce_verify_mac(struct mlx4_dev *dev, int slave, in roce_verify_mac() argument
3807 u32 ts = (be32_to_cpu(qpc->flags) >> 16) & 0xff; in roce_verify_mac()
3808 u8 sched = *(u8 *)(inbox->buf + 64); in roce_verify_mac()
3812 if (mlx4_is_eth(dev, port) && (ts != MLX4_QP_ST_MLX)) { in roce_verify_mac()
3813 smac_ix = qpc->pri_path.grh_mylmc & 0x7f; in roce_verify_mac()
3814 if (mac_find_smac_ix_in_slave(dev, slave, port, smac_ix, &mac)) in roce_verify_mac()
3815 return -ENOENT; in roce_verify_mac()
3820 int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave, in mlx4_INIT2RTR_QP_wrapper() argument
3827 struct mlx4_qp_context *qpc = inbox->buf + 8; in mlx4_INIT2RTR_QP_wrapper()
3828 int qpn = vhcr->in_modifier & 0x7fffff; in mlx4_INIT2RTR_QP_wrapper()
3831 u8 orig_vlan_control = qpc->pri_path.vlan_control; in mlx4_INIT2RTR_QP_wrapper()
3832 u8 orig_fvl_rx = qpc->pri_path.fvl_rx; in mlx4_INIT2RTR_QP_wrapper()
3833 u8 orig_pri_path_fl = qpc->pri_path.fl; in mlx4_INIT2RTR_QP_wrapper()
3834 u8 orig_vlan_index = qpc->pri_path.vlan_index; in mlx4_INIT2RTR_QP_wrapper()
3835 u8 orig_feup = qpc->pri_path.feup; in mlx4_INIT2RTR_QP_wrapper()
3837 err = adjust_qp_sched_queue(dev, slave, qpc, inbox); in mlx4_INIT2RTR_QP_wrapper()
3840 err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_INIT2RTR, slave); in mlx4_INIT2RTR_QP_wrapper()
3844 if (roce_verify_mac(dev, slave, qpc, inbox)) in mlx4_INIT2RTR_QP_wrapper()
3845 return -EINVAL; in mlx4_INIT2RTR_QP_wrapper()
3847 update_pkey_index(dev, slave, inbox); in mlx4_INIT2RTR_QP_wrapper()
3848 update_gid(dev, inbox, (u8)slave); in mlx4_INIT2RTR_QP_wrapper()
3849 adjust_proxy_tun_qkey(dev, vhcr, qpc); in mlx4_INIT2RTR_QP_wrapper()
3850 orig_sched_queue = qpc->pri_path.sched_queue; in mlx4_INIT2RTR_QP_wrapper()
3852 err = get_res(dev, slave, qpn, RES_QP, &qp); in mlx4_INIT2RTR_QP_wrapper()
3855 if (qp->com.from_state != RES_QP_HW) { in mlx4_INIT2RTR_QP_wrapper()
3856 err = -EBUSY; in mlx4_INIT2RTR_QP_wrapper()
3860 err = update_vport_qp_param(dev, inbox, slave, qpn); in mlx4_INIT2RTR_QP_wrapper()
3864 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); in mlx4_INIT2RTR_QP_wrapper()
3871 qp->sched_queue = orig_sched_queue; in mlx4_INIT2RTR_QP_wrapper()
3872 qp->vlan_control = orig_vlan_control; in mlx4_INIT2RTR_QP_wrapper()
3873 qp->fvl_rx = orig_fvl_rx; in mlx4_INIT2RTR_QP_wrapper()
3874 qp->pri_path_fl = orig_pri_path_fl; in mlx4_INIT2RTR_QP_wrapper()
3875 qp->vlan_index = orig_vlan_index; in mlx4_INIT2RTR_QP_wrapper()
3876 qp->feup = orig_feup; in mlx4_INIT2RTR_QP_wrapper()
3878 put_res(dev, slave, qpn, RES_QP); in mlx4_INIT2RTR_QP_wrapper()
3882 int mlx4_RTR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave, in mlx4_RTR2RTS_QP_wrapper() argument
3889 struct mlx4_qp_context *context = inbox->buf + 8; in mlx4_RTR2RTS_QP_wrapper()
3891 err = adjust_qp_sched_queue(dev, slave, context, inbox); in mlx4_RTR2RTS_QP_wrapper()
3894 err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_RTR2RTS, slave); in mlx4_RTR2RTS_QP_wrapper()
3898 update_pkey_index(dev, slave, inbox); in mlx4_RTR2RTS_QP_wrapper()
3899 update_gid(dev, inbox, (u8)slave); in mlx4_RTR2RTS_QP_wrapper()
3900 adjust_proxy_tun_qkey(dev, vhcr, context); in mlx4_RTR2RTS_QP_wrapper()
3901 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd); in mlx4_RTR2RTS_QP_wrapper()
3904 int mlx4_RTS2RTS_QP_wrapper(struct mlx4_dev *dev, int slave, in mlx4_RTS2RTS_QP_wrapper() argument
3911 struct mlx4_qp_context *context = inbox->buf + 8; in mlx4_RTS2RTS_QP_wrapper()
3913 err = adjust_qp_sched_queue(dev, slave, context, inbox); in mlx4_RTS2RTS_QP_wrapper()
3916 err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_RTS2RTS, slave); in mlx4_RTS2RTS_QP_wrapper()
3920 update_pkey_index(dev, slave, inbox); in mlx4_RTS2RTS_QP_wrapper()
3921 update_gid(dev, inbox, (u8)slave); in mlx4_RTS2RTS_QP_wrapper()
3922 adjust_proxy_tun_qkey(dev, vhcr, context); in mlx4_RTS2RTS_QP_wrapper()
3923 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd); in mlx4_RTS2RTS_QP_wrapper()
3927 int mlx4_SQERR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave, in mlx4_SQERR2RTS_QP_wrapper() argument
3933 struct mlx4_qp_context *context = inbox->buf + 8; in mlx4_SQERR2RTS_QP_wrapper()
3934 int err = adjust_qp_sched_queue(dev, slave, context, inbox); in mlx4_SQERR2RTS_QP_wrapper()
3937 adjust_proxy_tun_qkey(dev, vhcr, context); in mlx4_SQERR2RTS_QP_wrapper()
3938 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd); in mlx4_SQERR2RTS_QP_wrapper()
3941 int mlx4_SQD2SQD_QP_wrapper(struct mlx4_dev *dev, int slave, in mlx4_SQD2SQD_QP_wrapper() argument
3948 struct mlx4_qp_context *context = inbox->buf + 8; in mlx4_SQD2SQD_QP_wrapper()
3950 err = adjust_qp_sched_queue(dev, slave, context, inbox); in mlx4_SQD2SQD_QP_wrapper()
3953 err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_SQD2SQD, slave); in mlx4_SQD2SQD_QP_wrapper()
3957 adjust_proxy_tun_qkey(dev, vhcr, context); in mlx4_SQD2SQD_QP_wrapper()
3958 update_gid(dev, inbox, (u8)slave); in mlx4_SQD2SQD_QP_wrapper()
3959 update_pkey_index(dev, slave, inbox); in mlx4_SQD2SQD_QP_wrapper()
3960 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd); in mlx4_SQD2SQD_QP_wrapper()
3963 int mlx4_SQD2RTS_QP_wrapper(struct mlx4_dev *dev, int slave, in mlx4_SQD2RTS_QP_wrapper() argument
3970 struct mlx4_qp_context *context = inbox->buf + 8; in mlx4_SQD2RTS_QP_wrapper()
3972 err = adjust_qp_sched_queue(dev, slave, context, inbox); in mlx4_SQD2RTS_QP_wrapper()
3975 err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_SQD2RTS, slave); in mlx4_SQD2RTS_QP_wrapper()
3979 adjust_proxy_tun_qkey(dev, vhcr, context); in mlx4_SQD2RTS_QP_wrapper()
3980 update_gid(dev, inbox, (u8)slave); in mlx4_SQD2RTS_QP_wrapper()
3981 update_pkey_index(dev, slave, inbox); in mlx4_SQD2RTS_QP_wrapper()
3982 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd); in mlx4_SQD2RTS_QP_wrapper()
3985 int mlx4_2RST_QP_wrapper(struct mlx4_dev *dev, int slave, in mlx4_2RST_QP_wrapper() argument
3992 int qpn = vhcr->in_modifier & 0x7fffff; in mlx4_2RST_QP_wrapper()
3995 err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED, &qp, 0); in mlx4_2RST_QP_wrapper()
3998 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); in mlx4_2RST_QP_wrapper()
4002 atomic_dec(&qp->mtt->ref_count); in mlx4_2RST_QP_wrapper()
4003 atomic_dec(&qp->rcq->ref_count); in mlx4_2RST_QP_wrapper()
4004 atomic_dec(&qp->scq->ref_count); in mlx4_2RST_QP_wrapper()
4005 if (qp->srq) in mlx4_2RST_QP_wrapper()
4006 atomic_dec(&qp->srq->ref_count); in mlx4_2RST_QP_wrapper()
4007 res_end_move(dev, slave, RES_QP, qpn); in mlx4_2RST_QP_wrapper()
4011 res_abort_move(dev, slave, RES_QP, qpn); in mlx4_2RST_QP_wrapper()
4016 static struct res_gid *find_gid(struct mlx4_dev *dev, int slave, in find_gid() argument
4021 list_for_each_entry(res, &rqp->mcg_list, list) { in find_gid()
4022 if (!memcmp(res->gid, gid, 16)) in find_gid()
4028 static int add_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp, in add_mcg_res() argument
4037 return -ENOMEM; in add_mcg_res()
4039 spin_lock_irq(&rqp->mcg_spl); in add_mcg_res()
4040 if (find_gid(dev, slave, rqp, gid)) { in add_mcg_res()
4042 err = -EEXIST; in add_mcg_res()
4044 memcpy(res->gid, gid, 16); in add_mcg_res()
4045 res->prot = prot; in add_mcg_res()
4046 res->steer = steer; in add_mcg_res()
4047 res->reg_id = reg_id; in add_mcg_res()
4048 list_add_tail(&res->list, &rqp->mcg_list); in add_mcg_res()
4051 spin_unlock_irq(&rqp->mcg_spl); in add_mcg_res()
4056 static int rem_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp, in rem_mcg_res() argument
4063 spin_lock_irq(&rqp->mcg_spl); in rem_mcg_res()
4064 res = find_gid(dev, slave, rqp, gid); in rem_mcg_res()
4065 if (!res || res->prot != prot || res->steer != steer) in rem_mcg_res()
4066 err = -EINVAL; in rem_mcg_res()
4068 *reg_id = res->reg_id; in rem_mcg_res()
4069 list_del(&res->list); in rem_mcg_res()
4073 spin_unlock_irq(&rqp->mcg_spl); in rem_mcg_res()
4078 static int qp_attach(struct mlx4_dev *dev, int slave, struct mlx4_qp *qp, in qp_attach() argument
4082 switch (dev->caps.steering_mode) { in qp_attach()
4084 int port = mlx4_slave_convert_port(dev, slave, gid[5]); in qp_attach()
4087 return mlx4_trans_to_dmfs_attach(dev, qp, gid, port, in qp_attach()
4093 int port = mlx4_slave_convert_port(dev, slave, gid[5]); in qp_attach()
4098 return mlx4_qp_attach_common(dev, qp, gid, in qp_attach()
4101 return -EINVAL; in qp_attach()
4105 static int qp_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, in qp_detach() argument
4109 switch (dev->caps.steering_mode) { in qp_detach()
4111 return mlx4_flow_detach(dev, reg_id); in qp_detach()
4113 return mlx4_qp_detach_common(dev, qp, gid, prot, type); in qp_detach()
4115 return -EINVAL; in qp_detach()
4119 static int mlx4_adjust_port(struct mlx4_dev *dev, int slave, in mlx4_adjust_port() argument
4127 if (dev->caps.steering_mode == MLX4_STEERING_MODE_B0 || in mlx4_adjust_port()
4128 dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) { in mlx4_adjust_port()
4129 real_port = mlx4_slave_convert_port(dev, slave, gid[5]); in mlx4_adjust_port()
4131 return -EINVAL; in mlx4_adjust_port()
4138 int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave, in mlx4_QP_ATTACH_wrapper() argument
4145 u8 *gid = inbox->buf; in mlx4_QP_ATTACH_wrapper()
4146 enum mlx4_protocol prot = (vhcr->in_modifier >> 28) & 0x7; in mlx4_QP_ATTACH_wrapper()
4151 int attach = vhcr->op_modifier; in mlx4_QP_ATTACH_wrapper()
4152 int block_loopback = vhcr->in_modifier >> 31; in mlx4_QP_ATTACH_wrapper()
4156 qpn = vhcr->in_modifier & 0xffffff; in mlx4_QP_ATTACH_wrapper()
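/* in_modifier layout for QP_ATTACH, per the fields decoded above: QPN in
 * bits 0..23, protocol in bits 28..30, block-loopback flag in bit 31.
 */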
4157 err = get_res(dev, slave, qpn, RES_QP, &rqp); in mlx4_QP_ATTACH_wrapper()
4163 err = qp_attach(dev, slave, &qp, gid, block_loopback, prot, in mlx4_QP_ATTACH_wrapper()
4169 err = add_mcg_res(dev, slave, rqp, gid, prot, type, reg_id); in mlx4_QP_ATTACH_wrapper()
4173 err = mlx4_adjust_port(dev, slave, gid, prot); in mlx4_QP_ATTACH_wrapper()
4177 err = rem_mcg_res(dev, slave, rqp, gid, prot, type, &reg_id); in mlx4_QP_ATTACH_wrapper()
4181 err = qp_detach(dev, &qp, gid, prot, type, reg_id); in mlx4_QP_ATTACH_wrapper()
4186 put_res(dev, slave, qpn, RES_QP); in mlx4_QP_ATTACH_wrapper()
4190 qp_detach(dev, &qp, gid, prot, type, reg_id); in mlx4_QP_ATTACH_wrapper()
4192 put_res(dev, slave, qpn, RES_QP); in mlx4_QP_ATTACH_wrapper()
4200 static int validate_eth_header_mac(int slave, struct _rule_hw *eth_header, in validate_eth_header_mac() argument
4207 if (!is_multicast_ether_addr(eth_header->eth.dst_mac) && in validate_eth_header_mac()
4208 !is_broadcast_ether_addr(eth_header->eth.dst_mac)) { in validate_eth_header_mac()
4210 be_mac = cpu_to_be64(res->mac << 16); in validate_eth_header_mac()
4211 if (ether_addr_equal((u8 *)&be_mac, eth_header->eth.dst_mac)) in validate_eth_header_mac()
4215 eth_header->eth.dst_mac, slave); in validate_eth_header_mac()
4216 return -EINVAL; in validate_eth_header_mac()
4225 static int add_eth_header(struct mlx4_dev *dev, int slave, in add_eth_header() argument
4238 ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf; in add_eth_header()
4239 port = ctrl->port; in add_eth_header()
4257 return -EINVAL; in add_eth_header()
4260 if (port == res->port) { in add_eth_header()
4261 be_mac = cpu_to_be64(res->mac << 16); in add_eth_header()
4268 return -EINVAL; in add_eth_header()
4272 eth_header->size = sizeof(*eth_header) >> 2; in add_eth_header()
4273 eth_header->id = cpu_to_be16(__sw_id_hw[MLX4_NET_TRANS_RULE_ID_ETH]); in add_eth_header()
4274 memcpy(eth_header->dst_mac, &be_mac, ETH_ALEN); in add_eth_header()
4275 memcpy(eth_header->dst_mac_msk, &mac_msk, ETH_ALEN); in add_eth_header()
4284 int mlx4_UPDATE_QP_wrapper(struct mlx4_dev *dev, int slave, in mlx4_UPDATE_QP_wrapper() argument
4291 u32 qpn = vhcr->in_modifier & 0xffffff; in mlx4_UPDATE_QP_wrapper()
4299 cmd = (struct mlx4_update_qp_context *)inbox->buf; in mlx4_UPDATE_QP_wrapper()
4301 pri_addr_path_mask = be64_to_cpu(cmd->primary_addr_path_mask); in mlx4_UPDATE_QP_wrapper()
4302 if (cmd->qp_mask || cmd->secondary_addr_path_mask || in mlx4_UPDATE_QP_wrapper()
4304 return -EPERM; in mlx4_UPDATE_QP_wrapper()
4308 !(dev->caps.flags2 & in mlx4_UPDATE_QP_wrapper()
4310 mlx4_warn(dev, "Src check LB for slave %d isn't supported\n", in mlx4_UPDATE_QP_wrapper()
4311 slave); in mlx4_UPDATE_QP_wrapper()
4312 return -EOPNOTSUPP; in mlx4_UPDATE_QP_wrapper()
4316 err = get_res(dev, slave, qpn, RES_QP, &rqp); in mlx4_UPDATE_QP_wrapper()
4318 mlx4_err(dev, "Updating qpn 0x%x for slave %d rejected\n", qpn, slave); in mlx4_UPDATE_QP_wrapper()
4322 port = (rqp->sched_queue >> 6 & 1) + 1; in mlx4_UPDATE_QP_wrapper()
4325 smac_index = cmd->qp_context.pri_path.grh_mylmc; in mlx4_UPDATE_QP_wrapper()
4326 err = mac_find_smac_ix_in_slave(dev, slave, port, in mlx4_UPDATE_QP_wrapper()
4330 mlx4_err(dev, "Failed to update qpn 0x%x, MAC is invalid. smac_ix: %d\n", in mlx4_UPDATE_QP_wrapper()
4336 err = mlx4_cmd(dev, inbox->dma, in mlx4_UPDATE_QP_wrapper()
4337 vhcr->in_modifier, 0, in mlx4_UPDATE_QP_wrapper()
4341 mlx4_err(dev, "Failed to update qpn 0x%x, command failed\n", qpn); in mlx4_UPDATE_QP_wrapper()
4346 put_res(dev, slave, qpn, RES_QP); in mlx4_UPDATE_QP_wrapper()
4357 while (rule_header->size) { in qp_attach_mbox_size()
4358 size += rule_header->size * sizeof(u32); in qp_attach_mbox_size()
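/* Each rule segment's size field counts dwords, so the mailbox total
 * grows by size * 4 bytes per segment until a zero-sized header ends the
 * walk.
 */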
4364 static int mlx4_do_mirror_rule(struct mlx4_dev *dev, struct res_fs_rule *fs_rule);
4366 int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave, in mlx4_QP_FLOW_STEERING_ATTACH_wrapper() argument
4373 struct mlx4_priv *priv = mlx4_priv(dev); in mlx4_QP_FLOW_STEERING_ATTACH_wrapper()
4374 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; in mlx4_QP_FLOW_STEERING_ATTACH_wrapper()
4375 struct list_head *rlist = &tracker->slave_list[slave].res_list[RES_MAC]; in mlx4_QP_FLOW_STEERING_ATTACH_wrapper()
4385 if (dev->caps.steering_mode != in mlx4_QP_FLOW_STEERING_ATTACH_wrapper()
4387 return -EOPNOTSUPP; in mlx4_QP_FLOW_STEERING_ATTACH_wrapper()
4389 ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf; in mlx4_QP_FLOW_STEERING_ATTACH_wrapper()
4390 err = mlx4_slave_convert_port(dev, slave, ctrl->port); in mlx4_QP_FLOW_STEERING_ATTACH_wrapper()
4392 return -EINVAL; in mlx4_QP_FLOW_STEERING_ATTACH_wrapper()
4393 ctrl->port = err; in mlx4_QP_FLOW_STEERING_ATTACH_wrapper()
4394 qpn = be32_to_cpu(ctrl->qpn) & 0xffffff; in mlx4_QP_FLOW_STEERING_ATTACH_wrapper()
4395 err = get_res(dev, slave, qpn, RES_QP, &rqp); in mlx4_QP_FLOW_STEERING_ATTACH_wrapper()
4401 header_id = map_hw_to_sw_id(be16_to_cpu(rule_header->id)); in mlx4_QP_FLOW_STEERING_ATTACH_wrapper()
4408 if (validate_eth_header_mac(slave, rule_header, rlist)) { in mlx4_QP_FLOW_STEERING_ATTACH_wrapper()
4409 err = -EINVAL; in mlx4_QP_FLOW_STEERING_ATTACH_wrapper()
4419 if (add_eth_header(dev, slave, inbox, rlist, header_id)) { in mlx4_QP_FLOW_STEERING_ATTACH_wrapper()
4420 err = -EINVAL; in mlx4_QP_FLOW_STEERING_ATTACH_wrapper()
4423 vhcr->in_modifier += in mlx4_QP_FLOW_STEERING_ATTACH_wrapper()
4428 err = -EINVAL; in mlx4_QP_FLOW_STEERING_ATTACH_wrapper()
4432 err = mlx4_cmd_imm(dev, inbox->dma, &vhcr->out_param, in mlx4_QP_FLOW_STEERING_ATTACH_wrapper()
4433 vhcr->in_modifier, 0, in mlx4_QP_FLOW_STEERING_ATTACH_wrapper()
4440 err = add_res_range(dev, slave, vhcr->out_param, 1, RES_FS_RULE, qpn); in mlx4_QP_FLOW_STEERING_ATTACH_wrapper()
4442 mlx4_err(dev, "Failed to add flow steering resources\n"); in mlx4_QP_FLOW_STEERING_ATTACH_wrapper()
4446 err = get_res(dev, slave, vhcr->out_param, RES_FS_RULE, &rrule); in mlx4_QP_FLOW_STEERING_ATTACH_wrapper()
4450 mbox_size = qp_attach_mbox_size(inbox->buf); in mlx4_QP_FLOW_STEERING_ATTACH_wrapper()
4451 rrule->mirr_mbox = kmalloc(mbox_size, GFP_KERNEL); in mlx4_QP_FLOW_STEERING_ATTACH_wrapper()
4452 if (!rrule->mirr_mbox) { in mlx4_QP_FLOW_STEERING_ATTACH_wrapper()
4453 err = -ENOMEM; in mlx4_QP_FLOW_STEERING_ATTACH_wrapper()
4456 rrule->mirr_mbox_size = mbox_size; in mlx4_QP_FLOW_STEERING_ATTACH_wrapper()
4457 rrule->mirr_rule_id = 0; in mlx4_QP_FLOW_STEERING_ATTACH_wrapper()
4458 memcpy(rrule->mirr_mbox, inbox->buf, mbox_size); in mlx4_QP_FLOW_STEERING_ATTACH_wrapper()
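/* Under bonding the rule is mirrored on the other port: flip the port
 * field in the saved mailbox copy before replaying it below.
 */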
4461 ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)rrule->mirr_mbox; in mlx4_QP_FLOW_STEERING_ATTACH_wrapper()
4462 if (ctrl->port == 1) in mlx4_QP_FLOW_STEERING_ATTACH_wrapper()
4463 ctrl->port = 2; in mlx4_QP_FLOW_STEERING_ATTACH_wrapper()
4465 ctrl->port = 1; in mlx4_QP_FLOW_STEERING_ATTACH_wrapper()
4467 if (mlx4_is_bonded(dev)) in mlx4_QP_FLOW_STEERING_ATTACH_wrapper()
4468 mlx4_do_mirror_rule(dev, rrule); in mlx4_QP_FLOW_STEERING_ATTACH_wrapper()
4470 atomic_inc(&rqp->ref_count); in mlx4_QP_FLOW_STEERING_ATTACH_wrapper()
4473 put_res(dev, slave, vhcr->out_param, RES_FS_RULE); in mlx4_QP_FLOW_STEERING_ATTACH_wrapper()
4477 mlx4_cmd(dev, vhcr->out_param, 0, 0, in mlx4_QP_FLOW_STEERING_ATTACH_wrapper()
4481 put_res(dev, slave, qpn, RES_QP); in mlx4_QP_FLOW_STEERING_ATTACH_wrapper()
4485 static int mlx4_undo_mirror_rule(struct mlx4_dev *dev, struct res_fs_rule *fs_rule) in mlx4_undo_mirror_rule() argument
4489 err = rem_res_range(dev, fs_rule->com.owner, fs_rule->com.res_id, 1, RES_FS_RULE, 0); in mlx4_undo_mirror_rule()
4491 mlx4_err(dev, "Failed to remove flow steering resources\n"); in mlx4_undo_mirror_rule()
4495 mlx4_cmd(dev, fs_rule->com.res_id, 0, 0, MLX4_QP_FLOW_STEERING_DETACH, in mlx4_undo_mirror_rule()
4500 int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave, in mlx4_QP_FLOW_STEERING_DETACH_wrapper() argument
4512 if (dev->caps.steering_mode != in mlx4_QP_FLOW_STEERING_DETACH_wrapper()
4514 return -EOPNOTSUPP; in mlx4_QP_FLOW_STEERING_DETACH_wrapper()
4516 err = get_res(dev, slave, vhcr->in_param, RES_FS_RULE, &rrule); in mlx4_QP_FLOW_STEERING_DETACH_wrapper()
4520 if (!rrule->mirr_mbox) { in mlx4_QP_FLOW_STEERING_DETACH_wrapper()
4521 mlx4_err(dev, "Mirror rules cannot be removed explicitly\n"); in mlx4_QP_FLOW_STEERING_DETACH_wrapper()
4522 put_res(dev, slave, vhcr->in_param, RES_FS_RULE); in mlx4_QP_FLOW_STEERING_DETACH_wrapper()
4523 return -EINVAL; in mlx4_QP_FLOW_STEERING_DETACH_wrapper()
4525 mirr_reg_id = rrule->mirr_rule_id; in mlx4_QP_FLOW_STEERING_DETACH_wrapper()
4526 kfree(rrule->mirr_mbox); in mlx4_QP_FLOW_STEERING_DETACH_wrapper()
4527 qpn = rrule->qpn; in mlx4_QP_FLOW_STEERING_DETACH_wrapper()
4530 put_res(dev, slave, vhcr->in_param, RES_FS_RULE); in mlx4_QP_FLOW_STEERING_DETACH_wrapper()
4531 err = get_res(dev, slave, qpn, RES_QP, &rqp); in mlx4_QP_FLOW_STEERING_DETACH_wrapper()
4535 if (mirr_reg_id && mlx4_is_bonded(dev)) { in mlx4_QP_FLOW_STEERING_DETACH_wrapper()
4536 err = get_res(dev, slave, mirr_reg_id, RES_FS_RULE, &rrule); in mlx4_QP_FLOW_STEERING_DETACH_wrapper()
4538 mlx4_err(dev, "Failed to get mirror rule resource\n"); in mlx4_QP_FLOW_STEERING_DETACH_wrapper()
4540 put_res(dev, slave, mirr_reg_id, RES_FS_RULE); in mlx4_QP_FLOW_STEERING_DETACH_wrapper()
4541 mlx4_undo_mirror_rule(dev, rrule); in mlx4_QP_FLOW_STEERING_DETACH_wrapper()
4544 err = rem_res_range(dev, slave, vhcr->in_param, 1, RES_FS_RULE, 0); in mlx4_QP_FLOW_STEERING_DETACH_wrapper()
4546 mlx4_err(dev, "Failed to remove flow steering resources\n"); in mlx4_QP_FLOW_STEERING_DETACH_wrapper()
4550 err = mlx4_cmd(dev, vhcr->in_param, 0, 0, in mlx4_QP_FLOW_STEERING_DETACH_wrapper()
4554 atomic_dec(&rqp->ref_count); in mlx4_QP_FLOW_STEERING_DETACH_wrapper()
4556 put_res(dev, slave, qpn, RES_QP); in mlx4_QP_FLOW_STEERING_DETACH_wrapper()
4564 int mlx4_QUERY_IF_STAT_wrapper(struct mlx4_dev *dev, int slave, in mlx4_QUERY_IF_STAT_wrapper() argument
4571 int index = vhcr->in_modifier & 0xffff; in mlx4_QUERY_IF_STAT_wrapper()
4573 err = get_res(dev, slave, index, RES_COUNTER, NULL); in mlx4_QUERY_IF_STAT_wrapper()
4577 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); in mlx4_QUERY_IF_STAT_wrapper()
4578 put_res(dev, slave, index, RES_COUNTER); in mlx4_QUERY_IF_STAT_wrapper()
4582 static void detach_qp(struct mlx4_dev *dev, int slave, struct res_qp *rqp) in detach_qp() argument
4588 list_for_each_entry_safe(rgid, tmp, &rqp->mcg_list, list) { in detach_qp()
4589 switch (dev->caps.steering_mode) { in detach_qp()
4591 mlx4_flow_detach(dev, rgid->reg_id); in detach_qp()
4594 qp.qpn = rqp->local_qpn; in detach_qp()
4595 (void) mlx4_qp_detach_common(dev, &qp, rgid->gid, in detach_qp()
4596 rgid->prot, rgid->steer); in detach_qp()
4599 list_del(&rgid->list); in detach_qp()
4604 static int _move_all_busy(struct mlx4_dev *dev, int slave, in _move_all_busy() argument
4607 struct mlx4_priv *priv = mlx4_priv(dev); in _move_all_busy()
4609 &priv->mfunc.master.res_tracker; in _move_all_busy()
4610 struct list_head *rlist = &tracker->slave_list[slave].res_list[type]; in _move_all_busy()
4616 spin_lock_irq(mlx4_tlock(dev)); in _move_all_busy()
4618 if (r->owner == slave) { in _move_all_busy()
4619 if (!r->removing) { in _move_all_busy()
4620 if (r->state == RES_ANY_BUSY) { in _move_all_busy()
4622 mlx4_dbg(dev, in _move_all_busy()
4625 r->res_id); in _move_all_busy()
4628 r->from_state = r->state; in _move_all_busy()
4629 r->state = RES_ANY_BUSY; in _move_all_busy()
4630 r->removing = 1; in _move_all_busy()
4635 spin_unlock_irq(mlx4_tlock(dev)); in _move_all_busy()
4640 static int move_all_busy(struct mlx4_dev *dev, int slave, in move_all_busy() argument
4648 busy = _move_all_busy(dev, slave, type, 0); in move_all_busy()
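/* The last argument is assumed to be a verbosity flag: the quiet pass
 * (print = 0) is retried in an elided loop, and only a final pass with
 * print = 1 logs the resources still stuck busy.
 */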
4656 busy = _move_all_busy(dev, slave, type, 1); in move_all_busy()
4660 static void rem_slave_qps(struct mlx4_dev *dev, int slave) in rem_slave_qps() argument
4662 struct mlx4_priv *priv = mlx4_priv(dev); in rem_slave_qps()
4663 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; in rem_slave_qps()
4665 &tracker->slave_list[slave].res_list[RES_QP]; in rem_slave_qps()
4673 err = move_all_busy(dev, slave, RES_QP); in rem_slave_qps()
4675 mlx4_warn(dev, "rem_slave_qps: Could not move all qps to busy for slave %d\n", in rem_slave_qps()
4676 slave); in rem_slave_qps()
4678 spin_lock_irq(mlx4_tlock(dev)); in rem_slave_qps()
4680 spin_unlock_irq(mlx4_tlock(dev)); in rem_slave_qps()
4681 if (qp->com.owner == slave) { in rem_slave_qps()
4682 qpn = qp->com.res_id; in rem_slave_qps()
4683 detach_qp(dev, slave, qp); in rem_slave_qps()
4684 state = qp->com.from_state; in rem_slave_qps()
4688 spin_lock_irq(mlx4_tlock(dev)); in rem_slave_qps()
4689 rb_erase(&qp->com.node, in rem_slave_qps()
4690 &tracker->res_tree[RES_QP]); in rem_slave_qps()
4691 list_del(&qp->com.list); in rem_slave_qps()
4692 spin_unlock_irq(mlx4_tlock(dev)); in rem_slave_qps()
4693 if (!valid_reserved(dev, slave, qpn)) { in rem_slave_qps()
4694 __mlx4_qp_release_range(dev, qpn, 1); in rem_slave_qps()
4695 mlx4_release_resource(dev, slave, in rem_slave_qps()
4702 if (!valid_reserved(dev, slave, qpn)) in rem_slave_qps()
4703 __mlx4_qp_free_icm(dev, qpn); in rem_slave_qps()
4707 in_param = slave; in rem_slave_qps()
4708 err = mlx4_cmd(dev, in_param, in rem_slave_qps()
4709 qp->local_qpn, 2, in rem_slave_qps()
4714 mlx4_dbg(dev, "rem_slave_qps: failed to move slave %d qpn %d to reset\n", in rem_slave_qps()
4715 slave, qp->local_qpn); in rem_slave_qps()
4716 atomic_dec(&qp->rcq->ref_count); in rem_slave_qps()
4717 atomic_dec(&qp->scq->ref_count); in rem_slave_qps()
4718 atomic_dec(&qp->mtt->ref_count); in rem_slave_qps()
4719 if (qp->srq) in rem_slave_qps()
4720 atomic_dec(&qp->srq->ref_count); in rem_slave_qps()
4728 spin_lock_irq(mlx4_tlock(dev)); in rem_slave_qps()
4730 spin_unlock_irq(mlx4_tlock(dev)); in rem_slave_qps()
4733 static void rem_slave_srqs(struct mlx4_dev *dev, int slave) in rem_slave_srqs() argument
4735 struct mlx4_priv *priv = mlx4_priv(dev); in rem_slave_srqs()
4736 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; in rem_slave_srqs()
4738 &tracker->slave_list[slave].res_list[RES_SRQ]; in rem_slave_srqs()
4746 err = move_all_busy(dev, slave, RES_SRQ); in rem_slave_srqs()
4748 mlx4_warn(dev, "rem_slave_srqs: Could not move all srqs to busy for slave %d\n", in rem_slave_srqs()
4749 slave); in rem_slave_srqs()
4751 spin_lock_irq(mlx4_tlock(dev)); in rem_slave_srqs()
4753 spin_unlock_irq(mlx4_tlock(dev)); in rem_slave_srqs()
4754 if (srq->com.owner == slave) { in rem_slave_srqs()
4755 srqn = srq->com.res_id; in rem_slave_srqs()
4756 state = srq->com.from_state; in rem_slave_srqs()
4760 __mlx4_srq_free_icm(dev, srqn); in rem_slave_srqs()
4761 spin_lock_irq(mlx4_tlock(dev)); in rem_slave_srqs()
4762 rb_erase(&srq->com.node, in rem_slave_srqs()
4763 &tracker->res_tree[RES_SRQ]); in rem_slave_srqs()
4764 list_del(&srq->com.list); in rem_slave_srqs()
4765 spin_unlock_irq(mlx4_tlock(dev)); in rem_slave_srqs()
4766 mlx4_release_resource(dev, slave, in rem_slave_srqs()
4773 in_param = slave; in rem_slave_srqs()
4774 err = mlx4_cmd(dev, in_param, srqn, 1, in rem_slave_srqs()
4779 mlx4_dbg(dev, "rem_slave_srqs: failed to move slave %d srq %d to SW ownership\n", in rem_slave_srqs()
4780 slave, srqn); in rem_slave_srqs()
4782 atomic_dec(&srq->mtt->ref_count); in rem_slave_srqs()
4783 if (srq->cq) in rem_slave_srqs()
4784 atomic_dec(&srq->cq->ref_count); in rem_slave_srqs()
4793 spin_lock_irq(mlx4_tlock(dev)); in rem_slave_srqs()
4795 spin_unlock_irq(mlx4_tlock(dev)); in rem_slave_srqs()
4798 static void rem_slave_cqs(struct mlx4_dev *dev, int slave) in rem_slave_cqs() argument
4800 struct mlx4_priv *priv = mlx4_priv(dev); in rem_slave_cqs()
4801 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; in rem_slave_cqs()
4803 &tracker->slave_list[slave].res_list[RES_CQ]; in rem_slave_cqs()
4811 err = move_all_busy(dev, slave, RES_CQ); in rem_slave_cqs()
4813 mlx4_warn(dev, "rem_slave_cqs: Could not move all cqs to busy for slave %d\n", in rem_slave_cqs()
4814 slave); in rem_slave_cqs()
4816 spin_lock_irq(mlx4_tlock(dev)); in rem_slave_cqs()
4818 spin_unlock_irq(mlx4_tlock(dev)); in rem_slave_cqs()
4819 if (cq->com.owner == slave && !atomic_read(&cq->ref_count)) { in rem_slave_cqs()
4820 cqn = cq->com.res_id; in rem_slave_cqs()
4821 state = cq->com.from_state; in rem_slave_cqs()
4825 __mlx4_cq_free_icm(dev, cqn); in rem_slave_cqs()
4826 spin_lock_irq(mlx4_tlock(dev)); in rem_slave_cqs()
4827 rb_erase(&cq->com.node, in rem_slave_cqs()
4828 &tracker->res_tree[RES_CQ]); in rem_slave_cqs()
4829 list_del(&cq->com.list); in rem_slave_cqs()
4830 spin_unlock_irq(mlx4_tlock(dev)); in rem_slave_cqs()
4831 mlx4_release_resource(dev, slave, in rem_slave_cqs()
4838 in_param = slave; in rem_slave_cqs()
4839 err = mlx4_cmd(dev, in_param, cqn, 1, in rem_slave_cqs()
4844 mlx4_dbg(dev, "rem_slave_cqs: failed to move slave %d cq %d to SW ownership\n", in rem_slave_cqs()
4845 slave, cqn); in rem_slave_cqs()
4846 atomic_dec(&cq->mtt->ref_count); in rem_slave_cqs()
4855 spin_lock_irq(mlx4_tlock(dev)); in rem_slave_cqs()
4857 spin_unlock_irq(mlx4_tlock(dev)); in rem_slave_cqs()
4860 static void rem_slave_mrs(struct mlx4_dev *dev, int slave) in rem_slave_mrs() argument
4862 struct mlx4_priv *priv = mlx4_priv(dev); in rem_slave_mrs()
4863 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; in rem_slave_mrs()
4865 &tracker->slave_list[slave].res_list[RES_MPT]; in rem_slave_mrs()
4873 err = move_all_busy(dev, slave, RES_MPT); in rem_slave_mrs()
4875 mlx4_warn(dev, "rem_slave_mrs: Could not move all mpts - too busy for slave %d\n", in rem_slave_mrs()
4876 slave); in rem_slave_mrs()
4878 spin_lock_irq(mlx4_tlock(dev)); in rem_slave_mrs()
4880 spin_unlock_irq(mlx4_tlock(dev)); in rem_slave_mrs()
4881 if (mpt->com.owner == slave) { in rem_slave_mrs()
4882 mptn = mpt->com.res_id; in rem_slave_mrs()
4883 state = mpt->com.from_state; in rem_slave_mrs()
4887 __mlx4_mpt_release(dev, mpt->key); in rem_slave_mrs()
4888 spin_lock_irq(mlx4_tlock(dev)); in rem_slave_mrs()
4889 rb_erase(&mpt->com.node, in rem_slave_mrs()
4890 &tracker->res_tree[RES_MPT]); in rem_slave_mrs()
4891 list_del(&mpt->com.list); in rem_slave_mrs()
4892 spin_unlock_irq(mlx4_tlock(dev)); in rem_slave_mrs()
4893 mlx4_release_resource(dev, slave, in rem_slave_mrs()
4900 __mlx4_mpt_free_icm(dev, mpt->key); in rem_slave_mrs()
4905 in_param = slave; in rem_slave_mrs()
4906 err = mlx4_cmd(dev, in_param, mptn, 0, in rem_slave_mrs()
4911 mlx4_dbg(dev, "rem_slave_mrs: failed to move slave %d mpt %d to SW ownership\n", in rem_slave_mrs()
4912 slave, mptn); in rem_slave_mrs()
4913 if (mpt->mtt) in rem_slave_mrs()
4914 atomic_dec(&mpt->mtt->ref_count); in rem_slave_mrs()
4922 spin_lock_irq(mlx4_tlock(dev)); in rem_slave_mrs()
4924 spin_unlock_irq(mlx4_tlock(dev)); in rem_slave_mrs()
4927 static void rem_slave_mtts(struct mlx4_dev *dev, int slave) in rem_slave_mtts() argument
4929 struct mlx4_priv *priv = mlx4_priv(dev); in rem_slave_mtts()
4931 &priv->mfunc.master.res_tracker; in rem_slave_mtts()
4933 &tracker->slave_list[slave].res_list[RES_MTT]; in rem_slave_mtts()
4940 err = move_all_busy(dev, slave, RES_MTT); in rem_slave_mtts()
4942 mlx4_warn(dev, "rem_slave_mtts: Could not move all mtts - too busy for slave %d\n", in rem_slave_mtts()
4943 slave); in rem_slave_mtts()
4945 spin_lock_irq(mlx4_tlock(dev)); in rem_slave_mtts()
4947 spin_unlock_irq(mlx4_tlock(dev)); in rem_slave_mtts()
4948 if (mtt->com.owner == slave) { in rem_slave_mtts()
4949 base = mtt->com.res_id; in rem_slave_mtts()
4950 state = mtt->com.from_state; in rem_slave_mtts()
4954 __mlx4_free_mtt_range(dev, base, in rem_slave_mtts()
4955 mtt->order); in rem_slave_mtts()
4956 spin_lock_irq(mlx4_tlock(dev)); in rem_slave_mtts()
4957 rb_erase(&mtt->com.node, in rem_slave_mtts()
4958 &tracker->res_tree[RES_MTT]); in rem_slave_mtts()
4959 list_del(&mtt->com.list); in rem_slave_mtts()
4960 spin_unlock_irq(mlx4_tlock(dev)); in rem_slave_mtts()
4961 mlx4_release_resource(dev, slave, RES_MTT, in rem_slave_mtts()
4962 1 << mtt->order, 0); in rem_slave_mtts()
4972 spin_lock_irq(mlx4_tlock(dev)); in rem_slave_mtts()
4974 spin_unlock_irq(mlx4_tlock(dev)); in rem_slave_mtts()
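The `1 << mtt->order` passed to mlx4_release_resource() above is the buddy-style count: an MTT range of order k spans 2^k entries, and the whole power-of-two block is returned to the slave's quota at once. A quick standalone check of that arithmetic:

#include <stdio.h>

int main(void)
{
	/* mtt->order is the log2 of the range size, so the count returned
	 * to the quota is 1 << order. */
	for (int order = 0; order <= 4; order++)
		printf("order %d -> %d MTT entries\n", order, 1 << order);
	return 0;
}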
4977 static int mlx4_do_mirror_rule(struct mlx4_dev *dev, struct res_fs_rule *fs_rule) in mlx4_do_mirror_rule() argument
4984 mailbox = mlx4_alloc_cmd_mailbox(dev); in mlx4_do_mirror_rule()
4988 if (!fs_rule->mirr_mbox) { in mlx4_do_mirror_rule()
4989 mlx4_err(dev, "rule mirroring mailbox is null\n"); in mlx4_do_mirror_rule()
4990 mlx4_free_cmd_mailbox(dev, mailbox); in mlx4_do_mirror_rule()
4991 return -EINVAL; in mlx4_do_mirror_rule()
4993 memcpy(mailbox->buf, fs_rule->mirr_mbox, fs_rule->mirr_mbox_size); in mlx4_do_mirror_rule()
4994 err = mlx4_cmd_imm(dev, mailbox->dma, &reg_id, fs_rule->mirr_mbox_size >> 2, 0, in mlx4_do_mirror_rule()
4997 mlx4_free_cmd_mailbox(dev, mailbox); in mlx4_do_mirror_rule()
5002 err = add_res_range(dev, fs_rule->com.owner, reg_id, 1, RES_FS_RULE, fs_rule->qpn); in mlx4_do_mirror_rule()
5006 err = get_res(dev, fs_rule->com.owner, reg_id, RES_FS_RULE, &mirr_rule); in mlx4_do_mirror_rule()
5010 fs_rule->mirr_rule_id = reg_id; in mlx4_do_mirror_rule()
5011 mirr_rule->mirr_rule_id = 0; in mlx4_do_mirror_rule()
5012 mirr_rule->mirr_mbox_size = 0; in mlx4_do_mirror_rule()
5013 mirr_rule->mirr_mbox = NULL; in mlx4_do_mirror_rule()
5014 put_res(dev, fs_rule->com.owner, reg_id, RES_FS_RULE); in mlx4_do_mirror_rule()
5018 rem_res_range(dev, fs_rule->com.owner, reg_id, 1, RES_FS_RULE, 0); in mlx4_do_mirror_rule()
5020 mlx4_cmd(dev, reg_id, 0, 0, MLX4_QP_FLOW_STEERING_DETACH, in mlx4_do_mirror_rule()
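A toy model of the bookkeeping mlx4_do_mirror_rule() leaves behind: the original rule records the mirror's reg_id, while the mirror's saved mailbox is deliberately zeroed so a later bond pass cannot mirror it again. The struct layout and the mirror() helper here are illustrative only, not the driver's types.

#include <stdio.h>
#include <stdlib.h>

struct fs_rule {
	unsigned long long reg_id, mirr_rule_id;
	void *mirr_mbox;		/* saved attach mailbox; NULL marks a mirror */
	size_t mirr_mbox_size;
};

static struct fs_rule *mirror(struct fs_rule *orig, unsigned long long new_id)
{
	struct fs_rule *m = calloc(1, sizeof(*m));

	m->reg_id = new_id;
	/* The mirror must not itself be re-mirrored, so its mailbox
	 * fields stay NULL/0 -- exactly what the driver enforces above. */
	m->mirr_mbox = NULL;
	m->mirr_mbox_size = 0;
	orig->mirr_rule_id = new_id;	/* original remembers its mirror */
	return m;
}

int main(void)
{
	struct fs_rule orig = { .reg_id = 1, .mirr_mbox = "mbox", .mirr_mbox_size = 64 };
	struct fs_rule *m = mirror(&orig, 2);

	printf("rule %llu mirrored as %llu (mirror re-mirrorable: %s)\n",
	       orig.reg_id, orig.mirr_rule_id, m->mirr_mbox ? "yes" : "no");
	free(m);
	return 0;
}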
5026 static int mlx4_mirror_fs_rules(struct mlx4_dev *dev, bool bond) in mlx4_mirror_fs_rules() argument
5028 struct mlx4_priv *priv = mlx4_priv(dev); in mlx4_mirror_fs_rules()
5030 &priv->mfunc.master.res_tracker; in mlx4_mirror_fs_rules()
5031 struct rb_root *root = &tracker->res_tree[RES_FS_RULE]; in mlx4_mirror_fs_rules()
5039 if ((bond && fs_rule->mirr_mbox_size) || in mlx4_mirror_fs_rules()
5040 (!bond && !fs_rule->mirr_mbox_size)) in mlx4_mirror_fs_rules()
5041 list_add_tail(&fs_rule->mirr_list, &mirr_list); in mlx4_mirror_fs_rules()
5046 err += mlx4_do_mirror_rule(dev, fs_rule); in mlx4_mirror_fs_rules()
5048 err += mlx4_undo_mirror_rule(dev, fs_rule); in mlx4_mirror_fs_rules()
5053 int mlx4_bond_fs_rules(struct mlx4_dev *dev) in mlx4_bond_fs_rules() argument
5055 return mlx4_mirror_fs_rules(dev, true); in mlx4_bond_fs_rules()
5058 int mlx4_unbond_fs_rules(struct mlx4_dev *dev) in mlx4_unbond_fs_rules() argument
5060 return mlx4_mirror_fs_rules(dev, false); in mlx4_unbond_fs_rules()
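The selection predicate in mlx4_mirror_fs_rules() is an XNOR: a rule is queued onto mirr_list iff `bond` agrees with "this rule has a saved attach mailbox", so bonding mirrors only originals and unbonding undoes only rules without a mailbox (the mirrors). A standalone truth-table check of the equivalent compact form:

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	for (int bond = 0; bond <= 1; bond++)
		for (size_t size = 0; size <= 1; size++) {
			bool pick = (bond && size) || (!bond && !size);
			bool same = ((bool)bond == (size != 0));

			printf("bond=%d mirr_mbox_size=%zu -> pick=%d (compact form: %d)\n",
			       bond, size, pick, same);
		}
	return 0;
}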
5063 static void rem_slave_fs_rule(struct mlx4_dev *dev, int slave) in rem_slave_fs_rule() argument
5065 struct mlx4_priv *priv = mlx4_priv(dev); in rem_slave_fs_rule()
5067 &priv->mfunc.master.res_tracker; in rem_slave_fs_rule()
5069 &tracker->slave_list[slave].res_list[RES_FS_RULE]; in rem_slave_fs_rule()
5076 err = move_all_busy(dev, slave, RES_FS_RULE); in rem_slave_fs_rule()
5078 mlx4_warn(dev, "rem_slave_fs_rule: Could not move all mtts to busy for slave %d\n", in rem_slave_fs_rule()
5079 slave); in rem_slave_fs_rule()
5081 spin_lock_irq(mlx4_tlock(dev)); in rem_slave_fs_rule()
5083 spin_unlock_irq(mlx4_tlock(dev)); in rem_slave_fs_rule()
5084 if (fs_rule->com.owner == slave) { in rem_slave_fs_rule()
5085 base = fs_rule->com.res_id; in rem_slave_fs_rule()
5086 state = fs_rule->com.from_state; in rem_slave_fs_rule()
5091 err = mlx4_cmd(dev, base, 0, 0, in rem_slave_fs_rule()
5096 spin_lock_irq(mlx4_tlock(dev)); in rem_slave_fs_rule()
5097 rb_erase(&fs_rule->com.node, in rem_slave_fs_rule()
5098 &tracker->res_tree[RES_FS_RULE]); in rem_slave_fs_rule()
5099 list_del(&fs_rule->com.list); in rem_slave_fs_rule()
5100 spin_unlock_irq(mlx4_tlock(dev)); in rem_slave_fs_rule()
5101 kfree(fs_rule->mirr_mbox); in rem_slave_fs_rule()
5111 spin_lock_irq(mlx4_tlock(dev)); in rem_slave_fs_rule()
5113 spin_unlock_irq(mlx4_tlock(dev)); in rem_slave_fs_rule()
5116 static void rem_slave_eqs(struct mlx4_dev *dev, int slave) in rem_slave_eqs() argument
5118 struct mlx4_priv *priv = mlx4_priv(dev); in rem_slave_eqs()
5119 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; in rem_slave_eqs()
5121 &tracker->slave_list[slave].res_list[RES_EQ]; in rem_slave_eqs()
5128 err = move_all_busy(dev, slave, RES_EQ); in rem_slave_eqs()
5130 mlx4_warn(dev, "rem_slave_eqs: Could not move all eqs - too busy for slave %d\n", in rem_slave_eqs()
5131 slave); in rem_slave_eqs()
5133 spin_lock_irq(mlx4_tlock(dev)); in rem_slave_eqs()
5135 spin_unlock_irq(mlx4_tlock(dev)); in rem_slave_eqs()
5136 if (eq->com.owner == slave) { in rem_slave_eqs()
5137 eqn = eq->com.res_id; in rem_slave_eqs()
5138 state = eq->com.from_state; in rem_slave_eqs()
5142 spin_lock_irq(mlx4_tlock(dev)); in rem_slave_eqs()
5143 rb_erase(&eq->com.node, in rem_slave_eqs()
5144 &tracker->res_tree[RES_EQ]); in rem_slave_eqs()
5145 list_del(&eq->com.list); in rem_slave_eqs()
5146 spin_unlock_irq(mlx4_tlock(dev)); in rem_slave_eqs()
5152 err = mlx4_cmd(dev, slave, eqn & 0x3ff, in rem_slave_eqs()
5157 mlx4_dbg(dev, "rem_slave_eqs: failed to move slave %d eqs %d to SW ownership\n", in rem_slave_eqs()
5158 slave, eqn & 0x3ff); in rem_slave_eqs()
5159 atomic_dec(&eq->mtt->ref_count); in rem_slave_eqs()
5168 spin_lock_irq(mlx4_tlock(dev)); in rem_slave_eqs()
5170 spin_unlock_irq(mlx4_tlock(dev)); in rem_slave_eqs()
5173 static void rem_slave_counters(struct mlx4_dev *dev, int slave) in rem_slave_counters() argument
5175 struct mlx4_priv *priv = mlx4_priv(dev); in rem_slave_counters()
5176 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; in rem_slave_counters()
5178 &tracker->slave_list[slave].res_list[RES_COUNTER]; in rem_slave_counters()
5185 err = move_all_busy(dev, slave, RES_COUNTER); in rem_slave_counters()
5187 mlx4_warn(dev, "rem_slave_counters: Could not move all counters - too busy for slave %d\n", in rem_slave_counters()
5188 slave); in rem_slave_counters()
5190 counters_arr = kmalloc_array(dev->caps.max_counters, in rem_slave_counters()
5198 spin_lock_irq(mlx4_tlock(dev)); in rem_slave_counters()
5200 if (counter->com.owner == slave) { in rem_slave_counters()
5201 counters_arr[i++] = counter->com.res_id; in rem_slave_counters()
5202 rb_erase(&counter->com.node, in rem_slave_counters()
5203 &tracker->res_tree[RES_COUNTER]); in rem_slave_counters()
5204 list_del(&counter->com.list); in rem_slave_counters()
5208 spin_unlock_irq(mlx4_tlock(dev)); in rem_slave_counters()
5211 __mlx4_counter_free(dev, counters_arr[j++]); in rem_slave_counters()
5212 mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0); in rem_slave_counters()
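rem_slave_counters() is the one teardown here that snapshots IDs into a heap array instead of freeing in place: the unlink and ID collection happen under the tracker spinlock, but the actual frees run only after the lock is dropped, presumably because __mlx4_counter_free() is not safe to call in that locked region. A user-space sketch of the two-phase shape (the IDs and array size are arbitrary examples):

#include <stdio.h>

static int owned[] = { 3, 5, 6 };	/* counter IDs this slave owns */

int main(void)
{
	int snapshot[8];
	int n = 0;

	/* Phase 1: "under the lock" -- only unlink and record the IDs. */
	for (unsigned i = 0; i < sizeof(owned) / sizeof(owned[0]); i++)
		snapshot[n++] = owned[i];

	/* Phase 2: lock dropped -- the slower per-ID free and quota release. */
	for (int j = 0; j < n; j++)
		printf("free counter %d and release one RES_COUNTER\n",
		       snapshot[j]);
	return 0;
}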
5219 static void rem_slave_xrcdns(struct mlx4_dev *dev, int slave) in rem_slave_xrcdns() argument
5221 struct mlx4_priv *priv = mlx4_priv(dev); in rem_slave_xrcdns()
5222 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; in rem_slave_xrcdns()
5224 &tracker->slave_list[slave].res_list[RES_XRCD]; in rem_slave_xrcdns()
5230 err = move_all_busy(dev, slave, RES_XRCD); in rem_slave_xrcdns()
5232 mlx4_warn(dev, "rem_slave_xrcdns: Could not move all xrcdns - too busy for slave %d\n", in rem_slave_xrcdns()
5233 slave); in rem_slave_xrcdns()
5235 spin_lock_irq(mlx4_tlock(dev)); in rem_slave_xrcdns()
5237 if (xrcd->com.owner == slave) { in rem_slave_xrcdns()
5238 xrcdn = xrcd->com.res_id; in rem_slave_xrcdns()
5239 rb_erase(&xrcd->com.node, &tracker->res_tree[RES_XRCD]); in rem_slave_xrcdns()
5240 list_del(&xrcd->com.list); in rem_slave_xrcdns()
5242 __mlx4_xrcd_free(dev, xrcdn); in rem_slave_xrcdns()
5245 spin_unlock_irq(mlx4_tlock(dev)); in rem_slave_xrcdns()
5248 void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave) in mlx4_delete_all_resources_for_slave() argument
5250 struct mlx4_priv *priv = mlx4_priv(dev); in mlx4_delete_all_resources_for_slave()
5251 mlx4_reset_roce_gids(dev, slave); in mlx4_delete_all_resources_for_slave()
5252 mutex_lock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex); in mlx4_delete_all_resources_for_slave()
5253 rem_slave_vlans(dev, slave); in mlx4_delete_all_resources_for_slave()
5254 rem_slave_macs(dev, slave); in mlx4_delete_all_resources_for_slave()
5255 rem_slave_fs_rule(dev, slave); in mlx4_delete_all_resources_for_slave()
5256 rem_slave_qps(dev, slave); in mlx4_delete_all_resources_for_slave()
5257 rem_slave_srqs(dev, slave); in mlx4_delete_all_resources_for_slave()
5258 rem_slave_cqs(dev, slave); in mlx4_delete_all_resources_for_slave()
5259 rem_slave_mrs(dev, slave); in mlx4_delete_all_resources_for_slave()
5260 rem_slave_eqs(dev, slave); in mlx4_delete_all_resources_for_slave()
5261 rem_slave_mtts(dev, slave); in mlx4_delete_all_resources_for_slave()
5262 rem_slave_counters(dev, slave); in mlx4_delete_all_resources_for_slave()
5263 rem_slave_xrcdns(dev, slave); in mlx4_delete_all_resources_for_slave()
5264 mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex); in mlx4_delete_all_resources_for_slave()
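The call order in mlx4_delete_all_resources_for_slave() is consistent with the reference counts the earlier functions decrement: QPs drop references on their CQs, SRQ, and MTT; SRQs on their CQ and MTT; and a CQ is only freed once its ref_count reaches zero. Summarised as a comment block, derived solely from the atomic_dec() calls shown above:

/* Reference-count dependencies (an arrow means "holds a reference on"):
 *
 *   QP  -> RCQ, SCQ, MTT, (optional) SRQ
 *   SRQ -> CQ, MTT
 *   CQ  -> MTT		(freed only once its ref_count hits zero)
 *   MR  -> MTT
 *   EQ  -> MTT
 *
 * Hence QPs are torn down before SRQs, SRQs before CQs, and MTTs
 * near the end of the sequence.
 */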
5270 ctx->qp_mask |= cpu_to_be64(1ULL << MLX4_UPD_QP_MASK_QOS_VPP); in update_qos_vpp()
5271 ctx->qp_context.qos_vport = work->qos_vport; in update_qos_vpp()
5280 struct mlx4_dev *dev = &work->priv->dev; in mlx4_vf_immed_vlan_work_handler() local
5282 &work->priv->mfunc.master.res_tracker; in mlx4_vf_immed_vlan_work_handler()
5284 &tracker->slave_list[work->slave].res_list[RES_QP]; in mlx4_vf_immed_vlan_work_handler()
5308 if (mlx4_is_slave(dev)) { in mlx4_vf_immed_vlan_work_handler()
5309 mlx4_warn(dev, "Trying to update-qp in slave %d\n", in mlx4_vf_immed_vlan_work_handler()
5310 work->slave); in mlx4_vf_immed_vlan_work_handler()
5314 mailbox = mlx4_alloc_cmd_mailbox(dev); in mlx4_vf_immed_vlan_work_handler()
5317 if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_LINK_DISABLE) /* block all */ in mlx4_vf_immed_vlan_work_handler()
5324 else if (!work->vlan_id) in mlx4_vf_immed_vlan_work_handler()
5327 else if (work->vlan_proto == htons(ETH_P_8021AD)) in mlx4_vf_immed_vlan_work_handler()
5337 upd_context = mailbox->buf; in mlx4_vf_immed_vlan_work_handler()
5338 upd_context->qp_mask = cpu_to_be64(1ULL << MLX4_UPD_QP_MASK_VSD); in mlx4_vf_immed_vlan_work_handler()
5340 spin_lock_irq(mlx4_tlock(dev)); in mlx4_vf_immed_vlan_work_handler()
5342 spin_unlock_irq(mlx4_tlock(dev)); in mlx4_vf_immed_vlan_work_handler()
5343 if (qp->com.owner == work->slave) { in mlx4_vf_immed_vlan_work_handler()
5344 if (qp->com.from_state != RES_QP_HW || in mlx4_vf_immed_vlan_work_handler()
5345 !qp->sched_queue || /* no INIT2RTR trans yet */ in mlx4_vf_immed_vlan_work_handler()
5346 mlx4_is_qp_reserved(dev, qp->local_qpn) || in mlx4_vf_immed_vlan_work_handler()
5347 qp->qpc_flags & (1 << MLX4_RSS_QPC_FLAG_OFFSET)) { in mlx4_vf_immed_vlan_work_handler()
5348 spin_lock_irq(mlx4_tlock(dev)); in mlx4_vf_immed_vlan_work_handler()
5351 port = (qp->sched_queue >> 6 & 1) + 1; in mlx4_vf_immed_vlan_work_handler()
5352 if (port != work->port) { in mlx4_vf_immed_vlan_work_handler()
5353 spin_lock_irq(mlx4_tlock(dev)); in mlx4_vf_immed_vlan_work_handler()
5356 if (MLX4_QP_ST_RC == ((qp->qpc_flags >> 16) & 0xff)) in mlx4_vf_immed_vlan_work_handler()
5357 upd_context->primary_addr_path_mask = cpu_to_be64(qp_path_mask); in mlx4_vf_immed_vlan_work_handler()
5359 upd_context->primary_addr_path_mask = in mlx4_vf_immed_vlan_work_handler()
5361 if (work->vlan_id == MLX4_VGT) { in mlx4_vf_immed_vlan_work_handler()
5362 upd_context->qp_context.param3 = qp->param3; in mlx4_vf_immed_vlan_work_handler()
5363 upd_context->qp_context.pri_path.vlan_control = qp->vlan_control; in mlx4_vf_immed_vlan_work_handler()
5364 upd_context->qp_context.pri_path.fvl_rx = qp->fvl_rx; in mlx4_vf_immed_vlan_work_handler()
5365 upd_context->qp_context.pri_path.vlan_index = qp->vlan_index; in mlx4_vf_immed_vlan_work_handler()
5366 upd_context->qp_context.pri_path.fl = qp->pri_path_fl; in mlx4_vf_immed_vlan_work_handler()
5367 upd_context->qp_context.pri_path.feup = qp->feup; in mlx4_vf_immed_vlan_work_handler()
5368 upd_context->qp_context.pri_path.sched_queue = in mlx4_vf_immed_vlan_work_handler()
5369 qp->sched_queue; in mlx4_vf_immed_vlan_work_handler()
5371 upd_context->qp_context.param3 = qp->param3 & ~cpu_to_be32(MLX4_STRIP_VLAN); in mlx4_vf_immed_vlan_work_handler()
5372 upd_context->qp_context.pri_path.vlan_control = vlan_control; in mlx4_vf_immed_vlan_work_handler()
5373 upd_context->qp_context.pri_path.vlan_index = work->vlan_ix; in mlx4_vf_immed_vlan_work_handler()
5374 upd_context->qp_context.pri_path.fvl_rx = in mlx4_vf_immed_vlan_work_handler()
5375 qp->fvl_rx | MLX4_FVL_RX_FORCE_ETH_VLAN; in mlx4_vf_immed_vlan_work_handler()
5376 upd_context->qp_context.pri_path.fl = in mlx4_vf_immed_vlan_work_handler()
5377 qp->pri_path_fl | MLX4_FL_ETH_HIDE_CQE_VLAN; in mlx4_vf_immed_vlan_work_handler()
5378 if (work->vlan_proto == htons(ETH_P_8021AD)) in mlx4_vf_immed_vlan_work_handler()
5379 upd_context->qp_context.pri_path.fl |= MLX4_FL_SV; in mlx4_vf_immed_vlan_work_handler()
5381 upd_context->qp_context.pri_path.fl |= MLX4_FL_CV; in mlx4_vf_immed_vlan_work_handler()
5382 upd_context->qp_context.pri_path.feup = in mlx4_vf_immed_vlan_work_handler()
5383 qp->feup | MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN; in mlx4_vf_immed_vlan_work_handler()
5384 upd_context->qp_context.pri_path.sched_queue = in mlx4_vf_immed_vlan_work_handler()
5385 qp->sched_queue & 0xC7; in mlx4_vf_immed_vlan_work_handler()
5386 upd_context->qp_context.pri_path.sched_queue |= in mlx4_vf_immed_vlan_work_handler()
5387 ((work->qos & 0x7) << 3); in mlx4_vf_immed_vlan_work_handler()
5389 if (dev->caps.flags2 & in mlx4_vf_immed_vlan_work_handler()
5394 err = mlx4_cmd(dev, mailbox->dma, in mlx4_vf_immed_vlan_work_handler()
5395 qp->local_qpn & 0xffffff, in mlx4_vf_immed_vlan_work_handler()
5399 mlx4_info(dev, "UPDATE_QP failed for slave %d, port %d, qpn %d (%d)\n", in mlx4_vf_immed_vlan_work_handler()
5400 work->slave, port, qp->local_qpn, err); in mlx4_vf_immed_vlan_work_handler()
5404 spin_lock_irq(mlx4_tlock(dev)); in mlx4_vf_immed_vlan_work_handler()
5406 spin_unlock_irq(mlx4_tlock(dev)); in mlx4_vf_immed_vlan_work_handler()
5407 mlx4_free_cmd_mailbox(dev, mailbox); in mlx4_vf_immed_vlan_work_handler()
5410 mlx4_err(dev, "%d UPDATE_QP failures for slave %d, port %d\n", in mlx4_vf_immed_vlan_work_handler()
5411 errors, work->slave, work->port); in mlx4_vf_immed_vlan_work_handler()
5416 if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_VLAN && !errors && in mlx4_vf_immed_vlan_work_handler()
5417 NO_INDX != work->orig_vlan_ix) in mlx4_vf_immed_vlan_work_handler()
5418 __mlx4_unregister_vlan(&work->priv->dev, work->port, in mlx4_vf_immed_vlan_work_handler()
5419 work->orig_vlan_id); in mlx4_vf_immed_vlan_work_handler()
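The handler above decodes the port from bit 6 of sched_queue and re-packs the QoS priority into bits 3-5 (0xC7 is the mask that clears that field before the new priority is ORed in). A standalone demonstration of the same arithmetic; the set_prio() helper and the sample byte are illustrative:

#include <stdio.h>

/* bit 6 selects the port (0 -> port 1, 1 -> port 2); bits 3-5 carry
 * the QoS priority. */
static unsigned char set_prio(unsigned char sched_queue, unsigned qos)
{
	return (sched_queue & 0xC7) | ((qos & 0x7) << 3);
}

int main(void)
{
	unsigned char sq = 0x4A;			/* example value */
	int port = ((sq >> 6) & 1) + 1;

	printf("port %d, prio %u -> new sched_queue 0x%02X\n",
	       port, 5u, set_prio(sq, 5));
	return 0;
}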