Lines matching +full:slave +full:- +full:if: code-search results over drivers/infiniband/hw/mlx4/cm.c (only the matching lines of each function are shown)
/* License header */
 * - Redistributions of source code must retain the above
 * - Redistributions in binary form must reproduce the above
	int slave;	/* struct member */
/* set_local_comm_id() */
	if (mad->mad_hdr.attr_id == CM_SIDR_REQ_ATTR_ID) {
		msg->request_id = cpu_to_be32(cm_id);
	} else if (mad->mad_hdr.attr_id == CM_SIDR_REP_ATTR_ID) {
		msg->local_comm_id = cpu_to_be32(cm_id);
/* get_local_comm_id() */
	if (mad->mad_hdr.attr_id == CM_SIDR_REQ_ATTR_ID) {
		return be32_to_cpu(msg->request_id);
	} else if (mad->mad_hdr.attr_id == CM_SIDR_REP_ATTR_ID) {
		return -1;
	return be32_to_cpu(msg->local_comm_id);
/* set_remote_comm_id() */
	if (mad->mad_hdr.attr_id == CM_SIDR_REP_ATTR_ID) {
		msg->request_id = cpu_to_be32(cm_id);
	} else if (mad->mad_hdr.attr_id == CM_SIDR_REQ_ATTR_ID) {
		msg->remote_comm_id = cpu_to_be32(cm_id);
/* get_remote_comm_id() */
	if (mad->mad_hdr.attr_id == CM_SIDR_REP_ATTR_ID) {
		return be32_to_cpu(msg->request_id);
	} else if (mad->mad_hdr.attr_id == CM_SIDR_REQ_ATTR_ID) {
		return -1;
	return be32_to_cpu(msg->remote_comm_id);
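The four helpers above differ only in which big-endian MAD field they touch. A minimal userspace sketch of the byte-order handling they rely on, with htonl()/ntohl() standing in for the kernel's cpu_to_be32()/be32_to_cpu(); the struct and names here are illustrative, not the kernel's:

	#include <arpa/inet.h>	/* htonl()/ntohl() */
	#include <stdint.h>
	#include <stdio.h>

	struct demo_msg {
		uint32_t local_comm_id;	/* big-endian on the wire, like the CM MAD fields */
	};

	int main(void)
	{
		struct demo_msg msg;
		uint32_t cm_id = 0x12345678;

		msg.local_comm_id = htonl(cm_id);		/* "set": host to wire order */
		printf("0x%x\n", ntohl(msg.local_comm_id));	/* "get": wire to host order */
		return 0;
	}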
/* gid_from_req_msg() */
	return msg->primary_path_sgid;
/* id_map_find_by_sl_id() */
	struct rb_root *sl_id_map = &to_mdev(ibdev)->sriov.sl_id_map;
	struct rb_node *node = sl_id_map->rb_node;
		if (id_map_entry->sl_cm_id > sl_cm_id)
			node = node->rb_left;
		else if (id_map_entry->sl_cm_id < sl_cm_id)
			node = node->rb_right;
		else if (id_map_entry->slave_id > slave_id)
			node = node->rb_left;
		else if (id_map_entry->slave_id < slave_id)
			node = node->rb_right;
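id_map_find_by_sl_id() walks the red-black tree with a two-level key: sl_cm_id first, slave_id as tie-breaker. A self-contained sketch of the same walk over a plain (unbalanced) binary search tree; all names here are illustrative:

	#include <stddef.h>
	#include <stdint.h>

	struct demo_ent {
		uint32_t sl_cm_id;
		int slave_id;
		struct demo_ent *left, *right;
	};

	static struct demo_ent *demo_find(struct demo_ent *node, int slave_id,
					  uint32_t sl_cm_id)
	{
		while (node) {
			if (node->sl_cm_id > sl_cm_id)
				node = node->left;
			else if (node->sl_cm_id < sl_cm_id)
				node = node->right;
			else if (node->slave_id > slave_id)	/* tie-break on slave_id */
				node = node->left;
			else if (node->slave_id < slave_id)
				node = node->right;
			else
				return node;			/* both keys match */
		}
		return NULL;
	}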
/* id_map_ent_timeout() */
	struct mlx4_ib_dev *dev = ent->dev;
	struct mlx4_ib_sriov *sriov = &dev->sriov;
	struct rb_root *sl_id_map = &sriov->sl_id_map;
	spin_lock(&sriov->id_map_lock);
	if (!xa_erase(&sriov->pv_id_table, ent->pv_cm_id))
	found_ent = id_map_find_by_sl_id(&dev->ib_dev, ent->slave_id, ent->sl_cm_id);
	if (found_ent && found_ent == ent)
		rb_erase(&found_ent->node, sl_id_map);
	list_del(&ent->list);
	spin_unlock(&sriov->id_map_lock);
/* sl_id_map_add() */
	struct rb_root *sl_id_map = &to_mdev(ibdev)->sriov.sl_id_map;
	struct rb_node **link = &sl_id_map->rb_node, *parent = NULL;
	int slave_id = new->slave_id;
	int sl_cm_id = new->sl_cm_id;
	if (ent) {
		rb_replace_node(&ent->node, &new->node, sl_id_map);
		if (ent->sl_cm_id > sl_cm_id || (ent->sl_cm_id == sl_cm_id && ent->slave_id > slave_id))
			link = &(*link)->rb_left;
			link = &(*link)->rb_right;
	rb_link_node(&new->node, parent, link);
	rb_insert_color(&new->node, sl_id_map);
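sl_id_map_add() uses the kernel's usual rb-tree insertion idiom: descend while remembering the address of the child link to fill, then splice the new node in with rb_link_node() and rebalance with rb_insert_color(). A sketch of the same pointer-to-link descent on a plain BST (rebalancing omitted); the node type repeats the illustrative one from the lookup sketch above:

	#include <stddef.h>
	#include <stdint.h>

	struct demo_ent {
		uint32_t sl_cm_id;
		int slave_id;
		struct demo_ent *left, *right;
	};

	static void demo_insert(struct demo_ent **root, struct demo_ent *new)
	{
		struct demo_ent **link = root;	/* address of the child slot to fill */

		while (*link) {
			struct demo_ent *ent = *link;

			/* Same ordering as sl_id_map_add(): sl_cm_id, then slave_id. */
			if (ent->sl_cm_id > new->sl_cm_id ||
			    (ent->sl_cm_id == new->sl_cm_id &&
			     ent->slave_id > new->slave_id))
				link = &ent->left;
			else
				link = &ent->right;
		}
		new->left = new->right = NULL;
		*link = new;	/* rb_link_node() equivalent, minus recoloring */
	}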
/* id_map_alloc() */
	struct mlx4_ib_sriov *sriov = &to_mdev(ibdev)->sriov;
	if (!ent)
		return ERR_PTR(-ENOMEM);
	ent->sl_cm_id = sl_cm_id;
	ent->slave_id = slave_id;
	ent->scheduled_delete = 0;
	ent->dev = to_mdev(ibdev);
	INIT_DELAYED_WORK(&ent->timeout, id_map_ent_timeout);
	ret = xa_alloc_cyclic(&sriov->pv_id_table, &ent->pv_cm_id, ent,
			      xa_limit_32b, &sriov->pv_id_next, GFP_KERNEL);
	if (ret >= 0) {
		spin_lock(&sriov->id_map_lock);
		list_add_tail(&ent->list, &sriov->cm_list);
		spin_unlock(&sriov->id_map_lock);
	return ERR_PTR(-ENOMEM);
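xa_alloc_cyclic() above hands out the next free pv_cm_id after the previously allocated one, wrapping at the limit, so recently freed IDs are not reused immediately. A toy sketch of that allocation policy with a small bitmap in place of the XArray; all names are illustrative, and the real call reports failure as a negative errno:

	#include <stdbool.h>

	#define DEMO_LIMIT 8

	static bool demo_used[DEMO_LIMIT];
	static unsigned int demo_next;		/* like sriov->pv_id_next */

	static int demo_alloc_cyclic(unsigned int *id)
	{
		for (unsigned int i = 0; i < DEMO_LIMIT; i++) {
			unsigned int cand = (demo_next + i) % DEMO_LIMIT;

			if (!demo_used[cand]) {
				demo_used[cand] = true;
				*id = cand;
				demo_next = cand + 1;	/* continue after this ID */
				return 0;
			}
		}
		return -1;	/* all IDs in use */
	}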
/* id_map_get() */
	struct mlx4_ib_sriov *sriov = &to_mdev(ibdev)->sriov;
	spin_lock(&sriov->id_map_lock);
	if (*pv_cm_id == -1) {
		if (ent)
			*pv_cm_id = (int) ent->pv_cm_id;
		ent = xa_load(&sriov->pv_id_table, *pv_cm_id);
	spin_unlock(&sriov->id_map_lock);
/* schedule_delayed() */
	struct mlx4_ib_sriov *sriov = &to_mdev(ibdev)->sriov;
	spin_lock(&sriov->id_map_lock);
	spin_lock_irqsave(&sriov->going_down_lock, flags);
	if (!sriov->is_going_down && !id->scheduled_delete) {
		id->scheduled_delete = 1;
		queue_delayed_work(cm_wq, &id->timeout, CM_CLEANUP_CACHE_TIMEOUT);
	} else if (id->scheduled_delete) {
		/* Adjust timeout if already scheduled */
		mod_delayed_work(cm_wq, &id->timeout, CM_CLEANUP_CACHE_TIMEOUT);
	spin_unlock_irqrestore(&sriov->going_down_lock, flags);
	spin_unlock(&sriov->id_map_lock);
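schedule_delayed() arms a cleanup timer on first use and, because queue_delayed_work() is a no-op for work that is already pending, pushes the deadline out with mod_delayed_work() on later calls, so an entry expires one timeout after its last touch. A userspace sketch of that pattern with plain wall-clock deadlines; names and the timeout value are illustrative:

	#include <stdbool.h>
	#include <time.h>

	#define DEMO_TIMEOUT_SECS 30	/* stand-in for CM_CLEANUP_CACHE_TIMEOUT */

	struct demo_id {
		bool scheduled_delete;
		time_t deadline;
	};

	static void demo_schedule_delayed(struct demo_id *id, bool going_down)
	{
		time_t now = time(NULL);

		if (!going_down && !id->scheduled_delete) {
			/* First arming: queue_delayed_work() equivalent. */
			id->scheduled_delete = true;
			id->deadline = now + DEMO_TIMEOUT_SECS;
		} else if (id->scheduled_delete) {
			/* Already armed: extend, as mod_delayed_work() does. */
			id->deadline = now + DEMO_TIMEOUT_SECS;
		}
	}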
#define REJ_REASON(m)	be16_to_cpu(((struct cm_generic_msg *)(m))->rej_reason)
/* mlx4_ib_multiplex_cm_handler() */
	int pv_cm_id = -1;
	if (mad->mad_hdr.attr_id == CM_REQ_ATTR_ID ||
	    mad->mad_hdr.attr_id == CM_REP_ATTR_ID ||
	    mad->mad_hdr.attr_id == CM_MRA_ATTR_ID ||
	    mad->mad_hdr.attr_id == CM_SIDR_REQ_ATTR_ID ||
	    (mad->mad_hdr.attr_id == CM_REJ_ATTR_ID && REJ_REASON(mad) == IB_CM_REJ_TIMEOUT)) {
		if (id)
		if (IS_ERR(id)) {
			mlx4_ib_warn(ibdev, "%s: id{slave: %d, sl_cm_id: 0x%x} Failed to id_map_alloc\n",
	} else if (mad->mad_hdr.attr_id == CM_REJ_ATTR_ID ||
		   mad->mad_hdr.attr_id == CM_SIDR_REP_ATTR_ID) {
	if (!id) {
		pr_debug("id{slave: %d, sl_cm_id: 0x%x} is NULL! attr_id: 0x%x\n",
			 slave_id, sl_cm_id, be16_to_cpu(mad->mad_hdr.attr_id));
		return -EINVAL;
	set_local_comm_id(mad, id->pv_cm_id);
	if (mad->mad_hdr.attr_id == CM_DREQ_ATTR_ID)
/* rej_tmout_timeout() */
	deleted = xa_cmpxchg(item->xa_rej_tmout, item->rem_pv_cm_id, item, NULL, 0);
	if (deleted != item)
static int alloc_rej_tmout(struct mlx4_ib_sriov *sriov, u32 rem_pv_cm_id, int slave)
	xa_lock(&sriov->xa_rej_tmout);
	item = xa_load(&sriov->xa_rej_tmout, (unsigned long)rem_pv_cm_id);
	if (item) {
		if (xa_err(item))
		/* If a retry, adjust delayed work */
		mod_delayed_work(cm_wq, &item->timeout, CM_CLEANUP_CACHE_TIMEOUT);
	xa_unlock(&sriov->xa_rej_tmout);
	if (!item)
		return -ENOMEM;
	INIT_DELAYED_WORK(&item->timeout, rej_tmout_timeout);
	item->slave = slave;
	item->rem_pv_cm_id = rem_pv_cm_id;
	item->xa_rej_tmout = &sriov->xa_rej_tmout;
	old = xa_cmpxchg(&sriov->xa_rej_tmout, (unsigned long)rem_pv_cm_id, NULL, item, GFP_KERNEL);
	if (old) {
			"Non-null old entry (%p) or error (%d) when inserting\n",
		queue_delayed_work(cm_wq, &item->timeout, CM_CLEANUP_CACHE_TIMEOUT);
	xa_unlock(&sriov->xa_rej_tmout);
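alloc_rej_tmout() publishes the new entry with xa_cmpxchg(..., NULL, item, ...): the store succeeds only if the slot is still empty, so a concurrent inserter loses cleanly and frees its copy. A sketch of that compare-and-swap insert with a single C11 atomic pointer slot standing in for the XArray; all names are illustrative:

	#include <stdatomic.h>
	#include <stdlib.h>

	struct demo_tmout {
		int slave;
		unsigned int rem_pv_cm_id;
	};

	static _Atomic(struct demo_tmout *) demo_slot;	/* one XArray slot */

	static int demo_publish(int slave, unsigned int rem_pv_cm_id)
	{
		struct demo_tmout *item = malloc(sizeof(*item));
		struct demo_tmout *expected = NULL;

		if (!item)
			return -1;
		item->slave = slave;
		item->rem_pv_cm_id = rem_pv_cm_id;

		/* xa_cmpxchg(..., NULL, item, ...): install only into an empty slot. */
		if (!atomic_compare_exchange_strong(&demo_slot, &expected, item)) {
			free(item);	/* lost the race; the old entry remains */
			return -1;
		}
		return 0;
	}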
/* lookup_rej_tmout_slave() */
	int slave;
	xa_lock(&sriov->xa_rej_tmout);
	item = xa_load(&sriov->xa_rej_tmout, (unsigned long)rem_pv_cm_id);
	if (!item || xa_err(item)) {
		pr_debug("Could not find slave. rem_pv_cm_id 0x%x error: %d\n",
		slave = !item ? -ENOENT : xa_err(item);
		slave = item->slave;
	xa_unlock(&sriov->xa_rej_tmout);
	return slave;
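The xa_err() checks above work because the XArray can hand back an error encoded in the looked-up value itself, the same trick as the ERR_PTR()/IS_ERR() returns in id_map_alloc(). A minimal sketch of pointer-encoded errors, assuming the usual kernel convention that the top 4095 values of the address space are never valid pointers; these demo_* helpers are illustrative, not the kernel's:

	#include <stdint.h>

	#define DEMO_MAX_ERRNO 4095

	static inline void *demo_err_ptr(long err)	/* ERR_PTR() analogue */
	{
		return (void *)(intptr_t)err;	/* e.g. -ENOMEM lands at the top of the address space */
	}

	static inline long demo_ptr_err(const void *ptr)	/* PTR_ERR() analogue */
	{
		return (long)(intptr_t)ptr;
	}

	static inline int demo_is_err(const void *ptr)	/* IS_ERR()-style test */
	{
		return (uintptr_t)ptr >= (uintptr_t)-DEMO_MAX_ERRNO;
	}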
int mlx4_ib_demux_cm_handler(struct ib_device *ibdev, int port, int *slave,
	struct mlx4_ib_sriov *sriov = &to_mdev(ibdev)->sriov;
	if (mad->mad_hdr.attr_id == CM_REQ_ATTR_ID ||
	    mad->mad_hdr.attr_id == CM_SIDR_REQ_ATTR_ID) {
		if (!slave)
		*slave = mlx4_ib_find_real_gid(ibdev, port, gid.global.interface_id);
		if (*slave < 0) {
			return -ENOENT;
		sts = alloc_rej_tmout(sriov, rem_pv_cm_id, *slave);
		if (sts)
			/* Even if this fails, we pass on the REQ to the slave */
			pr_debug("Could not allocate rej_tmout entry. rem_pv_cm_id 0x%x slave %d status %d\n",
				 rem_pv_cm_id, *slave, sts);
	id = id_map_get(ibdev, (int *)&pv_cm_id, -1, -1);
	if (!id) {
		if (mad->mad_hdr.attr_id == CM_REJ_ATTR_ID &&
		    REJ_REASON(mad) == IB_CM_REJ_TIMEOUT && slave) {
			*slave = lookup_rej_tmout_slave(sriov, rem_pv_cm_id);
			return (*slave < 0) ? *slave : 0;
			 pv_cm_id, be16_to_cpu(mad->mad_hdr.attr_id));
		return -ENOENT;
	if (slave)
		*slave = id->slave_id;
	set_remote_comm_id(mad, id->sl_cm_id);
	if (mad->mad_hdr.attr_id == CM_DREQ_ATTR_ID ||
	    mad->mad_hdr.attr_id == CM_REJ_ATTR_ID)
/* mlx4_ib_cm_paravirt_init() */
	spin_lock_init(&dev->sriov.id_map_lock);
	INIT_LIST_HEAD(&dev->sriov.cm_list);
	dev->sriov.sl_id_map = RB_ROOT;
	xa_init_flags(&dev->sriov.pv_id_table, XA_FLAGS_ALLOC);
	xa_init(&dev->sriov.xa_rej_tmout);
static void rej_tmout_xa_cleanup(struct mlx4_ib_sriov *sriov, int slave)
	xa_lock(&sriov->xa_rej_tmout);
	xa_for_each(&sriov->xa_rej_tmout, id, item) {
		if (slave < 0 || slave == item->slave) {
			mod_delayed_work(cm_wq, &item->timeout, 0);
	xa_unlock(&sriov->xa_rej_tmout);
	if (flush_needed) {
		pr_debug("Deleted %d entries in xarray for slave %d during cleanup\n",
			 cnt, slave);
	if (slave < 0)
		WARN_ON(!xa_empty(&sriov->xa_rej_tmout));
/* slave = -1 ==> all slaves */
/* TBD -- call paravirt clean for single slave. Need for slave RESET event */
void mlx4_ib_cm_paravirt_clean(struct mlx4_ib_dev *dev, int slave)
	struct mlx4_ib_sriov *sriov = &dev->sriov;
	struct rb_root *sl_id_map = &sriov->sl_id_map;
	spin_lock(&sriov->id_map_lock);
	list_for_each_entry_safe(map, tmp_map, &dev->sriov.cm_list, list) {
		if (slave < 0 || slave == map->slave_id) {
			if (map->scheduled_delete)
				need_flush |= !cancel_delayed_work(&map->timeout);
	spin_unlock(&sriov->id_map_lock);
	if (need_flush)
	spin_lock(&sriov->id_map_lock);
	if (slave < 0) {
			rb_erase(&ent->node, sl_id_map);
			xa_erase(&sriov->pv_id_table, ent->pv_cm_id);
		list_splice_init(&dev->sriov.cm_list, &lh);
		/* first, move nodes belonging to slave to db remove list */
			if (ent->slave_id == slave)
				list_move_tail(&ent->list, &lh);
			rb_erase(&map->node, sl_id_map);
			xa_erase(&sriov->pv_id_table, map->pv_cm_id);
		list_for_each_entry_safe(map, tmp_map, &dev->sriov.cm_list, list) {
			if (slave == map->slave_id)
				list_move_tail(&map->list, &lh);
	spin_unlock(&sriov->id_map_lock);
		list_del(&map->list);
	rej_tmout_xa_cleanup(sriov, slave);
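mlx4_ib_cm_paravirt_clean() never frees entries while holding id_map_lock: it moves the doomed entries onto a private list (lh) under the lock, drops the lock, and only then frees them. A sketch of that collect-then-free pattern with a pthread mutex and a hand-rolled singly linked list standing in for the spinlock and list_head; all names are illustrative:

	#include <pthread.h>
	#include <stdlib.h>

	struct demo_map {
		int slave_id;
		struct demo_map *next;
	};

	static pthread_mutex_t demo_lock = PTHREAD_MUTEX_INITIALIZER;
	static struct demo_map *demo_list;

	static void demo_clean(int slave)	/* slave < 0 ==> all slaves */
	{
		struct demo_map *doomed = NULL, **pp, *map;

		pthread_mutex_lock(&demo_lock);
		for (pp = &demo_list; (map = *pp) != NULL; ) {
			if (slave < 0 || slave == map->slave_id) {
				*pp = map->next;	/* unlink, like list_move_tail() onto lh */
				map->next = doomed;
				doomed = map;
			} else {
				pp = &map->next;
			}
		}
		pthread_mutex_unlock(&demo_lock);

		while (doomed) {		/* free with the lock dropped */
			map = doomed;
			doomed = doomed->next;
			free(map);
		}
	}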
/* mlx4_ib_cm_init() */
	if (!cm_wq)
		return -ENOMEM;