Lines Matching full:xn

315 /* API for virtchnl "transaction" support ("xn" for short).
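Taken together, the matches below touch only a handful of fields on struct idpf_vc_xn. For orientation, here is a rough reconstruction of the object built purely from what this listing shows; the field order, exact integer types, enum name, and any members that never match "xn" here are assumptions, not the driver's actual layout.

/* Sketch reconstructed from the matches in this listing; layout and types
 * are assumed.
 */
struct idpf_vc_xn {
        struct completion completed;    /* waiters block here; its wait.lock doubles as the xn lock */
        enum idpf_vc_xn_state state;    /* enum name assumed; IDPF_VC_XN_* values appear below */
        struct kvec reply;              /* caller-supplied receive buffer (iov_base/iov_len) */
        size_t reply_sz;                /* bytes of reply payload actually delivered */
        u32 vc_op;                      /* virtchnl opcode a matching reply must carry */
        u8 idx;                         /* slot index in vcxn_mngr->ring, packed into the cookie */
        u8 salt;                        /* per-use tag packed into the cookie alongside idx */
        int (*async_handler)(struct idpf_adapter *adapter,
                             struct idpf_vc_xn *xn,
                             const struct idpf_ctlq_msg *ctlq_msg);
};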
324 * @xn: struct idpf_vc_xn* to access
326 #define idpf_vc_xn_lock(xn) \ argument
327 raw_spin_lock(&(xn)->completed.wait.lock)
331 * @xn: struct idpf_vc_xn* to access
333 #define idpf_vc_xn_unlock(xn) \ argument
334 raw_spin_unlock(&(xn)->completed.wait.lock)
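Reassembled from the two macro matches above (326-327 and 333-334), the lock helpers simply borrow the raw spinlock that already lives inside the transaction's completion, so struct idpf_vc_xn needs no dedicated lock member:

/* Protect the transaction's fields by reusing the wait-queue spinlock
 * embedded in xn->completed instead of adding a separate lock.
 */
#define idpf_vc_xn_lock(xn) \
        raw_spin_lock(&(xn)->completed.wait.lock)

#define idpf_vc_xn_unlock(xn) \
        raw_spin_unlock(&(xn)->completed.wait.lock)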
339 * @xn: struct idpf_vc_xn to update
341 static void idpf_vc_xn_release_bufs(struct idpf_vc_xn *xn) in idpf_vc_xn_release_bufs() argument
343 xn->reply.iov_base = NULL; in idpf_vc_xn_release_bufs()
344 xn->reply.iov_len = 0; in idpf_vc_xn_release_bufs()
346 if (xn->state != IDPF_VC_XN_SHUTDOWN) in idpf_vc_xn_release_bufs()
347 xn->state = IDPF_VC_XN_IDLE; in idpf_vc_xn_release_bufs()
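Lines 341-347 reassemble into the whole helper: it forgets the caller's reply buffer and returns the slot to IDLE, unless the manager is shutting down, in which case the terminal SHUTDOWN state must stick:

static void idpf_vc_xn_release_bufs(struct idpf_vc_xn *xn)
{
        /* Drop the caller's receive buffer so a stale pointer can never be
         * written after the transaction is recycled.
         */
        xn->reply.iov_base = NULL;
        xn->reply.iov_len = 0;

        /* SHUTDOWN is terminal; anything else goes back to IDLE. */
        if (xn->state != IDPF_VC_XN_SHUTDOWN)
                xn->state = IDPF_VC_XN_IDLE;
}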
361 struct idpf_vc_xn *xn = &vcxn_mngr->ring[i]; in idpf_vc_xn_init() local
363 xn->state = IDPF_VC_XN_IDLE; in idpf_vc_xn_init()
364 xn->idx = i; in idpf_vc_xn_init()
365 idpf_vc_xn_release_bufs(xn); in idpf_vc_xn_init()
366 init_completion(&xn->completed); in idpf_vc_xn_init()
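The idpf_vc_xn_init() matches (361-366) are the per-slot setup; the loop over the ring sketched here, and any bitmap or lock initialisation that never mentions "xn", are assumed:

/* Assumed loop shape; only the body lines appear in the listing. Assumes
 * vcxn_mngr->ring is a fixed-size array.
 */
int i;

for (i = 0; i < ARRAY_SIZE(vcxn_mngr->ring); i++) {
        struct idpf_vc_xn *xn = &vcxn_mngr->ring[i];

        xn->state = IDPF_VC_XN_IDLE;
        xn->idx = i;                     /* fixed slot index, later packed into the cookie */
        idpf_vc_xn_release_bufs(xn);     /* clears the reply kvec, keeps IDLE */
        init_completion(&xn->completed);
}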
388 struct idpf_vc_xn *xn = &vcxn_mngr->ring[i]; in idpf_vc_xn_shutdown() local
390 idpf_vc_xn_lock(xn); in idpf_vc_xn_shutdown()
391 xn->state = IDPF_VC_XN_SHUTDOWN; in idpf_vc_xn_shutdown()
392 idpf_vc_xn_release_bufs(xn); in idpf_vc_xn_shutdown()
393 idpf_vc_xn_unlock(xn); in idpf_vc_xn_shutdown()
394 complete_all(&xn->completed); in idpf_vc_xn_shutdown()
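The idpf_vc_xn_shutdown() matches (388-394) flip every slot to SHUTDOWN under the lock, drop its buffers, and then wake all waiters, so a thread parked in wait_for_completion_timeout() inside idpf_vc_xn_exec() observes the terminal state instead of hanging. The surrounding loop is assumed:

/* Assumed loop shape around the matched body. */
int i;

for (i = 0; i < ARRAY_SIZE(vcxn_mngr->ring); i++) {
        struct idpf_vc_xn *xn = &vcxn_mngr->ring[i];

        idpf_vc_xn_lock(xn);
        xn->state = IDPF_VC_XN_SHUTDOWN;        /* terminal: release_bufs leaves it alone */
        idpf_vc_xn_release_bufs(xn);
        idpf_vc_xn_unlock(xn);
        complete_all(&xn->completed);           /* wake current and future waiters */
}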
407 struct idpf_vc_xn *xn = NULL; in idpf_vc_xn_pop_free() local
416 xn = &vcxn_mngr->ring[free_idx]; in idpf_vc_xn_pop_free()
417 xn->salt = vcxn_mngr->salt++; in idpf_vc_xn_pop_free()
422 return xn; in idpf_vc_xn_pop_free()
428 * @xn: transaction to push
431 struct idpf_vc_xn *xn) in idpf_vc_xn_push_free() argument
433 idpf_vc_xn_release_bufs(xn); in idpf_vc_xn_push_free()
434 set_bit(xn->idx, vcxn_mngr->free_xn_bm); in idpf_vc_xn_push_free()
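Free slots are tracked in the free_xn_bm bitmap: idpf_vc_xn_pop_free() claims a slot and bumps its salt so a late reply for a previous use can no longer match, while idpf_vc_xn_push_free() releases the buffers and sets the bit again. Only lines 407-422 and 428-434 appear above; the manager type name, the bitmap scan, clear_bit(), and the locking around the bitmap are assumptions in this sketch:

/* Sketch only: bitmap locking is omitted and the scan/clear are assumed;
 * the ring lookup and salt increment are the matched lines.
 */
static struct idpf_vc_xn *idpf_vc_xn_pop_free(struct idpf_vc_xn_manager *vcxn_mngr)
{
        unsigned long free_idx;
        struct idpf_vc_xn *xn;

        free_idx = find_first_bit(vcxn_mngr->free_xn_bm, ARRAY_SIZE(vcxn_mngr->ring));
        if (free_idx >= ARRAY_SIZE(vcxn_mngr->ring))
                return NULL;                            /* no free transaction */

        clear_bit(free_idx, vcxn_mngr->free_xn_bm);     /* assumed */
        xn = &vcxn_mngr->ring[free_idx];
        xn->salt = vcxn_mngr->salt++;                   /* fresh tag defeats stale replies */

        return xn;
}

static void idpf_vc_xn_push_free(struct idpf_vc_xn_manager *vcxn_mngr,
                                 struct idpf_vc_xn *xn)
{
        idpf_vc_xn_release_bufs(xn);
        set_bit(xn->idx, vcxn_mngr->free_xn_bm);
}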
456 struct idpf_vc_xn *xn; in idpf_vc_xn_exec() local
460 xn = idpf_vc_xn_pop_free(adapter->vcxn_mngr); in idpf_vc_xn_exec()
462 if (!xn) in idpf_vc_xn_exec()
465 idpf_vc_xn_lock(xn); in idpf_vc_xn_exec()
466 if (xn->state == IDPF_VC_XN_SHUTDOWN) { in idpf_vc_xn_exec()
469 } else if (xn->state != IDPF_VC_XN_IDLE) { in idpf_vc_xn_exec()
479 xn->idx, xn->vc_op); in idpf_vc_xn_exec()
482 xn->reply = params->recv_buf; in idpf_vc_xn_exec()
483 xn->reply_sz = 0; in idpf_vc_xn_exec()
484 xn->state = params->async ? IDPF_VC_XN_ASYNC : IDPF_VC_XN_WAITING; in idpf_vc_xn_exec()
485 xn->vc_op = params->vc_op; in idpf_vc_xn_exec()
486 xn->async_handler = params->async_handler; in idpf_vc_xn_exec()
487 idpf_vc_xn_unlock(xn); in idpf_vc_xn_exec()
490 reinit_completion(&xn->completed); in idpf_vc_xn_exec()
491 cookie = FIELD_PREP(IDPF_VC_XN_SALT_M, xn->salt) | in idpf_vc_xn_exec()
492 FIELD_PREP(IDPF_VC_XN_IDX_M, xn->idx); in idpf_vc_xn_exec()
498 idpf_vc_xn_lock(xn); in idpf_vc_xn_exec()
505 wait_for_completion_timeout(&xn->completed, in idpf_vc_xn_exec()
514 idpf_vc_xn_lock(xn); in idpf_vc_xn_exec()
515 switch (xn->state) { in idpf_vc_xn_exec()
522 params->vc_op, cookie, xn->vc_op, in idpf_vc_xn_exec()
523 xn->salt, params->timeout_ms); in idpf_vc_xn_exec()
527 retval = xn->reply_sz; in idpf_vc_xn_exec()
542 idpf_vc_xn_push_free(adapter->vcxn_mngr, xn); in idpf_vc_xn_exec()
545 idpf_vc_xn_unlock(xn); in idpf_vc_xn_exec()
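idpf_vc_xn_exec() (456-545) is the single entry point for a virtchnl transaction: it pops a free slot, records the caller's parameters on it, packs idx and salt into the descriptor cookie with FIELD_PREP() (491-492), sends the message, and then either sleeps on xn->completed for the caller's timeout or, for async requests, returns at once and lets the reply path invoke the handler. A hypothetical caller could look like the sketch below; the params struct name and its send_buf member are assumed from context, vc_op, recv_buf, timeout_ms, async and async_handler are the members visible above, and the opcode, payload and function names are purely illustrative:

/* Hypothetical caller, for illustration only; not a copy of any driver function. */
static int example_send_get_caps(struct idpf_adapter *adapter)
{
        struct virtchnl2_get_capabilities caps = { };   /* illustrative payload */
        struct idpf_vc_xn_params xn_params = { };       /* struct name assumed */
        ssize_t reply_sz;

        xn_params.vc_op = VIRTCHNL2_OP_GET_CAPS;        /* copied to xn->vc_op, checked on reply */
        xn_params.send_buf.iov_base = &caps;            /* request payload (member name assumed) */
        xn_params.send_buf.iov_len = sizeof(caps);
        xn_params.recv_buf.iov_base = &caps;            /* becomes xn->reply on the receive side */
        xn_params.recv_buf.iov_len = sizeof(caps);
        xn_params.timeout_ms = 2000;                    /* bounds wait_for_completion_timeout() */

        reply_sz = idpf_vc_xn_exec(adapter, &xn_params);

        return reply_sz < 0 ? reply_sz : 0;             /* >= 0 is the delivered reply size */
}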
553 * @xn: transaction to handle
561 idpf_vc_xn_forward_async(struct idpf_adapter *adapter, struct idpf_vc_xn *xn, in idpf_vc_xn_forward_async() argument
566 if (ctlq_msg->cookie.mbx.chnl_opcode != xn->vc_op) { in idpf_vc_xn_forward_async()
567 …&adapter->pdev->dev, "Async message opcode does not match transaction opcode (msg: %d) (xn: %d)\n", in idpf_vc_xn_forward_async()
568 ctlq_msg->cookie.mbx.chnl_opcode, xn->vc_op); in idpf_vc_xn_forward_async()
569 xn->reply_sz = 0; in idpf_vc_xn_forward_async()
574 if (xn->async_handler) { in idpf_vc_xn_forward_async()
575 err = xn->async_handler(adapter, xn, ctlq_msg); in idpf_vc_xn_forward_async()
580 xn->reply_sz = 0; in idpf_vc_xn_forward_async()
587 idpf_vc_xn_push_free(adapter->vcxn_mngr, xn); in idpf_vc_xn_forward_async()
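For async transactions the reply path calls xn->async_handler(adapter, xn, ctlq_msg) (575) after verifying that the message opcode matches xn->vc_op, zeroes xn->reply_sz on any mismatch or handler failure (569, 580), and finally returns the slot with idpf_vc_xn_push_free() (587). From that call site the handler signature is roughly the following; the typedef name and the const qualification are assumed:

/* Callback signature inferred from the call on line 575; names assumed. */
typedef int (*async_vc_cb)(struct idpf_adapter *adapter,
                           struct idpf_vc_xn *xn,
                           const struct idpf_ctlq_msg *ctlq_msg);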
603 struct idpf_vc_xn *xn; in idpf_vc_xn_forward_reply() local
616 xn = &adapter->vcxn_mngr->ring[xn_idx]; in idpf_vc_xn_forward_reply()
617 idpf_vc_xn_lock(xn); in idpf_vc_xn_forward_reply()
619 if (xn->salt != salt) { in idpf_vc_xn_forward_reply()
621 xn->vc_op, xn->salt, xn->state, in idpf_vc_xn_forward_reply()
623 idpf_vc_xn_unlock(xn); in idpf_vc_xn_forward_reply()
627 switch (xn->state) { in idpf_vc_xn_forward_reply()
645 err = idpf_vc_xn_forward_async(adapter, xn, ctlq_msg); in idpf_vc_xn_forward_reply()
646 idpf_vc_xn_unlock(xn); in idpf_vc_xn_forward_reply()
655 if (ctlq_msg->cookie.mbx.chnl_opcode != xn->vc_op) { in idpf_vc_xn_forward_reply()
656 …mited(&adapter->pdev->dev, "Message opcode does not match transaction opcode (msg: %d) (xn: %d)\n", in idpf_vc_xn_forward_reply()
657 ctlq_msg->cookie.mbx.chnl_opcode, xn->vc_op); in idpf_vc_xn_forward_reply()
658 xn->reply_sz = 0; in idpf_vc_xn_forward_reply()
659 xn->state = IDPF_VC_XN_COMPLETED_FAILED; in idpf_vc_xn_forward_reply()
665 xn->reply_sz = 0; in idpf_vc_xn_forward_reply()
666 xn->state = IDPF_VC_XN_COMPLETED_FAILED; in idpf_vc_xn_forward_reply()
676 xn->reply_sz = payload_size; in idpf_vc_xn_forward_reply()
677 xn->state = IDPF_VC_XN_COMPLETED_SUCCESS; in idpf_vc_xn_forward_reply()
679 if (xn->reply.iov_base && xn->reply.iov_len && payload_size) in idpf_vc_xn_forward_reply()
680 memcpy(xn->reply.iov_base, payload, in idpf_vc_xn_forward_reply()
681 min_t(size_t, xn->reply.iov_len, payload_size)); in idpf_vc_xn_forward_reply()
684 idpf_vc_xn_unlock(xn); in idpf_vc_xn_forward_reply()
686 complete(&xn->completed); in idpf_vc_xn_forward_reply()
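idpf_vc_xn_forward_reply() (603-686) is the receive side: it locates the ring slot from the cookie, rejects a salt mismatch (619), checks the transaction state and opcode, copies at most xn->reply.iov_len bytes of payload into the caller's buffer, records the final state, and fires the completion. In the condensed sketch below the cookie unpacking is assumed to mirror the FIELD_PREP() packing on lines 491-492, msg_info stands for the cookie value recovered from the control-queue message, and the declarations and state/opcode checks are elided:

/* Condensed sketch of the reply path; unpacking and error codes assumed. */
xn_idx = FIELD_GET(IDPF_VC_XN_IDX_M, msg_info);
salt = FIELD_GET(IDPF_VC_XN_SALT_M, msg_info);
xn = &adapter->vcxn_mngr->ring[xn_idx];

idpf_vc_xn_lock(xn);
if (xn->salt != salt) {                 /* stale or mismatched cookie: drop the message */
        idpf_vc_xn_unlock(xn);
        return -EINVAL;                 /* error code assumed */
}

/* ... state machine and opcode checks elided ... */

xn->reply_sz = payload_size;
xn->state = IDPF_VC_XN_COMPLETED_SUCCESS;
if (xn->reply.iov_base && xn->reply.iov_len && payload_size)
        memcpy(xn->reply.iov_base, payload,
               min_t(size_t, xn->reply.iov_len, payload_size));

idpf_vc_xn_unlock(xn);
complete(&xn->completed);               /* releases the waiter in idpf_vc_xn_exec() */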
3522 * @xn: transaction for message
3532 struct idpf_vc_xn *xn, in idpf_mac_filter_async_handler() argument
3549 if (xn->reply_sz < sizeof(*ma_list)) in idpf_mac_filter_async_handler()
3556 if (xn->reply_sz < struct_size(ma_list, mac_addr_list, num_entries)) in idpf_mac_filter_async_handler()
3577 xn->vc_op); in idpf_mac_filter_async_handler()
3583 xn->vc_op, xn->reply_sz); in idpf_mac_filter_async_handler()
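idpf_mac_filter_async_handler() (3522-3583) is one concrete async handler: before trusting the reply it validates xn->reply_sz twice, first against the fixed header and then against struct_size() with the entry count the reply itself reports, so a short or truncated MAC address list is rejected rather than read past its end. With hypothetical surrounding names (how ma_list and num_entries are obtained, and the rejection label, are assumed), the checks from lines 3549 and 3556 amount to:

/* Size validation pattern; everything except the two comparisons is assumed. */
if (xn->reply_sz < sizeof(*ma_list))
        goto invalid_payload;           /* reply shorter than the fixed header */

if (xn->reply_sz < struct_size(ma_list, mac_addr_list, num_entries))
        goto invalid_payload;           /* reply shorter than header + num_entries addresses */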