Lines matching "call" in RxRPC call handling (net/rxrpc/call_object.c)

2 /* RxRPC individual remote procedure call handling
45 void rxrpc_poke_call(struct rxrpc_call *call, enum rxrpc_call_poke_trace what) in rxrpc_poke_call() argument
47 struct rxrpc_local *local = call->local; in rxrpc_poke_call()
50 if (!test_bit(RXRPC_CALL_DISCONNECTED, &call->flags)) { in rxrpc_poke_call()
52 busy = !list_empty(&call->attend_link); in rxrpc_poke_call()
53 trace_rxrpc_poke_call(call, busy, what); in rxrpc_poke_call()
54 if (!busy && !rxrpc_try_get_call(call, rxrpc_call_get_poke)) in rxrpc_poke_call()
57 list_add_tail(&call->attend_link, &local->call_attend_q); in rxrpc_poke_call()
67 struct rxrpc_call *call = from_timer(call, t, timer); in rxrpc_call_timer_expired() local
69 _enter("%d", call->debug_id); in rxrpc_call_timer_expired()
71 if (!__rxrpc_call_is_complete(call)) { in rxrpc_call_timer_expired()
72 trace_rxrpc_timer_expired(call); in rxrpc_call_timer_expired()
73 rxrpc_poke_call(call, rxrpc_call_poke_timer); in rxrpc_call_timer_expired()
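
The poke path above does no work itself: rxrpc_poke_call() checks that the call is not already on the endpoint's attend queue, takes a reference only when it wins that race, and queues the call for the I/O thread; rxrpc_call_timer_expired() simply reuses that path so nothing heavy runs in timer context. A minimal user-space sketch of the same pattern follows, with a pthread mutex standing in for the endpoint lock; fake_call, attend_q and poke() are illustrative names, not the kernel API.

	/* Minimal sketch of the "poke" pattern: queue a call for a service
	 * thread, taking a reference only if it was not already queued.
	 * Illustrative names; a pthread mutex stands in for the kernel lock. */
	#include <pthread.h>
	#include <stdbool.h>
	#include <stdio.h>

	struct fake_call {
		int refs;                  /* stand-in for refcount_t */
		bool queued;               /* stand-in for !list_empty(&attend_link) */
		struct fake_call *next;
	};

	static pthread_mutex_t attend_lock = PTHREAD_MUTEX_INITIALIZER;
	static struct fake_call *attend_q;      /* stand-in for call_attend_q */

	static void poke(struct fake_call *call)
	{
		pthread_mutex_lock(&attend_lock);
		if (!call->queued) {
			call->refs++;           /* the queue now pins the call */
			call->queued = true;
			/* head insertion for brevity; the kernel appends at the tail */
			call->next = attend_q;
			attend_q = call;
			/* the real code would now wake the I/O thread */
		}
		pthread_mutex_unlock(&attend_lock);
	}

	int main(void)
	{
		struct fake_call c = { .refs = 1 };

		poke(&c);
		poke(&c);          /* second poke is a no-op: already queued */
		printf("refs=%d queued=%d\n", c.refs, (int)c.queued);
		return 0;
	}
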
82 * find an extant server call
88 struct rxrpc_call *call; in rxrpc_find_call_by_user_ID() local
97 call = rb_entry(p, struct rxrpc_call, sock_node); in rxrpc_find_call_by_user_ID()
99 if (user_call_ID < call->user_call_ID) in rxrpc_find_call_by_user_ID()
101 else if (user_call_ID > call->user_call_ID) in rxrpc_find_call_by_user_ID()
112 rxrpc_get_call(call, rxrpc_call_get_sendmsg); in rxrpc_find_call_by_user_ID()
114 _leave(" = %p [%d]", call, refcount_read(&call->ref)); in rxrpc_find_call_by_user_ID()
115 return call; in rxrpc_find_call_by_user_ID()
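
rxrpc_find_call_by_user_ID() walks the socket's rb-tree keyed by user_call_ID, descending left or right at each node, and takes a reference on the call before returning it. A plain binary-search-tree sketch of that lookup, with illustrative types (demo_call, find_by_user_id) rather than the kernel rb-tree API:

	/* Minimal BST lookup keyed by user_call_ID, mirroring the left/right
	 * descent in the listing. Illustrative types, not the kernel API. */
	#include <stdio.h>

	struct demo_call {
		unsigned long user_call_ID;
		int ref;
		struct demo_call *left, *right;
	};

	static struct demo_call *find_by_user_id(struct demo_call *root,
						 unsigned long id)
	{
		struct demo_call *c = root;

		while (c) {
			if (id < c->user_call_ID)
				c = c->left;
			else if (id > c->user_call_ID)
				c = c->right;
			else {
				c->ref++;       /* the caller now owns a reference */
				return c;
			}
		}
		return NULL;                    /* no such call on this socket */
	}

	int main(void)
	{
		struct demo_call a = { .user_call_ID = 1, .ref = 1 };
		struct demo_call b = { .user_call_ID = 2, .ref = 1, .left = &a };
		struct demo_call *hit = find_by_user_id(&b, 1);

		printf("found=%lu ref=%d\n", hit->user_call_ID, hit->ref);
		return 0;
	}
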
119 * allocate a new call
124 struct rxrpc_call *call; in rxrpc_alloc_call() local
127 call = kmem_cache_zalloc(rxrpc_call_jar, gfp); in rxrpc_alloc_call()
128 if (!call) in rxrpc_alloc_call()
131 mutex_init(&call->user_mutex); in rxrpc_alloc_call()
137 lockdep_set_class(&call->user_mutex, in rxrpc_alloc_call()
140 timer_setup(&call->timer, rxrpc_call_timer_expired, 0); in rxrpc_alloc_call()
141 INIT_WORK(&call->destroyer, rxrpc_destroy_call); in rxrpc_alloc_call()
142 INIT_LIST_HEAD(&call->link); in rxrpc_alloc_call()
143 INIT_LIST_HEAD(&call->wait_link); in rxrpc_alloc_call()
144 INIT_LIST_HEAD(&call->accept_link); in rxrpc_alloc_call()
145 INIT_LIST_HEAD(&call->recvmsg_link); in rxrpc_alloc_call()
146 INIT_LIST_HEAD(&call->sock_link); in rxrpc_alloc_call()
147 INIT_LIST_HEAD(&call->attend_link); in rxrpc_alloc_call()
148 skb_queue_head_init(&call->rx_queue); in rxrpc_alloc_call()
149 skb_queue_head_init(&call->recvmsg_queue); in rxrpc_alloc_call()
150 skb_queue_head_init(&call->rx_oos_queue); in rxrpc_alloc_call()
151 init_waitqueue_head(&call->waitq); in rxrpc_alloc_call()
152 spin_lock_init(&call->notify_lock); in rxrpc_alloc_call()
153 refcount_set(&call->ref, 1); in rxrpc_alloc_call()
154 call->debug_id = debug_id; in rxrpc_alloc_call()
155 call->tx_total_len = -1; in rxrpc_alloc_call()
156 call->tx_jumbo_max = 1; in rxrpc_alloc_call()
157 call->next_rx_timo = 20 * HZ; in rxrpc_alloc_call()
158 call->next_req_timo = 1 * HZ; in rxrpc_alloc_call()
159 call->ackr_window = 1; in rxrpc_alloc_call()
160 call->ackr_wtop = 1; in rxrpc_alloc_call()
161 call->delay_ack_at = KTIME_MAX; in rxrpc_alloc_call()
162 call->rack_timo_at = KTIME_MAX; in rxrpc_alloc_call()
163 call->ping_at = KTIME_MAX; in rxrpc_alloc_call()
164 call->keepalive_at = KTIME_MAX; in rxrpc_alloc_call()
165 call->expect_rx_by = KTIME_MAX; in rxrpc_alloc_call()
166 call->expect_req_by = KTIME_MAX; in rxrpc_alloc_call()
167 call->expect_term_by = KTIME_MAX; in rxrpc_alloc_call()
169 memset(&call->sock_node, 0xed, sizeof(call->sock_node)); in rxrpc_alloc_call()
171 call->rx_winsize = rxrpc_rx_window_size; in rxrpc_alloc_call()
172 call->tx_winsize = 16; in rxrpc_alloc_call()
174 call->cong_cwnd = RXRPC_MIN_CWND; in rxrpc_alloc_call()
175 call->cong_ssthresh = RXRPC_TX_MAX_WINDOW; in rxrpc_alloc_call()
177 rxrpc_call_init_rtt(call); in rxrpc_alloc_call()
179 call->rxnet = rxnet; in rxrpc_alloc_call()
180 call->rtt_avail = RXRPC_CALL_RTT_AVAIL_MASK; in rxrpc_alloc_call()
182 return call; in rxrpc_alloc_call()
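
rxrpc_alloc_call() zero-allocates the call from the rxrpc_call_jar slab, initialises its lists, queues, locks and timer, and only then fills in the defaults: a refcount of 1 for the caller, tx_total_len of -1 meaning "length not yet known", and every per-call deadline parked at KTIME_MAX so nothing fires until it is armed. A condensed user-space sketch of that allocate/init/sentinel shape, with made-up field names and INT64_MAX standing in for KTIME_MAX:

	/* Sketch of the allocation shape: zeroed allocation, then sentinel
	 * defaults. Field names are illustrative; INT64_MAX plays KTIME_MAX. */
	#include <inttypes.h>
	#include <stdio.h>
	#include <stdlib.h>

	#define NEVER INT64_MAX

	struct demo_call {
		int ref;
		long tx_total_len;            /* -1 = total length not yet known */
		int64_t delay_ack_at;         /* deadlines parked at NEVER until armed */
		int64_t keepalive_at;
		int64_t expect_term_by;
	};

	static struct demo_call *alloc_call(void)
	{
		struct demo_call *call = calloc(1, sizeof(*call));

		if (!call)
			return NULL;
		call->ref = 1;                /* the allocating caller holds this ref */
		call->tx_total_len = -1;
		call->delay_ack_at = NEVER;
		call->keepalive_at = NEVER;
		call->expect_term_by = NEVER;
		return call;
	}

	int main(void)
	{
		struct demo_call *c = alloc_call();

		if (c)
			printf("ref=%d tx_total_len=%ld term=%" PRId64 "\n",
			       c->ref, c->tx_total_len, c->expect_term_by);
		free(c);
		return 0;
	}
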
186 * Allocate a new client call.
194 struct rxrpc_call *call; in rxrpc_alloc_client_call() local
200 call = rxrpc_alloc_call(rx, gfp, debug_id); in rxrpc_alloc_client_call()
201 if (!call) in rxrpc_alloc_client_call()
204 call->acks_latest_ts = now; in rxrpc_alloc_client_call()
205 call->cong_tstamp = now; in rxrpc_alloc_client_call()
206 call->dest_srx = cp->peer->srx; in rxrpc_alloc_client_call()
207 call->dest_srx.srx_service = cp->service_id; in rxrpc_alloc_client_call()
208 call->interruptibility = p->interruptibility; in rxrpc_alloc_client_call()
209 call->tx_total_len = p->tx_total_len; in rxrpc_alloc_client_call()
210 call->key = key_get(cp->key); in rxrpc_alloc_client_call()
211 call->peer = rxrpc_get_peer(cp->peer, rxrpc_peer_get_call); in rxrpc_alloc_client_call()
212 call->local = rxrpc_get_local(cp->local, rxrpc_local_get_call); in rxrpc_alloc_client_call()
213 call->security_level = cp->security_level; in rxrpc_alloc_client_call()
215 __set_bit(RXRPC_CALL_KERNEL, &call->flags); in rxrpc_alloc_client_call()
217 __set_bit(RXRPC_CALL_UPGRADE, &call->flags); in rxrpc_alloc_client_call()
219 __set_bit(RXRPC_CALL_EXCLUSIVE, &call->flags); in rxrpc_alloc_client_call()
222 call->next_rx_timo = umin(p->timeouts.normal, 1); in rxrpc_alloc_client_call()
224 call->next_req_timo = umin(p->timeouts.idle, 1); in rxrpc_alloc_client_call()
226 call->hard_timo = p->timeouts.hard; in rxrpc_alloc_client_call()
228 ret = rxrpc_init_client_call_security(call); in rxrpc_alloc_client_call()
230 rxrpc_prefail_call(call, RXRPC_CALL_LOCAL_ERROR, ret); in rxrpc_alloc_client_call()
231 rxrpc_put_call(call, rxrpc_call_put_discard_error); in rxrpc_alloc_client_call()
235 rxrpc_set_call_state(call, RXRPC_CALL_CLIENT_AWAIT_CONN); in rxrpc_alloc_client_call()
237 trace_rxrpc_call(call->debug_id, refcount_read(&call->ref), in rxrpc_alloc_client_call()
240 _leave(" = %p", call); in rxrpc_alloc_client_call()
241 return call; in rxrpc_alloc_client_call()
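
rxrpc_alloc_client_call() copies the connection parameters into the fresh call (destination address, key, peer, local endpoint, security level and any caller-supplied timeouts) and, if security setup fails, pre-fails the call and drops the allocation reference before returning the error. A sketch of that failure shape, re-creating the kernel's ERR_PTR()/PTR_ERR() convention for user space; demo_call and init_security() are illustrative:

	/* Sketch of the failure path: pre-fail the half-built call, drop the
	 * allocation reference, return an error pointer. ERR_PTR()/PTR_ERR()
	 * are re-created here for user space; names are illustrative. */
	#include <errno.h>
	#include <stdio.h>
	#include <stdlib.h>

	#define ERR_PTR(err)  ((void *)(long)(err))
	#define PTR_ERR(ptr)  ((long)(ptr))
	#define IS_ERR(ptr)   ((unsigned long)(ptr) >= (unsigned long)-4095)

	struct demo_call { int ref; int error; };

	static int init_security(struct demo_call *call)
	{
		(void)call;
		return -ENOKEY;               /* pretend the key lookup failed */
	}

	static struct demo_call *alloc_client_call(void)
	{
		struct demo_call *call = calloc(1, sizeof(*call));
		int ret;

		if (!call)
			return ERR_PTR(-ENOMEM);
		call->ref = 1;

		ret = init_security(call);
		if (ret < 0) {
			call->error = ret;    /* "pre-fail" the call */
			if (--call->ref == 0) /* drop the allocation reference */
				free(call);
			return ERR_PTR(ret);
		}
		return call;
	}

	int main(void)
	{
		struct demo_call *call = alloc_client_call();

		if (IS_ERR(call))
			printf("alloc failed: %ld\n", PTR_ERR(call));
		else
			free(call);
		return 0;
	}
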
245 * Initiate the call ack/resend/expiry timer.
247 void rxrpc_start_call_timer(struct rxrpc_call *call) in rxrpc_start_call_timer() argument
249 if (call->hard_timo) { in rxrpc_start_call_timer()
250 ktime_t delay = ms_to_ktime(call->hard_timo * 1000); in rxrpc_start_call_timer()
252 call->expect_term_by = ktime_add(ktime_get_real(), delay); in rxrpc_start_call_timer()
253 trace_rxrpc_timer_set(call, delay, rxrpc_timer_trace_hard); in rxrpc_start_call_timer()
255 call->timer.expires = jiffies; in rxrpc_start_call_timer()
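
rxrpc_start_call_timer() converts the optional hard timeout from seconds into a ktime delay, records the absolute deadline as now + delay in expect_term_by, and primes the timer so later code can modify it. The same arithmetic in a user-space sketch using CLOCK_REALTIME nanoseconds in place of ktime_t (the 30-second value is illustrative):

	/* Sketch of arming a hard-timeout deadline as "now + N seconds",
	 * using nanosecond arithmetic in place of ktime_t. */
	#include <stdint.h>
	#include <stdio.h>
	#include <time.h>

	static int64_t now_ns(void)
	{
		struct timespec ts;

		clock_gettime(CLOCK_REALTIME, &ts);
		return (int64_t)ts.tv_sec * 1000000000LL + ts.tv_nsec;
	}

	int main(void)
	{
		unsigned int hard_timo_s = 30;                  /* illustrative value */
		int64_t delay = (int64_t)hard_timo_s * 1000000000LL;
		int64_t expect_term_by = now_ns() + delay;      /* absolute deadline */

		printf("terminate by %lld ns since the epoch\n",
		       (long long)expect_term_by);
		return 0;
	}
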
259 * Wait for a call slot to become available.
275 * Release a call slot.
277 static void rxrpc_put_call_slot(struct rxrpc_call *call) in rxrpc_put_call_slot() argument
281 if (test_bit(RXRPC_CALL_KERNEL, &call->flags)) in rxrpc_put_call_slot()
287 * Start the process of connecting a call. We obtain a peer and a connection
288 * bundle, but the actual association of a call with a connection is offloaded
291 static int rxrpc_connect_call(struct rxrpc_call *call, gfp_t gfp) in rxrpc_connect_call() argument
293 struct rxrpc_local *local = call->local; in rxrpc_connect_call()
296 _enter("{%d,%lx},", call->debug_id, call->user_call_ID); in rxrpc_connect_call()
298 ret = rxrpc_look_up_bundle(call, gfp); in rxrpc_connect_call()
303 rxrpc_get_call(call, rxrpc_call_get_io_thread); in rxrpc_connect_call()
305 list_add_tail(&call->wait_link, &local->new_client_calls); in rxrpc_connect_call()
311 __set_bit(RXRPC_CALL_DISCONNECTED, &call->flags); in rxrpc_connect_call()
316 * Set up a call for the given parameters.
318 * - If it returns a call, the call's lock will need releasing by the caller.
326 __acquires(&call->user_mutex) in rxrpc_new_client_call()
328 struct rxrpc_call *call, *xcall; in rxrpc_new_client_call() local
347 call = rxrpc_alloc_client_call(rx, cp, p, gfp, debug_id); in rxrpc_new_client_call()
348 if (IS_ERR(call)) { in rxrpc_new_client_call()
351 _leave(" = %ld", PTR_ERR(call)); in rxrpc_new_client_call()
352 return call; in rxrpc_new_client_call()
355 /* We need to protect a partially set up call against the user as we in rxrpc_new_client_call()
358 mutex_lock(&call->user_mutex); in rxrpc_new_client_call()
360 /* Publish the call, even though it is incompletely set up as yet */ in rxrpc_new_client_call()
377 rcu_assign_pointer(call->socket, rx); in rxrpc_new_client_call()
378 call->user_call_ID = p->user_call_ID; in rxrpc_new_client_call()
379 __set_bit(RXRPC_CALL_HAS_USERID, &call->flags); in rxrpc_new_client_call()
380 rxrpc_get_call(call, rxrpc_call_get_userid); in rxrpc_new_client_call()
381 rb_link_node(&call->sock_node, parent, pp); in rxrpc_new_client_call()
382 rb_insert_color(&call->sock_node, &rx->calls); in rxrpc_new_client_call()
383 list_add(&call->sock_link, &rx->sock_calls); in rxrpc_new_client_call()
387 rxnet = call->rxnet; in rxrpc_new_client_call()
389 list_add_tail_rcu(&call->link, &rxnet->calls); in rxrpc_new_client_call()
392 /* From this point on, the call is protected by its own lock. */ in rxrpc_new_client_call()
396 * including channel number and call ID. in rxrpc_new_client_call()
398 ret = rxrpc_connect_call(call, gfp); in rxrpc_new_client_call()
402 _leave(" = %p [new]", call); in rxrpc_new_client_call()
403 return call; in rxrpc_new_client_call()
413 rxrpc_prefail_call(call, RXRPC_CALL_LOCAL_ERROR, -EEXIST); in rxrpc_new_client_call()
414 trace_rxrpc_call(call->debug_id, refcount_read(&call->ref), 0, in rxrpc_new_client_call()
416 mutex_unlock(&call->user_mutex); in rxrpc_new_client_call()
417 rxrpc_put_call(call, rxrpc_call_put_userid_exists); in rxrpc_new_client_call()
421 /* We got an error, but the call is attached to the socket and is in in rxrpc_new_client_call()
427 trace_rxrpc_call(call->debug_id, refcount_read(&call->ref), ret, in rxrpc_new_client_call()
429 rxrpc_set_call_completion(call, RXRPC_CALL_LOCAL_ERROR, 0, ret); in rxrpc_new_client_call()
430 _leave(" = c=%08x [err]", call->debug_id); in rxrpc_new_client_call()
431 return call; in rxrpc_new_client_call()
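
rxrpc_new_client_call() publishes the half-built call under the socket lock: it walks the rb-tree to find the insertion point for the requested user_call_ID, bails out with -EEXIST if that ID is already bound, and otherwise links the node, takes the userid reference and adds the call to the socket and namespace lists before connecting it. The check-then-insert shape in a plain BST sketch (publish_call() and demo_call are illustrative names):

	/* Sketch of "publish under lock, fail with EEXIST on a duplicate
	 * user_call_ID". Plain BST, illustrative names. */
	#include <errno.h>
	#include <stdio.h>

	struct demo_call {
		unsigned long user_call_ID;
		int ref;
		struct demo_call *left, *right;
	};

	static int publish_call(struct demo_call **rootp, struct demo_call *call)
	{
		struct demo_call **pp = rootp;

		while (*pp) {
			if (call->user_call_ID < (*pp)->user_call_ID)
				pp = &(*pp)->left;
			else if (call->user_call_ID > (*pp)->user_call_ID)
				pp = &(*pp)->right;
			else
				return -EEXIST;   /* ID already bound to another call */
		}
		call->ref++;                      /* the tree now holds a reference */
		*pp = call;
		return 0;
	}

	int main(void)
	{
		struct demo_call *root = NULL;
		struct demo_call a = { .user_call_ID = 7, .ref = 1 };
		struct demo_call b = { .user_call_ID = 7, .ref = 1 };

		printf("first=%d second=%d\n",
		       publish_call(&root, &a), publish_call(&root, &b));
		return 0;
	}
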
435 * Set up an incoming call. call->conn points to the connection.
439 struct rxrpc_call *call, in rxrpc_incoming_call() argument
442 struct rxrpc_connection *conn = call->conn; in rxrpc_incoming_call()
446 _enter(",%d", call->conn->debug_id); in rxrpc_incoming_call()
448 rcu_assign_pointer(call->socket, rx); in rxrpc_incoming_call()
449 call->call_id = sp->hdr.callNumber; in rxrpc_incoming_call()
450 call->dest_srx.srx_service = sp->hdr.serviceId; in rxrpc_incoming_call()
451 call->cid = sp->hdr.cid; in rxrpc_incoming_call()
452 call->cong_tstamp = skb->tstamp; in rxrpc_incoming_call()
454 __set_bit(RXRPC_CALL_EXPOSED, &call->flags); in rxrpc_incoming_call()
455 rxrpc_set_call_state(call, RXRPC_CALL_SERVER_RECV_REQUEST); in rxrpc_incoming_call()
462 __set_bit(RXRPC_CALL_CONN_CHALLENGING, &call->flags); in rxrpc_incoming_call()
468 rxrpc_set_call_completion(call, conn->completion, in rxrpc_incoming_call()
475 rxrpc_get_call(call, rxrpc_call_get_io_thread); in rxrpc_incoming_call()
477 /* Set the channel for this call. We don't get channel_lock as we're in rxrpc_incoming_call()
481 * call pointer). in rxrpc_incoming_call()
484 conn->channels[chan].call_counter = call->call_id; in rxrpc_incoming_call()
485 conn->channels[chan].call_id = call->call_id; in rxrpc_incoming_call()
486 conn->channels[chan].call = call; in rxrpc_incoming_call()
490 hlist_add_head(&call->error_link, &conn->peer->error_targets); in rxrpc_incoming_call()
493 rxrpc_start_call_timer(call); in rxrpc_incoming_call()
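
rxrpc_incoming_call() copies the call number, service ID and cid out of the packet header, marks the call exposed and in the server receive state, and binds it into one of the connection's channel slots. Below is a sketch of that channel binding, assuming the usual RxRPC layout in which a connection carries four calls and the channel index comes from the low bits of the cid; the types and CHANNEL_MASK value here are illustrative, not the kernel definitions.

	/* Sketch: an RxRPC connection carries four calls, one per channel;
	 * the channel index is taken from the low bits of the call's cid.
	 * Types and the mask value are illustrative, not the kernel's. */
	#include <stdio.h>

	#define CHANNEL_MASK 3u                  /* four channels per connection */

	struct demo_call { unsigned int cid, call_id; };
	struct demo_chan { unsigned int call_counter, call_id; struct demo_call *call; };
	struct demo_conn { struct demo_chan channels[4]; };

	static void bind_incoming_call(struct demo_conn *conn, struct demo_call *call)
	{
		unsigned int chan = call->cid & CHANNEL_MASK;

		conn->channels[chan].call_counter = call->call_id;
		conn->channels[chan].call_id = call->call_id;
		conn->channels[chan].call = call;
	}

	int main(void)
	{
		static struct demo_conn conn;        /* zero-initialised */
		struct demo_call call = { .cid = 0x12345671, .call_id = 9 };

		bind_incoming_call(&conn, &call);
		printf("channel=%u call_id=%u\n", call.cid & CHANNEL_MASK,
		       conn.channels[call.cid & CHANNEL_MASK].call_id);
		return 0;
	}
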
498 * Note the re-emergence of a call.
500 void rxrpc_see_call(struct rxrpc_call *call, enum rxrpc_call_trace why) in rxrpc_see_call() argument
502 if (call) { in rxrpc_see_call()
503 int r = refcount_read(&call->ref); in rxrpc_see_call()
505 trace_rxrpc_call(call->debug_id, r, 0, why); in rxrpc_see_call()
509 struct rxrpc_call *rxrpc_try_get_call(struct rxrpc_call *call, in rxrpc_try_get_call() argument
514 if (!call || !__refcount_inc_not_zero(&call->ref, &r)) in rxrpc_try_get_call()
516 trace_rxrpc_call(call->debug_id, r + 1, 0, why); in rxrpc_try_get_call()
517 return call; in rxrpc_try_get_call()
521 * Note the addition of a ref on a call.
523 void rxrpc_get_call(struct rxrpc_call *call, enum rxrpc_call_trace why) in rxrpc_get_call() argument
527 __refcount_inc(&call->ref, &r); in rxrpc_get_call()
528 trace_rxrpc_call(call->debug_id, r + 1, 0, why); in rxrpc_get_call()
534 static void rxrpc_cleanup_tx_buffers(struct rxrpc_call *call) in rxrpc_cleanup_tx_buffers() argument
538 for (tq = call->tx_queue; tq; tq = next) { in rxrpc_cleanup_tx_buffers()
543 trace_rxrpc_tq(call, tq, 0, rxrpc_tq_cleaned); in rxrpc_cleanup_tx_buffers()
551 static void rxrpc_cleanup_rx_buffers(struct rxrpc_call *call) in rxrpc_cleanup_rx_buffers() argument
553 rxrpc_purge_queue(&call->recvmsg_queue); in rxrpc_cleanup_rx_buffers()
554 rxrpc_purge_queue(&call->rx_queue); in rxrpc_cleanup_rx_buffers()
555 rxrpc_purge_queue(&call->rx_oos_queue); in rxrpc_cleanup_rx_buffers()
559 * Detach a call from its owning socket.
561 void rxrpc_release_call(struct rxrpc_sock *rx, struct rxrpc_call *call) in rxrpc_release_call() argument
563 struct rxrpc_connection *conn = call->conn; in rxrpc_release_call()
566 _enter("{%d,%d}", call->debug_id, refcount_read(&call->ref)); in rxrpc_release_call()
568 trace_rxrpc_call(call->debug_id, refcount_read(&call->ref), in rxrpc_release_call()
569 call->flags, rxrpc_call_see_release); in rxrpc_release_call()
571 if (test_and_set_bit(RXRPC_CALL_RELEASED, &call->flags)) in rxrpc_release_call()
574 rxrpc_put_call_slot(call); in rxrpc_release_call()
579 if (!list_empty(&call->recvmsg_link)) { in rxrpc_release_call()
580 _debug("unlinking once-pending call %p { e=%lx f=%lx }", in rxrpc_release_call()
581 call, call->events, call->flags); in rxrpc_release_call()
582 list_del(&call->recvmsg_link); in rxrpc_release_call()
587 call->recvmsg_link.next = NULL; in rxrpc_release_call()
588 call->recvmsg_link.prev = NULL; in rxrpc_release_call()
592 rxrpc_put_call(call, rxrpc_call_put_unnotify); in rxrpc_release_call()
596 if (test_and_clear_bit(RXRPC_CALL_HAS_USERID, &call->flags)) { in rxrpc_release_call()
597 rb_erase(&call->sock_node, &rx->calls); in rxrpc_release_call()
598 memset(&call->sock_node, 0xdd, sizeof(call->sock_node)); in rxrpc_release_call()
602 list_del(&call->sock_link); in rxrpc_release_call()
605 _debug("RELEASE CALL %p (%d CONN %p)", call, call->debug_id, conn); in rxrpc_release_call()
608 rxrpc_put_call(call, rxrpc_call_put_userid); in rxrpc_release_call()
618 struct rxrpc_call *call; in rxrpc_release_calls_on_socket() local
623 call = list_entry(rx->to_be_accepted.next, in rxrpc_release_calls_on_socket()
625 list_del(&call->accept_link); in rxrpc_release_calls_on_socket()
626 rxrpc_propose_abort(call, RX_CALL_DEAD, -ECONNRESET, in rxrpc_release_calls_on_socket()
628 rxrpc_put_call(call, rxrpc_call_put_release_sock_tba); in rxrpc_release_calls_on_socket()
632 call = list_entry(rx->sock_calls.next, in rxrpc_release_calls_on_socket()
634 rxrpc_get_call(call, rxrpc_call_get_release_sock); in rxrpc_release_calls_on_socket()
635 rxrpc_propose_abort(call, RX_CALL_DEAD, -ECONNRESET, in rxrpc_release_calls_on_socket()
637 rxrpc_release_call(rx, call); in rxrpc_release_calls_on_socket()
638 rxrpc_put_call(call, rxrpc_call_put_release_sock); in rxrpc_release_calls_on_socket()
645 * release a call
647 void rxrpc_put_call(struct rxrpc_call *call, enum rxrpc_call_trace why) in rxrpc_put_call() argument
649 struct rxrpc_net *rxnet = call->rxnet; in rxrpc_put_call()
650 unsigned int debug_id = call->debug_id; in rxrpc_put_call()
654 ASSERT(call != NULL); in rxrpc_put_call()
656 dead = __refcount_dec_and_test(&call->ref, &r); in rxrpc_put_call()
659 ASSERTCMP(__rxrpc_call_state(call), ==, RXRPC_CALL_COMPLETE); in rxrpc_put_call()
661 if (!list_empty(&call->link)) { in rxrpc_put_call()
663 list_del_init(&call->link); in rxrpc_put_call()
667 rxrpc_cleanup_call(call); in rxrpc_put_call()
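
The reference helpers follow the usual kernel pattern: rxrpc_see_call() only traces, rxrpc_try_get_call() increments the count only if it is still non-zero, rxrpc_get_call() increments unconditionally, and rxrpc_put_call() decrements and, on the final put, unlinks the call and hands it to rxrpc_cleanup_call(). A C11-atomics sketch of the try-get/get/put trio (illustrative names; the kernel uses refcount_t, not atomic_int):

	/* Sketch of the try-get / get / put refcount pattern using C11
	 * atomics. Names are illustrative; the kernel uses refcount_t. */
	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdio.h>
	#include <stdlib.h>

	struct demo_call {
		atomic_int ref;
	};

	static bool call_try_get(struct demo_call *call)
	{
		int r = atomic_load(&call->ref);

		/* Only take a ref if the object is not already being freed. */
		while (r != 0) {
			if (atomic_compare_exchange_weak(&call->ref, &r, r + 1))
				return true;
		}
		return false;
	}

	static void call_get(struct demo_call *call)
	{
		atomic_fetch_add(&call->ref, 1);      /* caller already holds a ref */
	}

	static void call_put(struct demo_call *call)
	{
		if (atomic_fetch_sub(&call->ref, 1) == 1) {
			/* last reference dropped: tear the call down */
			free(call);
		}
	}

	int main(void)
	{
		struct demo_call *c = calloc(1, sizeof(*c));

		if (!c)
			return 1;
		atomic_init(&c->ref, 1);
		if (call_try_get(c))
			call_put(c);
		call_get(c);
		call_put(c);
		call_put(c);                          /* final put frees the call */
		return 0;
	}
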
672 * Free up the call under RCU.
676 struct rxrpc_call *call = container_of(rcu, struct rxrpc_call, rcu); in rxrpc_rcu_free_call() local
677 struct rxrpc_net *rxnet = READ_ONCE(call->rxnet); in rxrpc_rcu_free_call()
679 kmem_cache_free(rxrpc_call_jar, call); in rxrpc_rcu_free_call()
685 * Final call destruction - but must be done in process context.
689 struct rxrpc_call *call = container_of(work, struct rxrpc_call, destroyer); in rxrpc_destroy_call() local
691 del_timer_sync(&call->timer); in rxrpc_destroy_call()
693 rxrpc_cleanup_tx_buffers(call); in rxrpc_destroy_call()
694 rxrpc_cleanup_rx_buffers(call); in rxrpc_destroy_call()
695 rxrpc_put_txbuf(call->tx_pending, rxrpc_txbuf_put_cleaned); in rxrpc_destroy_call()
696 rxrpc_put_connection(call->conn, rxrpc_conn_put_call); in rxrpc_destroy_call()
697 rxrpc_deactivate_bundle(call->bundle); in rxrpc_destroy_call()
698 rxrpc_put_bundle(call->bundle, rxrpc_bundle_put_call); in rxrpc_destroy_call()
699 rxrpc_put_peer(call->peer, rxrpc_peer_put_call); in rxrpc_destroy_call()
700 rxrpc_put_local(call->local, rxrpc_local_put_call); in rxrpc_destroy_call()
701 call_rcu(&call->rcu, rxrpc_rcu_free_call); in rxrpc_destroy_call()
705 * clean up a call
707 void rxrpc_cleanup_call(struct rxrpc_call *call) in rxrpc_cleanup_call() argument
709 memset(&call->sock_node, 0xcd, sizeof(call->sock_node)); in rxrpc_cleanup_call()
711 ASSERTCMP(__rxrpc_call_state(call), ==, RXRPC_CALL_COMPLETE); in rxrpc_cleanup_call()
712 ASSERT(test_bit(RXRPC_CALL_RELEASED, &call->flags)); in rxrpc_cleanup_call()
714 del_timer(&call->timer); in rxrpc_cleanup_call()
720 schedule_work(&call->destroyer); in rxrpc_cleanup_call()
722 rxrpc_destroy_call(&call->destroyer); in rxrpc_cleanup_call()
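
rxrpc_cleanup_call() asserts that the call is complete and released, deletes the timer, and then either destroys the call directly or defers the teardown to the workqueue via call->destroyer when the current context cannot do the work itself; rxrpc_destroy_call() then releases the buffers, connection, bundle, peer and local refs and frees the call memory under RCU. A sketch of the "destroy now or defer to a worker" decision, with a detached pthread standing in for the system workqueue; cleanup_call() and in_atomic_context are illustrative names:

	/* Sketch of "destroy now if we may sleep, otherwise hand the job to a
	 * worker". A detached pthread stands in for the system workqueue. */
	#include <pthread.h>
	#include <stdbool.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <unistd.h>

	struct demo_call { int id; };

	static void destroy_call(struct demo_call *call)
	{
		printf("destroying call %d\n", call->id);
		free(call);
	}

	static void *destroyer(void *arg)
	{
		destroy_call(arg);
		return NULL;
	}

	static void cleanup_call(struct demo_call *call, bool in_atomic_context)
	{
		if (in_atomic_context) {
			/* Cannot sleep here: defer the teardown. */
			pthread_t t;

			pthread_create(&t, NULL, destroyer, call);
			pthread_detach(t);
		} else {
			destroy_call(call);
		}
	}

	int main(void)
	{
		struct demo_call *a = malloc(sizeof(*a)), *b = malloc(sizeof(*b));

		if (!a || !b)
			return 1;
		a->id = 1;
		b->id = 2;
		cleanup_call(a, false);
		cleanup_call(b, true);
		sleep(1);              /* let the detached worker finish */
		return 0;
	}
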
732 struct rxrpc_call *call; in rxrpc_destroy_all_calls() local
740 call = list_entry(rxnet->calls.next, in rxrpc_destroy_all_calls()
742 _debug("Zapping call %p", call); in rxrpc_destroy_all_calls()
744 rxrpc_see_call(call, rxrpc_call_see_zap); in rxrpc_destroy_all_calls()
745 list_del_init(&call->link); in rxrpc_destroy_all_calls()
747 pr_err("Call %p still in use (%d,%s,%lx,%lx)!\n", in rxrpc_destroy_all_calls()
748 call, refcount_read(&call->ref), in rxrpc_destroy_all_calls()
749 rxrpc_call_states[__rxrpc_call_state(call)], in rxrpc_destroy_all_calls()
750 call->flags, call->events); in rxrpc_destroy_all_calls()