1 // SPDX-License-Identifier: GPL-2.0-only
7 Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
8 Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
9 Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.
54 device = bio->bi_private; in drbd_md_endio()
55 device->md_io.error = blk_status_to_errno(bio->bi_status); in drbd_md_endio()
58 if (device->ldev) in drbd_md_endio()
65 * drbd_md_put_buffer() may allow us to finally try and re-attach. in drbd_md_endio()
71 * ASSERT(atomic_read(&device->md_io_in_use) == 1) there. in drbd_md_endio()
74 device->md_io.done = 1; in drbd_md_endio()
75 wake_up(&device->misc_wait); in drbd_md_endio()
84 struct drbd_peer_device *peer_device = peer_req->peer_device; in drbd_endio_read_sec_final()
85 struct drbd_device *device = peer_device->device; in drbd_endio_read_sec_final()
87 spin_lock_irqsave(&device->resource->req_lock, flags); in drbd_endio_read_sec_final()
88 device->read_cnt += peer_req->i.size >> 9; in drbd_endio_read_sec_final()
89 list_del(&peer_req->w.list); in drbd_endio_read_sec_final()
90 if (list_empty(&device->read_ee)) in drbd_endio_read_sec_final()
91 wake_up(&device->ee_wait); in drbd_endio_read_sec_final()
92 if (test_bit(__EE_WAS_ERROR, &peer_req->flags)) in drbd_endio_read_sec_final()
94 spin_unlock_irqrestore(&device->resource->req_lock, flags); in drbd_endio_read_sec_final()
96 drbd_queue_work(&peer_device->connection->sender_work, &peer_req->w); in drbd_endio_read_sec_final()
105 struct drbd_peer_device *peer_device = peer_req->peer_device; in drbd_endio_write_sec_final()
106 struct drbd_device *device = peer_device->device; in drbd_endio_write_sec_final()
107 struct drbd_connection *connection = peer_device->connection; in drbd_endio_write_sec_final()
117 i = peer_req->i; in drbd_endio_write_sec_final()
118 do_al_complete_io = peer_req->flags & EE_CALL_AL_COMPLETE_IO; in drbd_endio_write_sec_final()
119 block_id = peer_req->block_id; in drbd_endio_write_sec_final()
120 peer_req->flags &= ~EE_CALL_AL_COMPLETE_IO; in drbd_endio_write_sec_final()
122 if (peer_req->flags & EE_WAS_ERROR) { in drbd_endio_write_sec_final()
125 if (!__test_and_set_bit(__EE_SEND_WRITE_ACK, &peer_req->flags)) in drbd_endio_write_sec_final()
127 drbd_set_out_of_sync(peer_device, peer_req->i.sector, peer_req->i.size); in drbd_endio_write_sec_final()
130 spin_lock_irqsave(&device->resource->req_lock, flags); in drbd_endio_write_sec_final()
131 device->writ_cnt += peer_req->i.size >> 9; in drbd_endio_write_sec_final()
132 list_move_tail(&peer_req->w.list, &device->done_ee); in drbd_endio_write_sec_final()
142 do_wake = list_empty(block_id == ID_SYNCER ? &device->sync_ee : &device->active_ee); in drbd_endio_write_sec_final()
145 * ((peer_req->flags & (EE_WAS_ERROR|EE_TRIM)) == EE_WAS_ERROR) */ in drbd_endio_write_sec_final()
146 if (peer_req->flags & EE_WAS_ERROR) in drbd_endio_write_sec_final()
149 if (connection->cstate >= C_WF_REPORT_PARAMS) { in drbd_endio_write_sec_final()
150 kref_get(&device->kref); /* put is in drbd_send_acks_wf() */ in drbd_endio_write_sec_final()
151 if (!queue_work(connection->ack_sender, &peer_device->send_acks_work)) in drbd_endio_write_sec_final()
152 kref_put(&device->kref, drbd_destroy_device); in drbd_endio_write_sec_final()
154 spin_unlock_irqrestore(&device->resource->req_lock, flags); in drbd_endio_write_sec_final()
160 wake_up(&device->ee_wait); in drbd_endio_write_sec_final()
173 struct drbd_peer_request *peer_req = bio->bi_private; in drbd_peer_request_endio()
174 struct drbd_device *device = peer_req->peer_device->device; in drbd_peer_request_endio()
179 if (bio->bi_status && drbd_ratelimit()) in drbd_peer_request_endio()
182 : "read", bio->bi_status, in drbd_peer_request_endio()
183 (unsigned long long)peer_req->i.sector); in drbd_peer_request_endio()
185 if (bio->bi_status) in drbd_peer_request_endio()
186 set_bit(__EE_WAS_ERROR, &peer_req->flags); in drbd_peer_request_endio()
189 if (atomic_dec_and_test(&peer_req->pending_bios)) { in drbd_peer_request_endio()
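Note: a single peer request may be split across several bios; each completion only records an error in the request flags, and only the last completion, when pending_bios drops to zero, hands the request to the read- or write-side finalizer shown above.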
201 device->minor, device->resource->name, device->vnr); in drbd_panic_after_delayed_completion_of_aborted_request()
209 struct drbd_request *req = bio->bi_private; in drbd_request_endio()
210 struct drbd_device *device = req->device; in drbd_request_endio()
218 * "aborting" requests, or force-detaching the disk, is intended for in drbd_request_endio()
221 * situation, usually a hard-reset and failover is the only way out. in drbd_request_endio()
223 * By "aborting", basically faking a local error-completion, in drbd_request_endio()
227 * By completing these requests, we allow the upper layers to re-use in drbd_request_endio()
242 if (unlikely(req->rq_state & RQ_LOCAL_ABORTED)) { in drbd_request_endio()
244 …drbd_emerg(device, "delayed completion of aborted local request; disk-timeout may be too aggressiv… in drbd_request_endio()
246 if (!bio->bi_status) in drbd_request_endio()
251 if (unlikely(bio->bi_status)) { in drbd_request_endio()
255 if (bio->bi_status == BLK_STS_NOTSUPP) in drbd_request_endio()
261 if (bio->bi_opf & REQ_RAHEAD) in drbd_request_endio()
274 req->private_bio = ERR_PTR(blk_status_to_errno(bio->bi_status)); in drbd_request_endio()
278 spin_lock_irqsave(&device->resource->req_lock, flags); in drbd_request_endio()
280 spin_unlock_irqrestore(&device->resource->req_lock, flags); in drbd_request_endio()
290 struct page *page = peer_req->pages; in drbd_csum_ee()
295 desc->tfm = tfm; in drbd_csum_ee()
308 len = peer_req->i.size & (PAGE_SIZE - 1); in drbd_csum_ee()
322 desc->tfm = tfm; in drbd_csum_bio()
341 struct drbd_peer_device *peer_device = peer_req->peer_device; in w_e_send_csum()
342 struct drbd_device *device = peer_device->device; in w_e_send_csum()
350 if (unlikely((peer_req->flags & EE_WAS_ERROR) != 0)) in w_e_send_csum()
353 digest_size = crypto_shash_digestsize(peer_device->connection->csums_tfm); in w_e_send_csum()
356 sector_t sector = peer_req->i.sector; in w_e_send_csum()
357 unsigned int size = peer_req->i.size; in w_e_send_csum()
358 drbd_csum_ee(peer_device->connection->csums_tfm, peer_req, digest); in w_e_send_csum()
373 err = -ENOMEM; in w_e_send_csum()
389 struct drbd_device *device = peer_device->device; in read_for_csum()
393 return -EIO; in read_for_csum()
402 peer_req->w.cb = w_e_send_csum; in read_for_csum()
403 peer_req->opf = REQ_OP_READ; in read_for_csum()
404 spin_lock_irq(&device->resource->req_lock); in read_for_csum()
405 list_add_tail(&peer_req->w.list, &device->read_ee); in read_for_csum()
406 spin_unlock_irq(&device->resource->req_lock); in read_for_csum()
408 atomic_add(size >> 9, &device->rs_sect_ev); in read_for_csum()
416 spin_lock_irq(&device->resource->req_lock); in read_for_csum()
417 list_del(&peer_req->w.list); in read_for_csum()
418 spin_unlock_irq(&device->resource->req_lock); in read_for_csum()
423 return -EAGAIN; in read_for_csum()
431 switch (device->state.conn) { in w_resync_timer()
448 &first_peer_device(device)->connection->sender_work, in resync_timer_fn()
449 &device->resync_work); in resync_timer_fn()
456 for (i = 0; i < fb->size; i++) in fifo_set()
457 fb->values[i] = value; in fifo_set()
464 ov = fb->values[fb->head_index]; in fifo_push()
465 fb->values[fb->head_index++] = value; in fifo_push()
467 if (fb->head_index >= fb->size) in fifo_push()
468 fb->head_index = 0; in fifo_push()
477 for (i = 0; i < fb->size; i++) in fifo_add_val()
478 fb->values[i] += value; in fifo_add_val()
489 fb->head_index = 0; in fifo_alloc()
490 fb->size = fifo_size; in fifo_alloc()
491 fb->total = 0; in fifo_alloc()
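The fifo_* helpers above implement the fixed-length plan ring used by the resync rate controller: a push stores the newest planned correction and returns the value it displaces, i.e. the correction that was planned size steps earlier. A minimal standalone sketch of that wrap-around behaviour, with simplified, assumed types rather than the driver's own fifo_buffer:

	struct plan_ring { unsigned int head, size; int values[16]; };

	static int plan_push(struct plan_ring *p, int value)
	{
		int old = p->values[p->head];	/* correction planned `size` steps ago */
		p->values[p->head] = value;	/* schedule the new correction */
		if (++p->head >= p->size)
			p->head = 0;		/* wrap: the ring is a fixed-length delay line */
		return old;
	}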
498 struct drbd_device *device = peer_device->device; in drbd_rs_controller()
500 unsigned int want; /* The number of sectors we want in-flight */ in drbd_rs_controller()
502 int correction; /* Number of sectors more we need in-flight */ in drbd_rs_controller()
509 dc = rcu_dereference(device->ldev->disk_conf); in drbd_rs_controller()
510 plan = rcu_dereference(device->rs_plan_s); in drbd_rs_controller()
512 steps = plan->size; /* (dc->c_plan_ahead * 10 * SLEEP_TIME) / HZ; */ in drbd_rs_controller()
514 if (device->rs_in_flight + sect_in == 0) { /* At start of resync */ in drbd_rs_controller()
515 want = ((dc->resync_rate * 2 * SLEEP_TIME) / HZ) * steps; in drbd_rs_controller()
517 want = dc->c_fill_target ? dc->c_fill_target : in drbd_rs_controller()
518 sect_in * dc->c_delay_target * HZ / (SLEEP_TIME * 10); in drbd_rs_controller()
521 correction = want - device->rs_in_flight - plan->total; in drbd_rs_controller()
526 plan->total += cps * steps; in drbd_rs_controller()
530 plan->total -= curr_corr; in drbd_rs_controller()
536 max_sect = (dc->c_max_rate * 2 * SLEEP_TIME) / HZ; in drbd_rs_controller()
542 sect_in, device->rs_in_flight, want, correction, in drbd_rs_controller()
543 steps, cps, device->rs_planed, curr_corr, req_sect); in drbd_rs_controller()
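A rough worked example of the controller arithmetic above, with illustrative numbers only: if c_fill_target is 4000 sectors, rs_in_flight is 2400 and plan->total is 600, the correction is 4000 - 2400 - 600 = 1000 sectors; spread over steps = 10 plan slots that is cps = 100 per slot. The slot popped for the current step (curr_corr) is added to the sectors that just arrived (sect_in) to form req_sect, and the result is clamped to max_sect = (c_max_rate * 2 * SLEEP_TIME) / HZ so that even a large correction cannot exceed the configured ceiling.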
551 struct drbd_device *device = peer_device->device; in drbd_rs_number_requests()
555 sect_in = atomic_xchg(&device->rs_sect_in, 0); in drbd_rs_number_requests()
556 device->rs_in_flight -= sect_in; in drbd_rs_number_requests()
560 if (rcu_dereference(device->rs_plan_s)->size) { in drbd_rs_number_requests()
561 number = drbd_rs_controller(peer_device, sect_in) >> (BM_BLOCK_SHIFT - 9); in drbd_rs_number_requests()
562 device->c_sync_rate = number * HZ * (BM_BLOCK_SIZE / 1024) / SLEEP_TIME; in drbd_rs_number_requests()
564 device->c_sync_rate = rcu_dereference(device->ldev->disk_conf)->resync_rate; in drbd_rs_number_requests()
565 number = SLEEP_TIME * device->c_sync_rate / ((BM_BLOCK_SIZE / 1024) * HZ); in drbd_rs_number_requests()
569 /* Don't have more than "max-buffers"/2 in-flight. in drbd_rs_number_requests()
572 * online-verify or (checksum-based) resync, if max-buffers, in drbd_rs_number_requests()
573 * socket buffer sizes and resync rate settings are mis-configured. */ in drbd_rs_number_requests()
579 if (mxb - device->rs_in_flight/8 < number) in drbd_rs_number_requests()
580 number = mxb - device->rs_in_flight/8; in drbd_rs_number_requests()
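Worked example of the max-buffers throttle above (illustrative; this assumes rs_in_flight counts 512-byte sectors, so dividing by 8 converts it to 4 KiB request units): with mxb = 1000 and 4000 sectors in flight, the cap becomes 1000 - 4000/8 = 500 requests for this SLEEP_TIME interval.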
587 struct drbd_device *const device = peer_device->device; in make_resync_request()
588 struct drbd_connection *const connection = peer_device ? peer_device->connection : NULL; in make_resync_request()
591 const sector_t capacity = get_capacity(device->vdisk); in make_resync_request()
601 if (device->rs_total == 0) { in make_resync_request()
608 /* Since we only need to access device->rsync a in make_resync_request()
616 if (connection->agreed_features & DRBD_FF_THIN_RESYNC) { in make_resync_request()
618 discard_granularity = rcu_dereference(device->ldev->disk_conf)->rs_discard_granularity; in make_resync_request()
622 max_bio_size = queue_max_hw_sectors(device->rq_queue) << 9; in make_resync_request()
630 mutex_lock(&connection->data.mutex); in make_resync_request()
631 if (connection->data.socket) { in make_resync_request()
632 struct sock *sk = connection->data.socket->sk; in make_resync_request()
633 int queued = sk->sk_wmem_queued; in make_resync_request()
634 int sndbuf = sk->sk_sndbuf; in make_resync_request()
637 if (sk->sk_socket) in make_resync_request()
638 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); in make_resync_request()
642 mutex_unlock(&connection->data.mutex); in make_resync_request()
648 bit = drbd_bm_find_next(device, device->bm_resync_fo); in make_resync_request()
651 device->bm_resync_fo = drbd_bm_bits(device); in make_resync_request()
659 device->bm_resync_fo = bit; in make_resync_request()
662 device->bm_resync_fo = bit + 1; in make_resync_request()
683 if (sector & ((1<<(align+3))-1)) in make_resync_request()
693 * caution, drbd_bm_test_bit is tri-state for some in make_resync_request()
694 * obscure reason; ( b == 0 ) would get the out-of-band in make_resync_request()
708 device->bm_resync_fo = bit + 1; in make_resync_request()
713 size = (capacity-sector)<<9; in make_resync_request()
715 if (device->use_csums) { in make_resync_request()
717 case -EIO: /* Disk failure */ in make_resync_request()
719 return -EIO; in make_resync_request()
720 case -EAGAIN: /* allocation failed, or ldev busy */ in make_resync_request()
722 device->bm_resync_fo = BM_SECT_TO_BIT(sector); in make_resync_request()
747 if (device->bm_resync_fo >= drbd_bm_bits(device)) { in make_resync_request()
759 device->rs_in_flight += (i << (BM_BLOCK_SHIFT - 9)); in make_resync_request()
760 mod_timer(&device->resync_timer, jiffies + SLEEP_TIME); in make_resync_request()
767 struct drbd_device *device = peer_device->device; in make_ov_request()
770 const sector_t capacity = get_capacity(device->vdisk); in make_ov_request()
778 sector = device->ov_position; in make_ov_request()
788 && sector >= device->ov_stop_sector; in make_ov_request()
795 device->ov_position = sector; in make_ov_request()
800 size = (capacity-sector)<<9; in make_ov_request()
809 device->ov_position = sector; in make_ov_request()
812 device->rs_in_flight += (i << (BM_BLOCK_SHIFT - 9)); in make_ov_request()
814 mod_timer(&device->resync_timer, jiffies + SLEEP_TIME); in make_ov_request()
822 struct drbd_device *device = dw->device; in w_ov_finished()
834 struct drbd_device *device = dw->device; in w_resync_finished()
844 struct drbd_connection *connection = first_peer_device(device)->connection; in ping_peer()
846 clear_bit(GOT_PING_ACK, &connection->flags); in ping_peer()
848 wait_event(connection->ping_wait, in ping_peer()
849 test_bit(GOT_PING_ACK, &connection->flags) || device->state.conn < C_CONNECTED); in ping_peer()
854 struct drbd_device *device = peer_device->device; in drbd_resync_finished()
855 struct drbd_connection *connection = peer_device->connection; in drbd_resync_finished()
875 dw->w.cb = w_resync_finished; in drbd_resync_finished()
876 dw->device = device; in drbd_resync_finished()
877 drbd_queue_work(&connection->sender_work, &dw->w); in drbd_resync_finished()
883 dt = (jiffies - device->rs_start - device->rs_paused) / HZ; in drbd_resync_finished()
887 db = device->rs_total; in drbd_resync_finished()
889 if (device->state.conn == C_VERIFY_S || device->state.conn == C_VERIFY_T) in drbd_resync_finished()
890 db -= device->ov_left; in drbd_resync_finished()
893 device->rs_paused /= HZ; in drbd_resync_finished()
900 spin_lock_irq(&device->resource->req_lock); in drbd_resync_finished()
915 dt + device->rs_paused, device->rs_paused, dbdt); in drbd_resync_finished()
923 khelper_cmd = "out-of-sync"; in drbd_resync_finished()
926 D_ASSERT(device, (n_oos - device->rs_failed) == 0); in drbd_resync_finished()
929 khelper_cmd = "after-resync-target"; in drbd_resync_finished()
931 if (device->use_csums && device->rs_total) { in drbd_resync_finished()
932 const unsigned long s = device->rs_same_csum; in drbd_resync_finished()
933 const unsigned long t = device->rs_total; in drbd_resync_finished()
940 Bit2KB(device->rs_same_csum), in drbd_resync_finished()
941 Bit2KB(device->rs_total - device->rs_same_csum), in drbd_resync_finished()
942 Bit2KB(device->rs_total)); in drbd_resync_finished()
946 if (device->rs_failed) { in drbd_resync_finished()
947 drbd_info(device, " %lu failed blocks\n", device->rs_failed); in drbd_resync_finished()
961 if (device->p_uuid) { in drbd_resync_finished()
964 _drbd_uuid_set(device, i, device->p_uuid[i]); in drbd_resync_finished()
965 drbd_uuid_set(device, UI_BITMAP, device->ldev->md.uuid[UI_CURRENT]); in drbd_resync_finished()
966 _drbd_uuid_set(device, UI_CURRENT, device->p_uuid[UI_CURRENT]); in drbd_resync_finished()
968 drbd_err(device, "device->p_uuid is NULL! BUG\n"); in drbd_resync_finished()
977 if (device->p_uuid) { in drbd_resync_finished()
982 device->p_uuid[i] = device->ldev->md.uuid[i]; in drbd_resync_finished()
989 spin_unlock_irq(&device->resource->req_lock); in drbd_resync_finished()
991 /* If we have been sync source, and have an effective fencing-policy, in drbd_resync_finished()
999 fp = rcu_dereference(device->ldev->disk_conf)->fencing; in drbd_resync_finished()
1003 idr_for_each_entry(&connection->peer_devices, peer_device, vnr) { in drbd_resync_finished()
1004 struct drbd_device *device = peer_device->device; in drbd_resync_finished()
1005 disk_state = min_t(enum drbd_disk_state, disk_state, device->state.disk); in drbd_resync_finished()
1006 pdsk_state = min_t(enum drbd_disk_state, pdsk_state, device->state.pdsk); in drbd_resync_finished()
1011 conn_khelper(connection, "unfence-peer"); in drbd_resync_finished()
1016 device->rs_total = 0; in drbd_resync_finished()
1017 device->rs_failed = 0; in drbd_resync_finished()
1018 device->rs_paused = 0; in drbd_resync_finished()
1021 if (verify_done && device->ov_left == 0) in drbd_resync_finished()
1022 device->ov_start_sector = 0; in drbd_resync_finished()
1037 int i = PFN_UP(peer_req->i.size); in move_to_net_ee_or_free()
1038 atomic_add(i, &device->pp_in_use_by_net); in move_to_net_ee_or_free()
1039 atomic_sub(i, &device->pp_in_use); in move_to_net_ee_or_free()
1040 spin_lock_irq(&device->resource->req_lock); in move_to_net_ee_or_free()
1041 list_add_tail(&peer_req->w.list, &device->net_ee); in move_to_net_ee_or_free()
1042 spin_unlock_irq(&device->resource->req_lock); in move_to_net_ee_or_free()
1049  * w_e_end_data_req() - Worker callback, to send a P_DATA_REPLY packet in response to a P_DATA_REQUEST
1056 struct drbd_peer_device *peer_device = peer_req->peer_device; in w_e_end_data_req()
1057 struct drbd_device *device = peer_device->device; in w_e_end_data_req()
1066 if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) { in w_e_end_data_req()
1071 (unsigned long long)peer_req->i.sector); in w_e_end_data_req()
1087 struct page *page = peer_req->pages; in all_zero()
1088 unsigned int len = peer_req->i.size; in all_zero()
1103 len -= l; in all_zero()
1110  * w_e_end_rsdata_req() - Worker callback to send a P_RS_DATA_REPLY packet in response to a P_RS_DATA_REQUEST
1117 struct drbd_peer_device *peer_device = peer_req->peer_device; in w_e_end_rsdata_req()
1118 struct drbd_device *device = peer_device->device; in w_e_end_rsdata_req()
1128 drbd_rs_complete_io(device, peer_req->i.sector); in w_e_end_rsdata_req()
1132 if (device->state.conn == C_AHEAD) { in w_e_end_rsdata_req()
1134 } else if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) { in w_e_end_rsdata_req()
1135 if (likely(device->state.pdsk >= D_INCONSISTENT)) { in w_e_end_rsdata_req()
1137 if (peer_req->flags & EE_RS_THIN_REQ && all_zero(peer_req)) in w_e_end_rsdata_req()
1150 (unsigned long long)peer_req->i.sector); in w_e_end_rsdata_req()
1155 drbd_rs_failed_io(peer_device, peer_req->i.sector, peer_req->i.size); in w_e_end_rsdata_req()
1170 struct drbd_peer_device *peer_device = peer_req->peer_device; in w_e_end_csum_rs_req()
1171 struct drbd_device *device = peer_device->device; in w_e_end_csum_rs_req()
1184 drbd_rs_complete_io(device, peer_req->i.sector); in w_e_end_csum_rs_req()
1188 di = peer_req->digest; in w_e_end_csum_rs_req()
1190 if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) { in w_e_end_csum_rs_req()
1194 if (peer_device->connection->csums_tfm) { in w_e_end_csum_rs_req()
1195 digest_size = crypto_shash_digestsize(peer_device->connection->csums_tfm); in w_e_end_csum_rs_req()
1196 D_ASSERT(device, digest_size == di->digest_size); in w_e_end_csum_rs_req()
1200 drbd_csum_ee(peer_device->connection->csums_tfm, peer_req, digest); in w_e_end_csum_rs_req()
1201 eq = !memcmp(digest, di->digest, digest_size); in w_e_end_csum_rs_req()
1206 drbd_set_in_sync(peer_device, peer_req->i.sector, peer_req->i.size); in w_e_end_csum_rs_req()
1208 device->rs_same_csum += peer_req->i.size >> BM_BLOCK_SHIFT; in w_e_end_csum_rs_req()
1212 peer_req->block_id = ID_SYNCER; /* By setting block_id, digest pointer becomes invalid! */ in w_e_end_csum_rs_req()
1213 peer_req->flags &= ~EE_HAS_DIGEST; /* This peer request no longer has a digest pointer */ in w_e_end_csum_rs_req()
1234 struct drbd_peer_device *peer_device = peer_req->peer_device; in w_e_end_ov_req()
1235 struct drbd_device *device = peer_device->device; in w_e_end_ov_req()
1236 sector_t sector = peer_req->i.sector; in w_e_end_ov_req()
1237 unsigned int size = peer_req->i.size; in w_e_end_ov_req()
1245 digest_size = crypto_shash_digestsize(peer_device->connection->verify_tfm); in w_e_end_ov_req()
1252 if (likely(!(peer_req->flags & EE_WAS_ERROR))) in w_e_end_ov_req()
1253 drbd_csum_ee(peer_device->connection->verify_tfm, peer_req, digest); in w_e_end_ov_req()
1279 struct drbd_device *device = peer_device->device; in drbd_ov_out_of_sync_found()
1280 if (device->ov_last_oos_start + device->ov_last_oos_size == sector) { in drbd_ov_out_of_sync_found()
1281 device->ov_last_oos_size += size>>9; in drbd_ov_out_of_sync_found()
1283 device->ov_last_oos_start = sector; in drbd_ov_out_of_sync_found()
1284 device->ov_last_oos_size = size>>9; in drbd_ov_out_of_sync_found()
1292 struct drbd_peer_device *peer_device = peer_req->peer_device; in w_e_end_ov_reply()
1293 struct drbd_device *device = peer_device->device; in w_e_end_ov_reply()
1296 sector_t sector = peer_req->i.sector; in w_e_end_ov_reply()
1297 unsigned int size = peer_req->i.size; in w_e_end_ov_reply()
1311 drbd_rs_complete_io(device, peer_req->i.sector); in w_e_end_ov_reply()
1315 di = peer_req->digest; in w_e_end_ov_reply()
1317 if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) { in w_e_end_ov_reply()
1318 digest_size = crypto_shash_digestsize(peer_device->connection->verify_tfm); in w_e_end_ov_reply()
1321 drbd_csum_ee(peer_device->connection->verify_tfm, peer_req, digest); in w_e_end_ov_reply()
1323 D_ASSERT(device, digest_size == di->digest_size); in w_e_end_ov_reply()
1324 eq = !memcmp(digest, di->digest, digest_size); in w_e_end_ov_reply()
1345 --device->ov_left; in w_e_end_ov_reply()
1348 if ((device->ov_left & 0x200) == 0x200) in w_e_end_ov_reply()
1349 drbd_advance_rs_marks(peer_device, device->ov_left); in w_e_end_ov_reply()
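Note: testing a single bit of the remaining-block count makes the condition hold for long runs of values and fail for the next, so the resync progress marks are refreshed for only roughly half of the replies; in effect a cheap rate limit on progress bookkeeping rather than an exact schedule.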
1352 (sector + (size>>9)) >= device->ov_stop_sector; in w_e_end_ov_reply()
1354 if (device->ov_left == 0 || stop_sector_reached) { in w_e_end_ov_reply()
1372 sock = &connection->data; in drbd_send_barrier()
1375 return -EIO; in drbd_send_barrier()
1376 p->barrier = connection->send.current_epoch_nr; in drbd_send_barrier()
1377 p->pad = 0; in drbd_send_barrier()
1378 connection->send.current_epoch_writes = 0; in drbd_send_barrier()
1379 connection->send.last_sent_barrier_jif = jiffies; in drbd_send_barrier()
1386 struct drbd_socket *sock = &pd->connection->data; in pd_send_unplug_remote()
1388 return -EIO; in pd_send_unplug_remote()
1404 if (!connection->send.seen_any_write_yet) { in re_init_if_first_write()
1405 connection->send.seen_any_write_yet = true; in re_init_if_first_write()
1406 connection->send.current_epoch_nr = epoch; in re_init_if_first_write()
1407 connection->send.current_epoch_writes = 0; in re_init_if_first_write()
1408 connection->send.last_sent_barrier_jif = jiffies; in re_init_if_first_write()
1414 /* re-init if first write on this connection */ in maybe_send_barrier()
1415 if (!connection->send.seen_any_write_yet) in maybe_send_barrier()
1417 if (connection->send.current_epoch_nr != epoch) { in maybe_send_barrier()
1418 if (connection->send.current_epoch_writes) in maybe_send_barrier()
1420 connection->send.current_epoch_nr = epoch; in maybe_send_barrier()
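Note: the two checks above encode the barrier rule for write epochs: a P_BARRIER is only worth sending when the sender moves to a new epoch number and the epoch being left actually contained writes; an epoch without writes needs no barrier on the wire.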
1427 struct drbd_device *device = req->device; in w_send_out_of_sync()
1429 struct drbd_connection *const connection = peer_device->connection; in w_send_out_of_sync()
1436 req->pre_send_jif = jiffies; in w_send_out_of_sync()
1438 /* this time, no connection->send.current_epoch_writes++; in w_send_out_of_sync()
1442 maybe_send_barrier(connection, req->epoch); in w_send_out_of_sync()
1451 * w_send_dblock() - Worker callback to send a P_DATA packet in order to mirror a write request
1458 struct drbd_device *device = req->device; in w_send_dblock()
1460 struct drbd_connection *connection = peer_device->connection; in w_send_dblock()
1461 bool do_send_unplug = req->rq_state & RQ_UNPLUG; in w_send_dblock()
1468 req->pre_send_jif = jiffies; in w_send_dblock()
1470 re_init_if_first_write(connection, req->epoch); in w_send_dblock()
1471 maybe_send_barrier(connection, req->epoch); in w_send_dblock()
1472 connection->send.current_epoch_writes++; in w_send_dblock()
1484 * w_send_read_req() - Worker callback to send a read request (P_DATA_REQUEST) packet
1491 struct drbd_device *device = req->device; in w_send_read_req()
1493 struct drbd_connection *connection = peer_device->connection; in w_send_read_req()
1494 bool do_send_unplug = req->rq_state & RQ_UNPLUG; in w_send_read_req()
1501 req->pre_send_jif = jiffies; in w_send_read_req()
1505 maybe_send_barrier(connection, req->epoch); in w_send_read_req()
1507 err = drbd_send_drequest(peer_device, P_DATA_REQUEST, req->i.sector, req->i.size, in w_send_read_req()
1521 struct drbd_device *device = req->device; in w_restart_disk_io()
1523 if (bio_data_dir(req->master_bio) == WRITE && req->rq_state & RQ_IN_ACT_LOG) in w_restart_disk_io()
1524 drbd_al_begin_io(device, &req->i); in w_restart_disk_io()
1526 req->private_bio = bio_alloc_clone(device->ldev->backing_bdev, in w_restart_disk_io()
1527 req->master_bio, GFP_NOIO, in w_restart_disk_io()
1529 req->private_bio->bi_private = req; in w_restart_disk_io()
1530 req->private_bio->bi_end_io = drbd_request_endio; in w_restart_disk_io()
1531 submit_bio_noacct(req->private_bio); in w_restart_disk_io()
1542 if (!odev->ldev || odev->state.disk == D_DISKLESS) in _drbd_may_sync_now()
1545 resync_after = rcu_dereference(odev->ldev->disk_conf)->resync_after; in _drbd_may_sync_now()
1547 if (resync_after == -1) in _drbd_may_sync_now()
1552 if ((odev->state.conn >= C_SYNC_SOURCE && in _drbd_may_sync_now()
1553 odev->state.conn <= C_PAUSED_SYNC_T) || in _drbd_may_sync_now()
1554 odev->state.aftr_isp || odev->state.peer_isp || in _drbd_may_sync_now()
1555 odev->state.user_isp) in _drbd_may_sync_now()
1561 * drbd_pause_after() - Pause resync on all devices that may not resync now
1574 if (odev->state.conn == C_STANDALONE && odev->state.disk == D_DISKLESS) in drbd_pause_after()
1587 * drbd_resume_next() - Resume resync on all devices that may resync now
1600 if (odev->state.conn == C_STANDALONE && odev->state.disk == D_DISKLESS) in drbd_resume_next()
1602 if (odev->state.aftr_isp) { in drbd_resume_next()
1633 if (o_minor == -1) in drbd_resync_after_valid()
1635 if (o_minor < -1 || o_minor > MINORMASK) in drbd_resync_after_valid()
1644 /* You are free to depend on diskless, non-existing, in drbd_resync_after_valid()
1650 if (!odev || !odev->ldev || odev->state.disk == D_DISKLESS) in drbd_resync_after_valid()
1654 resync_after = rcu_dereference(odev->ldev->disk_conf)->resync_after; in drbd_resync_after_valid()
1657 if (resync_after == -1) in drbd_resync_after_valid()
1678 struct drbd_device *device = peer_device->device; in drbd_rs_controller_reset()
1679 struct gendisk *disk = device->ldev->backing_bdev->bd_disk; in drbd_rs_controller_reset()
1682 atomic_set(&device->rs_sect_in, 0); in drbd_rs_controller_reset()
1683 atomic_set(&device->rs_sect_ev, 0); in drbd_rs_controller_reset()
1684 device->rs_in_flight = 0; in drbd_rs_controller_reset()
1685 device->rs_last_events = in drbd_rs_controller_reset()
1686 (int)part_stat_read_accum(disk->part0, sectors); in drbd_rs_controller_reset()
1693 plan = rcu_dereference(device->rs_plan_s); in drbd_rs_controller_reset()
1694 plan->total = 0; in drbd_rs_controller_reset()
1707 if (atomic_read(&device->unacked_cnt) || atomic_read(&device->rs_pending_cnt)) { in do_start_resync()
1709 device->start_resync_timer.expires = jiffies + HZ/10; in do_start_resync()
1710 add_timer(&device->start_resync_timer); in do_start_resync()
1715 clear_bit(AHEAD_TO_SYNC_SOURCE, &device->flags); in do_start_resync()
1722 csums_after_crash_only = rcu_dereference(connection->net_conf)->csums_after_crash_only; in use_checksum_based_resync()
1724 return connection->agreed_pro_version >= 89 && /* supported? */ in use_checksum_based_resync()
1725 connection->csums_tfm && /* configured? */ in use_checksum_based_resync()
1727 || test_bit(CRASHED_PRIMARY, &device->flags)); /* or only after Primary crash? */ in use_checksum_based_resync()
1731 * drbd_start_resync() - Start the resync process
1741 struct drbd_connection *connection = peer_device ? peer_device->connection : NULL; in drbd_start_resync()
1745 if (device->state.conn >= C_SYNC_SOURCE && device->state.conn < C_AHEAD) { in drbd_start_resync()
1755 if (!test_bit(B_RS_H_DONE, &device->flags)) { in drbd_start_resync()
1760 r = drbd_khelper(device, "before-resync-target"); in drbd_start_resync()
1763 drbd_info(device, "before-resync-target handler returned %d, " in drbd_start_resync()
1769 r = drbd_khelper(device, "before-resync-source"); in drbd_start_resync()
1773 drbd_info(device, "before-resync-source handler returned %d, " in drbd_start_resync()
1776 drbd_info(device, "before-resync-source handler returned %d, " in drbd_start_resync()
1786 if (current == connection->worker.task) { in drbd_start_resync()
1789 if (!mutex_trylock(device->state_mutex)) { in drbd_start_resync()
1790 set_bit(B_RS_H_DONE, &device->flags); in drbd_start_resync()
1791 device->start_resync_timer.expires = jiffies + HZ/5; in drbd_start_resync()
1792 add_timer(&device->start_resync_timer); in drbd_start_resync()
1796 mutex_lock(device->state_mutex); in drbd_start_resync()
1800 clear_bit(B_RS_H_DONE, &device->flags); in drbd_start_resync()
1802 if (device->state.conn < C_CONNECTED in drbd_start_resync()
1830 device->rs_failed = 0; in drbd_start_resync()
1831 device->rs_paused = 0; in drbd_start_resync()
1832 device->rs_same_csum = 0; in drbd_start_resync()
1833 device->rs_last_sect_ev = 0; in drbd_start_resync()
1834 device->rs_total = tw; in drbd_start_resync()
1835 device->rs_start = now; in drbd_start_resync()
1837 device->rs_mark_left[i] = tw; in drbd_start_resync()
1838 device->rs_mark_time[i] = now; in drbd_start_resync()
1841 /* Forget potentially stale cached per resync extent bit-counts. in drbd_start_resync()
1844 spin_lock(&device->al_lock); in drbd_start_resync()
1845 lc_reset(device->resync); in drbd_start_resync()
1846 device->resync_locked = 0; in drbd_start_resync()
1847 device->resync_wenr = LC_FREE; in drbd_start_resync()
1848 spin_unlock(&device->al_lock); in drbd_start_resync()
1853 wake_up(&device->al_wait); /* for lc_reset() above */ in drbd_start_resync()
1856 device->rs_last_bcast = jiffies - HZ; in drbd_start_resync()
1860 (unsigned long) device->rs_total << (BM_BLOCK_SHIFT-10), in drbd_start_resync()
1861 (unsigned long) device->rs_total); in drbd_start_resync()
1863 device->bm_resync_fo = 0; in drbd_start_resync()
1864 device->use_csums = use_checksum_based_resync(connection, device); in drbd_start_resync()
1866 device->use_csums = false; in drbd_start_resync()
1876 if (side == C_SYNC_SOURCE && connection->agreed_pro_version < 96) in drbd_start_resync()
1879 if (connection->agreed_pro_version < 95 && device->rs_total == 0) { in drbd_start_resync()
1883 * resync-finished notifications, but the fix in drbd_start_resync()
1895 nc = rcu_dereference(connection->net_conf); in drbd_start_resync()
1896 timeo = nc->ping_int * HZ + nc->ping_timeo * HZ / 9; in drbd_start_resync()
1904 /* ns.conn may already be != device->state.conn, in drbd_start_resync()
1909 mod_timer(&device->resync_timer, jiffies); in drbd_start_resync()
1915 mutex_unlock(device->state_mutex); in drbd_start_resync()
1920 struct drbd_device *device = peer_device->device; in update_on_disk_bitmap()
1922 device->rs_last_bcast = jiffies; in update_on_disk_bitmap()
1928 if (resync_done && is_sync_state(device->state.conn)) in update_on_disk_bitmap()
1933 device->rs_last_bcast = jiffies; in update_on_disk_bitmap()
1939 lc_destroy(device->resync); in drbd_ldev_destroy()
1940 device->resync = NULL; in drbd_ldev_destroy()
1941 lc_destroy(device->act_log); in drbd_ldev_destroy()
1942 device->act_log = NULL; in drbd_ldev_destroy()
1945 drbd_backing_dev_free(device, device->ldev); in drbd_ldev_destroy()
1946 device->ldev = NULL; in drbd_ldev_destroy()
1949 clear_bit(GOING_DISKLESS, &device->flags); in drbd_ldev_destroy()
1950 wake_up(&device->misc_wait); in drbd_ldev_destroy()
1956 D_ASSERT(device, device->state.disk == D_FAILED); in go_diskless()
1975 if (device->bitmap && device->ldev) { in go_diskless()
1982 if (test_bit(WAS_READ_ERROR, &device->flags)) { in go_diskless()
2009 td->start_jif = jiffies; in __update_timing_details()
2010 td->cb_addr = cb; in __update_timing_details()
2011 td->caller_fn = fn; in __update_timing_details()
2012 td->line = line; in __update_timing_details()
2013 td->cb_nr = *cb_nr; in __update_timing_details()
2062 idr_for_each_entry(&connection->peer_devices, peer_device, vnr) { in do_unqueued_work()
2063 struct drbd_device *device = peer_device->device; in do_unqueued_work()
2064 unsigned long todo = get_work_bits(&device->flags); in do_unqueued_work()
2068 kref_get(&device->kref); in do_unqueued_work()
2071 kref_put(&device->kref, drbd_destroy_device); in do_unqueued_work()
2079 spin_lock_irq(&queue->q_lock); in dequeue_work_batch()
2080 list_splice_tail_init(&queue->q, work_list); in dequeue_work_batch()
2081 spin_unlock_irq(&queue->q_lock); in dequeue_work_batch()
2091 dequeue_work_batch(&connection->sender_work, work_list); in wait_for_work()
2102 nc = rcu_dereference(connection->net_conf); in wait_for_work()
2103 uncork = nc ? nc->tcp_cork : 0; in wait_for_work()
2106 mutex_lock(&connection->data.mutex); in wait_for_work()
2107 if (connection->data.socket) in wait_for_work()
2108 tcp_sock_set_cork(connection->data.socket->sk, false); in wait_for_work()
2109 mutex_unlock(&connection->data.mutex); in wait_for_work()
2114 prepare_to_wait(&connection->sender_work.q_wait, &wait, TASK_INTERRUPTIBLE); in wait_for_work()
2115 spin_lock_irq(&connection->resource->req_lock); in wait_for_work()
2116 spin_lock(&connection->sender_work.q_lock); /* FIXME get rid of this one? */ in wait_for_work()
2117 if (!list_empty(&connection->sender_work.q)) in wait_for_work()
2118 list_splice_tail_init(&connection->sender_work.q, work_list); in wait_for_work()
2119 spin_unlock(&connection->sender_work.q_lock); /* FIXME get rid of this one? */ in wait_for_work()
2121 spin_unlock_irq(&connection->resource->req_lock); in wait_for_work()
2125 /* We found nothing new to do, no to-be-communicated request, in wait_for_work()
2127 * epoch. Next incoming request epoch will be connection -> in wait_for_work()
2133 atomic_read(&connection->current_tle_nr) != in wait_for_work()
2134 connection->send.current_epoch_nr; in wait_for_work()
2135 spin_unlock_irq(&connection->resource->req_lock); in wait_for_work()
2139 connection->send.current_epoch_nr + 1); in wait_for_work()
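Note: even with nothing queued, once incoming writes have opened a newer transfer-log epoch than the one last sent (the comparison a few lines above), the sender still emits a barrier for the epoch that was left open, so the peer is not kept waiting on it.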
2141 if (test_bit(DEVICE_WORK_PENDING, &connection->flags)) in wait_for_work()
2145 if (get_t_state(&connection->worker) != RUNNING) in wait_for_work()
2153 finish_wait(&connection->sender_work.q_wait, &wait); in wait_for_work()
2157 nc = rcu_dereference(connection->net_conf); in wait_for_work()
2158 cork = nc ? nc->tcp_cork : 0; in wait_for_work()
2160 mutex_lock(&connection->data.mutex); in wait_for_work()
2161 if (connection->data.socket) { in wait_for_work()
2163 tcp_sock_set_cork(connection->data.socket->sk, true); in wait_for_work()
2165 tcp_sock_set_cork(connection->data.socket->sk, false); in wait_for_work()
2167 mutex_unlock(&connection->data.mutex); in wait_for_work()
2172 struct drbd_connection *connection = thi->connection; in drbd_worker()
2186 if (test_and_clear_bit(DEVICE_WORK_PENDING, &connection->flags)) { in drbd_worker()
2205 list_del_init(&w->list); in drbd_worker()
2206 update_worker_timing_details(connection, w->cb); in drbd_worker()
2207 if (w->cb(w, connection->cstate < C_WF_REPORT_PARAMS) == 0) in drbd_worker()
2209 if (connection->cstate >= C_WF_REPORT_PARAMS) in drbd_worker()
2215 if (test_and_clear_bit(DEVICE_WORK_PENDING, &connection->flags)) { in drbd_worker()
2221 list_del_init(&w->list); in drbd_worker()
2222 update_worker_timing_details(connection, w->cb); in drbd_worker()
2223 w->cb(w, 1); in drbd_worker()
2225 dequeue_work_batch(&connection->sender_work, &work_list); in drbd_worker()
2226 } while (!list_empty(&work_list) || test_bit(DEVICE_WORK_PENDING, &connection->flags)); in drbd_worker()
2229 idr_for_each_entry(&connection->peer_devices, peer_device, vnr) { in drbd_worker()
2230 struct drbd_device *device = peer_device->device; in drbd_worker()
2231 D_ASSERT(device, device->state.disk == D_DISKLESS && device->state.conn == C_STANDALONE); in drbd_worker()
2232 kref_get(&device->kref); in drbd_worker()
2235 kref_put(&device->kref, drbd_destroy_device); in drbd_worker()