// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 */

#include <linux/sched.h>
#include <linux/sched/signal.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/sort.h>
#include <linux/rcupdate.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/percpu_counter.h>
#include <linux/lockdep.h>
#include <linux/crc32c.h>
#include "ctree.h"
#include "extent-tree.h"
#include "transaction.h"
#include "disk-io.h"
#include "print-tree.h"
#include "volumes.h"
#include "raid56.h"
#include "locking.h"
#include "free-space-cache.h"
#include "free-space-tree.h"
#include "qgroup.h"
#include "ref-verify.h"
#include "space-info.h"
#include "block-rsv.h"
#include "discard.h"
#include "zoned.h"
#include "dev-replace.h"
#include "fs.h"
#include "accessors.h"
#include "root-tree.h"
#include "file-item.h"
#include "orphan.h"
#include "tree-checker.h"
#include "raid-stripe-tree.h"

#undef SCRAMBLE_DELAYED_REFS

static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
			       struct btrfs_delayed_ref_head *href,
			       struct btrfs_delayed_ref_node *node,
			       struct btrfs_delayed_extent_op *extra_op);
static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
				    struct extent_buffer *leaf,
				    struct btrfs_extent_item *ei);
static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
				      u64 parent, u64 root_objectid,
				      u64 flags, u64 owner, u64 offset,
				      struct btrfs_key *ins, int ref_mod, u64 oref_root);
static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
				     struct btrfs_delayed_ref_node *node,
				     struct btrfs_delayed_extent_op *extent_op);
static int find_next_key(struct btrfs_path *path, int level,
			 struct btrfs_key *key);

static int block_group_bits(struct btrfs_block_group *cache, u64 bits)
{
	return (cache->flags & bits) == bits;
}

/* simple helper to search for an existing data extent at a given offset */
int btrfs_lookup_data_extent(struct btrfs_fs_info *fs_info, u64 start, u64 len)
{
	struct btrfs_root *root = btrfs_extent_root(fs_info, start);
	int ret;
	struct btrfs_key key;
	struct btrfs_path *path;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = start;
	key.offset = len;
	key.type = BTRFS_EXTENT_ITEM_KEY;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	btrfs_free_path(path);
	return ret;
}

/*
 * Helper function to look up the reference count and flags of a tree block.
 *
 * The head node for a delayed ref is used to store the sum of all the
 * reference count modifications queued up in the rbtree.  The head node
 * may also store the extent flags to set.  This way you can check what the
 * reference count and extent flags would be after all of the delayed refs
 * are processed, without actually processing them.
 */
int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
			     struct btrfs_fs_info *fs_info, u64 bytenr,
			     u64 offset, int metadata, u64 *refs, u64 *flags,
			     u64 *owning_root)
{
	struct btrfs_root *extent_root;
	struct btrfs_delayed_ref_head *head;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_path *path;
	struct btrfs_key key;
	u64 num_refs;
	u64 extent_flags;
	u64 owner = 0;
	int ret;

	/*
	 * If we don't have skinny metadata, don't bother doing anything
	 * different
	 */
	if (metadata && !btrfs_fs_incompat(fs_info, SKINNY_METADATA)) {
		offset = fs_info->nodesize;
		metadata = 0;
	}

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

search_again:
	key.objectid = bytenr;
	key.offset = offset;
	if (metadata)
		key.type = BTRFS_METADATA_ITEM_KEY;
	else
		key.type = BTRFS_EXTENT_ITEM_KEY;

	extent_root = btrfs_extent_root(fs_info, bytenr);
	ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
	if (ret < 0)
		goto out_free;

	if (ret > 0 && key.type == BTRFS_METADATA_ITEM_KEY) {
		if (path->slots[0]) {
			path->slots[0]--;
			btrfs_item_key_to_cpu(path->nodes[0], &key,
					      path->slots[0]);
			if (key.objectid == bytenr &&
			    key.type == BTRFS_EXTENT_ITEM_KEY &&
			    key.offset == fs_info->nodesize)
				ret = 0;
		}
	}

	if (ret == 0) {
		struct extent_buffer *leaf = path->nodes[0];
		struct btrfs_extent_item *ei;
		const u32 item_size = btrfs_item_size(leaf, path->slots[0]);

		if (unlikely(item_size < sizeof(*ei))) {
			ret = -EUCLEAN;
			btrfs_err(fs_info,
			"unexpected extent item size, has %u expect >= %zu",
				  item_size, sizeof(*ei));
			btrfs_abort_transaction(trans, ret);
			goto out_free;
		}

		ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
		num_refs = btrfs_extent_refs(leaf, ei);
		if (unlikely(num_refs == 0)) {
			ret = -EUCLEAN;
			btrfs_err(fs_info,
		"unexpected zero reference count for extent item (%llu %u %llu)",
				  key.objectid, key.type, key.offset);
			btrfs_abort_transaction(trans, ret);
			goto out_free;
		}
		extent_flags = btrfs_extent_flags(leaf, ei);
		owner = btrfs_get_extent_owner_root(fs_info, leaf, path->slots[0]);
	} else {
		num_refs = 0;
		extent_flags = 0;
		ret = 0;
	}

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);
	head = btrfs_find_delayed_ref_head(fs_info, delayed_refs, bytenr);
	if (head) {
		if (!mutex_trylock(&head->mutex)) {
			refcount_inc(&head->refs);
			spin_unlock(&delayed_refs->lock);

			btrfs_release_path(path);

			/*
			 * Mutex was contended, block until it's released and try
			 * again
			 */
			mutex_lock(&head->mutex);
			mutex_unlock(&head->mutex);
			btrfs_put_delayed_ref_head(head);
			goto search_again;
		}
		spin_lock(&head->lock);
		if (head->extent_op && head->extent_op->update_flags)
			extent_flags |= head->extent_op->flags_to_set;

		num_refs += head->ref_mod;
		spin_unlock(&head->lock);
		mutex_unlock(&head->mutex);
	}
	spin_unlock(&delayed_refs->lock);

	WARN_ON(num_refs == 0);
	if (refs)
		*refs = num_refs;
	if (flags)
		*flags = extent_flags;
	if (owning_root)
		*owning_root = owner;
out_free:
	btrfs_free_path(path);
	return ret;
}

/*
 * Back reference rules.  Back refs have three main goals:
 *
 * 1) differentiate between all holders of references to an extent so that
 *    when a reference is dropped we can make sure it was a valid reference
 *    before freeing the extent.
 *
 * 2) Provide enough information to quickly find the holders of an extent
 *    if we notice a given block is corrupted or bad.
 *
 * 3) Make it easy to migrate blocks for FS shrinking or storage pool
 *    maintenance.  This is actually the same as #2, but with a slightly
 *    different use case.
 *
 * There are two kinds of back refs.  Implicit back refs are optimized
 * for pointers in non-shared tree blocks.  For a given pointer in a block,
 * back refs of this kind provide information about the block's owner tree
 * and the pointer's key.  This information allows us to find the block by
 * b-tree searching.  Full back refs are for pointers in tree blocks not
 * referenced by their owner trees.  The location of the tree block is
 * recorded in the back ref.  Actually the full back ref is generic, and
 * can be used in all cases where the implicit back ref is used.  The major
 * shortcoming of full back refs is their overhead.  Every time a tree
 * block gets COWed, we have to update the back refs entry for all pointers
 * in it.
 *
 * For a newly allocated tree block, we use implicit back refs for
 * pointers in it.  This means most tree related operations only involve
 * implicit back refs.  For a tree block created in an old transaction, the
 * only way to drop a reference to it is to COW it.  So we can detect the
 * event that a tree block loses its owner tree's reference and do the
 * back refs conversion.
 *
 * When a tree block is COWed through a tree, there are four cases:
 *
 * The reference count of the block is one and the tree is the block's
 * owner tree.  Nothing to do in this case.
 *
 * The reference count of the block is one and the tree is not the
 * block's owner tree.  In this case, full back refs are used for pointers
 * in the block.  Remove these full back refs, add implicit back refs for
 * every pointer in the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * the block's owner tree.  In this case, implicit back refs are used for
 * pointers in the block.  Add full back refs for every pointer in the
 * block, increase lower level extents' reference counts.  The original
 * implicit back refs are carried over to the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * not the block's owner tree.  Add implicit back refs for every pointer in
 * the new block, increase lower level extents' reference count.
 *
 * Back Reference Key composing:
 *
 * The key objectid corresponds to the first byte in the extent,
 * the key type is used to differentiate between types of back refs.
 * The meaning of the key offset depends on the type of back ref.
 *
 * File extents can be referenced by:
 *
 * - multiple snapshots, subvolumes, or different generations in one subvol
 * - different files inside a single subvolume
 * - different offsets inside a file (bookend extents in file.c)
 *
 * The extent ref structure for the implicit back refs has fields for:
 *
 * - objectid of the subvolume root
 * - objectid of the file holding the reference
 * - original offset in the file
 * - how many bookend extents
 *
 * The key offset for the implicit back refs is the hash of the first
 * three fields.
 *
 * The extent ref structure for the full back refs has a field for:
 *
 * - number of pointers in the tree leaf
 *
 * The key offset for the full back refs is the first byte of
 * the tree leaf.
 *
 * When a file extent is allocated, the implicit back refs are used.
 * The fields are filled in:
 *
 *     (root_key.objectid, inode objectid, offset in file, 1)
 *
 * When a file extent is removed during file truncation, we find the
 * corresponding implicit back refs and check the following fields:
 *
 *     (btrfs_header_owner(leaf), inode objectid, offset in file)
 *
 * Btree extents can be referenced by:
 *
 * - Different subvolumes
 *
 * Both the implicit back refs and the full back refs for tree blocks
 * consist only of a key.  The key offset for the implicit back refs is the
 * objectid of the block's owner tree.  The key offset for the full back
 * refs is the first byte of the parent block.
 *
 * When implicit back refs are used, information about the lowest key and
 * level of the tree block is required.  This information is stored in
 * the tree block info structure.
 */
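
/*
 * A short illustration with hypothetical numbers: a file extent at bytenr
 * 13631488, written by inode 257 at file offset 0 in subvolume 5, gets an
 * implicit back ref keyed as
 *
 *     (13631488, BTRFS_EXTENT_DATA_REF_KEY, hash(5, 257, 0))
 *
 * where the hash is computed by hash_extent_data_ref() below.  If the same
 * extent is instead referenced relative to a specific parent leaf at
 * bytenr P (the full variant), the back ref is keyed as
 *
 *     (13631488, BTRFS_SHARED_DATA_REF_KEY, P)
 */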

/*
 * is_data == BTRFS_REF_TYPE_BLOCK, tree block type is required,
 * is_data == BTRFS_REF_TYPE_DATA, data type is required,
 * is_data == BTRFS_REF_TYPE_ANY, either type is OK.
 */
int btrfs_get_extent_inline_ref_type(const struct extent_buffer *eb,
				     struct btrfs_extent_inline_ref *iref,
				     enum btrfs_inline_ref_type is_data)
{
	struct btrfs_fs_info *fs_info = eb->fs_info;
	int type = btrfs_extent_inline_ref_type(eb, iref);
	u64 offset = btrfs_extent_inline_ref_offset(eb, iref);

	if (type == BTRFS_EXTENT_OWNER_REF_KEY) {
		ASSERT(btrfs_fs_incompat(fs_info, SIMPLE_QUOTA));
		return type;
	}

	if (type == BTRFS_TREE_BLOCK_REF_KEY ||
	    type == BTRFS_SHARED_BLOCK_REF_KEY ||
	    type == BTRFS_SHARED_DATA_REF_KEY ||
	    type == BTRFS_EXTENT_DATA_REF_KEY) {
		if (is_data == BTRFS_REF_TYPE_BLOCK) {
			if (type == BTRFS_TREE_BLOCK_REF_KEY)
				return type;
			if (type == BTRFS_SHARED_BLOCK_REF_KEY) {
				ASSERT(fs_info);
				/*
				 * Every shared one has parent tree block,
				 * which must be aligned to sector size.
				 */
				if (offset && IS_ALIGNED(offset, fs_info->sectorsize))
					return type;
			}
		} else if (is_data == BTRFS_REF_TYPE_DATA) {
			if (type == BTRFS_EXTENT_DATA_REF_KEY)
				return type;
			if (type == BTRFS_SHARED_DATA_REF_KEY) {
				ASSERT(fs_info);
				/*
				 * Every shared one has parent tree block,
				 * which must be aligned to sector size.
				 */
				if (offset &&
				    IS_ALIGNED(offset, fs_info->sectorsize))
					return type;
			}
		} else {
			ASSERT(is_data == BTRFS_REF_TYPE_ANY);
			return type;
		}
	}

	WARN_ON(1);
	btrfs_print_leaf(eb);
	btrfs_err(fs_info,
		  "eb %llu iref 0x%lx invalid extent inline ref type %d",
		  eb->start, (unsigned long)iref, type);

	return BTRFS_REF_TYPE_INVALID;
}

u64 hash_extent_data_ref(u64 root_objectid, u64 owner, u64 offset)
{
	u32 high_crc = ~(u32)0;
	u32 low_crc = ~(u32)0;
	__le64 lenum;

	lenum = cpu_to_le64(root_objectid);
	high_crc = crc32c(high_crc, &lenum, sizeof(lenum));
	lenum = cpu_to_le64(owner);
	low_crc = crc32c(low_crc, &lenum, sizeof(lenum));
	lenum = cpu_to_le64(offset);
	low_crc = crc32c(low_crc, &lenum, sizeof(lenum));

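	/*
	 * Note: the high crc is shifted by 31 bits, not 32.  This hash ends
	 * up in on-disk key offsets (see the back reference rules above), so
	 * the historical shift width is part of the disk format and must
	 * stay as is.
	 */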
	return ((u64)high_crc << 31) ^ (u64)low_crc;
}

static u64 hash_extent_data_ref_item(struct extent_buffer *leaf,
				     struct btrfs_extent_data_ref *ref)
{
	return hash_extent_data_ref(btrfs_extent_data_ref_root(leaf, ref),
				    btrfs_extent_data_ref_objectid(leaf, ref),
				    btrfs_extent_data_ref_offset(leaf, ref));
}

static int match_extent_data_ref(struct extent_buffer *leaf,
				 struct btrfs_extent_data_ref *ref,
				 u64 root_objectid, u64 owner, u64 offset)
{
	if (btrfs_extent_data_ref_root(leaf, ref) != root_objectid ||
	    btrfs_extent_data_ref_objectid(leaf, ref) != owner ||
	    btrfs_extent_data_ref_offset(leaf, ref) != offset)
		return 0;
	return 1;
}

static noinline int lookup_extent_data_ref(struct btrfs_trans_handle *trans,
					   struct btrfs_path *path,
					   u64 bytenr, u64 parent,
					   u64 root_objectid,
					   u64 owner, u64 offset)
{
	struct btrfs_root *root = btrfs_extent_root(trans->fs_info, bytenr);
	struct btrfs_key key;
	struct btrfs_extent_data_ref *ref;
	struct extent_buffer *leaf;
	u32 nritems;
	int recow;
	int ret;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_DATA_REF_KEY;
		key.offset = parent;
	} else {
		key.type = BTRFS_EXTENT_DATA_REF_KEY;
		key.offset = hash_extent_data_ref(root_objectid,
						  owner, offset);
	}
again:
	recow = 0;
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0)
		return ret;

	if (parent) {
		if (ret)
			return -ENOENT;
		return 0;
	}

	ret = -ENOENT;
	leaf = path->nodes[0];
	nritems = btrfs_header_nritems(leaf);
	while (1) {
		if (path->slots[0] >= nritems) {
			ret = btrfs_next_leaf(root, path);
			if (ret) {
				if (ret > 0)
					return -ENOENT;
				return ret;
			}

			leaf = path->nodes[0];
			nritems = btrfs_header_nritems(leaf);
			recow = 1;
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.objectid != bytenr ||
		    key.type != BTRFS_EXTENT_DATA_REF_KEY)
			goto fail;

		ref = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_extent_data_ref);

		if (match_extent_data_ref(leaf, ref, root_objectid,
					  owner, offset)) {
			if (recow) {
				btrfs_release_path(path);
				goto again;
			}
			ret = 0;
			break;
		}
		path->slots[0]++;
	}
fail:
	return ret;
}

static noinline int insert_extent_data_ref(struct btrfs_trans_handle *trans,
					   struct btrfs_path *path,
					   struct btrfs_delayed_ref_node *node,
					   u64 bytenr)
{
	struct btrfs_root *root = btrfs_extent_root(trans->fs_info, bytenr);
	struct btrfs_key key;
	struct extent_buffer *leaf;
	u64 owner = btrfs_delayed_ref_owner(node);
	u64 offset = btrfs_delayed_ref_offset(node);
	u32 size;
	u32 num_refs;
	int ret;

	key.objectid = bytenr;
	if (node->parent) {
		key.type = BTRFS_SHARED_DATA_REF_KEY;
		key.offset = node->parent;
		size = sizeof(struct btrfs_shared_data_ref);
	} else {
		key.type = BTRFS_EXTENT_DATA_REF_KEY;
		key.offset = hash_extent_data_ref(node->ref_root, owner, offset);
		size = sizeof(struct btrfs_extent_data_ref);
	}

	ret = btrfs_insert_empty_item(trans, root, path, &key, size);
	if (ret && ret != -EEXIST)
		goto fail;

	leaf = path->nodes[0];
	if (node->parent) {
		struct btrfs_shared_data_ref *ref;
		ref = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_shared_data_ref);
		if (ret == 0) {
			btrfs_set_shared_data_ref_count(leaf, ref, node->ref_mod);
		} else {
			num_refs = btrfs_shared_data_ref_count(leaf, ref);
			num_refs += node->ref_mod;
			btrfs_set_shared_data_ref_count(leaf, ref, num_refs);
		}
	} else {
		struct btrfs_extent_data_ref *ref;
		while (ret == -EEXIST) {
			ref = btrfs_item_ptr(leaf, path->slots[0],
					     struct btrfs_extent_data_ref);
			if (match_extent_data_ref(leaf, ref, node->ref_root,
						  owner, offset))
				break;
			btrfs_release_path(path);
			key.offset++;
			ret = btrfs_insert_empty_item(trans, root, path, &key,
						      size);
			if (ret && ret != -EEXIST)
				goto fail;

			leaf = path->nodes[0];
		}
		ref = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_extent_data_ref);
		if (ret == 0) {
			btrfs_set_extent_data_ref_root(leaf, ref, node->ref_root);
			btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
			btrfs_set_extent_data_ref_offset(leaf, ref, offset);
			btrfs_set_extent_data_ref_count(leaf, ref, node->ref_mod);
		} else {
			num_refs = btrfs_extent_data_ref_count(leaf, ref);
			num_refs += node->ref_mod;
			btrfs_set_extent_data_ref_count(leaf, ref, num_refs);
		}
	}
	ret = 0;
fail:
	btrfs_release_path(path);
	return ret;
}

static noinline int remove_extent_data_ref(struct btrfs_trans_handle *trans,
					   struct btrfs_root *root,
					   struct btrfs_path *path,
					   int refs_to_drop)
{
	struct btrfs_key key;
	struct btrfs_extent_data_ref *ref1 = NULL;
	struct btrfs_shared_data_ref *ref2 = NULL;
	struct extent_buffer *leaf;
	u32 num_refs = 0;
	int ret = 0;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

	if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
		ref1 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_data_ref);
		num_refs = btrfs_extent_data_ref_count(leaf, ref1);
	} else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
		ref2 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_shared_data_ref);
		num_refs = btrfs_shared_data_ref_count(leaf, ref2);
	} else {
		btrfs_err(trans->fs_info,
			  "unrecognized backref key (%llu %u %llu)",
			  key.objectid, key.type, key.offset);
		btrfs_abort_transaction(trans, -EUCLEAN);
		return -EUCLEAN;
	}

	BUG_ON(num_refs < refs_to_drop);
	num_refs -= refs_to_drop;

	if (num_refs == 0) {
		ret = btrfs_del_item(trans, root, path);
	} else {
		if (key.type == BTRFS_EXTENT_DATA_REF_KEY)
			btrfs_set_extent_data_ref_count(leaf, ref1, num_refs);
		else if (key.type == BTRFS_SHARED_DATA_REF_KEY)
			btrfs_set_shared_data_ref_count(leaf, ref2, num_refs);
	}
	return ret;
}

static noinline u32 extent_data_ref_count(struct btrfs_path *path,
					  struct btrfs_extent_inline_ref *iref)
{
	struct btrfs_key key;
	struct extent_buffer *leaf;
	struct btrfs_extent_data_ref *ref1;
	struct btrfs_shared_data_ref *ref2;
	u32 num_refs = 0;
	int type;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

	if (iref) {
		/*
		 * If type is invalid, we should have bailed out earlier than
		 * this call.
		 */
		type = btrfs_get_extent_inline_ref_type(leaf, iref, BTRFS_REF_TYPE_DATA);
		ASSERT(type != BTRFS_REF_TYPE_INVALID);
		if (type == BTRFS_EXTENT_DATA_REF_KEY) {
			ref1 = (struct btrfs_extent_data_ref *)(&iref->offset);
			num_refs = btrfs_extent_data_ref_count(leaf, ref1);
		} else {
			ref2 = (struct btrfs_shared_data_ref *)(iref + 1);
			num_refs = btrfs_shared_data_ref_count(leaf, ref2);
		}
	} else if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
		ref1 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_data_ref);
		num_refs = btrfs_extent_data_ref_count(leaf, ref1);
	} else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
		ref2 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_shared_data_ref);
		num_refs = btrfs_shared_data_ref_count(leaf, ref2);
	} else {
		WARN_ON(1);
	}
	return num_refs;
}

static noinline int lookup_tree_block_ref(struct btrfs_trans_handle *trans,
					  struct btrfs_path *path,
					  u64 bytenr, u64 parent,
					  u64 root_objectid)
{
	struct btrfs_root *root = btrfs_extent_root(trans->fs_info, bytenr);
	struct btrfs_key key;
	int ret;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_BLOCK_REF_KEY;
		key.offset = parent;
	} else {
		key.type = BTRFS_TREE_BLOCK_REF_KEY;
		key.offset = root_objectid;
	}

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0)
		ret = -ENOENT;
	return ret;
}

static noinline int insert_tree_block_ref(struct btrfs_trans_handle *trans,
					  struct btrfs_path *path,
					  struct btrfs_delayed_ref_node *node,
					  u64 bytenr)
{
	struct btrfs_root *root = btrfs_extent_root(trans->fs_info, bytenr);
	struct btrfs_key key;
	int ret;

	key.objectid = bytenr;
	if (node->parent) {
		key.type = BTRFS_SHARED_BLOCK_REF_KEY;
		key.offset = node->parent;
	} else {
		key.type = BTRFS_TREE_BLOCK_REF_KEY;
		key.offset = node->ref_root;
	}

	ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
	btrfs_release_path(path);
	return ret;
}

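/*
 * Map (parent, owner) to a back ref key type.  Owners below
 * BTRFS_FIRST_FREE_OBJECTID are tree roots, so the extent is a tree block;
 * anything else is a data extent.  A non-zero parent selects the shared
 * (full) back ref keyed by the parent block, otherwise the implicit back
 * ref keyed by the owner tree is used.
 */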
static inline int extent_ref_type(u64 parent, u64 owner)
{
	int type;
	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		if (parent > 0)
			type = BTRFS_SHARED_BLOCK_REF_KEY;
		else
			type = BTRFS_TREE_BLOCK_REF_KEY;
	} else {
		if (parent > 0)
			type = BTRFS_SHARED_DATA_REF_KEY;
		else
			type = BTRFS_EXTENT_DATA_REF_KEY;
	}
	return type;
}

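/*
 * Starting at @level, walk up the path and return the first key that
 * follows the current slot.  Returns 0 and fills @key on success, or 1 if
 * the path already points at the last item in the tree.
 */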
static int find_next_key(struct btrfs_path *path, int level,
			 struct btrfs_key *key)
{
	for (; level < BTRFS_MAX_LEVEL; level++) {
		if (!path->nodes[level])
			break;
		if (path->slots[level] + 1 >=
		    btrfs_header_nritems(path->nodes[level]))
			continue;
		if (level == 0)
			btrfs_item_key_to_cpu(path->nodes[level], key,
					      path->slots[level] + 1);
		else
			btrfs_node_key_to_cpu(path->nodes[level], key,
					      path->slots[level] + 1);
		return 0;
	}
	return 1;
}

/*
 * Look for an inline back ref.  If the back ref is found, *ref_ret is set
 * to the address of the inline back ref, and 0 is returned.
 *
 * If the back ref isn't found, *ref_ret is set to the address where it
 * should be inserted, and -ENOENT is returned.
 *
 * If insert is true and there are too many inline back refs, the path
 * points to the extent item, and -EAGAIN is returned.
 *
 * NOTE: inline back refs are ordered in the same way that back ref
 *	 items in the tree are ordered.
 */
static noinline_for_stack
int lookup_inline_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref **ref_ret,
				 u64 bytenr, u64 num_bytes,
				 u64 parent, u64 root_objectid,
				 u64 owner, u64 offset, int insert)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *root = btrfs_extent_root(fs_info, bytenr);
	struct btrfs_key key;
	struct extent_buffer *leaf;
	struct btrfs_extent_item *ei;
	struct btrfs_extent_inline_ref *iref;
	u64 flags;
	u64 item_size;
	unsigned long ptr;
	unsigned long end;
	int extra_size;
	int type;
	int want;
	int ret;
	bool skinny_metadata = btrfs_fs_incompat(fs_info, SKINNY_METADATA);
	int needed;

	key.objectid = bytenr;
	key.type = BTRFS_EXTENT_ITEM_KEY;
	key.offset = num_bytes;

	want = extent_ref_type(parent, owner);
	if (insert) {
		extra_size = btrfs_extent_inline_ref_size(want);
		path->search_for_extension = 1;
	} else
		extra_size = -1;

	/*
	 * Owner is our level, so we can just add one to get the level for the
	 * block we are interested in.
	 */
	if (skinny_metadata && owner < BTRFS_FIRST_FREE_OBJECTID) {
		key.type = BTRFS_METADATA_ITEM_KEY;
		key.offset = owner;
	}

again:
	ret = btrfs_search_slot(trans, root, &key, path, extra_size, 1);
	if (ret < 0)
		goto out;

	/*
	 * We may be a newly converted file system which still has the old fat
	 * extent entries for metadata, so try and see if we have one of those.
	 */
	if (ret > 0 && skinny_metadata) {
		skinny_metadata = false;
		if (path->slots[0]) {
			path->slots[0]--;
			btrfs_item_key_to_cpu(path->nodes[0], &key,
					      path->slots[0]);
			if (key.objectid == bytenr &&
			    key.type == BTRFS_EXTENT_ITEM_KEY &&
			    key.offset == num_bytes)
				ret = 0;
		}
		if (ret) {
			key.objectid = bytenr;
			key.type = BTRFS_EXTENT_ITEM_KEY;
			key.offset = num_bytes;
			btrfs_release_path(path);
			goto again;
		}
	}

	if (ret && !insert) {
		ret = -ENOENT;
		goto out;
	} else if (WARN_ON(ret)) {
		btrfs_print_leaf(path->nodes[0]);
		btrfs_err(fs_info,
"extent item not found for insert, bytenr %llu num_bytes %llu parent %llu root_objectid %llu owner %llu offset %llu",
			  bytenr, num_bytes, parent, root_objectid, owner,
			  offset);
		ret = -EUCLEAN;
		goto out;
	}

	leaf = path->nodes[0];
	item_size = btrfs_item_size(leaf, path->slots[0]);
	if (unlikely(item_size < sizeof(*ei))) {
		ret = -EUCLEAN;
		btrfs_err(fs_info,
			  "unexpected extent item size, has %llu expect >= %zu",
			  item_size, sizeof(*ei));
		btrfs_abort_transaction(trans, ret);
		goto out;
	}

	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	flags = btrfs_extent_flags(leaf, ei);

	ptr = (unsigned long)(ei + 1);
	end = (unsigned long)ei + item_size;

	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK && !skinny_metadata) {
		ptr += sizeof(struct btrfs_tree_block_info);
		BUG_ON(ptr > end);
	}

	if (owner >= BTRFS_FIRST_FREE_OBJECTID)
		needed = BTRFS_REF_TYPE_DATA;
	else
		needed = BTRFS_REF_TYPE_BLOCK;

	ret = -ENOENT;
	while (ptr < end) {
		iref = (struct btrfs_extent_inline_ref *)ptr;
		type = btrfs_get_extent_inline_ref_type(leaf, iref, needed);
		if (type == BTRFS_EXTENT_OWNER_REF_KEY) {
			ASSERT(btrfs_fs_incompat(fs_info, SIMPLE_QUOTA));
			ptr += btrfs_extent_inline_ref_size(type);
			continue;
		}
		if (type == BTRFS_REF_TYPE_INVALID) {
			ret = -EUCLEAN;
			goto out;
		}

		if (want < type)
			break;
		if (want > type) {
			ptr += btrfs_extent_inline_ref_size(type);
			continue;
		}

		if (type == BTRFS_EXTENT_DATA_REF_KEY) {
			struct btrfs_extent_data_ref *dref;
			dref = (struct btrfs_extent_data_ref *)(&iref->offset);
			if (match_extent_data_ref(leaf, dref, root_objectid,
						  owner, offset)) {
				ret = 0;
				break;
			}
			if (hash_extent_data_ref_item(leaf, dref) <
			    hash_extent_data_ref(root_objectid, owner, offset))
				break;
		} else {
			u64 ref_offset;
			ref_offset = btrfs_extent_inline_ref_offset(leaf, iref);
			if (parent > 0) {
				if (parent == ref_offset) {
					ret = 0;
					break;
				}
				if (ref_offset < parent)
					break;
			} else {
				if (root_objectid == ref_offset) {
					ret = 0;
					break;
				}
				if (ref_offset < root_objectid)
					break;
			}
		}
		ptr += btrfs_extent_inline_ref_size(type);
	}

	if (unlikely(ptr > end)) {
		ret = -EUCLEAN;
		btrfs_print_leaf(path->nodes[0]);
		btrfs_crit(fs_info,
"overrun extent record at slot %d while looking for inline extent for root %llu owner %llu offset %llu parent %llu",
			   path->slots[0], root_objectid, owner, offset, parent);
		goto out;
	}

	if (ret == -ENOENT && insert) {
		if (item_size + extra_size >=
		    BTRFS_MAX_EXTENT_ITEM_SIZE(root)) {
			ret = -EAGAIN;
			goto out;
		}

		if (path->slots[0] + 1 < btrfs_header_nritems(path->nodes[0])) {
			struct btrfs_key tmp_key;

			btrfs_item_key_to_cpu(path->nodes[0], &tmp_key, path->slots[0] + 1);
			if (tmp_key.objectid == bytenr &&
			    tmp_key.type < BTRFS_BLOCK_GROUP_ITEM_KEY) {
				ret = -EAGAIN;
				goto out;
			}
			goto out_no_entry;
		}

		if (!path->keep_locks) {
			btrfs_release_path(path);
			path->keep_locks = 1;
			goto again;
		}

		/*
		 * To add new inline back ref, we have to make sure
		 * there is no corresponding back ref item.
		 * For simplicity, we just do not add new inline back
		 * ref if there is any kind of item for this block
		 */
		if (find_next_key(path, 0, &key) == 0 &&
		    key.objectid == bytenr &&
		    key.type < BTRFS_BLOCK_GROUP_ITEM_KEY) {
			ret = -EAGAIN;
			goto out;
		}
	}
out_no_entry:
	*ref_ret = (struct btrfs_extent_inline_ref *)ptr;
out:
	if (path->keep_locks) {
		path->keep_locks = 0;
		btrfs_unlock_up_safe(path, 1);
	}
	if (insert)
		path->search_for_extension = 0;
	return ret;
}

/*
 * helper to add new inline back ref
 */
static noinline_for_stack
void setup_inline_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref *iref,
				 u64 parent, u64 root_objectid,
				 u64 owner, u64 offset, int refs_to_add,
				 struct btrfs_delayed_extent_op *extent_op)
{
	struct extent_buffer *leaf;
	struct btrfs_extent_item *ei;
	unsigned long ptr;
	unsigned long end;
	unsigned long item_offset;
	u64 refs;
	int size;
	int type;

	leaf = path->nodes[0];
	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	item_offset = (unsigned long)iref - (unsigned long)ei;

	type = extent_ref_type(parent, owner);
	size = btrfs_extent_inline_ref_size(type);

	btrfs_extend_item(trans, path, size);

	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	refs = btrfs_extent_refs(leaf, ei);
	refs += refs_to_add;
	btrfs_set_extent_refs(leaf, ei, refs);
	if (extent_op)
		__run_delayed_extent_op(extent_op, leaf, ei);

	ptr = (unsigned long)ei + item_offset;
	end = (unsigned long)ei + btrfs_item_size(leaf, path->slots[0]);
	if (ptr < end - size)
		memmove_extent_buffer(leaf, ptr + size, ptr,
				      end - size - ptr);

	iref = (struct btrfs_extent_inline_ref *)ptr;
	btrfs_set_extent_inline_ref_type(leaf, iref, type);
	if (type == BTRFS_EXTENT_DATA_REF_KEY) {
		struct btrfs_extent_data_ref *dref;
		dref = (struct btrfs_extent_data_ref *)(&iref->offset);
		btrfs_set_extent_data_ref_root(leaf, dref, root_objectid);
		btrfs_set_extent_data_ref_objectid(leaf, dref, owner);
		btrfs_set_extent_data_ref_offset(leaf, dref, offset);
		btrfs_set_extent_data_ref_count(leaf, dref, refs_to_add);
	} else if (type == BTRFS_SHARED_DATA_REF_KEY) {
		struct btrfs_shared_data_ref *sref;
		sref = (struct btrfs_shared_data_ref *)(iref + 1);
		btrfs_set_shared_data_ref_count(leaf, sref, refs_to_add);
		btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
	} else if (type == BTRFS_SHARED_BLOCK_REF_KEY) {
		btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
	} else {
		btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
	}
}

static int lookup_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref **ref_ret,
				 u64 bytenr, u64 num_bytes, u64 parent,
				 u64 root_objectid, u64 owner, u64 offset)
{
	int ret;

	ret = lookup_inline_extent_backref(trans, path, ref_ret, bytenr,
					   num_bytes, parent, root_objectid,
					   owner, offset, 0);
	if (ret != -ENOENT)
		return ret;

	btrfs_release_path(path);
	*ref_ret = NULL;

	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		ret = lookup_tree_block_ref(trans, path, bytenr, parent,
					    root_objectid);
	} else {
		ret = lookup_extent_data_ref(trans, path, bytenr, parent,
					     root_objectid, owner, offset);
	}
	return ret;
}

/*
 * helper to update/remove inline back ref
 */
static noinline_for_stack int update_inline_extent_backref(
				  struct btrfs_trans_handle *trans,
				  struct btrfs_path *path,
				  struct btrfs_extent_inline_ref *iref,
				  int refs_to_mod,
				  struct btrfs_delayed_extent_op *extent_op)
{
	struct extent_buffer *leaf = path->nodes[0];
	struct btrfs_fs_info *fs_info = leaf->fs_info;
	struct btrfs_extent_item *ei;
	struct btrfs_extent_data_ref *dref = NULL;
	struct btrfs_shared_data_ref *sref = NULL;
	unsigned long ptr;
	unsigned long end;
	u32 item_size;
	int size;
	int type;
	u64 refs;

	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	refs = btrfs_extent_refs(leaf, ei);
	if (unlikely(refs_to_mod < 0 && refs + refs_to_mod <= 0)) {
		struct btrfs_key key;
		u32 extent_size;

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.type == BTRFS_METADATA_ITEM_KEY)
			extent_size = fs_info->nodesize;
		else
			extent_size = key.offset;
		btrfs_print_leaf(leaf);
		btrfs_err(fs_info,
	"invalid refs_to_mod for extent %llu num_bytes %u, has %d expect >= -%llu",
			  key.objectid, extent_size, refs_to_mod, refs);
		return -EUCLEAN;
	}
	refs += refs_to_mod;
	btrfs_set_extent_refs(leaf, ei, refs);
	if (extent_op)
		__run_delayed_extent_op(extent_op, leaf, ei);

	type = btrfs_get_extent_inline_ref_type(leaf, iref, BTRFS_REF_TYPE_ANY);
	/*
	 * Function btrfs_get_extent_inline_ref_type() has already printed
	 * error messages.
	 */
	if (unlikely(type == BTRFS_REF_TYPE_INVALID))
		return -EUCLEAN;

	if (type == BTRFS_EXTENT_DATA_REF_KEY) {
		dref = (struct btrfs_extent_data_ref *)(&iref->offset);
		refs = btrfs_extent_data_ref_count(leaf, dref);
	} else if (type == BTRFS_SHARED_DATA_REF_KEY) {
		sref = (struct btrfs_shared_data_ref *)(iref + 1);
		refs = btrfs_shared_data_ref_count(leaf, sref);
	} else {
		refs = 1;
		/*
		 * For tree blocks we can only drop one ref for it, and tree
		 * blocks should not have refs > 1.
		 *
		 * Furthermore if we're inserting a new inline backref, we
		 * won't reach this path either. That would be
		 * setup_inline_extent_backref().
		 */
		if (unlikely(refs_to_mod != -1)) {
			struct btrfs_key key;

			btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

			btrfs_print_leaf(leaf);
			btrfs_err(fs_info,
			"invalid refs_to_mod for tree block %llu, has %d expect -1",
				  key.objectid, refs_to_mod);
			return -EUCLEAN;
		}
	}

	if (unlikely(refs_to_mod < 0 && refs < -refs_to_mod)) {
		struct btrfs_key key;
		u32 extent_size;

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.type == BTRFS_METADATA_ITEM_KEY)
			extent_size = fs_info->nodesize;
		else
			extent_size = key.offset;
		btrfs_print_leaf(leaf);
		btrfs_err(fs_info,
"invalid refs_to_mod for backref entry, iref %lu extent %llu num_bytes %u, has %d expect >= -%llu",
			  (unsigned long)iref, key.objectid, extent_size,
			  refs_to_mod, refs);
		return -EUCLEAN;
	}
	refs += refs_to_mod;

	if (refs > 0) {
		if (type == BTRFS_EXTENT_DATA_REF_KEY)
			btrfs_set_extent_data_ref_count(leaf, dref, refs);
		else
			btrfs_set_shared_data_ref_count(leaf, sref, refs);
	} else {
		size = btrfs_extent_inline_ref_size(type);
		item_size = btrfs_item_size(leaf, path->slots[0]);
		ptr = (unsigned long)iref;
		end = (unsigned long)ei + item_size;
		if (ptr + size < end)
			memmove_extent_buffer(leaf, ptr, ptr + size,
					      end - ptr - size);
		item_size -= size;
		btrfs_truncate_item(trans, path, item_size, 1);
	}
	return 0;
}

static noinline_for_stack
int insert_inline_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_path *path,
				 u64 bytenr, u64 num_bytes, u64 parent,
				 u64 root_objectid, u64 owner,
				 u64 offset, int refs_to_add,
				 struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_extent_inline_ref *iref;
	int ret;

	ret = lookup_inline_extent_backref(trans, path, &iref, bytenr,
					   num_bytes, parent, root_objectid,
					   owner, offset, 1);
	if (ret == 0) {
		/*
		 * We're adding refs to a tree block we already own, this
		 * should not happen at all.
		 */
		if (owner < BTRFS_FIRST_FREE_OBJECTID) {
			btrfs_print_leaf(path->nodes[0]);
			btrfs_crit(trans->fs_info,
"adding refs to an existing tree ref, bytenr %llu num_bytes %llu root_objectid %llu slot %u",
				   bytenr, num_bytes, root_objectid, path->slots[0]);
			return -EUCLEAN;
		}
		ret = update_inline_extent_backref(trans, path, iref,
						   refs_to_add, extent_op);
	} else if (ret == -ENOENT) {
		setup_inline_extent_backref(trans, path, iref, parent,
					    root_objectid, owner, offset,
					    refs_to_add, extent_op);
		ret = 0;
	}
	return ret;
}

static int remove_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref *iref,
				 int refs_to_drop, int is_data)
{
	int ret = 0;

	BUG_ON(!is_data && refs_to_drop != 1);
	if (iref)
		ret = update_inline_extent_backref(trans, path, iref,
						   -refs_to_drop, NULL);
	else if (is_data)
		ret = remove_extent_data_ref(trans, root, path, refs_to_drop);
	else
		ret = btrfs_del_item(trans, root, path);
	return ret;
}

static int btrfs_issue_discard(struct block_device *bdev, u64 start, u64 len,
			       u64 *discarded_bytes)
{
	int j, ret = 0;
	u64 bytes_left, end;
	u64 aligned_start = ALIGN(start, SECTOR_SIZE);

	/* Adjust the range to be aligned to 512B sectors if necessary. */
	if (start != aligned_start) {
		len -= aligned_start - start;
		len = round_down(len, SECTOR_SIZE);
		start = aligned_start;
	}

	*discarded_bytes = 0;

	if (!len)
		return 0;

	end = start + len;
	bytes_left = len;

	/* Skip any superblocks on this device. */
	for (j = 0; j < BTRFS_SUPER_MIRROR_MAX; j++) {
		u64 sb_start = btrfs_sb_offset(j);
		u64 sb_end = sb_start + BTRFS_SUPER_INFO_SIZE;
		u64 size = sb_start - start;

		if (!in_range(sb_start, start, bytes_left) &&
		    !in_range(sb_end, start, bytes_left) &&
		    !in_range(start, sb_start, BTRFS_SUPER_INFO_SIZE))
			continue;

		/*
		 * Superblock spans beginning of range.  Adjust start and
		 * try again.
		 */
		if (sb_start <= start) {
			start += sb_end - start;
			if (start > end) {
				bytes_left = 0;
				break;
			}
			bytes_left = end - start;
			continue;
		}

		if (size) {
			ret = blkdev_issue_discard(bdev, start >> SECTOR_SHIFT,
						   size >> SECTOR_SHIFT,
						   GFP_NOFS);
			if (!ret)
				*discarded_bytes += size;
			else if (ret != -EOPNOTSUPP)
				return ret;
		}

		start = sb_end;
		if (start > end) {
			bytes_left = 0;
			break;
		}
		bytes_left = end - start;
	}

	while (bytes_left) {
		u64 bytes_to_discard = min(BTRFS_MAX_DISCARD_CHUNK_SIZE, bytes_left);

		ret = blkdev_issue_discard(bdev, start >> SECTOR_SHIFT,
					   bytes_to_discard >> SECTOR_SHIFT,
					   GFP_NOFS);

		if (ret) {
			if (ret != -EOPNOTSUPP)
				break;
			continue;
		}

		start += bytes_to_discard;
		bytes_left -= bytes_to_discard;
		*discarded_bytes += bytes_to_discard;

		if (btrfs_trim_interrupted()) {
			ret = -ERESTARTSYS;
			break;
		}
	}

	return ret;
}

static int do_discard_extent(struct btrfs_discard_stripe *stripe, u64 *bytes)
{
	struct btrfs_device *dev = stripe->dev;
	struct btrfs_fs_info *fs_info = dev->fs_info;
	struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
	u64 phys = stripe->physical;
	u64 len = stripe->length;
	u64 discarded = 0;
	int ret = 0;

	/* Zone reset on a zoned filesystem */
	if (btrfs_can_zone_reset(dev, phys, len)) {
		u64 src_disc;

		ret = btrfs_reset_device_zone(dev, phys, len, &discarded);
		if (ret)
			goto out;

		if (!btrfs_dev_replace_is_ongoing(dev_replace) ||
		    dev != dev_replace->srcdev)
			goto out;

		src_disc = discarded;

		/* Send to replace target as well */
		ret = btrfs_reset_device_zone(dev_replace->tgtdev, phys, len,
					      &discarded);
		discarded += src_disc;
	} else if (bdev_max_discard_sectors(stripe->dev->bdev)) {
		ret = btrfs_issue_discard(dev->bdev, phys, len, &discarded);
	} else {
		ret = 0;
		*bytes = 0;
	}

out:
	*bytes = discarded;
	return ret;
}

int btrfs_discard_extent(struct btrfs_fs_info *fs_info, u64 bytenr,
			 u64 num_bytes, u64 *actual_bytes)
{
	int ret = 0;
	u64 discarded_bytes = 0;
	u64 end = bytenr + num_bytes;
	u64 cur = bytenr;

	/*
	 * Avoid races with device replace and make sure the devices in the
	 * stripes don't go away while we are discarding.
	 */
	btrfs_bio_counter_inc_blocked(fs_info);
	while (cur < end) {
		struct btrfs_discard_stripe *stripes;
		unsigned int num_stripes;
		int i;

		num_bytes = end - cur;
		stripes = btrfs_map_discard(fs_info, cur, &num_bytes, &num_stripes);
		if (IS_ERR(stripes)) {
			ret = PTR_ERR(stripes);
			if (ret == -EOPNOTSUPP)
				ret = 0;
			break;
		}

		for (i = 0; i < num_stripes; i++) {
			struct btrfs_discard_stripe *stripe = stripes + i;
			u64 bytes;

			if (!stripe->dev->bdev) {
				ASSERT(btrfs_test_opt(fs_info, DEGRADED));
				continue;
			}

			if (!test_bit(BTRFS_DEV_STATE_WRITEABLE,
					&stripe->dev->dev_state))
				continue;

			ret = do_discard_extent(stripe, &bytes);
			if (ret) {
				/*
				 * Keep going if discard is not supported by the
				 * device.
				 */
				if (ret != -EOPNOTSUPP)
					break;
				ret = 0;
			} else {
				discarded_bytes += bytes;
			}
		}
		kfree(stripes);
		if (ret)
			break;
		cur += num_bytes;
	}
	btrfs_bio_counter_dec(fs_info);
	if (actual_bytes)
		*actual_bytes = discarded_bytes;
	return ret;
}

/* Can return -ENOMEM */
int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
			 struct btrfs_ref *generic_ref)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	int ret;

	ASSERT(generic_ref->type != BTRFS_REF_NOT_SET &&
	       generic_ref->action);
	BUG_ON(generic_ref->type == BTRFS_REF_METADATA &&
	       generic_ref->ref_root == BTRFS_TREE_LOG_OBJECTID);

	if (generic_ref->type == BTRFS_REF_METADATA)
		ret = btrfs_add_delayed_tree_ref(trans, generic_ref, NULL);
	else
		ret = btrfs_add_delayed_data_ref(trans, generic_ref, 0);

	btrfs_ref_tree_mod(fs_info, generic_ref);

	return ret;
}

/*
 * Insert a backreference for a given extent.
 *
 * The counterpart is in __btrfs_free_extent(), with examples and more
 * details on how it works.
 *
 * @trans:	    Handle of transaction
 *
 * @node:	    The delayed ref node used to get the bytenr/length for
 *		    the extent whose references are incremented.
 *
 * @extent_op:      Pointer to a structure holding information necessary for
 *                  updating a tree block's flags
 *
 */
static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
				  struct btrfs_delayed_ref_node *node,
				  struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_extent_item *item;
	struct btrfs_key key;
	u64 bytenr = node->bytenr;
	u64 num_bytes = node->num_bytes;
	u64 owner = btrfs_delayed_ref_owner(node);
	u64 offset = btrfs_delayed_ref_offset(node);
	u64 refs;
	int refs_to_add = node->ref_mod;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* This will setup the path even if it fails to insert the back ref. */
	ret = insert_inline_extent_backref(trans, path, bytenr, num_bytes,
					   node->parent, node->ref_root, owner,
					   offset, refs_to_add, extent_op);
	if ((ret < 0 && ret != -EAGAIN) || !ret)
		goto out;

	/*
	 * Ok we had -EAGAIN which means we didn't have space to insert an
	 * inline extent ref, so just update the reference count and add a
	 * normal backref.
	 */
	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	refs = btrfs_extent_refs(leaf, item);
	btrfs_set_extent_refs(leaf, item, refs + refs_to_add);
	if (extent_op)
		__run_delayed_extent_op(extent_op, leaf, item);

	btrfs_release_path(path);

	/* Now insert the actual backref. */
	if (owner < BTRFS_FIRST_FREE_OBJECTID)
		ret = insert_tree_block_ref(trans, path, node, bytenr);
	else
		ret = insert_extent_data_ref(trans, path, node, bytenr);

	if (ret)
		btrfs_abort_transaction(trans, ret);
out:
	btrfs_free_path(path);
	return ret;
}

static void free_head_ref_squota_rsv(struct btrfs_fs_info *fs_info,
				     struct btrfs_delayed_ref_head *href)
{
	u64 root = href->owning_root;

	/*
	 * Don't check must_insert_reserved, as this is called from contexts
	 * where it has already been unset.
	 */
	if (btrfs_qgroup_mode(fs_info) != BTRFS_QGROUP_MODE_SIMPLE ||
	    !href->is_data || !is_fstree(root))
		return;

	btrfs_qgroup_free_refroot(fs_info, root, href->reserved_bytes,
				  BTRFS_QGROUP_RSV_DATA);
}

static int run_delayed_data_ref(struct btrfs_trans_handle *trans,
				struct btrfs_delayed_ref_head *href,
				struct btrfs_delayed_ref_node *node,
				struct btrfs_delayed_extent_op *extent_op,
				bool insert_reserved)
{
	int ret = 0;
	u64 parent = 0;
	u64 flags = 0;

	trace_run_delayed_data_ref(trans->fs_info, node);

	if (node->type == BTRFS_SHARED_DATA_REF_KEY)
		parent = node->parent;

	if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
		struct btrfs_key key;
		struct btrfs_squota_delta delta = {
			.root = href->owning_root,
			.num_bytes = node->num_bytes,
			.is_data = true,
			.is_inc	= true,
			.generation = trans->transid,
		};
		u64 owner = btrfs_delayed_ref_owner(node);
		u64 offset = btrfs_delayed_ref_offset(node);

		if (extent_op)
			flags |= extent_op->flags_to_set;

		key.objectid = node->bytenr;
		key.type = BTRFS_EXTENT_ITEM_KEY;
		key.offset = node->num_bytes;

		ret = alloc_reserved_file_extent(trans, parent, node->ref_root,
						 flags, owner, offset, &key,
						 node->ref_mod,
						 href->owning_root);
		free_head_ref_squota_rsv(trans->fs_info, href);
		if (!ret)
			ret = btrfs_record_squota_delta(trans->fs_info, &delta);
	} else if (node->action == BTRFS_ADD_DELAYED_REF) {
		ret = __btrfs_inc_extent_ref(trans, node, extent_op);
	} else if (node->action == BTRFS_DROP_DELAYED_REF) {
		ret = __btrfs_free_extent(trans, href, node, extent_op);
	} else {
		BUG();
	}
	return ret;
}

static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
				    struct extent_buffer *leaf,
				    struct btrfs_extent_item *ei)
{
	u64 flags = btrfs_extent_flags(leaf, ei);
	if (extent_op->update_flags) {
		flags |= extent_op->flags_to_set;
		btrfs_set_extent_flags(leaf, ei, flags);
	}

	if (extent_op->update_key) {
		struct btrfs_tree_block_info *bi;
		BUG_ON(!(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK));
		bi = (struct btrfs_tree_block_info *)(ei + 1);
		btrfs_set_tree_block_key(leaf, bi, &extent_op->key);
	}
}
1626 
run_delayed_extent_op(struct btrfs_trans_handle * trans,struct btrfs_delayed_ref_head * head,struct btrfs_delayed_extent_op * extent_op)1627 static int run_delayed_extent_op(struct btrfs_trans_handle *trans,
1628 				 struct btrfs_delayed_ref_head *head,
1629 				 struct btrfs_delayed_extent_op *extent_op)
1630 {
1631 	struct btrfs_fs_info *fs_info = trans->fs_info;
1632 	struct btrfs_root *root;
1633 	struct btrfs_key key;
1634 	struct btrfs_path *path;
1635 	struct btrfs_extent_item *ei;
1636 	struct extent_buffer *leaf;
1637 	u32 item_size;
1638 	int ret;
1639 	int metadata = 1;
1640 
1641 	if (TRANS_ABORTED(trans))
1642 		return 0;
1643 
1644 	if (!btrfs_fs_incompat(fs_info, SKINNY_METADATA))
1645 		metadata = 0;
1646 
1647 	path = btrfs_alloc_path();
1648 	if (!path)
1649 		return -ENOMEM;
1650 
1651 	key.objectid = head->bytenr;
1652 
1653 	if (metadata) {
1654 		key.type = BTRFS_METADATA_ITEM_KEY;
1655 		key.offset = head->level;
1656 	} else {
1657 		key.type = BTRFS_EXTENT_ITEM_KEY;
1658 		key.offset = head->num_bytes;
1659 	}
1660 
1661 	root = btrfs_extent_root(fs_info, key.objectid);
1662 again:
1663 	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
1664 	if (ret < 0) {
1665 		goto out;
1666 	} else if (ret > 0) {
1667 		if (metadata) {
1668 			if (path->slots[0] > 0) {
1669 				path->slots[0]--;
1670 				btrfs_item_key_to_cpu(path->nodes[0], &key,
1671 						      path->slots[0]);
1672 				if (key.objectid == head->bytenr &&
1673 				    key.type == BTRFS_EXTENT_ITEM_KEY &&
1674 				    key.offset == head->num_bytes)
1675 					ret = 0;
1676 			}
1677 			if (ret > 0) {
1678 				btrfs_release_path(path);
1679 				metadata = 0;
1680 
1681 				key.objectid = head->bytenr;
1682 				key.offset = head->num_bytes;
1683 				key.type = BTRFS_EXTENT_ITEM_KEY;
1684 				goto again;
1685 			}
1686 		} else {
1687 			ret = -EUCLEAN;
1688 			btrfs_err(fs_info,
1689 		  "missing extent item for extent %llu num_bytes %llu level %d",
1690 				  head->bytenr, head->num_bytes, head->level);
1691 			goto out;
1692 		}
1693 	}
1694 
1695 	leaf = path->nodes[0];
1696 	item_size = btrfs_item_size(leaf, path->slots[0]);
1697 
1698 	if (unlikely(item_size < sizeof(*ei))) {
1699 		ret = -EUCLEAN;
1700 		btrfs_err(fs_info,
1701 			  "unexpected extent item size, has %u expect >= %zu",
1702 			  item_size, sizeof(*ei));
1703 		btrfs_abort_transaction(trans, ret);
1704 		goto out;
1705 	}
1706 
1707 	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1708 	__run_delayed_extent_op(extent_op, leaf, ei);
1709 out:
1710 	btrfs_free_path(path);
1711 	return ret;
1712 }
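
/*
 * The search above is the usual SKINNY_METADATA fallback pattern: try the
 * compact (bytenr, METADATA_ITEM, level) key first and, if it is absent,
 * retry with the old-style (bytenr, EXTENT_ITEM, num_bytes) key before
 * treating the extent item as genuinely missing.
 */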
1713 
1714 static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
1715 				struct btrfs_delayed_ref_head *href,
1716 				struct btrfs_delayed_ref_node *node,
1717 				struct btrfs_delayed_extent_op *extent_op,
1718 				bool insert_reserved)
1719 {
1720 	int ret = 0;
1721 	struct btrfs_fs_info *fs_info = trans->fs_info;
1722 	u64 parent = 0;
1723 	u64 ref_root = 0;
1724 
1725 	trace_run_delayed_tree_ref(trans->fs_info, node);
1726 
1727 	if (node->type == BTRFS_SHARED_BLOCK_REF_KEY)
1728 		parent = node->parent;
1729 	ref_root = node->ref_root;
1730 
1731 	if (unlikely(node->ref_mod != 1)) {
1732 		btrfs_err(trans->fs_info,
1733 	"btree block %llu has %d references rather than 1: action %d ref_root %llu parent %llu",
1734 			  node->bytenr, node->ref_mod, node->action, ref_root,
1735 			  parent);
1736 		return -EUCLEAN;
1737 	}
1738 	if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
1739 		struct btrfs_squota_delta delta = {
1740 			.root = href->owning_root,
1741 			.num_bytes = fs_info->nodesize,
1742 			.is_data = false,
1743 			.is_inc = true,
1744 			.generation = trans->transid,
1745 		};
1746 
1747 		ret = alloc_reserved_tree_block(trans, node, extent_op);
1748 		if (!ret)
1749 			btrfs_record_squota_delta(fs_info, &delta);
1750 	} else if (node->action == BTRFS_ADD_DELAYED_REF) {
1751 		ret = __btrfs_inc_extent_ref(trans, node, extent_op);
1752 	} else if (node->action == BTRFS_DROP_DELAYED_REF) {
1753 		ret = __btrfs_free_extent(trans, href, node, extent_op);
1754 	} else {
1755 		BUG();
1756 	}
1757 	return ret;
1758 }
1759 
1760 /* helper function to actually process a single delayed ref entry */
1761 static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
1762 			       struct btrfs_delayed_ref_head *href,
1763 			       struct btrfs_delayed_ref_node *node,
1764 			       struct btrfs_delayed_extent_op *extent_op,
1765 			       bool insert_reserved)
1766 {
1767 	int ret = 0;
1768 
1769 	if (TRANS_ABORTED(trans)) {
1770 		if (insert_reserved) {
1771 			btrfs_pin_extent(trans, node->bytenr, node->num_bytes, 1);
1772 			free_head_ref_squota_rsv(trans->fs_info, href);
1773 		}
1774 		return 0;
1775 	}
1776 
1777 	if (node->type == BTRFS_TREE_BLOCK_REF_KEY ||
1778 	    node->type == BTRFS_SHARED_BLOCK_REF_KEY)
1779 		ret = run_delayed_tree_ref(trans, href, node, extent_op,
1780 					   insert_reserved);
1781 	else if (node->type == BTRFS_EXTENT_DATA_REF_KEY ||
1782 		 node->type == BTRFS_SHARED_DATA_REF_KEY)
1783 		ret = run_delayed_data_ref(trans, href, node, extent_op,
1784 					   insert_reserved);
1785 	else if (node->type == BTRFS_EXTENT_OWNER_REF_KEY)
1786 		ret = 0;
1787 	else
1788 		BUG();
1789 	if (ret && insert_reserved)
1790 		btrfs_pin_extent(trans, node->bytenr, node->num_bytes, 1);
1791 	if (ret < 0)
1792 		btrfs_err(trans->fs_info,
1793 "failed to run delayed ref for logical %llu num_bytes %llu type %u action %u ref_mod %d: %d",
1794 			  node->bytenr, node->num_bytes, node->type,
1795 			  node->action, node->ref_mod, ret);
1796 	return ret;
1797 }
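
/*
 * BTRFS_EXTENT_OWNER_REF_KEY nodes are deliberately a no-op in the
 * dispatcher above: owner refs only record the allocating root for simple
 * quotas and carry no reference count of their own to apply.
 */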
1798 
1799 static struct btrfs_delayed_extent_op *cleanup_extent_op(
1800 				struct btrfs_delayed_ref_head *head)
1801 {
1802 	struct btrfs_delayed_extent_op *extent_op = head->extent_op;
1803 
1804 	if (!extent_op)
1805 		return NULL;
1806 
1807 	if (head->must_insert_reserved) {
1808 		head->extent_op = NULL;
1809 		btrfs_free_delayed_extent_op(extent_op);
1810 		return NULL;
1811 	}
1812 	return extent_op;
1813 }
1814 
1815 static int run_and_cleanup_extent_op(struct btrfs_trans_handle *trans,
1816 				     struct btrfs_delayed_ref_head *head)
1817 {
1818 	struct btrfs_delayed_extent_op *extent_op;
1819 	int ret;
1820 
1821 	extent_op = cleanup_extent_op(head);
1822 	if (!extent_op)
1823 		return 0;
1824 	head->extent_op = NULL;
1825 	spin_unlock(&head->lock);
1826 	ret = run_delayed_extent_op(trans, head, extent_op);
1827 	btrfs_free_delayed_extent_op(extent_op);
1828 	return ret ? ret : 1;
1829 }
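
/*
 * A return value of 1 from run_and_cleanup_extent_op() means an extent op
 * was run and head->lock was dropped along the way, so callers must
 * revalidate the head before doing anything else with it.
 */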
1830 
1831 u64 btrfs_cleanup_ref_head_accounting(struct btrfs_fs_info *fs_info,
1832 				  struct btrfs_delayed_ref_root *delayed_refs,
1833 				  struct btrfs_delayed_ref_head *head)
1834 {
1835 	u64 ret = 0;
1836 
1837 	/*
1838 	 * We had csum deletions accounted for in our delayed refs rsv, we need
1839 	 * to drop the csum leaves for this update from our delayed_refs_rsv.
1840 	 */
1841 	if (head->total_ref_mod < 0 && head->is_data) {
1842 		int nr_csums;
1843 
1844 		spin_lock(&delayed_refs->lock);
1845 		delayed_refs->pending_csums -= head->num_bytes;
1846 		spin_unlock(&delayed_refs->lock);
1847 		nr_csums = btrfs_csum_bytes_to_leaves(fs_info, head->num_bytes);
1848 
1849 		btrfs_delayed_refs_rsv_release(fs_info, 0, nr_csums);
1850 
1851 		ret = btrfs_calc_delayed_ref_csum_bytes(fs_info, nr_csums);
1852 	}
1853 	/* must_insert_reserved can be set only if we didn't run the head ref. */
1854 	if (head->must_insert_reserved)
1855 		free_head_ref_squota_rsv(fs_info, head);
1856 
1857 	return ret;
1858 }
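
/*
 * Rough worked example of the csum accounting above, assuming 4KiB sectors
 * with 4-byte crc32c checksums (other sector sizes or checksum algorithms
 * change the numbers): freeing a 1MiB data extent removes 256 checksum
 * items of 4 bytes each, i.e. 1KiB of csums. btrfs_csum_bytes_to_leaves()
 * computes, from the extent size, the worst-case number of csum tree
 * leaves whose reservation can now be released.
 */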
1859 
1860 static int cleanup_ref_head(struct btrfs_trans_handle *trans,
1861 			    struct btrfs_delayed_ref_head *head,
1862 			    u64 *bytes_released)
1863 {
1865 	struct btrfs_fs_info *fs_info = trans->fs_info;
1866 	struct btrfs_delayed_ref_root *delayed_refs;
1867 	int ret;
1868 
1869 	delayed_refs = &trans->transaction->delayed_refs;
1870 
1871 	ret = run_and_cleanup_extent_op(trans, head);
1872 	if (ret < 0) {
1873 		btrfs_unselect_ref_head(delayed_refs, head);
1874 		btrfs_debug(fs_info, "run_delayed_extent_op returned %d", ret);
1875 		return ret;
1876 	} else if (ret) {
1877 		return ret;
1878 	}
1879 
1880 	/*
1881 	 * Need to drop our head ref lock and re-acquire the delayed ref lock
1882 	 * and then re-check to make sure nobody got added.
1883 	 */
1884 	spin_unlock(&head->lock);
1885 	spin_lock(&delayed_refs->lock);
1886 	spin_lock(&head->lock);
1887 	if (!RB_EMPTY_ROOT(&head->ref_tree.rb_root) || head->extent_op) {
1888 		spin_unlock(&head->lock);
1889 		spin_unlock(&delayed_refs->lock);
1890 		return 1;
1891 	}
1892 	btrfs_delete_ref_head(fs_info, delayed_refs, head);
1893 	spin_unlock(&head->lock);
1894 	spin_unlock(&delayed_refs->lock);
1895 
1896 	if (head->must_insert_reserved) {
1897 		btrfs_pin_extent(trans, head->bytenr, head->num_bytes, 1);
1898 		if (head->is_data) {
1899 			struct btrfs_root *csum_root;
1900 
1901 			csum_root = btrfs_csum_root(fs_info, head->bytenr);
1902 			ret = btrfs_del_csums(trans, csum_root, head->bytenr,
1903 					      head->num_bytes);
1904 		}
1905 	}
1906 
1907 	*bytes_released += btrfs_cleanup_ref_head_accounting(fs_info, delayed_refs, head);
1908 
1909 	trace_run_delayed_ref_head(fs_info, head, 0);
1910 	btrfs_delayed_ref_unlock(head);
1911 	btrfs_put_delayed_ref_head(head);
1912 	return ret;
1913 }
1914 
1915 static int btrfs_run_delayed_refs_for_head(struct btrfs_trans_handle *trans,
1916 					   struct btrfs_delayed_ref_head *locked_ref,
1917 					   u64 *bytes_released)
1918 {
1919 	struct btrfs_fs_info *fs_info = trans->fs_info;
1920 	struct btrfs_delayed_ref_root *delayed_refs;
1921 	struct btrfs_delayed_extent_op *extent_op;
1922 	struct btrfs_delayed_ref_node *ref;
1923 	bool must_insert_reserved;
1924 	int ret;
1925 
1926 	delayed_refs = &trans->transaction->delayed_refs;
1927 
1928 	lockdep_assert_held(&locked_ref->mutex);
1929 	lockdep_assert_held(&locked_ref->lock);
1930 
1931 	while ((ref = btrfs_select_delayed_ref(locked_ref))) {
1932 		if (ref->seq &&
1933 		    btrfs_check_delayed_seq(fs_info, ref->seq)) {
1934 			spin_unlock(&locked_ref->lock);
1935 			btrfs_unselect_ref_head(delayed_refs, locked_ref);
1936 			return -EAGAIN;
1937 		}
1938 
1939 		rb_erase_cached(&ref->ref_node, &locked_ref->ref_tree);
1940 		RB_CLEAR_NODE(&ref->ref_node);
1941 		if (!list_empty(&ref->add_list))
1942 			list_del(&ref->add_list);
1943 		/*
1944 		 * When we run the delayed ref, also correct the ref_mod on
1945 		 * the head.
1946 		 */
1947 		switch (ref->action) {
1948 		case BTRFS_ADD_DELAYED_REF:
1949 		case BTRFS_ADD_DELAYED_EXTENT:
1950 			locked_ref->ref_mod -= ref->ref_mod;
1951 			break;
1952 		case BTRFS_DROP_DELAYED_REF:
1953 			locked_ref->ref_mod += ref->ref_mod;
1954 			break;
1955 		default:
1956 			WARN_ON(1);
1957 		}
1958 
1959 		/*
1960 		 * Record the must_insert_reserved flag before we drop the
1961 		 * spin lock.
1962 		 */
1963 		must_insert_reserved = locked_ref->must_insert_reserved;
1964 		/*
1965 		 * Unsetting this on the head ref relinquishes ownership of
1966 		 * the rsv_bytes, so it is critical that every possible code
1967 		 * path from here forward frees all reserves including qgroup
1968 		 * reserve.
1969 		 */
1970 		locked_ref->must_insert_reserved = false;
1971 
1972 		extent_op = locked_ref->extent_op;
1973 		locked_ref->extent_op = NULL;
1974 		spin_unlock(&locked_ref->lock);
1975 
1976 		ret = run_one_delayed_ref(trans, locked_ref, ref, extent_op,
1977 					  must_insert_reserved);
1978 		btrfs_delayed_refs_rsv_release(fs_info, 1, 0);
1979 		*bytes_released += btrfs_calc_delayed_ref_bytes(fs_info, 1);
1980 
1981 		btrfs_free_delayed_extent_op(extent_op);
1982 		if (ret) {
1983 			btrfs_unselect_ref_head(delayed_refs, locked_ref);
1984 			btrfs_put_delayed_ref(ref);
1985 			return ret;
1986 		}
1987 
1988 		btrfs_put_delayed_ref(ref);
1989 		cond_resched();
1990 
1991 		spin_lock(&locked_ref->lock);
1992 		btrfs_merge_delayed_refs(fs_info, delayed_refs, locked_ref);
1993 	}
1994 
1995 	return 0;
1996 }
1997 
1998 /*
1999  * Returns 0 on success or if called with an already aborted transaction.
2000  * Returns -ENOMEM or -EIO on failure and will abort the transaction.
2001  */
2002 static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
2003 					     u64 min_bytes)
2004 {
2005 	struct btrfs_fs_info *fs_info = trans->fs_info;
2006 	struct btrfs_delayed_ref_root *delayed_refs;
2007 	struct btrfs_delayed_ref_head *locked_ref = NULL;
2008 	int ret;
2009 	unsigned long count = 0;
2010 	unsigned long max_count = 0;
2011 	u64 bytes_processed = 0;
2012 
2013 	delayed_refs = &trans->transaction->delayed_refs;
2014 	if (min_bytes == 0) {
2015 		max_count = delayed_refs->num_heads_ready;
2016 		min_bytes = U64_MAX;
2017 	}
2018 
2019 	do {
2020 		if (!locked_ref) {
2021 			locked_ref = btrfs_select_ref_head(fs_info, delayed_refs);
2022 			if (IS_ERR_OR_NULL(locked_ref)) {
2023 				if (PTR_ERR(locked_ref) == -EAGAIN) {
2024 					continue;
2025 				} else {
2026 					break;
2027 				}
2028 			}
2029 			count++;
2030 		}
2031 		/*
2032 		 * We need to try and merge add/drops of the same ref since we
2033 		 * can run into issues with relocate dropping the implicit ref
2034 		 * and then it being added back again before the drop can
2035 		 * finish.  If we merged anything we need to re-loop so we can
2036 		 * get a good ref.
2037 		 * Or we can get node references of the same type that weren't
2038 		 * merged when created due to bumps in the tree mod seq, and
2039 		 * we need to merge them to prevent adding an inline extent
2040 		 * backref before dropping it (triggering a BUG_ON at
2041 		 * insert_inline_extent_backref()).
2042 		 */
2043 		spin_lock(&locked_ref->lock);
2044 		btrfs_merge_delayed_refs(fs_info, delayed_refs, locked_ref);
2045 
2046 		ret = btrfs_run_delayed_refs_for_head(trans, locked_ref, &bytes_processed);
2047 		if (ret < 0 && ret != -EAGAIN) {
2048 			/*
2049 			 * Error, btrfs_run_delayed_refs_for_head already
2050 			 * unlocked everything so just bail out
2051 			 */
2052 			return ret;
2053 		} else if (!ret) {
2054 			/*
2055 			 * Success, perform the usual cleanup of a processed
2056 			 * head
2057 			 */
2058 			ret = cleanup_ref_head(trans, locked_ref, &bytes_processed);
2059 			if (ret > 0) {
2060 				/* We dropped our lock, we need to loop. */
2061 				ret = 0;
2062 				continue;
2063 			} else if (ret) {
2064 				return ret;
2065 			}
2066 		}
2067 
2068 		/*
2069 		 * Either success case or btrfs_run_delayed_refs_for_head
2070 		 * returned -EAGAIN, meaning we need to select another head
2071 		 */
2072 
2073 		locked_ref = NULL;
2074 		cond_resched();
2075 	} while ((min_bytes != U64_MAX && bytes_processed < min_bytes) ||
2076 		 (max_count > 0 && count < max_count) ||
2077 		 locked_ref);
2078 
2079 	return 0;
2080 }
2081 
2082 #ifdef SCRAMBLE_DELAYED_REFS
2083 /*
2084  * Normally delayed refs get processed in ascending bytenr order. This
2085  * correlates in most cases with the order added. To expose dependencies on
2086  * this order, we process the tree from the middle instead of the beginning.
2087  */
2088 static u64 find_middle(struct rb_root *root)
2089 {
2090 	struct rb_node *n = root->rb_node;
2091 	struct btrfs_delayed_ref_node *entry;
2092 	int alt = 1;
2093 	u64 middle;
2094 	u64 first = 0, last = 0;
2095 
2096 	n = rb_first(root);
2097 	if (n) {
2098 		entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2099 		first = entry->bytenr;
2100 	}
2101 	n = rb_last(root);
2102 	if (n) {
2103 		entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2104 		last = entry->bytenr;
2105 	}
2106 	n = root->rb_node;
2107 
2108 	while (n) {
2109 		entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2110 		WARN_ON(!entry->in_tree);
2111 
2112 		middle = entry->bytenr;
2113 
2114 		if (alt)
2115 			n = n->rb_left;
2116 		else
2117 			n = n->rb_right;
2118 
2119 		alt = 1 - alt;
2120 	}
2121 	return middle;
2122 }
2123 #endif
2124 
2125 /*
2126  * Start processing the delayed reference count updates and extent insertions
2127  * we have queued up so far.
2128  *
2129  * @trans:	Transaction handle.
2130  * @min_bytes:	How many bytes of delayed references to process. After this
2131  *		many bytes we stop processing delayed references if there are
2132  *		any more. If 0 it means to run all existing delayed references,
2133  *		but not new ones added after running all existing ones.
2134  *		Use (u64)-1 (U64_MAX) to run all existing delayed references
2135  *		plus any new ones that are added.
2136  *
2137  * Returns 0 on success or if called with an aborted transaction
2138  * Returns <0 on error and aborts the transaction
2139  */
2140 int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans, u64 min_bytes)
2141 {
2142 	struct btrfs_fs_info *fs_info = trans->fs_info;
2143 	struct btrfs_delayed_ref_root *delayed_refs;
2144 	int ret;
2145 
2146 	/* We'll clean this up in btrfs_cleanup_transaction */
2147 	if (TRANS_ABORTED(trans))
2148 		return 0;
2149 
2150 	if (test_bit(BTRFS_FS_CREATING_FREE_SPACE_TREE, &fs_info->flags))
2151 		return 0;
2152 
2153 	delayed_refs = &trans->transaction->delayed_refs;
2154 again:
2155 #ifdef SCRAMBLE_DELAYED_REFS
2156 	delayed_refs->run_delayed_start = find_middle(&delayed_refs->root);
2157 #endif
2158 	ret = __btrfs_run_delayed_refs(trans, min_bytes);
2159 	if (ret < 0) {
2160 		btrfs_abort_transaction(trans, ret);
2161 		return ret;
2162 	}
2163 
2164 	if (min_bytes == U64_MAX) {
2165 		btrfs_create_pending_block_groups(trans);
2166 
2167 		spin_lock(&delayed_refs->lock);
2168 		if (xa_empty(&delayed_refs->head_refs)) {
2169 			spin_unlock(&delayed_refs->lock);
2170 			return 0;
2171 		}
2172 		spin_unlock(&delayed_refs->lock);
2173 
2174 		cond_resched();
2175 		goto again;
2176 	}
2177 
2178 	return 0;
2179 }
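
/*
 * Hypothetical calls illustrating the @min_bytes convention documented
 * above:
 *
 *	btrfs_run_delayed_refs(trans, U64_MAX); // all refs, old and new
 *	btrfs_run_delayed_refs(trans, 0);       // all refs queued so far
 *	btrfs_run_delayed_refs(trans, SZ_4M);   // stop after ~4MiB of refs
 */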
2180 
2181 int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
2182 				struct extent_buffer *eb, u64 flags)
2183 {
2184 	struct btrfs_delayed_extent_op *extent_op;
2185 	int ret;
2186 
2187 	extent_op = btrfs_alloc_delayed_extent_op();
2188 	if (!extent_op)
2189 		return -ENOMEM;
2190 
2191 	extent_op->flags_to_set = flags;
2192 	extent_op->update_flags = true;
2193 	extent_op->update_key = false;
2194 
2195 	ret = btrfs_add_delayed_extent_op(trans, eb->start, eb->len,
2196 					  btrfs_header_level(eb), extent_op);
2197 	if (ret)
2198 		btrfs_free_delayed_extent_op(extent_op);
2199 	return ret;
2200 }
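
/*
 * Hypothetical usage sketch: flag updates go through a delayed extent op
 * instead of touching the extent item directly, e.g. for a tree block that
 * must gain the full backref flag:
 *
 *	ret = btrfs_set_disk_extent_flags(trans, buf,
 *					  BTRFS_BLOCK_FLAG_FULL_BACKREF);
 *
 * The flag is then applied by run_delayed_extent_op() when the block's
 * delayed ref head is processed.
 */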
2201 
2202 static noinline int check_delayed_ref(struct btrfs_inode *inode,
2203 				      struct btrfs_path *path,
2204 				      u64 offset, u64 bytenr)
2205 {
2206 	struct btrfs_root *root = inode->root;
2207 	struct btrfs_delayed_ref_head *head;
2208 	struct btrfs_delayed_ref_node *ref;
2209 	struct btrfs_delayed_ref_root *delayed_refs;
2210 	struct btrfs_transaction *cur_trans;
2211 	struct rb_node *node;
2212 	int ret = 0;
2213 
2214 	spin_lock(&root->fs_info->trans_lock);
2215 	cur_trans = root->fs_info->running_transaction;
2216 	if (cur_trans)
2217 		refcount_inc(&cur_trans->use_count);
2218 	spin_unlock(&root->fs_info->trans_lock);
2219 	if (!cur_trans)
2220 		return 0;
2221 
2222 	delayed_refs = &cur_trans->delayed_refs;
2223 	spin_lock(&delayed_refs->lock);
2224 	head = btrfs_find_delayed_ref_head(root->fs_info, delayed_refs, bytenr);
2225 	if (!head) {
2226 		spin_unlock(&delayed_refs->lock);
2227 		btrfs_put_transaction(cur_trans);
2228 		return 0;
2229 	}
2230 
2231 	if (!mutex_trylock(&head->mutex)) {
2232 		if (path->nowait) {
2233 			spin_unlock(&delayed_refs->lock);
2234 			btrfs_put_transaction(cur_trans);
2235 			return -EAGAIN;
2236 		}
2237 
2238 		refcount_inc(&head->refs);
2239 		spin_unlock(&delayed_refs->lock);
2240 
2241 		btrfs_release_path(path);
2242 
2243 		/*
2244 		 * Mutex was contended, block until it's released and let
2245 		 * caller try again
2246 		 */
2247 		mutex_lock(&head->mutex);
2248 		mutex_unlock(&head->mutex);
2249 		btrfs_put_delayed_ref_head(head);
2250 		btrfs_put_transaction(cur_trans);
2251 		return -EAGAIN;
2252 	}
2253 	spin_unlock(&delayed_refs->lock);
2254 
2255 	spin_lock(&head->lock);
2256 	/*
2257 	 * XXX: We should replace this with a proper search function in the
2258 	 * future.
2259 	 */
2260 	for (node = rb_first_cached(&head->ref_tree); node;
2261 	     node = rb_next(node)) {
2262 		u64 ref_owner;
2263 		u64 ref_offset;
2264 
2265 		ref = rb_entry(node, struct btrfs_delayed_ref_node, ref_node);
2266 		/* If it's a shared ref we know a cross reference exists */
2267 		if (ref->type != BTRFS_EXTENT_DATA_REF_KEY) {
2268 			ret = 1;
2269 			break;
2270 		}
2271 
2272 		ref_owner = btrfs_delayed_ref_owner(ref);
2273 		ref_offset = btrfs_delayed_ref_offset(ref);
2274 
2275 		/*
2276 		 * If our ref doesn't match the one we're currently looking at
2277 		 * then we have a cross reference.
2278 		 */
2279 		if (ref->ref_root != btrfs_root_id(root) ||
2280 		    ref_owner != btrfs_ino(inode) || ref_offset != offset) {
2281 			ret = 1;
2282 			break;
2283 		}
2284 	}
2285 	spin_unlock(&head->lock);
2286 	mutex_unlock(&head->mutex);
2287 	btrfs_put_transaction(cur_trans);
2288 	return ret;
2289 }
2290 
2291 /*
2292  * Check if there are references for a data extent other than the one belonging
2293  * to the given inode and offset.
2294  *
2295  * @inode:     The only inode we expect to find associated with the data extent.
2296  * @path:      A path to use for searching the extent tree.
2297  * @offset:    The only offset we expect to find associated with the data extent.
2298  * @bytenr:    The logical address of the data extent.
2299  *
2300  * When the extent does not have any other references other than the one we
2301  * expect to find, we always return a value of 0 with the path having a locked
2302  * leaf that contains the extent's extent item - this is necessary to ensure
2303  * we don't race with a task running delayed references, and our caller must
2304  * have such a path when calling check_delayed_ref() - it must lock a delayed
2305  * ref head while holding the leaf locked. In case the extent item is not found
2306  * in the extent tree, we return -ENOENT with the path having the leaf (locked)
2307  * where the extent item should be, in order to prevent races with another task
2308  * running delayed references, so that we don't miss any reference when calling
2309  * check_delayed_ref().
2310  *
2311  * Note: this may return false positives, and this is because we want to be
2312  *       quick here as we're called in write paths (when flushing delalloc and
2313  *       in the direct IO write path). For example we can have an extent with
2314  *       a single reference but that reference is not inlined, or we may have
2315  *       many references in the extent tree but we also have delayed references
2316  *       that cancel all the references except the one for our inode and offset,
2317  *       but it would be expensive to do such checks and complex due to all
2318  *       locking to avoid races between the checks and flushing delayed refs,
2319  *       plus non-inline references may be located on leaves other than the one
2320  *       that contains the extent item in the extent tree. The important thing
2321  *       here is to not return false negatives and that the false positives are
2322  *       not very common.
2323  *
2324  * Returns: 0 if there are no cross references and with the path having a locked
2325  *          leaf from the extent tree that contains the extent's extent item.
2326  *
2327  *          1 if there are cross references (false positives can happen).
2328  *
2329  *          < 0 in case of an error. In case of -ENOENT the leaf in the extent
2330  *          tree where the extent item should be located at is read locked and
2331  *          accessible in the given path.
2332  */
2333 static noinline int check_committed_ref(struct btrfs_inode *inode,
2334 					struct btrfs_path *path,
2335 					u64 offset, u64 bytenr)
2336 {
2337 	struct btrfs_root *root = inode->root;
2338 	struct btrfs_fs_info *fs_info = root->fs_info;
2339 	struct btrfs_root *extent_root = btrfs_extent_root(fs_info, bytenr);
2340 	struct extent_buffer *leaf;
2341 	struct btrfs_extent_data_ref *ref;
2342 	struct btrfs_extent_inline_ref *iref;
2343 	struct btrfs_extent_item *ei;
2344 	struct btrfs_key key;
2345 	u32 item_size;
2346 	u32 expected_size;
2347 	int type;
2348 	int ret;
2349 
2350 	key.objectid = bytenr;
2351 	key.offset = (u64)-1;
2352 	key.type = BTRFS_EXTENT_ITEM_KEY;
2353 
2354 	ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
2355 	if (ret < 0)
2356 		return ret;
2357 	if (ret == 0) {
2358 		/*
2359 		 * Key with offset -1 found: an extent item with such an offset
2360 		 * would have to exist, but that is out of the valid range.
2361 		 */
2362 		return -EUCLEAN;
2363 	}
2364 
2365 	if (path->slots[0] == 0)
2366 		return -ENOENT;
2367 
2368 	path->slots[0]--;
2369 	leaf = path->nodes[0];
2370 	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2371 
2372 	if (key.objectid != bytenr || key.type != BTRFS_EXTENT_ITEM_KEY)
2373 		return -ENOENT;
2374 
2375 	item_size = btrfs_item_size(leaf, path->slots[0]);
2376 	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
2377 	expected_size = sizeof(*ei) + btrfs_extent_inline_ref_size(BTRFS_EXTENT_DATA_REF_KEY);
2378 
2379 	/* No inline refs; we need to bail before checking for owner ref. */
2380 	if (item_size == sizeof(*ei))
2381 		return 1;
2382 
2383 	/* Check for an owner ref; skip over it to the real inline refs. */
2384 	iref = (struct btrfs_extent_inline_ref *)(ei + 1);
2385 	type = btrfs_get_extent_inline_ref_type(leaf, iref, BTRFS_REF_TYPE_DATA);
2386 	if (btrfs_fs_incompat(fs_info, SIMPLE_QUOTA) && type == BTRFS_EXTENT_OWNER_REF_KEY) {
2387 		expected_size += btrfs_extent_inline_ref_size(BTRFS_EXTENT_OWNER_REF_KEY);
2388 		iref = (struct btrfs_extent_inline_ref *)(iref + 1);
2389 		type = btrfs_get_extent_inline_ref_type(leaf, iref, BTRFS_REF_TYPE_DATA);
2390 	}
2391 
2392 	/* If extent item has more than 1 inline ref then it's shared */
2393 	if (item_size != expected_size)
2394 		return 1;
2395 
2396 	/* If this extent has SHARED_DATA_REF then it's shared */
2397 	if (type != BTRFS_EXTENT_DATA_REF_KEY)
2398 		return 1;
2399 
2400 	ref = (struct btrfs_extent_data_ref *)(&iref->offset);
2401 	if (btrfs_extent_refs(leaf, ei) !=
2402 	    btrfs_extent_data_ref_count(leaf, ref) ||
2403 	    btrfs_extent_data_ref_root(leaf, ref) != btrfs_root_id(root) ||
2404 	    btrfs_extent_data_ref_objectid(leaf, ref) != btrfs_ino(inode) ||
2405 	    btrfs_extent_data_ref_offset(leaf, ref) != offset)
2406 		return 1;
2407 
2408 	return 0;
2409 }
2410 
2411 int btrfs_cross_ref_exist(struct btrfs_inode *inode, u64 offset,
2412 			  u64 bytenr, struct btrfs_path *path)
2413 {
2414 	int ret;
2415 
2416 	do {
2417 		ret = check_committed_ref(inode, path, offset, bytenr);
2418 		if (ret && ret != -ENOENT)
2419 			goto out;
2420 
2421 		/*
2422 		 * The path must have a locked leaf from the extent tree where
2423 		 * the extent item for our extent is located, in case it exists,
2424 		 * or where it should be located in case it doesn't exist yet
2425 		 * because it's new and its delayed ref was not yet flushed.
2426 		 * We need to lock the delayed ref head at check_delayed_ref(),
2427 		 * if one exists, while holding the leaf locked in order to not
2428 		 * race with delayed ref flushing, missing references and
2429 		 * incorrectly reporting that the extent is not shared.
2430 		 */
2431 		if (IS_ENABLED(CONFIG_BTRFS_ASSERT)) {
2432 			struct extent_buffer *leaf = path->nodes[0];
2433 
2434 			ASSERT(leaf != NULL);
2435 			btrfs_assert_tree_read_locked(leaf);
2436 
2437 			if (ret != -ENOENT) {
2438 				struct btrfs_key key;
2439 
2440 				btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2441 				ASSERT(key.objectid == bytenr);
2442 				ASSERT(key.type == BTRFS_EXTENT_ITEM_KEY);
2443 			}
2444 		}
2445 
2446 		ret = check_delayed_ref(inode, path, offset, bytenr);
2447 	} while (ret == -EAGAIN && !path->nowait);
2448 
2449 out:
2450 	btrfs_release_path(path);
2451 	if (btrfs_is_data_reloc_root(inode->root))
2452 		WARN_ON(ret > 0);
2453 	return ret;
2454 }
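
/*
 * Note on the retry loop above: check_delayed_ref() returns -EAGAIN after
 * blocking on a contended delayed ref head mutex, and it has released the
 * path by then, so the committed-ref check must be redone from scratch.
 * Nowait callers skip the retry and get the -EAGAIN directly.
 */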
2455 
2456 static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
2457 			   struct btrfs_root *root,
2458 			   struct extent_buffer *buf,
2459 			   int full_backref, int inc)
2460 {
2461 	struct btrfs_fs_info *fs_info = root->fs_info;
2462 	u64 parent;
2463 	u64 ref_root;
2464 	u32 nritems;
2465 	struct btrfs_key key;
2466 	struct btrfs_file_extent_item *fi;
2467 	bool for_reloc = btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC);
2468 	int i;
2469 	int action;
2470 	int level;
2471 	int ret = 0;
2472 
2473 	if (btrfs_is_testing(fs_info))
2474 		return 0;
2475 
2476 	ref_root = btrfs_header_owner(buf);
2477 	nritems = btrfs_header_nritems(buf);
2478 	level = btrfs_header_level(buf);
2479 
2480 	if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state) && level == 0)
2481 		return 0;
2482 
2483 	if (full_backref)
2484 		parent = buf->start;
2485 	else
2486 		parent = 0;
2487 	if (inc)
2488 		action = BTRFS_ADD_DELAYED_REF;
2489 	else
2490 		action = BTRFS_DROP_DELAYED_REF;
2491 
2492 	for (i = 0; i < nritems; i++) {
2493 		struct btrfs_ref ref = {
2494 			.action = action,
2495 			.parent = parent,
2496 			.ref_root = ref_root,
2497 		};
2498 
2499 		if (level == 0) {
2500 			btrfs_item_key_to_cpu(buf, &key, i);
2501 			if (key.type != BTRFS_EXTENT_DATA_KEY)
2502 				continue;
2503 			fi = btrfs_item_ptr(buf, i,
2504 					    struct btrfs_file_extent_item);
2505 			if (btrfs_file_extent_type(buf, fi) ==
2506 			    BTRFS_FILE_EXTENT_INLINE)
2507 				continue;
2508 			ref.bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
2509 			if (ref.bytenr == 0)
2510 				continue;
2511 
2512 			ref.num_bytes = btrfs_file_extent_disk_num_bytes(buf, fi);
2513 			ref.owning_root = ref_root;
2514 
2515 			key.offset -= btrfs_file_extent_offset(buf, fi);
2516 			btrfs_init_data_ref(&ref, key.objectid, key.offset,
2517 					    btrfs_root_id(root), for_reloc);
2518 			if (inc)
2519 				ret = btrfs_inc_extent_ref(trans, &ref);
2520 			else
2521 				ret = btrfs_free_extent(trans, &ref);
2522 			if (ret)
2523 				goto fail;
2524 		} else {
2525 			/* We don't know the owning_root, leave as 0. */
2526 			ref.bytenr = btrfs_node_blockptr(buf, i);
2527 			ref.num_bytes = fs_info->nodesize;
2528 
2529 			btrfs_init_tree_ref(&ref, level - 1,
2530 					    btrfs_root_id(root), for_reloc);
2531 			if (inc)
2532 				ret = btrfs_inc_extent_ref(trans, &ref);
2533 			else
2534 				ret = btrfs_free_extent(trans, &ref);
2535 			if (ret)
2536 				goto fail;
2537 		}
2538 	}
2539 	return 0;
2540 fail:
2541 	return ret;
2542 }
2543 
2544 int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
2545 		  struct extent_buffer *buf, int full_backref)
2546 {
2547 	return __btrfs_mod_ref(trans, root, buf, full_backref, 1);
2548 }
2549 
2550 int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
2551 		  struct extent_buffer *buf, int full_backref)
2552 {
2553 	return __btrfs_mod_ref(trans, root, buf, full_backref, 0);
2554 }
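
/*
 * The two wrappers above differ only in direction. @full_backref selects
 * how the references are keyed: non-zero adds refs keyed by the physical
 * parent block (shared backrefs), zero adds refs keyed by root id. A
 * caller COWing a shared block typically does something like (variable
 * names illustrative):
 *
 *	ret = btrfs_inc_ref(trans, root, cow, 1);
 *
 * to add parent-keyed references for everything the new block points to.
 */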
2555 
2556 static u64 get_alloc_profile_by_root(struct btrfs_root *root, int data)
2557 {
2558 	struct btrfs_fs_info *fs_info = root->fs_info;
2559 	u64 flags;
2560 	u64 ret;
2561 
2562 	if (data)
2563 		flags = BTRFS_BLOCK_GROUP_DATA;
2564 	else if (root == fs_info->chunk_root)
2565 		flags = BTRFS_BLOCK_GROUP_SYSTEM;
2566 	else
2567 		flags = BTRFS_BLOCK_GROUP_METADATA;
2568 
2569 	ret = btrfs_get_alloc_profile(fs_info, flags);
2570 	return ret;
2571 }
2572 
2573 static u64 first_logical_byte(struct btrfs_fs_info *fs_info)
2574 {
2575 	struct rb_node *leftmost;
2576 	u64 bytenr = 0;
2577 
2578 	read_lock(&fs_info->block_group_cache_lock);
2579 	/* Get the block group with the lowest logical start address. */
2580 	leftmost = rb_first_cached(&fs_info->block_group_cache_tree);
2581 	if (leftmost) {
2582 		struct btrfs_block_group *bg;
2583 
2584 		bg = rb_entry(leftmost, struct btrfs_block_group, cache_node);
2585 		bytenr = bg->start;
2586 	}
2587 	read_unlock(&fs_info->block_group_cache_lock);
2588 
2589 	return bytenr;
2590 }
2591 
2592 static int pin_down_extent(struct btrfs_trans_handle *trans,
2593 			   struct btrfs_block_group *cache,
2594 			   u64 bytenr, u64 num_bytes, int reserved)
2595 {
2596 	spin_lock(&cache->space_info->lock);
2597 	spin_lock(&cache->lock);
2598 	cache->pinned += num_bytes;
2599 	btrfs_space_info_update_bytes_pinned(cache->space_info, num_bytes);
2600 	if (reserved) {
2601 		cache->reserved -= num_bytes;
2602 		cache->space_info->bytes_reserved -= num_bytes;
2603 	}
2604 	spin_unlock(&cache->lock);
2605 	spin_unlock(&cache->space_info->lock);
2606 
2607 	set_extent_bit(&trans->transaction->pinned_extents, bytenr,
2608 		       bytenr + num_bytes - 1, EXTENT_DIRTY, NULL);
2609 	return 0;
2610 }
2611 
2612 int btrfs_pin_extent(struct btrfs_trans_handle *trans,
2613 		     u64 bytenr, u64 num_bytes, int reserved)
2614 {
2615 	struct btrfs_block_group *cache;
2616 
2617 	cache = btrfs_lookup_block_group(trans->fs_info, bytenr);
2618 	BUG_ON(!cache); /* Logic error */
2619 
2620 	pin_down_extent(trans, cache, bytenr, num_bytes, reserved);
2621 
2622 	btrfs_put_block_group(cache);
2623 	return 0;
2624 }
2625 
2626 int btrfs_pin_extent_for_log_replay(struct btrfs_trans_handle *trans,
2627 				    const struct extent_buffer *eb)
2628 {
2629 	struct btrfs_block_group *cache;
2630 	int ret;
2631 
2632 	cache = btrfs_lookup_block_group(trans->fs_info, eb->start);
2633 	if (!cache)
2634 		return -EINVAL;
2635 
2636 	/*
2637 	 * Fully cache the free space first so that our pin removes the free space
2638 	 * from the cache.
2639 	 */
2640 	ret = btrfs_cache_block_group(cache, true);
2641 	if (ret)
2642 		goto out;
2643 
2644 	pin_down_extent(trans, cache, eb->start, eb->len, 0);
2645 
2646 	/* remove us from the free space cache (if we're there at all) */
2647 	ret = btrfs_remove_free_space(cache, eb->start, eb->len);
2648 out:
2649 	btrfs_put_block_group(cache);
2650 	return ret;
2651 }
2652 
2653 static int __exclude_logged_extent(struct btrfs_fs_info *fs_info,
2654 				   u64 start, u64 num_bytes)
2655 {
2656 	int ret;
2657 	struct btrfs_block_group *block_group;
2658 
2659 	block_group = btrfs_lookup_block_group(fs_info, start);
2660 	if (!block_group)
2661 		return -EINVAL;
2662 
2663 	ret = btrfs_cache_block_group(block_group, true);
2664 	if (ret)
2665 		goto out;
2666 
2667 	ret = btrfs_remove_free_space(block_group, start, num_bytes);
2668 out:
2669 	btrfs_put_block_group(block_group);
2670 	return ret;
2671 }
2672 
2673 int btrfs_exclude_logged_extents(struct extent_buffer *eb)
2674 {
2675 	struct btrfs_fs_info *fs_info = eb->fs_info;
2676 	struct btrfs_file_extent_item *item;
2677 	struct btrfs_key key;
2678 	int found_type;
2679 	int i;
2680 	int ret = 0;
2681 
2682 	if (!btrfs_fs_incompat(fs_info, MIXED_GROUPS))
2683 		return 0;
2684 
2685 	for (i = 0; i < btrfs_header_nritems(eb); i++) {
2686 		btrfs_item_key_to_cpu(eb, &key, i);
2687 		if (key.type != BTRFS_EXTENT_DATA_KEY)
2688 			continue;
2689 		item = btrfs_item_ptr(eb, i, struct btrfs_file_extent_item);
2690 		found_type = btrfs_file_extent_type(eb, item);
2691 		if (found_type == BTRFS_FILE_EXTENT_INLINE)
2692 			continue;
2693 		if (btrfs_file_extent_disk_bytenr(eb, item) == 0)
2694 			continue;
2695 		key.objectid = btrfs_file_extent_disk_bytenr(eb, item);
2696 		key.offset = btrfs_file_extent_disk_num_bytes(eb, item);
2697 		ret = __exclude_logged_extent(fs_info, key.objectid, key.offset);
2698 		if (ret)
2699 			break;
2700 	}
2701 
2702 	return ret;
2703 }
2704 
2705 static void
2706 btrfs_inc_block_group_reservations(struct btrfs_block_group *bg)
2707 {
2708 	atomic_inc(&bg->reservations);
2709 }
2710 
2711 /*
2712  * Returns the free cluster for the given space info and sets empty_cluster to
2713  * what it should be based on the mount options.
2714  */
2715 static struct btrfs_free_cluster *
2716 fetch_cluster_info(struct btrfs_fs_info *fs_info,
2717 		   struct btrfs_space_info *space_info, u64 *empty_cluster)
2718 {
2719 	struct btrfs_free_cluster *ret = NULL;
2720 
2721 	*empty_cluster = 0;
2722 	if (btrfs_mixed_space_info(space_info))
2723 		return ret;
2724 
2725 	if (space_info->flags & BTRFS_BLOCK_GROUP_METADATA) {
2726 		ret = &fs_info->meta_alloc_cluster;
2727 		if (btrfs_test_opt(fs_info, SSD))
2728 			*empty_cluster = SZ_2M;
2729 		else
2730 			*empty_cluster = SZ_64K;
2731 	} else if ((space_info->flags & BTRFS_BLOCK_GROUP_DATA) &&
2732 		   btrfs_test_opt(fs_info, SSD_SPREAD)) {
2733 		*empty_cluster = SZ_2M;
2734 		ret = &fs_info->data_alloc_cluster;
2735 	}
2736 
2737 	return ret;
2738 }
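
/*
 * The empty_cluster value set here is a base amount; the unpin loop below
 * doubles it (empty_cluster <<= 1) before comparing it against the bytes
 * unpinned in a block group, so a cluster only loses its fragmented
 * marking once a comfortably large amount has been freed.
 */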
2739 
2740 static int unpin_extent_range(struct btrfs_fs_info *fs_info,
2741 			      u64 start, u64 end,
2742 			      const bool return_free_space)
2743 {
2744 	struct btrfs_block_group *cache = NULL;
2745 	struct btrfs_space_info *space_info;
2746 	struct btrfs_free_cluster *cluster = NULL;
2747 	u64 total_unpinned = 0;
2748 	u64 empty_cluster = 0;
2749 	bool readonly;
2750 	int ret = 0;
2751 
2752 	while (start <= end) {
2753 		u64 len;
2754 
2755 		readonly = false;
2756 		if (!cache ||
2757 		    start >= cache->start + cache->length) {
2758 			if (cache)
2759 				btrfs_put_block_group(cache);
2760 			total_unpinned = 0;
2761 			cache = btrfs_lookup_block_group(fs_info, start);
2762 			if (cache == NULL) {
2763 				/* Logic error, something removed the block group. */
2764 				ret = -EUCLEAN;
2765 				goto out;
2766 			}
2767 
2768 			cluster = fetch_cluster_info(fs_info,
2769 						     cache->space_info,
2770 						     &empty_cluster);
2771 			empty_cluster <<= 1;
2772 		}
2773 
2774 		len = cache->start + cache->length - start;
2775 		len = min(len, end + 1 - start);
2776 
2777 		if (return_free_space)
2778 			btrfs_add_free_space(cache, start, len);
2779 
2780 		start += len;
2781 		total_unpinned += len;
2782 		space_info = cache->space_info;
2783 
2784 		/*
2785 		 * If this space cluster has been marked as fragmented and we've
2786 		 * unpinned enough in this block group to potentially allow a
2787 		 * cluster to be created inside of it, go ahead and clear the
2788 		 * fragmented check.
2789 		 */
2790 		if (cluster && cluster->fragmented &&
2791 		    total_unpinned > empty_cluster) {
2792 			spin_lock(&cluster->lock);
2793 			cluster->fragmented = 0;
2794 			spin_unlock(&cluster->lock);
2795 		}
2796 
2797 		spin_lock(&space_info->lock);
2798 		spin_lock(&cache->lock);
2799 		cache->pinned -= len;
2800 		btrfs_space_info_update_bytes_pinned(space_info, -len);
2801 		space_info->max_extent_size = 0;
2802 		if (cache->ro) {
2803 			space_info->bytes_readonly += len;
2804 			readonly = true;
2805 		} else if (btrfs_is_zoned(fs_info)) {
2806 			/* Need reset before reusing in a zoned block group */
2807 			btrfs_space_info_update_bytes_zone_unusable(space_info, len);
2808 			readonly = true;
2809 		}
2810 		spin_unlock(&cache->lock);
2811 		if (!readonly && return_free_space)
2812 			btrfs_return_free_space(space_info, len);
2813 		spin_unlock(&space_info->lock);
2814 	}
2815 
2816 	if (cache)
2817 		btrfs_put_block_group(cache);
2818 out:
2819 	return ret;
2820 }
2821 
2822 int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans)
2823 {
2824 	struct btrfs_fs_info *fs_info = trans->fs_info;
2825 	struct btrfs_block_group *block_group, *tmp;
2826 	struct list_head *deleted_bgs;
2827 	struct extent_io_tree *unpin;
2828 	u64 start;
2829 	u64 end;
2830 	int ret;
2831 
2832 	unpin = &trans->transaction->pinned_extents;
2833 
2834 	while (!TRANS_ABORTED(trans)) {
2835 		struct extent_state *cached_state = NULL;
2836 
2837 		mutex_lock(&fs_info->unused_bg_unpin_mutex);
2838 		if (!find_first_extent_bit(unpin, 0, &start, &end,
2839 					   EXTENT_DIRTY, &cached_state)) {
2840 			mutex_unlock(&fs_info->unused_bg_unpin_mutex);
2841 			break;
2842 		}
2843 
2844 		if (btrfs_test_opt(fs_info, DISCARD_SYNC))
2845 			ret = btrfs_discard_extent(fs_info, start,
2846 						   end + 1 - start, NULL);
2847 
2848 		clear_extent_dirty(unpin, start, end, &cached_state);
2849 		ret = unpin_extent_range(fs_info, start, end, true);
2850 		BUG_ON(ret);
2851 		mutex_unlock(&fs_info->unused_bg_unpin_mutex);
2852 		free_extent_state(cached_state);
2853 		cond_resched();
2854 	}
2855 
2856 	if (btrfs_test_opt(fs_info, DISCARD_ASYNC)) {
2857 		btrfs_discard_calc_delay(&fs_info->discard_ctl);
2858 		btrfs_discard_schedule_work(&fs_info->discard_ctl, true);
2859 	}
2860 
2861 	/*
2862 	 * Transaction is finished.  We don't need the lock anymore.  We
2863 	 * do need to clean up the block groups in case of a transaction
2864 	 * abort.
2865 	 */
2866 	deleted_bgs = &trans->transaction->deleted_bgs;
2867 	list_for_each_entry_safe(block_group, tmp, deleted_bgs, bg_list) {
2868 		u64 trimmed = 0;
2869 
2870 		ret = -EROFS;
2871 		if (!TRANS_ABORTED(trans))
2872 			ret = btrfs_discard_extent(fs_info,
2873 						   block_group->start,
2874 						   block_group->length,
2875 						   &trimmed);
2876 
2877 		/*
2878 		 * Not strictly necessary to lock, as the block_group should be
2879 		 * read-only from btrfs_delete_unused_bgs().
2880 		 */
2881 		ASSERT(block_group->ro);
2882 		spin_lock(&fs_info->unused_bgs_lock);
2883 		list_del_init(&block_group->bg_list);
2884 		spin_unlock(&fs_info->unused_bgs_lock);
2885 
2886 		btrfs_unfreeze_block_group(block_group);
2887 		btrfs_put_block_group(block_group);
2888 
2889 		if (ret) {
2890 			const char *errstr = btrfs_decode_error(ret);
2891 			btrfs_warn(fs_info,
2892 			   "discard failed while removing blockgroup: errno=%d %s",
2893 				   ret, errstr);
2894 		}
2895 	}
2896 
2897 	return 0;
2898 }
2899 
2900 /*
2901  * Parse an extent item's inline extents looking for a simple quotas owner ref.
2902  *
2903  * @fs_info:	the btrfs_fs_info for this mount
2904  * @leaf:	a leaf in the extent tree containing the extent item
2905  * @slot:	the slot in the leaf where the extent item is found
2906  *
2907  * Returns the objectid of the root that originally allocated the extent item
2908  * if the inline owner ref is expected and present, otherwise 0.
2909  *
2910  * If an extent item has an owner ref item, it will be the first inline ref
2911  * item. Therefore the logic is to check whether there are any inline ref
2912  * items, then check the type of the first one.
2913  */
2914 u64 btrfs_get_extent_owner_root(struct btrfs_fs_info *fs_info,
2915 				struct extent_buffer *leaf, int slot)
2916 {
2917 	struct btrfs_extent_item *ei;
2918 	struct btrfs_extent_inline_ref *iref;
2919 	struct btrfs_extent_owner_ref *oref;
2920 	unsigned long ptr;
2921 	unsigned long end;
2922 	int type;
2923 
2924 	if (!btrfs_fs_incompat(fs_info, SIMPLE_QUOTA))
2925 		return 0;
2926 
2927 	ei = btrfs_item_ptr(leaf, slot, struct btrfs_extent_item);
2928 	ptr = (unsigned long)(ei + 1);
2929 	end = (unsigned long)ei + btrfs_item_size(leaf, slot);
2930 
2931 	/* No inline ref items of any kind, can't check type. */
2932 	if (ptr == end)
2933 		return 0;
2934 
2935 	iref = (struct btrfs_extent_inline_ref *)ptr;
2936 	type = btrfs_get_extent_inline_ref_type(leaf, iref, BTRFS_REF_TYPE_ANY);
2937 
2938 	/* We found an owner ref, get the root out of it. */
2939 	if (type == BTRFS_EXTENT_OWNER_REF_KEY) {
2940 		oref = (struct btrfs_extent_owner_ref *)(&iref->offset);
2941 		return btrfs_extent_owner_ref_root_id(leaf, oref);
2942 	}
2943 
2944 	/* We have inline refs, but not an owner ref. */
2945 	return 0;
2946 }
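
/*
 * Illustrative leaf layout for the lookup above with simple quotas enabled
 * (values made up, only the ordering matters): the owner ref, when
 * present, is always the first inline ref item:
 *
 *	item 0 key (13631488 EXTENT_ITEM 1048576) itemoff 16201 itemsize 61
 *		refs 1 gen 6 flags DATA
 *		extent owner root 256
 *		extent data backref root FS_TREE objectid 257 offset 0 count 1
 */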
2947 
2948 static int do_free_extent_accounting(struct btrfs_trans_handle *trans,
2949 				     u64 bytenr, struct btrfs_squota_delta *delta)
2950 {
2951 	int ret;
2952 	u64 num_bytes = delta->num_bytes;
2953 
2954 	if (delta->is_data) {
2955 		struct btrfs_root *csum_root;
2956 
2957 		csum_root = btrfs_csum_root(trans->fs_info, bytenr);
2958 		ret = btrfs_del_csums(trans, csum_root, bytenr, num_bytes);
2959 		if (ret) {
2960 			btrfs_abort_transaction(trans, ret);
2961 			return ret;
2962 		}
2963 
2964 		ret = btrfs_delete_raid_extent(trans, bytenr, num_bytes);
2965 		if (ret) {
2966 			btrfs_abort_transaction(trans, ret);
2967 			return ret;
2968 		}
2969 	}
2970 
2971 	ret = btrfs_record_squota_delta(trans->fs_info, delta);
2972 	if (ret) {
2973 		btrfs_abort_transaction(trans, ret);
2974 		return ret;
2975 	}
2976 
2977 	ret = add_to_free_space_tree(trans, bytenr, num_bytes);
2978 	if (ret) {
2979 		btrfs_abort_transaction(trans, ret);
2980 		return ret;
2981 	}
2982 
2983 	ret = btrfs_update_block_group(trans, bytenr, num_bytes, false);
2984 	if (ret)
2985 		btrfs_abort_transaction(trans, ret);
2986 
2987 	return ret;
2988 }
2989 
2990 #define abort_and_dump(trans, path, fmt, args...)	\
2991 ({							\
2992 	btrfs_abort_transaction(trans, -EUCLEAN);	\
2993 	btrfs_print_leaf(path->nodes[0]);		\
2994 	btrfs_crit(trans->fs_info, fmt, ##args);	\
2995 })
2996 
2997 /*
2998  * Drop one or more refs of @node.
2999  *
3000  * 1. Locate the extent refs.
3001  *    It's either inline in EXTENT/METADATA_ITEM or in keyed SHARED_* item.
3002  *    Locate it, then reduce the refs number or remove the ref line completely.
3003  *
3004  * 2. Update the refs count in EXTENT/METADATA_ITEM
3005  *
3006  * Inline backref case:
3007  *
3008  * in extent tree we have:
3009  *
3010  * 	item 0 key (13631488 EXTENT_ITEM 1048576) itemoff 16201 itemsize 82
3011  *		refs 2 gen 6 flags DATA
3012  *		extent data backref root FS_TREE objectid 258 offset 0 count 1
3013  *		extent data backref root FS_TREE objectid 257 offset 0 count 1
3014  *
3015  * This function gets called with:
3016  *
3017  *    node->bytenr = 13631488
3018  *    node->num_bytes = 1048576
3019  *    root_objectid = FS_TREE
3020  *    owner_objectid = 257
3021  *    owner_offset = 0
3022  *    refs_to_drop = 1
3023  *
3024  * Then we should get something like:
3025  *
3026  * 	item 0 key (13631488 EXTENT_ITEM 1048576) itemoff 16201 itemsize 82
3027  *		refs 1 gen 6 flags DATA
3028  *		extent data backref root FS_TREE objectid 258 offset 0 count 1
3029  *
3030  * Keyed backref case:
3031  *
3032  * in extent tree we have:
3033  *
3034  *	item 0 key (13631488 EXTENT_ITEM 1048576) itemoff 3971 itemsize 24
3035  *		refs 754 gen 6 flags DATA
3036  *	[...]
3037  *	item 2 key (13631488 EXTENT_DATA_REF <HASH>) itemoff 3915 itemsize 28
3038  *		extent data backref root FS_TREE objectid 866 offset 0 count 1
3039  *
3040  * This function gets called with:
3041  *
3042  *    node->bytenr = 13631488
3043  *    node->num_bytes = 1048576
3044  *    root_objectid = FS_TREE
3045  *    owner_objectid = 866
3046  *    owner_offset = 0
3047  *    refs_to_drop = 1
3048  *
3049  * Then we should get something like:
3050  *
3051  *	item 0 key (13631488 EXTENT_ITEM 1048576) itemoff 3971 itemsize 24
3052  *		refs 753 gen 6 flags DATA
3053  *
3054  * And that (13631488 EXTENT_DATA_REF <HASH>) gets removed.
3055  */
3056 static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
3057 			       struct btrfs_delayed_ref_head *href,
3058 			       struct btrfs_delayed_ref_node *node,
3059 			       struct btrfs_delayed_extent_op *extent_op)
3060 {
3061 	struct btrfs_fs_info *info = trans->fs_info;
3062 	struct btrfs_key key;
3063 	struct btrfs_path *path;
3064 	struct btrfs_root *extent_root;
3065 	struct extent_buffer *leaf;
3066 	struct btrfs_extent_item *ei;
3067 	struct btrfs_extent_inline_ref *iref;
3068 	int ret;
3069 	int is_data;
3070 	int extent_slot = 0;
3071 	int found_extent = 0;
3072 	int num_to_del = 1;
3073 	int refs_to_drop = node->ref_mod;
3074 	u32 item_size;
3075 	u64 refs;
3076 	u64 bytenr = node->bytenr;
3077 	u64 num_bytes = node->num_bytes;
3078 	u64 owner_objectid = btrfs_delayed_ref_owner(node);
3079 	u64 owner_offset = btrfs_delayed_ref_offset(node);
3080 	bool skinny_metadata = btrfs_fs_incompat(info, SKINNY_METADATA);
3081 	u64 delayed_ref_root = href->owning_root;
3082 
3083 	extent_root = btrfs_extent_root(info, bytenr);
3084 	ASSERT(extent_root);
3085 
3086 	path = btrfs_alloc_path();
3087 	if (!path)
3088 		return -ENOMEM;
3089 
3090 	is_data = owner_objectid >= BTRFS_FIRST_FREE_OBJECTID;
3091 
3092 	if (!is_data && refs_to_drop != 1) {
3093 		btrfs_crit(info,
3094 "invalid refs_to_drop, dropping more than 1 refs for tree block %llu refs_to_drop %u",
3095 			   node->bytenr, refs_to_drop);
3096 		ret = -EINVAL;
3097 		btrfs_abort_transaction(trans, ret);
3098 		goto out;
3099 	}
3100 
3101 	if (is_data)
3102 		skinny_metadata = false;
3103 
3104 	ret = lookup_extent_backref(trans, path, &iref, bytenr, num_bytes,
3105 				    node->parent, node->ref_root, owner_objectid,
3106 				    owner_offset);
3107 	if (ret == 0) {
3108 		/*
3109 		 * Either the inline backref or the SHARED_DATA_REF/
3110 		 * SHARED_BLOCK_REF is found
3111 		 *
3112 		 * Here is a quick path to locate EXTENT/METADATA_ITEM.
3113 		 * It's possible the EXTENT/METADATA_ITEM is near the current slot.
3114 		 */
3115 		extent_slot = path->slots[0];
3116 		while (extent_slot >= 0) {
3117 			btrfs_item_key_to_cpu(path->nodes[0], &key,
3118 					      extent_slot);
3119 			if (key.objectid != bytenr)
3120 				break;
3121 			if (key.type == BTRFS_EXTENT_ITEM_KEY &&
3122 			    key.offset == num_bytes) {
3123 				found_extent = 1;
3124 				break;
3125 			}
3126 			if (key.type == BTRFS_METADATA_ITEM_KEY &&
3127 			    key.offset == owner_objectid) {
3128 				found_extent = 1;
3129 				break;
3130 			}
3131 
3132 			/* Quick path didn't find the EXTENT/METADATA_ITEM */
3133 			if (path->slots[0] - extent_slot > 5)
3134 				break;
3135 			extent_slot--;
3136 		}
3137 
3138 		if (!found_extent) {
3139 			if (iref) {
3140 				abort_and_dump(trans, path,
3141 "invalid iref slot %u, no EXTENT/METADATA_ITEM found but has inline extent ref",
3142 					   path->slots[0]);
3143 				ret = -EUCLEAN;
3144 				goto out;
3145 			}
3146 			/* Must be SHARED_* item, remove the backref first */
3147 			ret = remove_extent_backref(trans, extent_root, path,
3148 						    NULL, refs_to_drop, is_data);
3149 			if (ret) {
3150 				btrfs_abort_transaction(trans, ret);
3151 				goto out;
3152 			}
3153 			btrfs_release_path(path);
3154 
3155 			/* Slow path to locate EXTENT/METADATA_ITEM */
3156 			key.objectid = bytenr;
3157 			key.type = BTRFS_EXTENT_ITEM_KEY;
3158 			key.offset = num_bytes;
3159 
3160 			if (!is_data && skinny_metadata) {
3161 				key.type = BTRFS_METADATA_ITEM_KEY;
3162 				key.offset = owner_objectid;
3163 			}
3164 
3165 			ret = btrfs_search_slot(trans, extent_root,
3166 						&key, path, -1, 1);
3167 			if (ret > 0 && skinny_metadata && path->slots[0]) {
3168 				/*
3169 				 * Couldn't find our skinny metadata item,
3170 				 * see if we have ye olde extent item.
3171 				 */
3172 				path->slots[0]--;
3173 				btrfs_item_key_to_cpu(path->nodes[0], &key,
3174 						      path->slots[0]);
3175 				if (key.objectid == bytenr &&
3176 				    key.type == BTRFS_EXTENT_ITEM_KEY &&
3177 				    key.offset == num_bytes)
3178 					ret = 0;
3179 			}
3180 
3181 			if (ret > 0 && skinny_metadata) {
3182 				skinny_metadata = false;
3183 				key.objectid = bytenr;
3184 				key.type = BTRFS_EXTENT_ITEM_KEY;
3185 				key.offset = num_bytes;
3186 				btrfs_release_path(path);
3187 				ret = btrfs_search_slot(trans, extent_root,
3188 							&key, path, -1, 1);
3189 			}
3190 
3191 			if (ret) {
3192 				if (ret > 0)
3193 					btrfs_print_leaf(path->nodes[0]);
3194 				btrfs_err(info,
3195 			"umm, got %d back from search, was looking for %llu, slot %d",
3196 					  ret, bytenr, path->slots[0]);
3197 			}
3198 			if (ret < 0) {
3199 				btrfs_abort_transaction(trans, ret);
3200 				goto out;
3201 			}
3202 			extent_slot = path->slots[0];
3203 		}
3204 	} else if (WARN_ON(ret == -ENOENT)) {
3205 		abort_and_dump(trans, path,
3206 "unable to find ref byte nr %llu parent %llu root %llu owner %llu offset %llu slot %d",
3207 			       bytenr, node->parent, node->ref_root, owner_objectid,
3208 			       owner_offset, path->slots[0]);
3209 		goto out;
3210 	} else {
3211 		btrfs_abort_transaction(trans, ret);
3212 		goto out;
3213 	}
3214 
3215 	leaf = path->nodes[0];
3216 	item_size = btrfs_item_size(leaf, extent_slot);
3217 	if (unlikely(item_size < sizeof(*ei))) {
3218 		ret = -EUCLEAN;
3219 		btrfs_err(trans->fs_info,
3220 			  "unexpected extent item size, has %u expect >= %zu",
3221 			  item_size, sizeof(*ei));
3222 		btrfs_abort_transaction(trans, ret);
3223 		goto out;
3224 	}
3225 	ei = btrfs_item_ptr(leaf, extent_slot,
3226 			    struct btrfs_extent_item);
3227 	if (owner_objectid < BTRFS_FIRST_FREE_OBJECTID &&
3228 	    key.type == BTRFS_EXTENT_ITEM_KEY) {
3229 		struct btrfs_tree_block_info *bi;
3230 
3231 		if (item_size < sizeof(*ei) + sizeof(*bi)) {
3232 			abort_and_dump(trans, path,
3233 "invalid extent item size for key (%llu, %u, %llu) slot %u owner %llu, has %u expect >= %zu",
3234 				       key.objectid, key.type, key.offset,
3235 				       path->slots[0], owner_objectid, item_size,
3236 				       sizeof(*ei) + sizeof(*bi));
3237 			ret = -EUCLEAN;
3238 			goto out;
3239 		}
3240 		bi = (struct btrfs_tree_block_info *)(ei + 1);
3241 		WARN_ON(owner_objectid != btrfs_tree_block_level(leaf, bi));
3242 	}
3243 
3244 	refs = btrfs_extent_refs(leaf, ei);
3245 	if (refs < refs_to_drop) {
3246 		abort_and_dump(trans, path,
3247 		"trying to drop %d refs but we only have %llu for bytenr %llu slot %u",
3248 			       refs_to_drop, refs, bytenr, path->slots[0]);
3249 		ret = -EUCLEAN;
3250 		goto out;
3251 	}
3252 	refs -= refs_to_drop;
3253 
3254 	if (refs > 0) {
3255 		if (extent_op)
3256 			__run_delayed_extent_op(extent_op, leaf, ei);
3257 		/*
3258 		 * In the case of inline back ref, reference count will
3259 		 * be updated by remove_extent_backref
3260 		 */
3261 		if (iref) {
3262 			if (!found_extent) {
3263 				abort_and_dump(trans, path,
3264 "invalid iref, got inlined extent ref but no EXTENT/METADATA_ITEM found, slot %u",
3265 					       path->slots[0]);
3266 				ret = -EUCLEAN;
3267 				goto out;
3268 			}
3269 		} else {
3270 			btrfs_set_extent_refs(leaf, ei, refs);
3271 		}
3272 		if (found_extent) {
3273 			ret = remove_extent_backref(trans, extent_root, path,
3274 						    iref, refs_to_drop, is_data);
3275 			if (ret) {
3276 				btrfs_abort_transaction(trans, ret);
3277 				goto out;
3278 			}
3279 		}
3280 	} else {
3281 		struct btrfs_squota_delta delta = {
3282 			.root = delayed_ref_root,
3283 			.num_bytes = num_bytes,
3284 			.is_data = is_data,
3285 			.is_inc = false,
3286 			.generation = btrfs_extent_generation(leaf, ei),
3287 		};
3288 
3289 		/* In this branch refs == 1 */
3290 		if (found_extent) {
3291 			if (is_data && refs_to_drop !=
3292 			    extent_data_ref_count(path, iref)) {
3293 				abort_and_dump(trans, path,
3294 		"invalid refs_to_drop, current refs %u refs_to_drop %u slot %u",
3295 					       extent_data_ref_count(path, iref),
3296 					       refs_to_drop, path->slots[0]);
3297 				ret = -EUCLEAN;
3298 				goto out;
3299 			}
3300 			if (iref) {
3301 				if (path->slots[0] != extent_slot) {
3302 					abort_and_dump(trans, path,
3303 "invalid iref, extent item key (%llu %u %llu) slot %u doesn't have wanted iref",
3304 						       key.objectid, key.type,
3305 						       key.offset, path->slots[0]);
3306 					ret = -EUCLEAN;
3307 					goto out;
3308 				}
3309 			} else {
3310 				/*
3311 				 * No inline ref, we must be at a SHARED_* item,
3312 				 * and since it's a single ref it must be:
3313 				 * |	extent_slot	  ||extent_slot + 1|
3314 				 * [ EXTENT/METADATA_ITEM ][ SHARED_* ITEM ]
3315 				 */
3316 				if (path->slots[0] != extent_slot + 1) {
3317 					abort_and_dump(trans, path,
3318 	"invalid SHARED_* item slot %u, previous item is not EXTENT/METADATA_ITEM",
3319 						       path->slots[0]);
3320 					ret = -EUCLEAN;
3321 					goto out;
3322 				}
3323 				path->slots[0] = extent_slot;
3324 				num_to_del = 2;
3325 			}
3326 		}
3327 		/*
3328 		 * We can't infer the data owner from the delayed ref, so we need
3329 		 * to try to get it from the owning ref item.
3330 		 *
3331 		 * If it is not present, then that extent was not written under
3332 		 * simple quotas mode, so we don't need to account for its deletion.
3333 		 */
3334 		if (is_data)
3335 			delta.root = btrfs_get_extent_owner_root(trans->fs_info,
3336 								 leaf, extent_slot);
3337 
3338 		ret = btrfs_del_items(trans, extent_root, path, path->slots[0],
3339 				      num_to_del);
3340 		if (ret) {
3341 			btrfs_abort_transaction(trans, ret);
3342 			goto out;
3343 		}
3344 		btrfs_release_path(path);
3345 
3346 		ret = do_free_extent_accounting(trans, bytenr, &delta);
3347 	}
3348 	btrfs_release_path(path);
3349 
3350 out:
3351 	btrfs_free_path(path);
3352 	return ret;
3353 }
3354 
3355 /*
3356  * when we free a block, it is possible (and likely) that we free the last
3357  * delayed ref for that extent as well.  This searches the delayed ref tree for
3358  * a given extent, and if there are no other delayed refs to be processed, it
3359  * removes it from the tree.
3360  */
3361 static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
3362 				      u64 bytenr)
3363 {
3364 	struct btrfs_fs_info *fs_info = trans->fs_info;
3365 	struct btrfs_delayed_ref_head *head;
3366 	struct btrfs_delayed_ref_root *delayed_refs;
3367 	int ret = 0;
3368 
3369 	delayed_refs = &trans->transaction->delayed_refs;
3370 	spin_lock(&delayed_refs->lock);
3371 	head = btrfs_find_delayed_ref_head(fs_info, delayed_refs, bytenr);
3372 	if (!head)
3373 		goto out_delayed_unlock;
3374 
3375 	spin_lock(&head->lock);
3376 	if (!RB_EMPTY_ROOT(&head->ref_tree.rb_root))
3377 		goto out;
3378 
3379 	if (cleanup_extent_op(head) != NULL)
3380 		goto out;
3381 
3382 	/*
3383 	 * waiting for the lock here would deadlock.  If someone else has it
3384 	 * locked they are already in the process of dropping it anyway
3385 	 */
3386 	if (!mutex_trylock(&head->mutex))
3387 		goto out;
3388 
3389 	btrfs_delete_ref_head(fs_info, delayed_refs, head);
3390 	head->processing = false;
3391 
3392 	spin_unlock(&head->lock);
3393 	spin_unlock(&delayed_refs->lock);
3394 
3395 	BUG_ON(head->extent_op);
3396 	if (head->must_insert_reserved)
3397 		ret = 1;
3398 
3399 	btrfs_cleanup_ref_head_accounting(fs_info, delayed_refs, head);
3400 	mutex_unlock(&head->mutex);
3401 	btrfs_put_delayed_ref_head(head);
3402 	return ret;
3403 out:
3404 	spin_unlock(&head->lock);
3405 
3406 out_delayed_unlock:
3407 	spin_unlock(&delayed_refs->lock);
3408 	return 0;
3409 }
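
/*
 * A note on the return value above (a reading of the code, not a formal
 * contract): returning 1 means the delayed ref head was removed while
 * must_insert_reserved was set, i.e. the extent was never inserted into the
 * extent tree, so the caller still owns the reserved space and must release
 * it, as btrfs_free_tree_block() below does by pinning the range or
 * returning it to the free space cache.  Returning 0 means the caller has
 * nothing more to do here.
 */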
3410 
3411 int btrfs_free_tree_block(struct btrfs_trans_handle *trans,
3412 			  u64 root_id,
3413 			  struct extent_buffer *buf,
3414 			  u64 parent, int last_ref)
3415 {
3416 	struct btrfs_fs_info *fs_info = trans->fs_info;
3417 	struct btrfs_block_group *bg;
3418 	int ret;
3419 
3420 	if (root_id != BTRFS_TREE_LOG_OBJECTID) {
3421 		struct btrfs_ref generic_ref = {
3422 			.action = BTRFS_DROP_DELAYED_REF,
3423 			.bytenr = buf->start,
3424 			.num_bytes = buf->len,
3425 			.parent = parent,
3426 			.owning_root = btrfs_header_owner(buf),
3427 			.ref_root = root_id,
3428 		};
3429 
3430 		/*
3431 		 * Assert that the extent buffer is not cleared due to
3432 		 * EXTENT_BUFFER_ZONED_ZEROOUT. Please refer to
3433 		 * btrfs_clear_buffer_dirty() and btree_csum_one_bio() for
3434 		 * details.
3435 		 */
3436 		ASSERT(btrfs_header_bytenr(buf) != 0);
3437 
3438 		btrfs_init_tree_ref(&generic_ref, btrfs_header_level(buf), 0, false);
3439 		btrfs_ref_tree_mod(fs_info, &generic_ref);
3440 		ret = btrfs_add_delayed_tree_ref(trans, &generic_ref, NULL);
3441 		if (ret < 0)
3442 			return ret;
3443 	}
3444 
3445 	if (!last_ref)
3446 		return 0;
3447 
3448 	if (btrfs_header_generation(buf) != trans->transid)
3449 		goto out;
3450 
3451 	if (root_id != BTRFS_TREE_LOG_OBJECTID) {
3452 		ret = check_ref_cleanup(trans, buf->start);
3453 		if (!ret)
3454 			goto out;
3455 	}
3456 
3457 	bg = btrfs_lookup_block_group(fs_info, buf->start);
3458 
3459 	if (btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) {
3460 		pin_down_extent(trans, bg, buf->start, buf->len, 1);
3461 		btrfs_put_block_group(bg);
3462 		goto out;
3463 	}
3464 
3465 	/*
3466 	 * If there are tree mod log users we may have recorded mod log
3467 	 * operations for this node.  If we re-allocate this node we
3468 	 * could replay operations on this node that happened when it
3469 	 * existed in a completely different root.  For example if it
3470 	 * was part of root A, then was reallocated to root B, and we
3471 	 * are doing a btrfs_old_search_slot(root b), we could replay
3472 	 * operations that happened when the block was part of root A,
3473 	 * giving us an inconsistent view of the btree.
3474 	 *
3475 	 * We are safe from races here because at this point no other
3476 	 * node or root points to this extent buffer, so if after this
3477 	 * check a new tree mod log user joins we will not have an
3478 	 * existing log of operations on this node that we have to
3479 	 * contend with.
3480 	 */
3481 
3482 	if (test_bit(BTRFS_FS_TREE_MOD_LOG_USERS, &fs_info->flags)
3483 		     || btrfs_is_zoned(fs_info)) {
3484 		pin_down_extent(trans, bg, buf->start, buf->len, 1);
3485 		btrfs_put_block_group(bg);
3486 		goto out;
3487 	}
3488 
3489 	WARN_ON(test_bit(EXTENT_BUFFER_DIRTY, &buf->bflags));
3490 
3491 	btrfs_add_free_space(bg, buf->start, buf->len);
3492 	btrfs_free_reserved_bytes(bg, buf->len, 0);
3493 	btrfs_put_block_group(bg);
3494 	trace_btrfs_reserved_extent_free(fs_info, buf->start, buf->len);
3495 
3496 out:
3497 
3498 	/*
3499 	 * Deleting the buffer, clear the corrupt flag since it doesn't
3500 	 * matter anymore.
3501 	 */
3502 	clear_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags);
3503 	return 0;
3504 }
3505 
3506 /* Can return -ENOMEM */
3507 int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_ref *ref)
3508 {
3509 	struct btrfs_fs_info *fs_info = trans->fs_info;
3510 	int ret;
3511 
3512 	if (btrfs_is_testing(fs_info))
3513 		return 0;
3514 
3515 	/*
3516 	 * tree log blocks never actually go into the extent allocation
3517 	 * tree, just update pinning info and exit early.
3518 	 */
3519 	if (ref->ref_root == BTRFS_TREE_LOG_OBJECTID) {
3520 		btrfs_pin_extent(trans, ref->bytenr, ref->num_bytes, 1);
3521 		ret = 0;
3522 	} else if (ref->type == BTRFS_REF_METADATA) {
3523 		ret = btrfs_add_delayed_tree_ref(trans, ref, NULL);
3524 	} else {
3525 		ret = btrfs_add_delayed_data_ref(trans, ref, 0);
3526 	}
3527 
3528 	if (ref->ref_root != BTRFS_TREE_LOG_OBJECTID)
3529 		btrfs_ref_tree_mod(fs_info, ref);
3530 
3531 	return ret;
3532 }
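
/*
 * Usage sketch for btrfs_free_extent() (hypothetical caller; the field
 * values are illustrative only).  Real callers fill the fields from their
 * file extent item; see also btrfs_init_data_ref() used near the end of
 * this file.
 *
 *	struct btrfs_ref ref = {
 *		.action = BTRFS_DROP_DELAYED_REF,
 *		.bytenr = extent_start,
 *		.num_bytes = extent_len,
 *		.owning_root = btrfs_root_id(root),
 *		.ref_root = btrfs_root_id(root),
 *	};
 *
 *	btrfs_init_data_ref(&ref, inode_objectid, file_offset, 0, false);
 *	ret = btrfs_free_extent(trans, &ref);
 *
 * This only queues a delayed ref; the actual extent tree update happens
 * when the delayed refs are run.
 */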
3533 
3534 enum btrfs_loop_type {
3535 	/*
3536 	 * Start caching block groups but do not wait for progress or for them
3537 	 * to be done.
3538 	 */
3539 	LOOP_CACHING_NOWAIT,
3540 
3541 	/*
3542 	 * Wait until the block group's free_space is >= the space we're
3543 	 * waiting for, if the block group isn't fully cached.
3544 	 */
3545 	LOOP_CACHING_WAIT,
3546 
3547 	/*
3548 	 * Allow allocations to happen from block groups that do not yet have a
3549 	 * size classification.
3550 	 */
3551 	LOOP_UNSET_SIZE_CLASS,
3552 
3553 	/*
3554 	 * Allocate a chunk and then retry the allocation.
3555 	 */
3556 	LOOP_ALLOC_CHUNK,
3557 
3558 	/*
3559 	 * Ignore the size class restrictions for this allocation.
3560 	 */
3561 	LOOP_WRONG_SIZE_CLASS,
3562 
3563 	/*
3564 	 * Ignore the empty size, only try to allocate the number of bytes
3565 	 * needed for this allocation.
3566 	 */
3567 	LOOP_NO_EMPTY_SIZE,
3568 };
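
/*
 * Rough sketch of how the stages above escalate (see
 * find_free_extent_update_loop() for the authoritative logic): every failed
 * pass over all raid indexes advances ffe_ctl->loop to the next, more
 * aggressive stage:
 *
 *	LOOP_CACHING_NOWAIT -> LOOP_CACHING_WAIT -> LOOP_UNSET_SIZE_CLASS ->
 *	LOOP_ALLOC_CHUNK -> LOOP_WRONG_SIZE_CLASS -> LOOP_NO_EMPTY_SIZE
 *
 * Only the clustered policy makes use of LOOP_NO_EMPTY_SIZE; the zoned
 * policy returns -ENOSPC when it reaches that stage.
 */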
3569 
3570 static inline void
3571 btrfs_lock_block_group(struct btrfs_block_group *cache,
3572 		       int delalloc)
3573 {
3574 	if (delalloc)
3575 		down_read(&cache->data_rwsem);
3576 }
3577 
3578 static inline void btrfs_grab_block_group(struct btrfs_block_group *cache,
3579 		       int delalloc)
3580 {
3581 	btrfs_get_block_group(cache);
3582 	if (delalloc)
3583 		down_read(&cache->data_rwsem);
3584 }
3585 
3586 static struct btrfs_block_group *btrfs_lock_cluster(
3587 		   struct btrfs_block_group *block_group,
3588 		   struct btrfs_free_cluster *cluster,
3589 		   int delalloc)
3590 	__acquires(&cluster->refill_lock)
3591 {
3592 	struct btrfs_block_group *used_bg = NULL;
3593 
3594 	spin_lock(&cluster->refill_lock);
3595 	while (1) {
3596 		used_bg = cluster->block_group;
3597 		if (!used_bg)
3598 			return NULL;
3599 
3600 		if (used_bg == block_group)
3601 			return used_bg;
3602 
3603 		btrfs_get_block_group(used_bg);
3604 
3605 		if (!delalloc)
3606 			return used_bg;
3607 
3608 		if (down_read_trylock(&used_bg->data_rwsem))
3609 			return used_bg;
3610 
3611 		spin_unlock(&cluster->refill_lock);
3612 
3613 		/* We should only have one level of nesting. */
3614 		down_read_nested(&used_bg->data_rwsem, SINGLE_DEPTH_NESTING);
3615 
3616 		spin_lock(&cluster->refill_lock);
3617 		if (used_bg == cluster->block_group)
3618 			return used_bg;
3619 
3620 		up_read(&used_bg->data_rwsem);
3621 		btrfs_put_block_group(used_bg);
3622 	}
3623 }
3624 
3625 static inline void
3626 btrfs_release_block_group(struct btrfs_block_group *cache,
3627 			 int delalloc)
3628 {
3629 	if (delalloc)
3630 		up_read(&cache->data_rwsem);
3631 	btrfs_put_block_group(cache);
3632 }
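
/*
 * Usage sketch for the helpers above (hypothetical caller): grab and release
 * must be paired so that, when delalloc is set, data_rwsem is read-locked
 * and unlocked exactly once per block group reference.
 *
 *	btrfs_grab_block_group(bg, delalloc);	   ref + down_read(data_rwsem)
 *	... try to allocate from bg ...
 *	btrfs_release_block_group(bg, delalloc);   up_read(data_rwsem) + put ref
 */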
3633 
3634 /*
3635  * Helper function for find_free_extent().
3636  *
3637  * Return -ENOENT to inform the caller that we need to fall back to unclustered mode.
3638  * Return >0 to inform the caller that we found nothing.
3639  * Return 0 when we have found a location and set ffe_ctl->found_offset.
3640  */
3641 static int find_free_extent_clustered(struct btrfs_block_group *bg,
3642 				      struct find_free_extent_ctl *ffe_ctl,
3643 				      struct btrfs_block_group **cluster_bg_ret)
3644 {
3645 	struct btrfs_block_group *cluster_bg;
3646 	struct btrfs_free_cluster *last_ptr = ffe_ctl->last_ptr;
3647 	u64 aligned_cluster;
3648 	u64 offset;
3649 	int ret;
3650 
3651 	cluster_bg = btrfs_lock_cluster(bg, last_ptr, ffe_ctl->delalloc);
3652 	if (!cluster_bg)
3653 		goto refill_cluster;
3654 	if (cluster_bg != bg && (cluster_bg->ro ||
3655 	    !block_group_bits(cluster_bg, ffe_ctl->flags)))
3656 		goto release_cluster;
3657 
3658 	offset = btrfs_alloc_from_cluster(cluster_bg, last_ptr,
3659 			ffe_ctl->num_bytes, cluster_bg->start,
3660 			&ffe_ctl->max_extent_size);
3661 	if (offset) {
3662 		/* We have a block, we're done */
3663 		spin_unlock(&last_ptr->refill_lock);
3664 		trace_btrfs_reserve_extent_cluster(cluster_bg, ffe_ctl);
3665 		*cluster_bg_ret = cluster_bg;
3666 		ffe_ctl->found_offset = offset;
3667 		return 0;
3668 	}
3669 	WARN_ON(last_ptr->block_group != cluster_bg);
3670 
3671 release_cluster:
3672 	/*
3673 	 * If we are on LOOP_NO_EMPTY_SIZE, we can't set up a new cluster, so
3674 	 * let's just skip it and let the allocator find whatever block it can
3675 	 * find. If we reach this point, we will have tried the cluster
3676 	 * allocator plenty of times and not have found anything, so we are
3677 	 * likely way too fragmented for the clustering stuff to find anything.
3678 	 *
3679 	 * However, if the cluster is taken from the current block group,
3680 	 * release the cluster first, so that we stand a better chance of
3681 	 * succeeding in the unclustered allocation.
3682 	 */
3683 	if (ffe_ctl->loop >= LOOP_NO_EMPTY_SIZE && cluster_bg != bg) {
3684 		spin_unlock(&last_ptr->refill_lock);
3685 		btrfs_release_block_group(cluster_bg, ffe_ctl->delalloc);
3686 		return -ENOENT;
3687 	}
3688 
3689 	/* This cluster didn't work out, free it and start over */
3690 	btrfs_return_cluster_to_free_space(NULL, last_ptr);
3691 
3692 	if (cluster_bg != bg)
3693 		btrfs_release_block_group(cluster_bg, ffe_ctl->delalloc);
3694 
3695 refill_cluster:
3696 	if (ffe_ctl->loop >= LOOP_NO_EMPTY_SIZE) {
3697 		spin_unlock(&last_ptr->refill_lock);
3698 		return -ENOENT;
3699 	}
3700 
3701 	aligned_cluster = max_t(u64,
3702 			ffe_ctl->empty_cluster + ffe_ctl->empty_size,
3703 			bg->full_stripe_len);
3704 	ret = btrfs_find_space_cluster(bg, last_ptr, ffe_ctl->search_start,
3705 			ffe_ctl->num_bytes, aligned_cluster);
3706 	if (ret == 0) {
3707 		/* Now pull our allocation out of this cluster */
3708 		offset = btrfs_alloc_from_cluster(bg, last_ptr,
3709 				ffe_ctl->num_bytes, ffe_ctl->search_start,
3710 				&ffe_ctl->max_extent_size);
3711 		if (offset) {
3712 			/* We found one, proceed */
3713 			spin_unlock(&last_ptr->refill_lock);
3714 			ffe_ctl->found_offset = offset;
3715 			trace_btrfs_reserve_extent_cluster(bg, ffe_ctl);
3716 			return 0;
3717 		}
3718 	}
3719 	/*
3720 	 * At this point we either didn't find a cluster or we weren't able to
3721 	 * allocate a block from our cluster.  Free the cluster we've been
3722 	 * trying to use, and go to the next block group.
3723 	 */
3724 	btrfs_return_cluster_to_free_space(NULL, last_ptr);
3725 	spin_unlock(&last_ptr->refill_lock);
3726 	return 1;
3727 }
3728 
3729 /*
3730  * Return >0 to inform the caller that we found nothing.
3731  * Return 0 when we found a free extent and set ffe_ctl->found_offset.
3732  */
3733 static int find_free_extent_unclustered(struct btrfs_block_group *bg,
3734 					struct find_free_extent_ctl *ffe_ctl)
3735 {
3736 	struct btrfs_free_cluster *last_ptr = ffe_ctl->last_ptr;
3737 	u64 offset;
3738 
3739 	/*
3740 	 * We are doing an unclustered allocation, set the fragmented flag so
3741 	 * we don't bother trying to set up a cluster again until we get more
3742 	 * space.
3743 	 */
3744 	if (unlikely(last_ptr)) {
3745 		spin_lock(&last_ptr->lock);
3746 		last_ptr->fragmented = 1;
3747 		spin_unlock(&last_ptr->lock);
3748 	}
3749 	if (ffe_ctl->cached) {
3750 		struct btrfs_free_space_ctl *free_space_ctl;
3751 
3752 		free_space_ctl = bg->free_space_ctl;
3753 		spin_lock(&free_space_ctl->tree_lock);
3754 		if (free_space_ctl->free_space <
3755 		    ffe_ctl->num_bytes + ffe_ctl->empty_cluster +
3756 		    ffe_ctl->empty_size) {
3757 			ffe_ctl->total_free_space = max_t(u64,
3758 					ffe_ctl->total_free_space,
3759 					free_space_ctl->free_space);
3760 			spin_unlock(&free_space_ctl->tree_lock);
3761 			return 1;
3762 		}
3763 		spin_unlock(&free_space_ctl->tree_lock);
3764 	}
3765 
3766 	offset = btrfs_find_space_for_alloc(bg, ffe_ctl->search_start,
3767 			ffe_ctl->num_bytes, ffe_ctl->empty_size,
3768 			&ffe_ctl->max_extent_size);
3769 	if (!offset)
3770 		return 1;
3771 	ffe_ctl->found_offset = offset;
3772 	return 0;
3773 }
3774 
3775 static int do_allocation_clustered(struct btrfs_block_group *block_group,
3776 				   struct find_free_extent_ctl *ffe_ctl,
3777 				   struct btrfs_block_group **bg_ret)
3778 {
3779 	int ret;
3780 
3781 	/* We want to try and use the cluster allocator, so let's look there */
3782 	if (ffe_ctl->last_ptr && ffe_ctl->use_cluster) {
3783 		ret = find_free_extent_clustered(block_group, ffe_ctl, bg_ret);
3784 		if (ret >= 0)
3785 			return ret;
3786 		/* ret == -ENOENT case falls through */
3787 	}
3788 
3789 	return find_free_extent_unclustered(block_group, ffe_ctl);
3790 }
3791 
3792 /*
3793  * Tree-log block group locking
3794  * ============================
3795  *
3796  * fs_info::treelog_bg_lock protects the fs_info::treelog_bg which
3797  * indicates the starting address of a block group, which is reserved only
3798  * for tree-log metadata.
3799  *
3800  * Lock nesting
3801  * ============
3802  *
3803  * space_info::lock
3804  *   block_group::lock
3805  *     fs_info::treelog_bg_lock
3806  */
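
/*
 * A minimal sketch of the nesting above as practiced by
 * do_allocation_zoned(), which additionally nests
 * fs_info::relocation_bg_lock innermost:
 *
 *	spin_lock(&space_info->lock);
 *	spin_lock(&block_group->lock);
 *	spin_lock(&fs_info->treelog_bg_lock);
 *	spin_lock(&fs_info->relocation_bg_lock);
 *	...
 *	spin_unlock(&fs_info->relocation_bg_lock);
 *	spin_unlock(&fs_info->treelog_bg_lock);
 *	spin_unlock(&block_group->lock);
 *	spin_unlock(&space_info->lock);
 */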
3807 
3808 /*
3809  * Simple allocator for a sequential-only block group. It allows only
3810  * sequential allocation, so there is no need to play with trees. This
3811  * function also reserves the bytes as in btrfs_add_reserved_bytes.
3812  */
3813 static int do_allocation_zoned(struct btrfs_block_group *block_group,
3814 			       struct find_free_extent_ctl *ffe_ctl,
3815 			       struct btrfs_block_group **bg_ret)
3816 {
3817 	struct btrfs_fs_info *fs_info = block_group->fs_info;
3818 	struct btrfs_space_info *space_info = block_group->space_info;
3819 	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
3820 	u64 start = block_group->start;
3821 	u64 num_bytes = ffe_ctl->num_bytes;
3822 	u64 avail;
3823 	u64 bytenr = block_group->start;
3824 	u64 log_bytenr;
3825 	u64 data_reloc_bytenr;
3826 	int ret = 0;
3827 	bool skip = false;
3828 
3829 	ASSERT(btrfs_is_zoned(block_group->fs_info));
3830 
3831 	/*
3832 	 * Do not allow non-tree-log blocks in the dedicated tree-log block
3833 	 * group, and vice versa.
3834 	 */
3835 	spin_lock(&fs_info->treelog_bg_lock);
3836 	log_bytenr = fs_info->treelog_bg;
3837 	if (log_bytenr && ((ffe_ctl->for_treelog && bytenr != log_bytenr) ||
3838 			   (!ffe_ctl->for_treelog && bytenr == log_bytenr)))
3839 		skip = true;
3840 	spin_unlock(&fs_info->treelog_bg_lock);
3841 	if (skip)
3842 		return 1;
3843 
3844 	/*
3845 	 * Do not allow non-relocation blocks in the dedicated relocation block
3846 	 * group, and vice versa.
3847 	 */
3848 	spin_lock(&fs_info->relocation_bg_lock);
3849 	data_reloc_bytenr = fs_info->data_reloc_bg;
3850 	if (data_reloc_bytenr &&
3851 	    ((ffe_ctl->for_data_reloc && bytenr != data_reloc_bytenr) ||
3852 	     (!ffe_ctl->for_data_reloc && bytenr == data_reloc_bytenr)))
3853 		skip = true;
3854 	spin_unlock(&fs_info->relocation_bg_lock);
3855 	if (skip)
3856 		return 1;
3857 
3858 	/* Check RO and no space case before trying to activate it */
3859 	spin_lock(&block_group->lock);
3860 	if (block_group->ro || btrfs_zoned_bg_is_full(block_group)) {
3861 		ret = 1;
3862 		/*
3863 		 * May need to clear fs_info->{treelog,data_reloc}_bg.
3864 		 * Return the error after taking the locks.
3865 		 */
3866 	}
3867 	spin_unlock(&block_group->lock);
3868 
3869 	/* Metadata block group is activated at write time. */
3870 	if (!ret && (block_group->flags & BTRFS_BLOCK_GROUP_DATA) &&
3871 	    !btrfs_zone_activate(block_group)) {
3872 		ret = 1;
3873 		/*
3874 		 * May need to clear fs_info->{treelog,data_reloc}_bg.
3875 		 * Return the error after taking the locks.
3876 		 */
3877 	}
3878 
3879 	spin_lock(&space_info->lock);
3880 	spin_lock(&block_group->lock);
3881 	spin_lock(&fs_info->treelog_bg_lock);
3882 	spin_lock(&fs_info->relocation_bg_lock);
3883 
3884 	if (ret)
3885 		goto out;
3886 
3887 	ASSERT(!ffe_ctl->for_treelog ||
3888 	       block_group->start == fs_info->treelog_bg ||
3889 	       fs_info->treelog_bg == 0);
3890 	ASSERT(!ffe_ctl->for_data_reloc ||
3891 	       block_group->start == fs_info->data_reloc_bg ||
3892 	       fs_info->data_reloc_bg == 0);
3893 
3894 	if (block_group->ro ||
3895 	    (!ffe_ctl->for_data_reloc &&
3896 	     test_bit(BLOCK_GROUP_FLAG_ZONED_DATA_RELOC, &block_group->runtime_flags))) {
3897 		ret = 1;
3898 		goto out;
3899 	}
3900 
3901 	/*
3902 	 * Do not allow a block group that is currently in use to become the
3903 	 * dedicated tree-log block group.
3904 	 */
3905 	if (ffe_ctl->for_treelog && !fs_info->treelog_bg &&
3906 	    (block_group->used || block_group->reserved)) {
3907 		ret = 1;
3908 		goto out;
3909 	}
3910 
3911 	/*
3912 	 * Do not allow a block group that is currently in use to become the
3913 	 * dedicated data relocation block group.
3914 	 */
3915 	if (ffe_ctl->for_data_reloc && !fs_info->data_reloc_bg &&
3916 	    (block_group->used || block_group->reserved)) {
3917 		ret = 1;
3918 		goto out;
3919 	}
3920 
3921 	WARN_ON_ONCE(block_group->alloc_offset > block_group->zone_capacity);
3922 	avail = block_group->zone_capacity - block_group->alloc_offset;
3923 	if (avail < num_bytes) {
3924 		if (ffe_ctl->max_extent_size < avail) {
3925 			/*
3926 			 * With the sequential allocator, free space is
3927 			 * always contiguous.
3928 			 */
3929 			ffe_ctl->max_extent_size = avail;
3930 			ffe_ctl->total_free_space = avail;
3931 		}
3932 		ret = 1;
3933 		goto out;
3934 	}
3935 
3936 	if (ffe_ctl->for_treelog && !fs_info->treelog_bg)
3937 		fs_info->treelog_bg = block_group->start;
3938 
3939 	if (ffe_ctl->for_data_reloc) {
3940 		if (!fs_info->data_reloc_bg)
3941 			fs_info->data_reloc_bg = block_group->start;
3942 		/*
3943 		 * Do not allow allocations from this block group, unless they
3944 		 * are for data relocation. Compared to increasing ->ro, setting
3945 		 * the ->zoned_data_reloc_ongoing flag still allows nocow
3946 		 * writers to come in. See btrfs_inc_nocow_writers().
3947 		 *
3948 		 * We need to disable allocations here to avoid allocating a
3949 		 * regular (non-relocation data) extent. With a mix of
3950 		 * relocation extents and regular extents, we could dispatch
3951 		 * WRITE commands (for relocation extents) and ZONE APPEND
3952 		 * commands (for regular extents) to the same zone at the same
3953 		 * time, which easily breaks the write pointer.
3954 		 *
3955 		 * Also, this flag prevents this block group from being zone finished.
3956 		 */
3957 		set_bit(BLOCK_GROUP_FLAG_ZONED_DATA_RELOC, &block_group->runtime_flags);
3958 	}
3959 
3960 	ffe_ctl->found_offset = start + block_group->alloc_offset;
3961 	block_group->alloc_offset += num_bytes;
3962 	spin_lock(&ctl->tree_lock);
3963 	ctl->free_space -= num_bytes;
3964 	spin_unlock(&ctl->tree_lock);
3965 
3966 	/*
3967 	 * We do not check if found_offset is aligned to stripesize. The
3968 	 * address is rewritten anyway when using zone append writing.
3969 	 */
3970 
3971 	ffe_ctl->search_start = ffe_ctl->found_offset;
3972 
3973 out:
3974 	if (ret && ffe_ctl->for_treelog)
3975 		fs_info->treelog_bg = 0;
3976 	if (ret && ffe_ctl->for_data_reloc)
3977 		fs_info->data_reloc_bg = 0;
3978 	spin_unlock(&fs_info->relocation_bg_lock);
3979 	spin_unlock(&fs_info->treelog_bg_lock);
3980 	spin_unlock(&block_group->lock);
3981 	spin_unlock(&space_info->lock);
3982 	return ret;
3983 }
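
/*
 * Condensed sketch of the bump allocation done above (all values in bytes,
 * manipulated under block_group->lock):
 *
 *	avail = zone_capacity - alloc_offset;
 *	if (avail < num_bytes)
 *		fail, hinting max_extent_size = avail;
 *	found_offset  = block_group->start + alloc_offset;
 *	alloc_offset += num_bytes;
 *	free_space   -= num_bytes;
 */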
3984 
3985 static int do_allocation(struct btrfs_block_group *block_group,
3986 			 struct find_free_extent_ctl *ffe_ctl,
3987 			 struct btrfs_block_group **bg_ret)
3988 {
3989 	switch (ffe_ctl->policy) {
3990 	case BTRFS_EXTENT_ALLOC_CLUSTERED:
3991 		return do_allocation_clustered(block_group, ffe_ctl, bg_ret);
3992 	case BTRFS_EXTENT_ALLOC_ZONED:
3993 		return do_allocation_zoned(block_group, ffe_ctl, bg_ret);
3994 	default:
3995 		BUG();
3996 	}
3997 }
3998 
3999 static void release_block_group(struct btrfs_block_group *block_group,
4000 				struct find_free_extent_ctl *ffe_ctl,
4001 				int delalloc)
4002 {
4003 	switch (ffe_ctl->policy) {
4004 	case BTRFS_EXTENT_ALLOC_CLUSTERED:
4005 		ffe_ctl->retry_uncached = false;
4006 		break;
4007 	case BTRFS_EXTENT_ALLOC_ZONED:
4008 		/* Nothing to do */
4009 		break;
4010 	default:
4011 		BUG();
4012 	}
4013 
4014 	BUG_ON(btrfs_bg_flags_to_raid_index(block_group->flags) !=
4015 	       ffe_ctl->index);
4016 	btrfs_release_block_group(block_group, delalloc);
4017 }
4018 
4019 static void found_extent_clustered(struct find_free_extent_ctl *ffe_ctl,
4020 				   struct btrfs_key *ins)
4021 {
4022 	struct btrfs_free_cluster *last_ptr = ffe_ctl->last_ptr;
4023 
4024 	if (!ffe_ctl->use_cluster && last_ptr) {
4025 		spin_lock(&last_ptr->lock);
4026 		last_ptr->window_start = ins->objectid;
4027 		spin_unlock(&last_ptr->lock);
4028 	}
4029 }
4030 
4031 static void found_extent(struct find_free_extent_ctl *ffe_ctl,
4032 			 struct btrfs_key *ins)
4033 {
4034 	switch (ffe_ctl->policy) {
4035 	case BTRFS_EXTENT_ALLOC_CLUSTERED:
4036 		found_extent_clustered(ffe_ctl, ins);
4037 		break;
4038 	case BTRFS_EXTENT_ALLOC_ZONED:
4039 		/* Nothing to do */
4040 		break;
4041 	default:
4042 		BUG();
4043 	}
4044 }
4045 
4046 static int can_allocate_chunk_zoned(struct btrfs_fs_info *fs_info,
4047 				    struct find_free_extent_ctl *ffe_ctl)
4048 {
4049 	/* Being active is not a requirement for METADATA block groups. */
4050 	if (!(ffe_ctl->flags & BTRFS_BLOCK_GROUP_DATA))
4051 		return 0;
4052 
4053 	/* If we can activate a new zone, just allocate a chunk and use it */
4054 	if (btrfs_can_activate_zone(fs_info->fs_devices, ffe_ctl->flags))
4055 		return 0;
4056 
4057 	/*
4058 	 * We already reached the max active zones. Try to finish one block
4059 	 * group to make room for a new block group. This is only possible
4060 	 * for a data block group because btrfs_zone_finish() may need to wait
4061 	 * for a running transaction which can cause a deadlock for metadata
4062 	 * allocation.
4063 	 */
4064 	if (ffe_ctl->flags & BTRFS_BLOCK_GROUP_DATA) {
4065 		int ret = btrfs_zone_finish_one_bg(fs_info);
4066 
4067 		if (ret == 1)
4068 			return 0;
4069 		else if (ret < 0)
4070 			return ret;
4071 	}
4072 
4073 	/*
4074 	 * If we have enough free space left in an already active block group
4075 	 * and we can't activate any other zone now, do not allow allocating a
4076 	 * new chunk and let find_free_extent() retry with a smaller size.
4077 	 */
4078 	if (ffe_ctl->max_extent_size >= ffe_ctl->min_alloc_size)
4079 		return -ENOSPC;
4080 
4081 	/*
4082 	 * Not even min_alloc_size is left in any block group. Since we cannot
4083 	 * activate a new block group, allocating a new one may not help. Tell
4084 	 * the caller to try again and hope it makes progress by writing out
4085 	 * some parts of the region. That is only possible for data block groups,
4086 	 * where a part of the region can be written.
4087 	 */
4088 	if (ffe_ctl->flags & BTRFS_BLOCK_GROUP_DATA)
4089 		return -EAGAIN;
4090 
4091 	/*
4092 	 * We cannot activate a new block group and there is not enough space
4093 	 * left in any block group. So allocating a new block group may not
4094 	 * help. But there is nothing else to do anyway, so let's go with it.
4095 	 */
4096 	return 0;
4097 }
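
/*
 * Summary of the decisions above, derived from the code for quick reference:
 *
 *	metadata allocation                -> 0       (always allowed)
 *	a new zone can be activated        -> 0       (allow a new chunk)
 *	finished one data block group      -> 0       (allow a new chunk)
 *	max_extent_size >= min_alloc_size  -> -ENOSPC (retry smaller instead)
 *	data and nothing else helped       -> -EAGAIN (caller should retry)
 *	otherwise                          -> 0       (allow and hope)
 */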
4098 
4099 static int can_allocate_chunk(struct btrfs_fs_info *fs_info,
4100 			      struct find_free_extent_ctl *ffe_ctl)
4101 {
4102 	switch (ffe_ctl->policy) {
4103 	case BTRFS_EXTENT_ALLOC_CLUSTERED:
4104 		return 0;
4105 	case BTRFS_EXTENT_ALLOC_ZONED:
4106 		return can_allocate_chunk_zoned(fs_info, ffe_ctl);
4107 	default:
4108 		BUG();
4109 	}
4110 }
4111 
4112 /*
4113  * Return >0 means the caller needs to re-search for a free extent.
4114  * Return 0 means we have found the needed free extent.
4115  * Return <0 means we failed to locate any free extent.
4116  */
4117 static int find_free_extent_update_loop(struct btrfs_fs_info *fs_info,
4118 					struct btrfs_key *ins,
4119 					struct find_free_extent_ctl *ffe_ctl,
4120 					bool full_search)
4121 {
4122 	struct btrfs_root *root = fs_info->chunk_root;
4123 	int ret;
4124 
4125 	if ((ffe_ctl->loop == LOOP_CACHING_NOWAIT) &&
4126 	    ffe_ctl->have_caching_bg && !ffe_ctl->orig_have_caching_bg)
4127 		ffe_ctl->orig_have_caching_bg = true;
4128 
4129 	if (ins->objectid) {
4130 		found_extent(ffe_ctl, ins);
4131 		return 0;
4132 	}
4133 
4134 	if (ffe_ctl->loop >= LOOP_CACHING_WAIT && ffe_ctl->have_caching_bg)
4135 		return 1;
4136 
4137 	ffe_ctl->index++;
4138 	if (ffe_ctl->index < BTRFS_NR_RAID_TYPES)
4139 		return 1;
4140 
4141 	/* See the comments for btrfs_loop_type for an explanation of the phases. */
4142 	if (ffe_ctl->loop < LOOP_NO_EMPTY_SIZE) {
4143 		ffe_ctl->index = 0;
4144 		/*
4145 		 * We want to skip the LOOP_CACHING_WAIT step if we don't have
4146 		 * any uncached bgs and we've already done a full search
4147 		 * through.
4148 		 */
4149 		if (ffe_ctl->loop == LOOP_CACHING_NOWAIT &&
4150 		    (!ffe_ctl->orig_have_caching_bg && full_search))
4151 			ffe_ctl->loop++;
4152 		ffe_ctl->loop++;
4153 
4154 		if (ffe_ctl->loop == LOOP_ALLOC_CHUNK) {
4155 			struct btrfs_trans_handle *trans;
4156 			int exist = 0;
4157 
4158 			/* Check if allocation policy allows to create a new chunk */
4159 			ret = can_allocate_chunk(fs_info, ffe_ctl);
4160 			if (ret)
4161 				return ret;
4162 
4163 			trans = current->journal_info;
4164 			if (trans)
4165 				exist = 1;
4166 			else
4167 				trans = btrfs_join_transaction(root);
4168 
4169 			if (IS_ERR(trans)) {
4170 				ret = PTR_ERR(trans);
4171 				return ret;
4172 			}
4173 
4174 			ret = btrfs_chunk_alloc(trans, ffe_ctl->flags,
4175 						CHUNK_ALLOC_FORCE_FOR_EXTENT);
4176 
4177 			/* Do not bail out on ENOSPC since we can do more. */
4178 			if (ret == -ENOSPC) {
4179 				ret = 0;
4180 				ffe_ctl->loop++;
4181 			} else if (ret < 0)
4183 				btrfs_abort_transaction(trans, ret);
4184 			else
4185 				ret = 0;
4186 			if (!exist)
4187 				btrfs_end_transaction(trans);
4188 			if (ret)
4189 				return ret;
4190 		}
4191 
4192 		if (ffe_ctl->loop == LOOP_NO_EMPTY_SIZE) {
4193 			if (ffe_ctl->policy != BTRFS_EXTENT_ALLOC_CLUSTERED)
4194 				return -ENOSPC;
4195 
4196 			/*
4197 			 * Don't loop again if we already have no empty_size and
4198 			 * no empty_cluster.
4199 			 */
4200 			if (ffe_ctl->empty_size == 0 &&
4201 			    ffe_ctl->empty_cluster == 0)
4202 				return -ENOSPC;
4203 			ffe_ctl->empty_size = 0;
4204 			ffe_ctl->empty_cluster = 0;
4205 		}
4206 		return 1;
4207 	}
4208 	return -ENOSPC;
4209 }
4210 
4211 static bool find_free_extent_check_size_class(struct find_free_extent_ctl *ffe_ctl,
4212 					      struct btrfs_block_group *bg)
4213 {
4214 	if (ffe_ctl->policy == BTRFS_EXTENT_ALLOC_ZONED)
4215 		return true;
4216 	if (!btrfs_block_group_should_use_size_class(bg))
4217 		return true;
4218 	if (ffe_ctl->loop >= LOOP_WRONG_SIZE_CLASS)
4219 		return true;
4220 	if (ffe_ctl->loop >= LOOP_UNSET_SIZE_CLASS &&
4221 	    bg->size_class == BTRFS_BG_SZ_NONE)
4222 		return true;
4223 	return ffe_ctl->size_class == bg->size_class;
4224 }
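
/*
 * Example of the check above with hypothetical values: for an allocation in
 * size class BTRFS_BG_SZ_SMALL, a block group whose size_class is still
 * BTRFS_BG_SZ_NONE is rejected in the early loop stages, accepted once
 * ffe_ctl->loop reaches LOOP_UNSET_SIZE_CLASS, and any remaining mismatch is
 * ignored from LOOP_WRONG_SIZE_CLASS onward.
 */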
4225 
4226 static int prepare_allocation_clustered(struct btrfs_fs_info *fs_info,
4227 					struct find_free_extent_ctl *ffe_ctl,
4228 					struct btrfs_space_info *space_info,
4229 					struct btrfs_key *ins)
4230 {
4231 	/*
4232 	 * If our free space is heavily fragmented we may not be able to make
4233 	 * big contiguous allocations, so instead of doing the expensive search
4234 	 * for free space, simply return ENOSPC with our max_extent_size so we
4235 	 * can go ahead and search for a more manageable chunk.
4236 	 *
4237 	 * If our max_extent_size is large enough for our allocation simply
4238 	 * disable clustering since we will likely not be able to find enough
4239 	 * space to create a cluster and induce latency trying.
4240 	 */
4241 	if (space_info->max_extent_size) {
4242 		spin_lock(&space_info->lock);
4243 		if (space_info->max_extent_size &&
4244 		    ffe_ctl->num_bytes > space_info->max_extent_size) {
4245 			ins->offset = space_info->max_extent_size;
4246 			spin_unlock(&space_info->lock);
4247 			return -ENOSPC;
4248 		} else if (space_info->max_extent_size) {
4249 			ffe_ctl->use_cluster = false;
4250 		}
4251 		spin_unlock(&space_info->lock);
4252 	}
4253 
4254 	ffe_ctl->last_ptr = fetch_cluster_info(fs_info, space_info,
4255 					       &ffe_ctl->empty_cluster);
4256 	if (ffe_ctl->last_ptr) {
4257 		struct btrfs_free_cluster *last_ptr = ffe_ctl->last_ptr;
4258 
4259 		spin_lock(&last_ptr->lock);
4260 		if (last_ptr->block_group)
4261 			ffe_ctl->hint_byte = last_ptr->window_start;
4262 		if (last_ptr->fragmented) {
4263 			/*
4264 			 * We still set window_start so we can keep track of the
4265 			 * last place we found an allocation to try and save
4266 			 * some time.
4267 			 */
4268 			ffe_ctl->hint_byte = last_ptr->window_start;
4269 			ffe_ctl->use_cluster = false;
4270 		}
4271 		spin_unlock(&last_ptr->lock);
4272 	}
4273 
4274 	return 0;
4275 }
4276 
4277 static int prepare_allocation_zoned(struct btrfs_fs_info *fs_info,
4278 				    struct find_free_extent_ctl *ffe_ctl)
4279 {
4280 	if (ffe_ctl->for_treelog) {
4281 		spin_lock(&fs_info->treelog_bg_lock);
4282 		if (fs_info->treelog_bg)
4283 			ffe_ctl->hint_byte = fs_info->treelog_bg;
4284 		spin_unlock(&fs_info->treelog_bg_lock);
4285 	} else if (ffe_ctl->for_data_reloc) {
4286 		spin_lock(&fs_info->relocation_bg_lock);
4287 		if (fs_info->data_reloc_bg)
4288 			ffe_ctl->hint_byte = fs_info->data_reloc_bg;
4289 		spin_unlock(&fs_info->relocation_bg_lock);
4290 	} else if (ffe_ctl->flags & BTRFS_BLOCK_GROUP_DATA) {
4291 		struct btrfs_block_group *block_group;
4292 
4293 		spin_lock(&fs_info->zone_active_bgs_lock);
4294 		list_for_each_entry(block_group, &fs_info->zone_active_bgs, active_bg_list) {
4295 			/*
4296 			 * Not taking the lock is OK here because avail is
4297 			 * monotonically decreasing, and this is just a hint.
4298 			 */
4299 			u64 avail = block_group->zone_capacity - block_group->alloc_offset;
4300 
4301 			if (block_group_bits(block_group, ffe_ctl->flags) &&
4302 			    avail >= ffe_ctl->num_bytes) {
4303 				ffe_ctl->hint_byte = block_group->start;
4304 				break;
4305 			}
4306 		}
4307 		spin_unlock(&fs_info->zone_active_bgs_lock);
4308 	}
4309 
4310 	return 0;
4311 }
4312 
4313 static int prepare_allocation(struct btrfs_fs_info *fs_info,
4314 			      struct find_free_extent_ctl *ffe_ctl,
4315 			      struct btrfs_space_info *space_info,
4316 			      struct btrfs_key *ins)
4317 {
4318 	switch (ffe_ctl->policy) {
4319 	case BTRFS_EXTENT_ALLOC_CLUSTERED:
4320 		return prepare_allocation_clustered(fs_info, ffe_ctl,
4321 						    space_info, ins);
4322 	case BTRFS_EXTENT_ALLOC_ZONED:
4323 		return prepare_allocation_zoned(fs_info, ffe_ctl);
4324 	default:
4325 		BUG();
4326 	}
4327 }
4328 
4329 /*
4330  * walks the btree of allocated extents and finds a hole of a given size.
4331  * The key ins is changed to record the hole:
4332  * ins->objectid == start position
4333  * ins->flags = BTRFS_EXTENT_ITEM_KEY
4334  * ins->offset == the size of the hole.
4335  * Any available blocks before search_start are skipped.
4336  *
4337  * If there is no suitable free space, we will record the max size of
4338  * the free space extent currently.
4339  *
4340  * The overall logic and call chain:
4341  *
4342  * find_free_extent()
4343  * |- Iterate through all block groups
4344  * |  |- Get a valid block group
4345  * |  |- Try to do clustered allocation in that block group
4346  * |  |- Try to do unclustered allocation in that block group
4347  * |  |- Check if the result is valid
4348  * |  |  |- If valid, then exit
4349  * |  |- Jump to next block group
4350  * |
4351  * |- Push harder to find free extents
4352  *    |- If not found, re-iterate all block groups
4353  */
4354 static noinline int find_free_extent(struct btrfs_root *root,
4355 				     struct btrfs_key *ins,
4356 				     struct find_free_extent_ctl *ffe_ctl)
4357 {
4358 	struct btrfs_fs_info *fs_info = root->fs_info;
4359 	int ret = 0;
4360 	int cache_block_group_error = 0;
4361 	struct btrfs_block_group *block_group = NULL;
4362 	struct btrfs_space_info *space_info;
4363 	bool full_search = false;
4364 
4365 	WARN_ON(ffe_ctl->num_bytes < fs_info->sectorsize);
4366 
4367 	ffe_ctl->search_start = 0;
4368 	/* For clustered allocation */
4369 	ffe_ctl->empty_cluster = 0;
4370 	ffe_ctl->last_ptr = NULL;
4371 	ffe_ctl->use_cluster = true;
4372 	ffe_ctl->have_caching_bg = false;
4373 	ffe_ctl->orig_have_caching_bg = false;
4374 	ffe_ctl->index = btrfs_bg_flags_to_raid_index(ffe_ctl->flags);
4375 	ffe_ctl->loop = 0;
4376 	ffe_ctl->retry_uncached = false;
4377 	ffe_ctl->cached = 0;
4378 	ffe_ctl->max_extent_size = 0;
4379 	ffe_ctl->total_free_space = 0;
4380 	ffe_ctl->found_offset = 0;
4381 	ffe_ctl->policy = BTRFS_EXTENT_ALLOC_CLUSTERED;
4382 	ffe_ctl->size_class = btrfs_calc_block_group_size_class(ffe_ctl->num_bytes);
4383 
4384 	if (btrfs_is_zoned(fs_info))
4385 		ffe_ctl->policy = BTRFS_EXTENT_ALLOC_ZONED;
4386 
4387 	ins->type = BTRFS_EXTENT_ITEM_KEY;
4388 	ins->objectid = 0;
4389 	ins->offset = 0;
4390 
4391 	trace_find_free_extent(root, ffe_ctl);
4392 
4393 	space_info = btrfs_find_space_info(fs_info, ffe_ctl->flags);
4394 	if (!space_info) {
4395 		btrfs_err(fs_info, "No space info for %llu", ffe_ctl->flags);
4396 		return -ENOSPC;
4397 	}
4398 
4399 	ret = prepare_allocation(fs_info, ffe_ctl, space_info, ins);
4400 	if (ret < 0)
4401 		return ret;
4402 
4403 	ffe_ctl->search_start = max(ffe_ctl->search_start,
4404 				    first_logical_byte(fs_info));
4405 	ffe_ctl->search_start = max(ffe_ctl->search_start, ffe_ctl->hint_byte);
4406 	if (ffe_ctl->search_start == ffe_ctl->hint_byte) {
4407 		block_group = btrfs_lookup_block_group(fs_info,
4408 						       ffe_ctl->search_start);
4409 		/*
4410 		 * We don't want to use the block group if it doesn't match our
4411 		 * allocation bits, or if it's not cached.
4412 		 *
4413 		 * However if we are re-searching with an ideal block group
4414 		 * picked out then we don't care that the block group is cached.
4415 		 */
4416 		if (block_group && block_group_bits(block_group, ffe_ctl->flags) &&
4417 		    block_group->cached != BTRFS_CACHE_NO) {
4418 			down_read(&space_info->groups_sem);
4419 			if (list_empty(&block_group->list) ||
4420 			    block_group->ro) {
4421 				/*
4422 				 * someone is removing this block group,
4423 				 * we can't jump into the have_block_group
4424 				 * target because our list pointers are not
4425 				 * valid
4426 				 */
4427 				btrfs_put_block_group(block_group);
4428 				up_read(&space_info->groups_sem);
4429 			} else {
4430 				ffe_ctl->index = btrfs_bg_flags_to_raid_index(
4431 							block_group->flags);
4432 				btrfs_lock_block_group(block_group,
4433 						       ffe_ctl->delalloc);
4434 				ffe_ctl->hinted = true;
4435 				goto have_block_group;
4436 			}
4437 		} else if (block_group) {
4438 			btrfs_put_block_group(block_group);
4439 		}
4440 	}
4441 search:
4442 	trace_find_free_extent_search_loop(root, ffe_ctl);
4443 	ffe_ctl->have_caching_bg = false;
4444 	if (ffe_ctl->index == btrfs_bg_flags_to_raid_index(ffe_ctl->flags) ||
4445 	    ffe_ctl->index == 0)
4446 		full_search = true;
4447 	down_read(&space_info->groups_sem);
4448 	list_for_each_entry(block_group,
4449 			    &space_info->block_groups[ffe_ctl->index], list) {
4450 		struct btrfs_block_group *bg_ret;
4451 
4452 		ffe_ctl->hinted = false;
4453 		/* If the block group is read-only, we can skip it entirely. */
4454 		if (unlikely(block_group->ro)) {
4455 			if (ffe_ctl->for_treelog)
4456 				btrfs_clear_treelog_bg(block_group);
4457 			if (ffe_ctl->for_data_reloc)
4458 				btrfs_clear_data_reloc_bg(block_group);
4459 			continue;
4460 		}
4461 
4462 		btrfs_grab_block_group(block_group, ffe_ctl->delalloc);
4463 		ffe_ctl->search_start = block_group->start;
4464 
4465 		/*
4466 		 * this can happen if we end up cycling through all the
4467 		 * raid types, but we want to make sure we only allocate
4468 		 * for the proper type.
4469 		 */
4470 		if (!block_group_bits(block_group, ffe_ctl->flags)) {
4471 			u64 extra = BTRFS_BLOCK_GROUP_DUP |
4472 				BTRFS_BLOCK_GROUP_RAID1_MASK |
4473 				BTRFS_BLOCK_GROUP_RAID56_MASK |
4474 				BTRFS_BLOCK_GROUP_RAID10;
4475 
4476 			/*
4477 			 * if they asked for extra copies and this block group
4478 			 * doesn't provide them, bail.  This does allow us to
4479 			 * fill raid0 from raid1.
4480 			 */
4481 			if ((ffe_ctl->flags & extra) && !(block_group->flags & extra))
4482 				goto loop;
4483 
4484 			/*
4485 			 * This block group has different flags than we want.
4486 			 * It's possible that we have MIXED_GROUP flag but no
4487 			 * block group is mixed.  Just skip such block group.
4488 			 * block group is mixed.  Just skip such a block group.
4489 			btrfs_release_block_group(block_group, ffe_ctl->delalloc);
4490 			continue;
4491 		}
4492 
4493 have_block_group:
4494 		trace_find_free_extent_have_block_group(root, ffe_ctl, block_group);
4495 		ffe_ctl->cached = btrfs_block_group_done(block_group);
4496 		if (unlikely(!ffe_ctl->cached)) {
4497 			ffe_ctl->have_caching_bg = true;
4498 			ret = btrfs_cache_block_group(block_group, false);
4499 
4500 			/*
4501 			 * If we get ENOMEM here or something else we want to
4502 			 * try other block groups, because it may not be fatal.
4503 			 * However if we can't find anything else we need to
4504 			 * save our return here so that we return the actual
4505 			 * error that caused problems, not ENOSPC.
4506 			 */
4507 			if (ret < 0) {
4508 				if (!cache_block_group_error)
4509 					cache_block_group_error = ret;
4510 				ret = 0;
4511 				goto loop;
4512 			}
4513 			ret = 0;
4514 		}
4515 
4516 		if (unlikely(block_group->cached == BTRFS_CACHE_ERROR)) {
4517 			if (!cache_block_group_error)
4518 				cache_block_group_error = -EIO;
4519 			goto loop;
4520 		}
4521 
4522 		if (!find_free_extent_check_size_class(ffe_ctl, block_group))
4523 			goto loop;
4524 
4525 		bg_ret = NULL;
4526 		ret = do_allocation(block_group, ffe_ctl, &bg_ret);
4527 		if (ret > 0)
4528 			goto loop;
4529 
4530 		if (bg_ret && bg_ret != block_group) {
4531 			btrfs_release_block_group(block_group, ffe_ctl->delalloc);
4532 			block_group = bg_ret;
4533 		}
4534 
4535 		/* Checks */
4536 		ffe_ctl->search_start = round_up(ffe_ctl->found_offset,
4537 						 fs_info->stripesize);
4538 
4539 		/* move on to the next group */
4540 		if (ffe_ctl->search_start + ffe_ctl->num_bytes >
4541 		    block_group->start + block_group->length) {
4542 			btrfs_add_free_space_unused(block_group,
4543 					    ffe_ctl->found_offset,
4544 					    ffe_ctl->num_bytes);
4545 			goto loop;
4546 		}
4547 
4548 		if (ffe_ctl->found_offset < ffe_ctl->search_start)
4549 			btrfs_add_free_space_unused(block_group,
4550 					ffe_ctl->found_offset,
4551 					ffe_ctl->search_start - ffe_ctl->found_offset);
4552 
4553 		ret = btrfs_add_reserved_bytes(block_group, ffe_ctl->ram_bytes,
4554 					       ffe_ctl->num_bytes,
4555 					       ffe_ctl->delalloc,
4556 					       ffe_ctl->loop >= LOOP_WRONG_SIZE_CLASS);
4557 		if (ret == -EAGAIN) {
4558 			btrfs_add_free_space_unused(block_group,
4559 					ffe_ctl->found_offset,
4560 					ffe_ctl->num_bytes);
4561 			goto loop;
4562 		}
4563 		btrfs_inc_block_group_reservations(block_group);
4564 
4565 		/* We are all good, let's return */
4566 		ins->objectid = ffe_ctl->search_start;
4567 		ins->offset = ffe_ctl->num_bytes;
4568 
4569 		trace_btrfs_reserve_extent(block_group, ffe_ctl);
4570 		btrfs_release_block_group(block_group, ffe_ctl->delalloc);
4571 		break;
4572 loop:
4573 		if (!ffe_ctl->cached && ffe_ctl->loop > LOOP_CACHING_NOWAIT &&
4574 		    !ffe_ctl->retry_uncached) {
4575 			ffe_ctl->retry_uncached = true;
4576 			btrfs_wait_block_group_cache_progress(block_group,
4577 						ffe_ctl->num_bytes +
4578 						ffe_ctl->empty_cluster +
4579 						ffe_ctl->empty_size);
4580 			goto have_block_group;
4581 		}
4582 		release_block_group(block_group, ffe_ctl, ffe_ctl->delalloc);
4583 		cond_resched();
4584 	}
4585 	up_read(&space_info->groups_sem);
4586 
4587 	ret = find_free_extent_update_loop(fs_info, ins, ffe_ctl, full_search);
4588 	if (ret > 0)
4589 		goto search;
4590 
4591 	if (ret == -ENOSPC && !cache_block_group_error) {
4592 		/*
4593 		 * Use ffe_ctl->total_free_space as fallback if we can't find
4594 		 * any contiguous hole.
4595 		 */
4596 		if (!ffe_ctl->max_extent_size)
4597 			ffe_ctl->max_extent_size = ffe_ctl->total_free_space;
4598 		spin_lock(&space_info->lock);
4599 		space_info->max_extent_size = ffe_ctl->max_extent_size;
4600 		spin_unlock(&space_info->lock);
4601 		ins->offset = ffe_ctl->max_extent_size;
4602 	} else if (ret == -ENOSPC) {
4603 		ret = cache_block_group_error;
4604 	}
4605 	return ret;
4606 }
4607 
4608 /*
4609  * Entry point to the extent allocator. Tries to find a hole that is at least
4610  * as big as @num_bytes.
4611  *
4612  * @root           -	The root that will contain this extent
4613  *
4614  * @ram_bytes      -	The amount of space in ram that @num_bytes take. This
4615  *			is used for accounting purposes. This value differs
4616  *			from @num_bytes only in the case of compressed extents.
4617  *
4618  * @num_bytes      -	Number of bytes to allocate on-disk.
4619  *
4620  * @min_alloc_size -	Indicates the minimum amount of space that the
4621  *			allocator should try to satisfy. In some cases
4622  *			@num_bytes may be larger than what is required and if
4623  *			the filesystem is fragmented then allocation fails.
4624  *			However, the presence of @min_alloc_size gives a
4625  *			chance to try and satisfy the smaller allocation.
4626  *
4627  * @empty_size     -	A hint that you plan on doing more COW. This is the
4628  *			size in bytes the allocator should try to find free
4629  *			next to the block it returns.  This is just a hint and
4630  *			may be ignored by the allocator.
4631  *
4632  * @hint_byte      -	Hint to the allocator to start searching above the byte
4633  *			address passed. It might be ignored.
4634  *
4635  * @ins            -	This key is modified to record the found hole. It will
4636  *			have the following values:
4637  *			ins->objectid == start position
4638  *			ins->flags = BTRFS_EXTENT_ITEM_KEY
4639  *			ins->offset == the size of the hole.
4640  *
4641  * @is_data        -	Boolean flag indicating whether an extent is
4642  *			allocated for data (true) or metadata (false)
4643  *
4644  * @delalloc       -	Boolean flag indicating whether this allocation is for
4645  *			delalloc or not. If 'true' data_rwsem of block groups
4646  *			is going to be acquired.
4647  *
4648  *
4649  * Returns 0 when an allocation succeeded or < 0 when an error occurred. In
4650  * case -ENOSPC is returned then @ins->offset will contain the size of the
4651  * largest available hole the allocator managed to find.
4652  */
4653 int btrfs_reserve_extent(struct btrfs_root *root, u64 ram_bytes,
4654 			 u64 num_bytes, u64 min_alloc_size,
4655 			 u64 empty_size, u64 hint_byte,
4656 			 struct btrfs_key *ins, int is_data, int delalloc)
4657 {
4658 	struct btrfs_fs_info *fs_info = root->fs_info;
4659 	struct find_free_extent_ctl ffe_ctl = {};
4660 	bool final_tried = num_bytes == min_alloc_size;
4661 	u64 flags;
4662 	int ret;
4663 	bool for_treelog = (btrfs_root_id(root) == BTRFS_TREE_LOG_OBJECTID);
4664 	bool for_data_reloc = (btrfs_is_data_reloc_root(root) && is_data);
4665 
4666 	flags = get_alloc_profile_by_root(root, is_data);
4667 again:
4668 	WARN_ON(num_bytes < fs_info->sectorsize);
4669 
4670 	ffe_ctl.ram_bytes = ram_bytes;
4671 	ffe_ctl.num_bytes = num_bytes;
4672 	ffe_ctl.min_alloc_size = min_alloc_size;
4673 	ffe_ctl.empty_size = empty_size;
4674 	ffe_ctl.flags = flags;
4675 	ffe_ctl.delalloc = delalloc;
4676 	ffe_ctl.hint_byte = hint_byte;
4677 	ffe_ctl.for_treelog = for_treelog;
4678 	ffe_ctl.for_data_reloc = for_data_reloc;
4679 
4680 	ret = find_free_extent(root, ins, &ffe_ctl);
4681 	if (!ret && !is_data) {
4682 		btrfs_dec_block_group_reservations(fs_info, ins->objectid);
4683 	} else if (ret == -ENOSPC) {
4684 		if (!final_tried && ins->offset) {
4685 			num_bytes = min(num_bytes >> 1, ins->offset);
4686 			num_bytes = round_down(num_bytes,
4687 					       fs_info->sectorsize);
4688 			num_bytes = max(num_bytes, min_alloc_size);
4689 			ram_bytes = num_bytes;
4690 			if (num_bytes == min_alloc_size)
4691 				final_tried = true;
4692 			goto again;
4693 		} else if (btrfs_test_opt(fs_info, ENOSPC_DEBUG)) {
4694 			struct btrfs_space_info *sinfo;
4695 
4696 			sinfo = btrfs_find_space_info(fs_info, flags);
4697 			btrfs_err(fs_info,
4698 	"allocation failed flags %llu, wanted %llu tree-log %d, relocation: %d",
4699 				  flags, num_bytes, for_treelog, for_data_reloc);
4700 			if (sinfo)
4701 				btrfs_dump_space_info(fs_info, sinfo,
4702 						      num_bytes, 1);
4703 		}
4704 	}
4705 
4706 	return ret;
4707 }
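
/*
 * Usage sketch (hypothetical caller, error handling abbreviated): reserve
 * space for a data extent and return it on a later failure.  On success,
 * ins.objectid holds the start of the reservation and ins.offset its length.
 *
 *	struct btrfs_key ins = { 0 };
 *
 *	ret = btrfs_reserve_extent(root, len, len, fs_info->sectorsize,
 *				   0, 0, &ins, 1, 1);
 *	if (ret)
 *		return ret;
 *	...
 *	if (later_step_failed)
 *		btrfs_free_reserved_extent(fs_info, ins.objectid,
 *					   ins.offset, 1);
 */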
4708 
4709 int btrfs_free_reserved_extent(struct btrfs_fs_info *fs_info,
4710 			       u64 start, u64 len, int delalloc)
4711 {
4712 	struct btrfs_block_group *cache;
4713 
4714 	cache = btrfs_lookup_block_group(fs_info, start);
4715 	if (!cache) {
4716 		btrfs_err(fs_info, "Unable to find block group for %llu",
4717 			  start);
4718 		return -ENOSPC;
4719 	}
4720 
4721 	btrfs_add_free_space(cache, start, len);
4722 	btrfs_free_reserved_bytes(cache, len, delalloc);
4723 	trace_btrfs_reserved_extent_free(fs_info, start, len);
4724 
4725 	btrfs_put_block_group(cache);
4726 	return 0;
4727 }
4728 
4729 int btrfs_pin_reserved_extent(struct btrfs_trans_handle *trans,
4730 			      const struct extent_buffer *eb)
4731 {
4732 	struct btrfs_block_group *cache;
4733 	int ret = 0;
4734 
4735 	cache = btrfs_lookup_block_group(trans->fs_info, eb->start);
4736 	if (!cache) {
4737 		btrfs_err(trans->fs_info, "unable to find block group for %llu",
4738 			  eb->start);
4739 		return -ENOSPC;
4740 	}
4741 
4742 	ret = pin_down_extent(trans, cache, eb->start, eb->len, 1);
4743 	btrfs_put_block_group(cache);
4744 	return ret;
4745 }
4746 
4747 static int alloc_reserved_extent(struct btrfs_trans_handle *trans, u64 bytenr,
4748 				 u64 num_bytes)
4749 {
4750 	struct btrfs_fs_info *fs_info = trans->fs_info;
4751 	int ret;
4752 
4753 	ret = remove_from_free_space_tree(trans, bytenr, num_bytes);
4754 	if (ret)
4755 		return ret;
4756 
4757 	ret = btrfs_update_block_group(trans, bytenr, num_bytes, true);
4758 	if (ret) {
4759 		ASSERT(!ret);
4760 		btrfs_err(fs_info, "update block group failed for %llu %llu",
4761 			  bytenr, num_bytes);
4762 		return ret;
4763 	}
4764 
4765 	trace_btrfs_reserved_extent_alloc(fs_info, bytenr, num_bytes);
4766 	return 0;
4767 }
4768 
4769 static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
4770 				      u64 parent, u64 root_objectid,
4771 				      u64 flags, u64 owner, u64 offset,
4772 				      struct btrfs_key *ins, int ref_mod, u64 oref_root)
4773 {
4774 	struct btrfs_fs_info *fs_info = trans->fs_info;
4775 	struct btrfs_root *extent_root;
4776 	int ret;
4777 	struct btrfs_extent_item *extent_item;
4778 	struct btrfs_extent_owner_ref *oref;
4779 	struct btrfs_extent_inline_ref *iref;
4780 	struct btrfs_path *path;
4781 	struct extent_buffer *leaf;
4782 	int type;
4783 	u32 size;
4784 	const bool simple_quota = (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_SIMPLE);
4785 
4786 	if (parent > 0)
4787 		type = BTRFS_SHARED_DATA_REF_KEY;
4788 	else
4789 		type = BTRFS_EXTENT_DATA_REF_KEY;
4790 
4791 	size = sizeof(*extent_item);
4792 	if (simple_quota)
4793 		size += btrfs_extent_inline_ref_size(BTRFS_EXTENT_OWNER_REF_KEY);
4794 	size += btrfs_extent_inline_ref_size(type);
4795 
4796 	path = btrfs_alloc_path();
4797 	if (!path)
4798 		return -ENOMEM;
4799 
4800 	extent_root = btrfs_extent_root(fs_info, ins->objectid);
4801 	ret = btrfs_insert_empty_item(trans, extent_root, path, ins, size);
4802 	if (ret) {
4803 		btrfs_free_path(path);
4804 		return ret;
4805 	}
4806 
4807 	leaf = path->nodes[0];
4808 	extent_item = btrfs_item_ptr(leaf, path->slots[0],
4809 				     struct btrfs_extent_item);
4810 	btrfs_set_extent_refs(leaf, extent_item, ref_mod);
4811 	btrfs_set_extent_generation(leaf, extent_item, trans->transid);
4812 	btrfs_set_extent_flags(leaf, extent_item,
4813 			       flags | BTRFS_EXTENT_FLAG_DATA);
4814 
4815 	iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
4816 	if (simple_quota) {
4817 		btrfs_set_extent_inline_ref_type(leaf, iref, BTRFS_EXTENT_OWNER_REF_KEY);
4818 		oref = (struct btrfs_extent_owner_ref *)(&iref->offset);
4819 		btrfs_set_extent_owner_ref_root_id(leaf, oref, oref_root);
4820 		iref = (struct btrfs_extent_inline_ref *)(oref + 1);
4821 	}
4822 	btrfs_set_extent_inline_ref_type(leaf, iref, type);
4823 
4824 	if (parent > 0) {
4825 		struct btrfs_shared_data_ref *ref;
4826 		ref = (struct btrfs_shared_data_ref *)(iref + 1);
4827 		btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
4828 		btrfs_set_shared_data_ref_count(leaf, ref, ref_mod);
4829 	} else {
4830 		struct btrfs_extent_data_ref *ref;
4831 		ref = (struct btrfs_extent_data_ref *)(&iref->offset);
4832 		btrfs_set_extent_data_ref_root(leaf, ref, root_objectid);
4833 		btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
4834 		btrfs_set_extent_data_ref_offset(leaf, ref, offset);
4835 		btrfs_set_extent_data_ref_count(leaf, ref, ref_mod);
4836 	}
4837 
4838 	btrfs_free_path(path);
4839 
4840 	return alloc_reserved_extent(trans, ins->objectid, ins->offset);
4841 }
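
/*
 * Sketch of the on-disk item written above for the simple-quota case (the
 * OWNER_REF inline ref is omitted when simple quotas are disabled):
 *
 *	key: (ins->objectid, BTRFS_EXTENT_ITEM_KEY, ins->offset)
 *	[ btrfs_extent_item | OWNER_REF | EXTENT_DATA_REF or SHARED_DATA_REF ]
 *
 * which matches the size computed from btrfs_extent_inline_ref_size() at
 * the top of the function.
 */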
4842 
4843 static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
4844 				     struct btrfs_delayed_ref_node *node,
4845 				     struct btrfs_delayed_extent_op *extent_op)
4846 {
4847 	struct btrfs_fs_info *fs_info = trans->fs_info;
4848 	struct btrfs_root *extent_root;
4849 	int ret;
4850 	struct btrfs_extent_item *extent_item;
4851 	struct btrfs_key extent_key;
4852 	struct btrfs_tree_block_info *block_info;
4853 	struct btrfs_extent_inline_ref *iref;
4854 	struct btrfs_path *path;
4855 	struct extent_buffer *leaf;
4856 	u32 size = sizeof(*extent_item) + sizeof(*iref);
4857 	const u64 flags = (extent_op ? extent_op->flags_to_set : 0);
4858 	/* The owner of a tree block is the level. */
4859 	int level = btrfs_delayed_ref_owner(node);
4860 	bool skinny_metadata = btrfs_fs_incompat(fs_info, SKINNY_METADATA);
4861 
4862 	extent_key.objectid = node->bytenr;
4863 	if (skinny_metadata) {
4864 		/* The owner of a tree block is the level. */
4865 		extent_key.offset = level;
4866 		extent_key.type = BTRFS_METADATA_ITEM_KEY;
4867 	} else {
4868 		extent_key.offset = node->num_bytes;
4869 		extent_key.type = BTRFS_EXTENT_ITEM_KEY;
4870 		size += sizeof(*block_info);
4871 	}
4872 
4873 	path = btrfs_alloc_path();
4874 	if (!path)
4875 		return -ENOMEM;
4876 
4877 	extent_root = btrfs_extent_root(fs_info, extent_key.objectid);
4878 	ret = btrfs_insert_empty_item(trans, extent_root, path, &extent_key,
4879 				      size);
4880 	if (ret) {
4881 		btrfs_free_path(path);
4882 		return ret;
4883 	}
4884 
4885 	leaf = path->nodes[0];
4886 	extent_item = btrfs_item_ptr(leaf, path->slots[0],
4887 				     struct btrfs_extent_item);
4888 	btrfs_set_extent_refs(leaf, extent_item, 1);
4889 	btrfs_set_extent_generation(leaf, extent_item, trans->transid);
4890 	btrfs_set_extent_flags(leaf, extent_item,
4891 			       flags | BTRFS_EXTENT_FLAG_TREE_BLOCK);
4892 
4893 	if (skinny_metadata) {
4894 		iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
4895 	} else {
4896 		block_info = (struct btrfs_tree_block_info *)(extent_item + 1);
4897 		btrfs_set_tree_block_key(leaf, block_info, &extent_op->key);
4898 		btrfs_set_tree_block_level(leaf, block_info, level);
4899 		iref = (struct btrfs_extent_inline_ref *)(block_info + 1);
4900 	}
4901 
4902 	if (node->type == BTRFS_SHARED_BLOCK_REF_KEY) {
4903 		btrfs_set_extent_inline_ref_type(leaf, iref,
4904 						 BTRFS_SHARED_BLOCK_REF_KEY);
4905 		btrfs_set_extent_inline_ref_offset(leaf, iref, node->parent);
4906 	} else {
4907 		btrfs_set_extent_inline_ref_type(leaf, iref,
4908 						 BTRFS_TREE_BLOCK_REF_KEY);
4909 		btrfs_set_extent_inline_ref_offset(leaf, iref, node->ref_root);
4910 	}
4911 
4912 	btrfs_free_path(path);
4913 
4914 	return alloc_reserved_extent(trans, node->bytenr, fs_info->nodesize);
4915 }
4916 
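/*
 * Editor's sketch (stand-alone, compiled out): the two key shapes built by
 * alloc_reserved_tree_block() above.  With the SKINNY_METADATA incompat bit
 * the extent tree stores (bytenr, BTRFS_METADATA_ITEM_KEY, level); without it
 * the older (bytenr, BTRFS_EXTENT_ITEM_KEY, num_bytes) form plus a separate
 * btrfs_tree_block_info is used.  The numeric key types below mirror the
 * on-disk constants.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

#define EXTENT_ITEM_KEY		168	/* BTRFS_EXTENT_ITEM_KEY */
#define METADATA_ITEM_KEY	169	/* BTRFS_METADATA_ITEM_KEY */

struct key { uint64_t objectid; uint8_t type; uint64_t offset; };

static struct key tree_block_key(uint64_t bytenr, uint64_t num_bytes,
				 int level, int skinny)
{
	struct key k = { .objectid = bytenr };

	if (skinny) {
		k.type = METADATA_ITEM_KEY;
		k.offset = level;	/* the level doubles as the offset */
	} else {
		k.type = EXTENT_ITEM_KEY;
		k.offset = num_bytes;
	}
	return k;
}

int main(void)
{
	struct key k = tree_block_key(1048576, 16384, 1, 1);

	printf("(%llu %u %llu)\n", (unsigned long long)k.objectid,
	       (unsigned)k.type, (unsigned long long)k.offset);
	return 0;
}
#endif
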
4917 int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
4918 				     struct btrfs_root *root, u64 owner,
4919 				     u64 offset, u64 ram_bytes,
4920 				     struct btrfs_key *ins)
4921 {
4922 	struct btrfs_ref generic_ref = {
4923 		.action = BTRFS_ADD_DELAYED_EXTENT,
4924 		.bytenr = ins->objectid,
4925 		.num_bytes = ins->offset,
4926 		.owning_root = btrfs_root_id(root),
4927 		.ref_root = btrfs_root_id(root),
4928 	};
4929 
4930 	ASSERT(generic_ref.ref_root != BTRFS_TREE_LOG_OBJECTID);
4931 
4932 	if (btrfs_is_data_reloc_root(root) && is_fstree(root->relocation_src_root))
4933 		generic_ref.owning_root = root->relocation_src_root;
4934 
4935 	btrfs_init_data_ref(&generic_ref, owner, offset, 0, false);
4936 	btrfs_ref_tree_mod(root->fs_info, &generic_ref);
4937 
4938 	return btrfs_add_delayed_data_ref(trans, &generic_ref, ram_bytes);
4939 }
4940 
4941 /*
4942  * this is used by the tree logging recovery code.  It records that
4943  * an extent has been allocated and makes sure to clear the free
4944  * space cache bits as well
4945  */
4946 int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
4947 				   u64 root_objectid, u64 owner, u64 offset,
4948 				   struct btrfs_key *ins)
4949 {
4950 	struct btrfs_fs_info *fs_info = trans->fs_info;
4951 	int ret;
4952 	struct btrfs_block_group *block_group;
4953 	struct btrfs_space_info *space_info;
4954 	struct btrfs_squota_delta delta = {
4955 		.root = root_objectid,
4956 		.num_bytes = ins->offset,
4957 		.generation = trans->transid,
4958 		.is_data = true,
4959 		.is_inc = true,
4960 	};
4961 
4962 	/*
4963 	 * Mixed block groups will have their extents excluded before processing
4964 	 * the log, so we only need to do the exclude dance if this fs isn't mixed.
4965 	 */
4966 	if (!btrfs_fs_incompat(fs_info, MIXED_GROUPS)) {
4967 		ret = __exclude_logged_extent(fs_info, ins->objectid,
4968 					      ins->offset);
4969 		if (ret)
4970 			return ret;
4971 	}
4972 
4973 	block_group = btrfs_lookup_block_group(fs_info, ins->objectid);
4974 	if (!block_group)
4975 		return -EINVAL;
4976 
4977 	space_info = block_group->space_info;
4978 	spin_lock(&space_info->lock);
4979 	spin_lock(&block_group->lock);
4980 	space_info->bytes_reserved += ins->offset;
4981 	block_group->reserved += ins->offset;
4982 	spin_unlock(&block_group->lock);
4983 	spin_unlock(&space_info->lock);
4984 
4985 	ret = alloc_reserved_file_extent(trans, 0, root_objectid, 0, owner,
4986 					 offset, ins, 1, root_objectid);
4987 	if (ret)
4988 		btrfs_pin_extent(trans, ins->objectid, ins->offset, 1);
4989 	ret = btrfs_record_squota_delta(fs_info, &delta);
4990 	btrfs_put_block_group(block_group);
4991 	return ret;
4992 }
4993 
4994 #ifdef CONFIG_BTRFS_DEBUG
4995 /*
4996  * Extra safety check in case the extent tree is corrupted and extent allocator
4997  * chooses to use a tree block which is already used and locked.
4998  */
4999 static bool check_eb_lock_owner(const struct extent_buffer *eb)
5000 {
5001 	if (eb->lock_owner == current->pid) {
5002 		btrfs_err_rl(eb->fs_info,
5003 "tree block %llu owner %llu already locked by pid=%d, extent tree corruption detected",
5004 			     eb->start, btrfs_header_owner(eb), current->pid);
5005 		return true;
5006 	}
5007 	return false;
5008 }
5009 #else
5010 static bool check_eb_lock_owner(struct extent_buffer *eb)
5011 {
5012 	return false;
5013 }
5014 #endif
5015 
5016 static struct extent_buffer *
5017 btrfs_init_new_buffer(struct btrfs_trans_handle *trans, struct btrfs_root *root,
5018 		      u64 bytenr, int level, u64 owner,
5019 		      enum btrfs_lock_nesting nest)
5020 {
5021 	struct btrfs_fs_info *fs_info = root->fs_info;
5022 	struct extent_buffer *buf;
5023 	u64 lockdep_owner = owner;
5024 
5025 	buf = btrfs_find_create_tree_block(fs_info, bytenr, owner, level);
5026 	if (IS_ERR(buf))
5027 		return buf;
5028 
5029 	if (check_eb_lock_owner(buf)) {
5030 		free_extent_buffer(buf);
5031 		return ERR_PTR(-EUCLEAN);
5032 	}
5033 
5034 	/*
5035 	 * The reloc trees are just snapshots, so we need them to appear to be
5036 	 * just like any other fs tree WRT lockdep.
5037 	 *
5038 	 * The exception however is in replace_path() in relocation, where we
5039 	 * hold the lock on the original fs root and then search for the reloc
5040 	 * root.  At that point we need to make sure any reloc root buffers are
5041 	 * set to the BTRFS_TREE_RELOC_OBJECTID lockdep class in order to make
5042 	 * lockdep happy.
5043 	 */
5044 	if (lockdep_owner == BTRFS_TREE_RELOC_OBJECTID &&
5045 	    !test_bit(BTRFS_ROOT_RESET_LOCKDEP_CLASS, &root->state))
5046 		lockdep_owner = BTRFS_FS_TREE_OBJECTID;
5047 
5048 	/* btrfs_clear_buffer_dirty() accesses generation field. */
5049 	btrfs_set_header_generation(buf, trans->transid);
5050 
5051 	/*
5052 	 * This needs to stay, because we could allocate a freed block from an
5053 	 * old tree into a new tree, so we need to make sure this new block is
5054 	 * set to the appropriate level and owner.
5055 	 */
5056 	btrfs_set_buffer_lockdep_class(lockdep_owner, buf, level);
5057 
5058 	btrfs_tree_lock_nested(buf, nest);
5059 	btrfs_clear_buffer_dirty(trans, buf);
5060 	clear_bit(EXTENT_BUFFER_STALE, &buf->bflags);
5061 	clear_bit(EXTENT_BUFFER_ZONED_ZEROOUT, &buf->bflags);
5062 
5063 	set_extent_buffer_uptodate(buf);
5064 
5065 	memzero_extent_buffer(buf, 0, sizeof(struct btrfs_header));
5066 	btrfs_set_header_level(buf, level);
5067 	btrfs_set_header_bytenr(buf, buf->start);
5068 	btrfs_set_header_generation(buf, trans->transid);
5069 	btrfs_set_header_backref_rev(buf, BTRFS_MIXED_BACKREF_REV);
5070 	btrfs_set_header_owner(buf, owner);
5071 	write_extent_buffer_fsid(buf, fs_info->fs_devices->metadata_uuid);
5072 	write_extent_buffer_chunk_tree_uuid(buf, fs_info->chunk_tree_uuid);
5073 	if (btrfs_root_id(root) == BTRFS_TREE_LOG_OBJECTID) {
5074 		buf->log_index = root->log_transid % 2;
5075 		/*
5076 		 * we allow two log transactions at a time; use different
5077 		 * EXTENT bits to differentiate dirty pages.
5078 		 */
5079 		if (buf->log_index == 0)
5080 			set_extent_bit(&root->dirty_log_pages, buf->start,
5081 				       buf->start + buf->len - 1,
5082 				       EXTENT_DIRTY, NULL);
5083 		else
5084 			set_extent_bit(&root->dirty_log_pages, buf->start,
5085 				       buf->start + buf->len - 1,
5086 				       EXTENT_NEW, NULL);
5087 	} else {
5088 		buf->log_index = -1;
5089 		set_extent_bit(&trans->transaction->dirty_pages, buf->start,
5090 			       buf->start + buf->len - 1, EXTENT_DIRTY, NULL);
5091 	}
5092 	/* this returns a buffer locked for blocking */
5093 	return buf;
5094 }
5095 
5096 /*
5097  * finds a free extent and does all the dirty work required for allocation.
5098  * returns the tree buffer or an ERR_PTR on error.
5099  */
5100 struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans,
5101 					     struct btrfs_root *root,
5102 					     u64 parent, u64 root_objectid,
5103 					     const struct btrfs_disk_key *key,
5104 					     int level, u64 hint,
5105 					     u64 empty_size,
5106 					     u64 reloc_src_root,
5107 					     enum btrfs_lock_nesting nest)
5108 {
5109 	struct btrfs_fs_info *fs_info = root->fs_info;
5110 	struct btrfs_key ins;
5111 	struct btrfs_block_rsv *block_rsv;
5112 	struct extent_buffer *buf;
5113 	u64 flags = 0;
5114 	int ret;
5115 	u32 blocksize = fs_info->nodesize;
5116 	bool skinny_metadata = btrfs_fs_incompat(fs_info, SKINNY_METADATA);
5117 	u64 owning_root;
5118 
5119 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
5120 	if (btrfs_is_testing(fs_info)) {
5121 		buf = btrfs_init_new_buffer(trans, root, root->alloc_bytenr,
5122 					    level, root_objectid, nest);
5123 		if (!IS_ERR(buf))
5124 			root->alloc_bytenr += blocksize;
5125 		return buf;
5126 	}
5127 #endif
5128 
5129 	block_rsv = btrfs_use_block_rsv(trans, root, blocksize);
5130 	if (IS_ERR(block_rsv))
5131 		return ERR_CAST(block_rsv);
5132 
5133 	ret = btrfs_reserve_extent(root, blocksize, blocksize, blocksize,
5134 				   empty_size, hint, &ins, 0, 0);
5135 	if (ret)
5136 		goto out_unuse;
5137 
5138 	buf = btrfs_init_new_buffer(trans, root, ins.objectid, level,
5139 				    root_objectid, nest);
5140 	if (IS_ERR(buf)) {
5141 		ret = PTR_ERR(buf);
5142 		goto out_free_reserved;
5143 	}
5144 	owning_root = btrfs_header_owner(buf);
5145 
5146 	if (root_objectid == BTRFS_TREE_RELOC_OBJECTID) {
5147 		if (parent == 0)
5148 			parent = ins.objectid;
5149 		flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
5150 		owning_root = reloc_src_root;
5151 	} else
5152 		BUG_ON(parent > 0);
5153 
5154 	if (root_objectid != BTRFS_TREE_LOG_OBJECTID) {
5155 		struct btrfs_delayed_extent_op *extent_op;
5156 		struct btrfs_ref generic_ref = {
5157 			.action = BTRFS_ADD_DELAYED_EXTENT,
5158 			.bytenr = ins.objectid,
5159 			.num_bytes = ins.offset,
5160 			.parent = parent,
5161 			.owning_root = owning_root,
5162 			.ref_root = root_objectid,
5163 		};
5164 
5165 		if (!skinny_metadata || flags != 0) {
5166 			extent_op = btrfs_alloc_delayed_extent_op();
5167 			if (!extent_op) {
5168 				ret = -ENOMEM;
5169 				goto out_free_buf;
5170 			}
5171 			if (key)
5172 				memcpy(&extent_op->key, key, sizeof(extent_op->key));
5173 			else
5174 				memset(&extent_op->key, 0, sizeof(extent_op->key));
5175 			extent_op->flags_to_set = flags;
5176 			extent_op->update_key = !skinny_metadata;
5177 			extent_op->update_flags = (flags != 0);
5178 		} else {
5179 			extent_op = NULL;
5180 		}
5181 
5182 		btrfs_init_tree_ref(&generic_ref, level, btrfs_root_id(root), false);
5183 		btrfs_ref_tree_mod(fs_info, &generic_ref);
5184 		ret = btrfs_add_delayed_tree_ref(trans, &generic_ref, extent_op);
5185 		if (ret) {
5186 			btrfs_free_delayed_extent_op(extent_op);
5187 			goto out_free_buf;
5188 		}
5189 	}
5190 	return buf;
5191 
5192 out_free_buf:
5193 	btrfs_tree_unlock(buf);
5194 	free_extent_buffer(buf);
5195 out_free_reserved:
5196 	btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 0);
5197 out_unuse:
5198 	btrfs_unuse_block_rsv(fs_info, block_rsv, blocksize);
5199 	return ERR_PTR(ret);
5200 }
5201 
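/*
 * Editor's sketch of a typical caller (hypothetical helper, compiled out):
 * btrfs_alloc_tree_block() hands back a new buffer that is write-locked,
 * referenced and already tracked as dirty, so the caller owns both the lock
 * and the reference.  BTRFS_NESTING_NORMAL is assumed to be the usual nesting
 * class for an unrelated allocation.
 */
#if 0
static int example_alloc_leaf(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root)
{
	struct extent_buffer *leaf;

	/* parent = 0, key = NULL, level = 0, hint = 0, empty_size = 0 */
	leaf = btrfs_alloc_tree_block(trans, root, 0, btrfs_root_id(root),
				      NULL, 0, 0, 0, 0, BTRFS_NESTING_NORMAL);
	if (IS_ERR(leaf))
		return PTR_ERR(leaf);

	/* ... populate the new leaf here ... */

	btrfs_tree_unlock(leaf);	/* drop the lock it came with ... */
	free_extent_buffer(leaf);	/* ... and the reference */
	return 0;
}
#endif
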
5202 struct walk_control {
5203 	u64 refs[BTRFS_MAX_LEVEL];
5204 	u64 flags[BTRFS_MAX_LEVEL];
5205 	struct btrfs_key update_progress;
5206 	struct btrfs_key drop_progress;
5207 	int drop_level;
5208 	int stage;
5209 	int level;
5210 	int shared_level;
5211 	int update_ref;
5212 	int keep_locks;
5213 	int reada_slot;
5214 	int reada_count;
5215 	int restarted;
5216 	/* Indicate that extent info needs to be looked up when walking the tree. */
5217 	int lookup_info;
5218 };
5219 
5220 /*
5221  * This is our normal stage.  We are traversing blocks the current snapshot owns
5222  * and we are dropping any of our references to any children we are able to, and
5223  * then freeing the block once we've processed all of the children.
5224  */
5225 #define DROP_REFERENCE	1
5226 
5227 /*
5228  * We enter this stage when we have to walk into a child block (meaning we can't
5229  * simply drop our reference to it from our current parent node) and there is
5230  * more than one reference on it.  If we are the owner of any of the children
5231  * blocks from the current parent node then we have to do the FULL_BACKREF dance
5232  * on them in order to drop our normal ref and add the shared ref.
5233  */
5234 #define UPDATE_BACKREF	2
5235 
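/*
 * Editor's sketch (compiled out): the two stages form a small state machine.
 * DROP_REFERENCE is the steady state; hitting a shared child flips the walk
 * into UPDATE_BACKREF with wc->shared_level remembering where the conversion
 * started, and climbing back up to that level flips it back.  This mirrors
 * the transitions in do_walk_down() and walk_up_proc() below.
 */
#if 0
struct wc_model { int stage; int shared_level; };

static void model_down(struct wc_model *wc, int level, unsigned long refs)
{
	if (wc->stage == DROP_REFERENCE && refs > 1) {
		wc->stage = UPDATE_BACKREF;	/* convert subtree to FULL_BACKREF */
		wc->shared_level = level - 1;
	}
}

static void model_up(struct wc_model *wc, int level)
{
	if (wc->stage == UPDATE_BACKREF && level >= wc->shared_level) {
		wc->stage = DROP_REFERENCE;	/* conversion finished */
		wc->shared_level = -1;
	}
}
#endif
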
5236 /*
5237  * Decide if we need to walk down into this node to adjust the references.
5238  *
5239  * @root:	the root we are currently deleting
5240  * @wc:		the walk control for this deletion
5241  * @eb:		the parent eb that we're currently visiting
5242  * @refs:	the number of refs for wc->level - 1
5243  * @flags:	the flags for wc->level - 1
5244  * @slot:	the slot in the eb that we're currently checking
5245  *
5246  * This is meant to be called when we're evaluating if a node we point to at
5247  * wc->level should be read and walked into, or if we can simply delete our
5248  * reference to it.  We return true if we should walk into the node, false if we
5249  * can skip it.
5250  *
5251  * We have assertions in here to make sure this is called correctly.  We assume
5252  * that sanity checking on the blocks read to this point has been done, so any
5253  * corrupted file systems must have been caught before calling this function.
5254  */
5255 static bool visit_node_for_delete(struct btrfs_root *root, struct walk_control *wc,
5256 				  struct extent_buffer *eb, u64 flags, int slot)
5257 {
5258 	struct btrfs_key key;
5259 	u64 generation;
5260 	int level = wc->level;
5261 
5262 	ASSERT(level > 0);
5263 	ASSERT(wc->refs[level - 1] > 0);
5264 
5265 	/*
5266 	 * The update backref stage we only want to skip if we already have
5267 	 * FULL_BACKREF set, otherwise we need to read.
5268 	 */
5269 	if (wc->stage == UPDATE_BACKREF) {
5270 		if (level == 1 && flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)
5271 			return false;
5272 		return true;
5273 	}
5274 
5275 	/*
5276 	 * We're the last ref on this block, we must walk into it and process
5277 	 * any refs it's pointing at.
5278 	 */
5279 	if (wc->refs[level - 1] == 1)
5280 		return true;
5281 
5282 	/*
5283 	 * If we're already FULL_BACKREF then we know we can just drop our
5284 	 * current reference.
5285 	 */
5286 	if (level == 1 && flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)
5287 		return false;
5288 
5289 	/*
5290 	 * This block is older than our creation generation, we can drop our
5291 	 * reference to it.
5292 	 */
5293 	generation = btrfs_node_ptr_generation(eb, slot);
5294 	if (!wc->update_ref || generation <= btrfs_root_origin_generation(root))
5295 		return false;
5296 
5297 	/*
5298 	 * This block was processed from a previous snapshot deletion run, we
5299 	 * can skip it.
5300 	 */
5301 	btrfs_node_key_to_cpu(eb, &key, slot);
5302 	if (btrfs_comp_cpu_keys(&key, &wc->update_progress) < 0)
5303 		return false;
5304 
5305 	/* All other cases we need to wander into the node. */
5306 	return true;
5307 }
5308 
5309 static noinline void reada_walk_down(struct btrfs_trans_handle *trans,
5310 				     struct btrfs_root *root,
5311 				     struct walk_control *wc,
5312 				     struct btrfs_path *path)
5313 {
5314 	struct btrfs_fs_info *fs_info = root->fs_info;
5315 	u64 bytenr;
5316 	u64 generation;
5317 	u64 refs;
5318 	u64 flags;
5319 	u32 nritems;
5320 	struct extent_buffer *eb;
5321 	int ret;
5322 	int slot;
5323 	int nread = 0;
5324 
5325 	if (path->slots[wc->level] < wc->reada_slot) {
5326 		wc->reada_count = wc->reada_count * 2 / 3;
5327 		wc->reada_count = max(wc->reada_count, 2);
5328 	} else {
5329 		wc->reada_count = wc->reada_count * 3 / 2;
5330 		wc->reada_count = min_t(int, wc->reada_count,
5331 					BTRFS_NODEPTRS_PER_BLOCK(fs_info));
5332 	}
5333 
5334 	eb = path->nodes[wc->level];
5335 	nritems = btrfs_header_nritems(eb);
5336 
5337 	for (slot = path->slots[wc->level]; slot < nritems; slot++) {
5338 		if (nread >= wc->reada_count)
5339 			break;
5340 
5341 		cond_resched();
5342 		bytenr = btrfs_node_blockptr(eb, slot);
5343 		generation = btrfs_node_ptr_generation(eb, slot);
5344 
5345 		if (slot == path->slots[wc->level])
5346 			goto reada;
5347 
5348 		if (wc->stage == UPDATE_BACKREF &&
5349 		    generation <= btrfs_root_origin_generation(root))
5350 			continue;
5351 
5352 		/* We don't lock the tree block, it's OK to be racy here */
5353 		ret = btrfs_lookup_extent_info(trans, fs_info, bytenr,
5354 					       wc->level - 1, 1, &refs,
5355 					       &flags, NULL);
5356 		/* We don't care about errors in readahead. */
5357 		if (ret < 0)
5358 			continue;
5359 
5360 		/*
5361 		 * This could be racy; it's conceivable that we raced and end
5362 		 * up with a bogus refs count, if that's the case just skip, if
5363 		 * we are actually corrupt we will notice when we look up
5364 		 * everything again with our locks.
5365 		 */
5366 		if (refs == 0)
5367 			continue;
5368 
5369 		/* If we don't need to visit this node don't reada. */
5370 		if (!visit_node_for_delete(root, wc, eb, flags, slot))
5371 			continue;
5372 reada:
5373 		btrfs_readahead_node_child(eb, slot);
5374 		nread++;
5375 	}
5376 	wc->reada_slot = slot;
5377 }
5378 
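/*
 * Editor's sketch (stand-alone, compiled out): the readahead window above is
 * adaptive.  If the walker has not yet consumed the previous window we shrink
 * it by a third, otherwise we grow it by half, clamped to [2, pointers per
 * block].  The pointers-per-block value below is a placeholder.
 */
#if 0
#include <stdio.h>

#define NODEPTRS_PER_BLOCK 493	/* placeholder for BTRFS_NODEPTRS_PER_BLOCK */

static int resize_window(int count, int slot, int reada_slot)
{
	if (slot < reada_slot) {
		count = count * 2 / 3;		/* reads not consumed: shrink */
		return count < 2 ? 2 : count;
	}
	count = count * 3 / 2;			/* keeping up: grow */
	return count > NODEPTRS_PER_BLOCK ? NODEPTRS_PER_BLOCK : count;
}

int main(void)
{
	int count = NODEPTRS_PER_BLOCK;

	count = resize_window(count, 0, 100);	/* walker fell behind */
	printf("shrunk to %d\n", count);
	count = resize_window(count, 200, 100);	/* walker caught up */
	printf("grown to %d\n", count);
	return 0;
}
#endif
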
5379 /*
5380  * helper to process tree block while walking down the tree.
5381  *
5382  * when wc->stage == UPDATE_BACKREF, this function updates
5383  * back refs for pointers in the block.
5384  *
5385  * NOTE: return value 1 means we should stop walking down.
5386  */
5387 static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
5388 				   struct btrfs_root *root,
5389 				   struct btrfs_path *path,
5390 				   struct walk_control *wc)
5391 {
5392 	struct btrfs_fs_info *fs_info = root->fs_info;
5393 	int level = wc->level;
5394 	struct extent_buffer *eb = path->nodes[level];
5395 	u64 flag = BTRFS_BLOCK_FLAG_FULL_BACKREF;
5396 	int ret;
5397 
5398 	if (wc->stage == UPDATE_BACKREF && btrfs_header_owner(eb) != btrfs_root_id(root))
5399 		return 1;
5400 
5401 	/*
5402 	 * when the reference count of a tree block is 1, it won't increase
5403 	 * again. once the full backref flag is set, we never clear it.
5404 	 */
5405 	if (wc->lookup_info &&
5406 	    ((wc->stage == DROP_REFERENCE && wc->refs[level] != 1) ||
5407 	     (wc->stage == UPDATE_BACKREF && !(wc->flags[level] & flag)))) {
5408 		ASSERT(path->locks[level]);
5409 		ret = btrfs_lookup_extent_info(trans, fs_info,
5410 					       eb->start, level, 1,
5411 					       &wc->refs[level],
5412 					       &wc->flags[level],
5413 					       NULL);
5414 		if (ret)
5415 			return ret;
5416 		if (unlikely(wc->refs[level] == 0)) {
5417 			btrfs_err(fs_info, "bytenr %llu has 0 references, expect > 0",
5418 				  eb->start);
5419 			return -EUCLEAN;
5420 		}
5421 	}
5422 
5423 	if (wc->stage == DROP_REFERENCE) {
5424 		if (wc->refs[level] > 1)
5425 			return 1;
5426 
5427 		if (path->locks[level] && !wc->keep_locks) {
5428 			btrfs_tree_unlock_rw(eb, path->locks[level]);
5429 			path->locks[level] = 0;
5430 		}
5431 		return 0;
5432 	}
5433 
5434 	/* wc->stage == UPDATE_BACKREF */
5435 	if (!(wc->flags[level] & flag)) {
5436 		ASSERT(path->locks[level]);
5437 		ret = btrfs_inc_ref(trans, root, eb, 1);
5438 		if (ret) {
5439 			btrfs_abort_transaction(trans, ret);
5440 			return ret;
5441 		}
5442 		ret = btrfs_dec_ref(trans, root, eb, 0);
5443 		if (ret) {
5444 			btrfs_abort_transaction(trans, ret);
5445 			return ret;
5446 		}
5447 		ret = btrfs_set_disk_extent_flags(trans, eb, flag);
5448 		if (ret) {
5449 			btrfs_abort_transaction(trans, ret);
5450 			return ret;
5451 		}
5452 		wc->flags[level] |= flag;
5453 	}
5454 
5455 	/*
5456 	 * the block is shared by multiple trees, so it's not good to
5457 	 * keep the tree lock
5458 	 */
5459 	if (path->locks[level] && level > 0) {
5460 		btrfs_tree_unlock_rw(eb, path->locks[level]);
5461 		path->locks[level] = 0;
5462 	}
5463 	return 0;
5464 }
5465 
5466 /*
5467  * This is used to verify a ref exists for this root to deal with a bug where we
5468  * would have a drop_progress key that hadn't been updated properly.
5469  */
5470 static int check_ref_exists(struct btrfs_trans_handle *trans,
5471 			    struct btrfs_root *root, u64 bytenr, u64 parent,
5472 			    int level)
5473 {
5474 	struct btrfs_delayed_ref_root *delayed_refs;
5475 	struct btrfs_delayed_ref_head *head;
5476 	struct btrfs_path *path;
5477 	struct btrfs_extent_inline_ref *iref;
5478 	int ret;
5479 	bool exists = false;
5480 
5481 	path = btrfs_alloc_path();
5482 	if (!path)
5483 		return -ENOMEM;
5484 again:
5485 	ret = lookup_extent_backref(trans, path, &iref, bytenr,
5486 				    root->fs_info->nodesize, parent,
5487 				    btrfs_root_id(root), level, 0);
5488 	if (ret != -ENOENT) {
5489 		/*
5490 		 * If we get 0 then we found our reference, so return 1; else
5491 		 * return the error if it's not -ENOENT.
5492 		 */
5493 		btrfs_free_path(path);
5494 		return (ret < 0) ? ret : 1;
5495 	}
5496 
5497 	/*
5498 	 * We could have a delayed ref with this reference, so look it up while
5499 	 * we're holding the path open to make sure we don't race with the
5500 	 * delayed ref running.
5501 	 */
5502 	delayed_refs = &trans->transaction->delayed_refs;
5503 	spin_lock(&delayed_refs->lock);
5504 	head = btrfs_find_delayed_ref_head(root->fs_info, delayed_refs, bytenr);
5505 	if (!head)
5506 		goto out;
5507 	if (!mutex_trylock(&head->mutex)) {
5508 		/*
5509 		 * We're contended, which means that the delayed ref is running; get a
5510 		 * reference and wait for the ref head to be complete and then
5511 		 * try again.
5512 		 */
5513 		refcount_inc(&head->refs);
5514 		spin_unlock(&delayed_refs->lock);
5515 
5516 		btrfs_release_path(path);
5517 
5518 		mutex_lock(&head->mutex);
5519 		mutex_unlock(&head->mutex);
5520 		btrfs_put_delayed_ref_head(head);
5521 		goto again;
5522 	}
5523 
5524 	exists = btrfs_find_delayed_tree_ref(head, root->root_key.objectid, parent);
5525 	mutex_unlock(&head->mutex);
5526 out:
5527 	spin_unlock(&delayed_refs->lock);
5528 	btrfs_free_path(path);
5529 	return exists ? 1 : 0;
5530 }
5531 
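/*
 * Editor's sketch (stand-alone, compiled out): the trylock-then-wait pattern
 * in check_ref_exists() is a general idiom.  If the object's mutex is
 * contended its holder is mid-operation, so pin the object with a reference,
 * drop the list lock, block on the mutex purely to wait for completion, then
 * restart the whole lookup.  All names here are hypothetical.
 */
#if 0
#include <pthread.h>

struct work_item {
	pthread_mutex_t mutex;	/* held while the item is being processed */
	int refs;
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

static void wait_until_quiescent(struct work_item *item)
{
again:
	pthread_mutex_lock(&list_lock);
	if (pthread_mutex_trylock(&item->mutex) != 0) {
		item->refs++;			/* keep the item alive */
		pthread_mutex_unlock(&list_lock);

		pthread_mutex_lock(&item->mutex);	/* wait for the holder */
		pthread_mutex_unlock(&item->mutex);

		pthread_mutex_lock(&list_lock);
		item->refs--;			/* put our temporary reference */
		pthread_mutex_unlock(&list_lock);
		goto again;			/* re-run the lookup from scratch */
	}
	/* got the mutex: the item is quiescent and safe to examine */
	pthread_mutex_unlock(&item->mutex);
	pthread_mutex_unlock(&list_lock);
}
#endif
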
5532 /*
5533  * We may not have an uptodate block, so if we are going to walk down into this
5534  * block we need to drop the lock, read it off the disk, re-lock it and
5535  * return to continue dropping the snapshot.
5536  */
5537 static int check_next_block_uptodate(struct btrfs_trans_handle *trans,
5538 				     struct btrfs_root *root,
5539 				     struct btrfs_path *path,
5540 				     struct walk_control *wc,
5541 				     struct extent_buffer *next)
5542 {
5543 	struct btrfs_tree_parent_check check = { 0 };
5544 	u64 generation;
5545 	int level = wc->level;
5546 	int ret;
5547 
5548 	btrfs_assert_tree_write_locked(next);
5549 
5550 	generation = btrfs_node_ptr_generation(path->nodes[level], path->slots[level]);
5551 
5552 	if (btrfs_buffer_uptodate(next, generation, 0))
5553 		return 0;
5554 
5555 	check.level = level - 1;
5556 	check.transid = generation;
5557 	check.owner_root = btrfs_root_id(root);
5558 	check.has_first_key = true;
5559 	btrfs_node_key_to_cpu(path->nodes[level], &check.first_key, path->slots[level]);
5560 
5561 	btrfs_tree_unlock(next);
5562 	if (level == 1)
5563 		reada_walk_down(trans, root, wc, path);
5564 	ret = btrfs_read_extent_buffer(next, &check);
5565 	if (ret) {
5566 		free_extent_buffer(next);
5567 		return ret;
5568 	}
5569 	btrfs_tree_lock(next);
5570 	wc->lookup_info = 1;
5571 	return 0;
5572 }
5573 
5574 /*
5575  * If we determine that we don't have to visit wc->level - 1 then we need to
5576  * determine if we can drop our reference.
5577  *
5578  * If we are UPDATE_BACKREF then we will not drop it; we need to update our backrefs.
5579  *
5580  * If we are DROP_REFERENCE this will figure out if we need to drop our current
5581  * reference, skipping it if we dropped it during a previous incomplete drop, or
5582  * dropping it if we still have a reference to it.
5583  */
5584 static int maybe_drop_reference(struct btrfs_trans_handle *trans, struct btrfs_root *root,
5585 				struct btrfs_path *path, struct walk_control *wc,
5586 				struct extent_buffer *next, u64 owner_root)
5587 {
5588 	struct btrfs_ref ref = {
5589 		.action = BTRFS_DROP_DELAYED_REF,
5590 		.bytenr = next->start,
5591 		.num_bytes = root->fs_info->nodesize,
5592 		.owning_root = owner_root,
5593 		.ref_root = btrfs_root_id(root),
5594 	};
5595 	int level = wc->level;
5596 	int ret;
5597 
5598 	/* We are UPDATE_BACKREF, we're not dropping anything. */
5599 	if (wc->stage == UPDATE_BACKREF)
5600 		return 0;
5601 
5602 	if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
5603 		ref.parent = path->nodes[level]->start;
5604 	} else {
5605 		ASSERT(btrfs_root_id(root) == btrfs_header_owner(path->nodes[level]));
5606 		if (btrfs_root_id(root) != btrfs_header_owner(path->nodes[level])) {
5607 			btrfs_err(root->fs_info, "mismatched block owner");
5608 			return -EIO;
5609 		}
5610 	}
5611 
5612 	/*
5613 	 * If we had a drop_progress we need to verify the refs are set as
5614 	 * expected.  If we find our ref then we know that from here on out
5615 	 * everything should be correct, and we can clear the
5616 	 * ->restarted flag.
5617 	 */
5618 	if (wc->restarted) {
5619 		ret = check_ref_exists(trans, root, next->start, ref.parent,
5620 				       level - 1);
5621 		if (ret <= 0)
5622 			return ret;
5623 		ret = 0;
5624 		wc->restarted = 0;
5625 	}
5626 
5627 	/*
5628 	 * The reloc tree doesn't contribute to qgroup numbers, and we have already
5629 	 * accounted them at merge time (replace_path), thus we can skip the
5630 	 * expensive subtree trace here.
5631 	 */
5632 	if (btrfs_root_id(root) != BTRFS_TREE_RELOC_OBJECTID &&
5633 	    wc->refs[level - 1] > 1) {
5634 		u64 generation = btrfs_node_ptr_generation(path->nodes[level],
5635 							   path->slots[level]);
5636 
5637 		ret = btrfs_qgroup_trace_subtree(trans, next, generation, level - 1);
5638 		if (ret) {
5639 			btrfs_err_rl(root->fs_info,
5640 "error %d accounting shared subtree, quota is out of sync, rescan required",
5641 				     ret);
5642 		}
5643 	}
5644 
5645 	/*
5646 	 * We need to update the next key in our walk control so we can update
5647 	 * the drop_progress key accordingly.  We don't care if find_next_key
5648 	 * doesn't find a key because that means we're at the end and are going
5649 	 * to clean up now.
5650 	 */
5651 	wc->drop_level = level;
5652 	find_next_key(path, level, &wc->drop_progress);
5653 
5654 	btrfs_init_tree_ref(&ref, level - 1, 0, false);
5655 	return btrfs_free_extent(trans, &ref);
5656 }
5657 
5658 /*
5659  * helper to process tree block pointer.
5660  *
5661  * when wc->stage == DROP_REFERENCE, this function checks
5662  * reference count of the block pointed to. if the block
5663  * is shared and we need to update back refs for the subtree
5664  * rooted at the block, this function changes wc->stage to
5665  * UPDATE_BACKREF. if the block is shared and there is no
5666  * need to update backrefs, this function drops the reference
5667  * to the block.
5668  *
5669  * NOTE: return value 1 means we should stop walking down.
5670  */
5671 static noinline int do_walk_down(struct btrfs_trans_handle *trans,
5672 				 struct btrfs_root *root,
5673 				 struct btrfs_path *path,
5674 				 struct walk_control *wc)
5675 {
5676 	struct btrfs_fs_info *fs_info = root->fs_info;
5677 	u64 bytenr;
5678 	u64 generation;
5679 	u64 owner_root = 0;
5680 	struct extent_buffer *next;
5681 	int level = wc->level;
5682 	int ret = 0;
5683 
5684 	generation = btrfs_node_ptr_generation(path->nodes[level],
5685 					       path->slots[level]);
5686 	/*
5687 	 * if the lower level block was created before the snapshot
5688 	 * was created, we know there is no need to update back refs
5689 	 * for the subtree
5690 	 */
5691 	if (wc->stage == UPDATE_BACKREF &&
5692 	    generation <= btrfs_root_origin_generation(root)) {
5693 		wc->lookup_info = 1;
5694 		return 1;
5695 	}
5696 
5697 	bytenr = btrfs_node_blockptr(path->nodes[level], path->slots[level]);
5698 
5699 	next = btrfs_find_create_tree_block(fs_info, bytenr, btrfs_root_id(root),
5700 					    level - 1);
5701 	if (IS_ERR(next))
5702 		return PTR_ERR(next);
5703 
5704 	btrfs_tree_lock(next);
5705 
5706 	ret = btrfs_lookup_extent_info(trans, fs_info, bytenr, level - 1, 1,
5707 				       &wc->refs[level - 1],
5708 				       &wc->flags[level - 1],
5709 				       &owner_root);
5710 	if (ret < 0)
5711 		goto out_unlock;
5712 
5713 	if (unlikely(wc->refs[level - 1] == 0)) {
5714 		btrfs_err(fs_info, "bytenr %llu has 0 references, expect > 0",
5715 			  bytenr);
5716 		ret = -EUCLEAN;
5717 		goto out_unlock;
5718 	}
5719 	wc->lookup_info = 0;
5720 
5721 	/* If we don't have to walk into this node skip it. */
5722 	if (!visit_node_for_delete(root, wc, path->nodes[level],
5723 				   wc->flags[level - 1], path->slots[level]))
5724 		goto skip;
5725 
5726 	/*
5727 	 * We have to walk down into this node, and if we're currently at the
5728 	 * DROP_REFERENCE stage and this block is shared then we need to switch
5729 	 * to the UPDATE_BACKREF stage in order to convert to FULL_BACKREF.
5730 	 */
5731 	if (wc->stage == DROP_REFERENCE && wc->refs[level - 1] > 1) {
5732 		wc->stage = UPDATE_BACKREF;
5733 		wc->shared_level = level - 1;
5734 	}
5735 
5736 	ret = check_next_block_uptodate(trans, root, path, wc, next);
5737 	if (ret)
5738 		return ret;
5739 
5740 	level--;
5741 	ASSERT(level == btrfs_header_level(next));
5742 	if (level != btrfs_header_level(next)) {
5743 		btrfs_err(root->fs_info, "mismatched level");
5744 		ret = -EIO;
5745 		goto out_unlock;
5746 	}
5747 	path->nodes[level] = next;
5748 	path->slots[level] = 0;
5749 	path->locks[level] = BTRFS_WRITE_LOCK;
5750 	wc->level = level;
5751 	if (wc->level == 1)
5752 		wc->reada_slot = 0;
5753 	return 0;
5754 skip:
5755 	ret = maybe_drop_reference(trans, root, path, wc, next, owner_root);
5756 	if (ret)
5757 		goto out_unlock;
5758 	wc->refs[level - 1] = 0;
5759 	wc->flags[level - 1] = 0;
5760 	wc->lookup_info = 1;
5761 	ret = 1;
5762 
5763 out_unlock:
5764 	btrfs_tree_unlock(next);
5765 	free_extent_buffer(next);
5766 
5767 	return ret;
5768 }
5769 
5770 /*
5771  * helper to process tree block while walking up the tree.
5772  *
5773  * when wc->stage == DROP_REFERENCE, this function drops
5774  * reference count on the block.
5775  *
5776  * when wc->stage == UPDATE_BACKREF, this function changes
5777  * wc->stage back to DROP_REFERENCE if we changed wc->stage
5778  * to UPDATE_BACKREF previously while processing the block.
5779  *
5780  * NOTE: return value 1 means we should stop walking up.
5781  */
5782 static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
5783 				 struct btrfs_root *root,
5784 				 struct btrfs_path *path,
5785 				 struct walk_control *wc)
5786 {
5787 	struct btrfs_fs_info *fs_info = root->fs_info;
5788 	int ret = 0;
5789 	int level = wc->level;
5790 	struct extent_buffer *eb = path->nodes[level];
5791 	u64 parent = 0;
5792 
5793 	if (wc->stage == UPDATE_BACKREF) {
5794 		ASSERT(wc->shared_level >= level);
5795 		if (level < wc->shared_level)
5796 			goto out;
5797 
5798 		ret = find_next_key(path, level + 1, &wc->update_progress);
5799 		if (ret > 0)
5800 			wc->update_ref = 0;
5801 
5802 		wc->stage = DROP_REFERENCE;
5803 		wc->shared_level = -1;
5804 		path->slots[level] = 0;
5805 
5806 		/*
5807 		 * check reference count again if the block isn't locked.
5808 		 * we should start walking down the tree again if reference
5809 		 * count is one.
5810 		 */
5811 		if (!path->locks[level]) {
5812 			ASSERT(level > 0);
5813 			btrfs_tree_lock(eb);
5814 			path->locks[level] = BTRFS_WRITE_LOCK;
5815 
5816 			ret = btrfs_lookup_extent_info(trans, fs_info,
5817 						       eb->start, level, 1,
5818 						       &wc->refs[level],
5819 						       &wc->flags[level],
5820 						       NULL);
5821 			if (ret < 0) {
5822 				btrfs_tree_unlock_rw(eb, path->locks[level]);
5823 				path->locks[level] = 0;
5824 				return ret;
5825 			}
5826 			if (unlikely(wc->refs[level] == 0)) {
5827 				btrfs_tree_unlock_rw(eb, path->locks[level]);
5828 				btrfs_err(fs_info, "bytenr %llu has 0 references, expect > 0",
5829 					  eb->start);
5830 				return -EUCLEAN;
5831 			}
5832 			if (wc->refs[level] == 1) {
5833 				btrfs_tree_unlock_rw(eb, path->locks[level]);
5834 				path->locks[level] = 0;
5835 				return 1;
5836 			}
5837 		}
5838 	}
5839 
5840 	/* wc->stage == DROP_REFERENCE */
5841 	ASSERT(path->locks[level] || wc->refs[level] == 1);
5842 
5843 	if (wc->refs[level] == 1) {
5844 		if (level == 0) {
5845 			if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
5846 				ret = btrfs_dec_ref(trans, root, eb, 1);
5847 			else
5848 				ret = btrfs_dec_ref(trans, root, eb, 0);
5849 			if (ret) {
5850 				btrfs_abort_transaction(trans, ret);
5851 				return ret;
5852 			}
5853 			if (is_fstree(btrfs_root_id(root))) {
5854 				ret = btrfs_qgroup_trace_leaf_items(trans, eb);
5855 				if (ret) {
5856 					btrfs_err_rl(fs_info,
5857 	"error %d accounting leaf items, quota is out of sync, rescan required",
5858 					     ret);
5859 				}
5860 			}
5861 		}
5862 		/* Make block locked assertion in btrfs_clear_buffer_dirty happy. */
5863 		if (!path->locks[level]) {
5864 			btrfs_tree_lock(eb);
5865 			path->locks[level] = BTRFS_WRITE_LOCK;
5866 		}
5867 		btrfs_clear_buffer_dirty(trans, eb);
5868 	}
5869 
5870 	if (eb == root->node) {
5871 		if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
5872 			parent = eb->start;
5873 		else if (btrfs_root_id(root) != btrfs_header_owner(eb))
5874 			goto owner_mismatch;
5875 	} else {
5876 		if (wc->flags[level + 1] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
5877 			parent = path->nodes[level + 1]->start;
5878 		else if (btrfs_root_id(root) !=
5879 			 btrfs_header_owner(path->nodes[level + 1]))
5880 			goto owner_mismatch;
5881 	}
5882 
5883 	ret = btrfs_free_tree_block(trans, btrfs_root_id(root), eb, parent,
5884 				    wc->refs[level] == 1);
5885 	if (ret < 0)
5886 		btrfs_abort_transaction(trans, ret);
5887 out:
5888 	wc->refs[level] = 0;
5889 	wc->flags[level] = 0;
5890 	return ret;
5891 
5892 owner_mismatch:
5893 	btrfs_err_rl(fs_info, "unexpected tree owner, have %llu expect %llu",
5894 		     btrfs_header_owner(eb), btrfs_root_id(root));
5895 	return -EUCLEAN;
5896 }
5897 
5898 /*
5899  * walk_down_tree consists of two steps.
5900  *
5901  * walk_down_proc().  Look up the reference count and reference of our current
5902  * wc->level.  At this point path->nodes[wc->level] should be populated and
5903  * uptodate, and in most cases should already be locked.  If we are in
5904  * DROP_REFERENCE and our refcount is > 1 then we've entered a shared node and
5905  * we can walk back up the tree.  If we are UPDATE_BACKREF we have to set
5906  * FULL_BACKREF on this node if it's not already set, and then do the
5907  * FULL_BACKREF conversion dance, which is to drop the root reference and add
5908  * the shared reference to all of this node's children.
5909  *
5910  * do_walk_down().  This is where we actually start iterating on the children of
5911  * our current path->nodes[wc->level].  For DROP_REFERENCE that means dropping
5912  * our reference to the children that return false from visit_node_for_delete(),
5913  * which has various conditions where we know we can just drop our reference
5914  * without visiting the node.  For UPDATE_BACKREF we will skip any children that
5915  * visit_node_for_delete() returns false for, only walking down when necessary.
5916  * The bulk of the work for UPDATE_BACKREF occurs in the walk_up_tree() part of
5917  * snapshot deletion.
5918  */
5919 static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
5920 				   struct btrfs_root *root,
5921 				   struct btrfs_path *path,
5922 				   struct walk_control *wc)
5923 {
5924 	int level = wc->level;
5925 	int ret = 0;
5926 
5927 	wc->lookup_info = 1;
5928 	while (level >= 0) {
5929 		ret = walk_down_proc(trans, root, path, wc);
5930 		if (ret)
5931 			break;
5932 
5933 		if (level == 0)
5934 			break;
5935 
5936 		if (path->slots[level] >=
5937 		    btrfs_header_nritems(path->nodes[level]))
5938 			break;
5939 
5940 		ret = do_walk_down(trans, root, path, wc);
5941 		if (ret > 0) {
5942 			path->slots[level]++;
5943 			continue;
5944 		} else if (ret < 0)
5945 			break;
5946 		level = wc->level;
5947 	}
5948 	return (ret == 1) ? 0 : ret;
5949 }
5950 
5951 /*
5952  * walk_up_tree() is responsible for making sure we visit every slot on our
5953  * current node, and if we're at the end of that node then we call
5954  * walk_up_proc() on our current node which will do one of a few things based on
5955  * our stage.
5956  *
5957  * UPDATE_BACKREF.  If our wc->level is currently less than our wc->shared_level
5958  * then we need to walk back up the tree, and then go back down into the
5959  * other slots via walk_down_tree to update any other children from our original
5960  * wc->shared_level.  Once we're at or above our wc->shared_level we can switch
5961  * back to DROP_REFERENCE, lookup the current nodes refs and flags, and carry on.
5962  *
5963  * DROP_REFERENCE. If our refs == 1 then we're going to free this tree block.
5964  * If we're level 0 then we need to btrfs_dec_ref() on all of the data extents
5965  * in our current leaf.  After that we call btrfs_free_tree_block() on the
5966  * current node and walk up to the next node to walk down the next slot.
5967  */
5968 static noinline int walk_up_tree(struct btrfs_trans_handle *trans,
5969 				 struct btrfs_root *root,
5970 				 struct btrfs_path *path,
5971 				 struct walk_control *wc, int max_level)
5972 {
5973 	int level = wc->level;
5974 	int ret;
5975 
5976 	path->slots[level] = btrfs_header_nritems(path->nodes[level]);
5977 	while (level < max_level && path->nodes[level]) {
5978 		wc->level = level;
5979 		if (path->slots[level] + 1 <
5980 		    btrfs_header_nritems(path->nodes[level])) {
5981 			path->slots[level]++;
5982 			return 0;
5983 		} else {
5984 			ret = walk_up_proc(trans, root, path, wc);
5985 			if (ret > 0)
5986 				return 0;
5987 			if (ret < 0)
5988 				return ret;
5989 
5990 			if (path->locks[level]) {
5991 				btrfs_tree_unlock_rw(path->nodes[level],
5992 						     path->locks[level]);
5993 				path->locks[level] = 0;
5994 			}
5995 			free_extent_buffer(path->nodes[level]);
5996 			path->nodes[level] = NULL;
5997 			level++;
5998 		}
5999 	}
6000 	return 1;
6001 }
6002 
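/*
 * Editor's sketch (stand-alone, compiled out): stripped of refcounts,
 * locking and stages, walk_down_tree()/walk_up_tree() implement an iterative
 * post-order traversal driven by per-level slot cursors.  Keeping the cursor
 * state in arrays is what lets the drop be checkpointed in drop_progress and
 * resumed later at an arbitrary (level, slot) position.
 */
#if 0
#include <stdio.h>

#define MAX_LEVEL 8

struct node { int nritems; struct node *child[4]; };

static void walk(struct node *root, int root_level)
{
	struct node *nodes[MAX_LEVEL] = { 0 };
	int slots[MAX_LEVEL] = { 0 };
	int level = root_level;

	nodes[level] = root;
	while (nodes[level]) {
		/* walk down while there are children left at this level */
		while (level > 0 && slots[level] < nodes[level]->nritems) {
			nodes[level - 1] = nodes[level]->child[slots[level]];
			slots[level - 1] = 0;
			level--;
		}
		/* visit in post-order, then step up to the next sibling */
		printf("free node at level %d\n", level);
		nodes[level] = NULL;
		if (level == root_level)
			break;
		level++;
		slots[level]++;
	}
}
#endif
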
6003 /*
6004  * drop a subvolume tree.
6005  *
6006  * this function traverses the tree, freeing any blocks that are only
6007  * referenced by the tree.
6008  *
6009  * when a shared tree block is found, this function decreases its
6010  * reference count by one. if update_ref is true, this function
6011  * also makes sure backrefs for the shared block and all lower level
6012  * blocks are properly updated.
6013  *
6014  * If called with for_reloc == 0, may exit early with -EAGAIN
6015  */
6016 int btrfs_drop_snapshot(struct btrfs_root *root, int update_ref, int for_reloc)
6017 {
6018 	const bool is_reloc_root = (btrfs_root_id(root) == BTRFS_TREE_RELOC_OBJECTID);
6019 	struct btrfs_fs_info *fs_info = root->fs_info;
6020 	struct btrfs_path *path;
6021 	struct btrfs_trans_handle *trans;
6022 	struct btrfs_root *tree_root = fs_info->tree_root;
6023 	struct btrfs_root_item *root_item = &root->root_item;
6024 	struct walk_control *wc;
6025 	struct btrfs_key key;
6026 	const u64 rootid = btrfs_root_id(root);
6027 	int ret = 0;
6028 	int level;
6029 	bool root_dropped = false;
6030 	bool unfinished_drop = false;
6031 
6032 	btrfs_debug(fs_info, "Drop subvolume %llu", btrfs_root_id(root));
6033 
6034 	path = btrfs_alloc_path();
6035 	if (!path) {
6036 		ret = -ENOMEM;
6037 		goto out;
6038 	}
6039 
6040 	wc = kzalloc(sizeof(*wc), GFP_NOFS);
6041 	if (!wc) {
6042 		btrfs_free_path(path);
6043 		ret = -ENOMEM;
6044 		goto out;
6045 	}
6046 
6047 	/*
6048 	 * Use join to avoid potential EINTR from transaction start. See
6049 	 * wait_reserve_ticket and the whole reservation callchain.
6050 	 */
6051 	if (for_reloc)
6052 		trans = btrfs_join_transaction(tree_root);
6053 	else
6054 		trans = btrfs_start_transaction(tree_root, 0);
6055 	if (IS_ERR(trans)) {
6056 		ret = PTR_ERR(trans);
6057 		goto out_free;
6058 	}
6059 
6060 	ret = btrfs_run_delayed_items(trans);
6061 	if (ret)
6062 		goto out_end_trans;
6063 
6064 	/*
6065 	 * This will help us catch people modifying the fs tree while we're
6066 	 * dropping it.  It is unsafe to mess with the fs tree while it's being
6067 	 * dropped as we unlock the root node and parent nodes as we walk down
6068 	 * the tree, assuming nothing will change.  If something does change
6069 	 * then we'll have stale information and drop references to blocks we've
6070 	 * already dropped.
6071 	 */
6072 	set_bit(BTRFS_ROOT_DELETING, &root->state);
6073 	unfinished_drop = test_bit(BTRFS_ROOT_UNFINISHED_DROP, &root->state);
6074 
6075 	if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
6076 		level = btrfs_header_level(root->node);
6077 		path->nodes[level] = btrfs_lock_root_node(root);
6078 		path->slots[level] = 0;
6079 		path->locks[level] = BTRFS_WRITE_LOCK;
6080 		memset(&wc->update_progress, 0,
6081 		       sizeof(wc->update_progress));
6082 	} else {
6083 		btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
6084 		memcpy(&wc->update_progress, &key,
6085 		       sizeof(wc->update_progress));
6086 
6087 		level = btrfs_root_drop_level(root_item);
6088 		BUG_ON(level == 0);
6089 		path->lowest_level = level;
6090 		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
6091 		path->lowest_level = 0;
6092 		if (ret < 0)
6093 			goto out_end_trans;
6094 
6095 		WARN_ON(ret > 0);
6096 		ret = 0;
6097 
6098 		/*
6099 		 * unlock our path; this is safe because only this
6100 		 * function is allowed to delete this snapshot
6101 		 */
6102 		btrfs_unlock_up_safe(path, 0);
6103 
6104 		level = btrfs_header_level(root->node);
6105 		while (1) {
6106 			btrfs_tree_lock(path->nodes[level]);
6107 			path->locks[level] = BTRFS_WRITE_LOCK;
6108 
6109 			/*
6110 			 * btrfs_lookup_extent_info() returns 0 for success,
6111 			 * or < 0 for error.
6112 			 */
6113 			ret = btrfs_lookup_extent_info(trans, fs_info,
6114 						path->nodes[level]->start,
6115 						level, 1, &wc->refs[level],
6116 						&wc->flags[level], NULL);
6117 			if (ret < 0)
6118 				goto out_end_trans;
6119 
6120 			BUG_ON(wc->refs[level] == 0);
6121 
6122 			if (level == btrfs_root_drop_level(root_item))
6123 				break;
6124 
6125 			btrfs_tree_unlock(path->nodes[level]);
6126 			path->locks[level] = 0;
6127 			WARN_ON(wc->refs[level] != 1);
6128 			level--;
6129 		}
6130 	}
6131 
6132 	wc->restarted = test_bit(BTRFS_ROOT_DEAD_TREE, &root->state);
6133 	wc->level = level;
6134 	wc->shared_level = -1;
6135 	wc->stage = DROP_REFERENCE;
6136 	wc->update_ref = update_ref;
6137 	wc->keep_locks = 0;
6138 	wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(fs_info);
6139 
6140 	while (1) {
6141 
6142 		ret = walk_down_tree(trans, root, path, wc);
6143 		if (ret < 0) {
6144 			btrfs_abort_transaction(trans, ret);
6145 			break;
6146 		}
6147 
6148 		ret = walk_up_tree(trans, root, path, wc, BTRFS_MAX_LEVEL);
6149 		if (ret < 0) {
6150 			btrfs_abort_transaction(trans, ret);
6151 			break;
6152 		}
6153 
6154 		if (ret > 0) {
6155 			BUG_ON(wc->stage != DROP_REFERENCE);
6156 			ret = 0;
6157 			break;
6158 		}
6159 
6160 		if (wc->stage == DROP_REFERENCE) {
6161 			wc->drop_level = wc->level;
6162 			btrfs_node_key_to_cpu(path->nodes[wc->drop_level],
6163 					      &wc->drop_progress,
6164 					      path->slots[wc->drop_level]);
6165 		}
6166 		btrfs_cpu_key_to_disk(&root_item->drop_progress,
6167 				      &wc->drop_progress);
6168 		btrfs_set_root_drop_level(root_item, wc->drop_level);
6169 
6170 		BUG_ON(wc->level == 0);
6171 		if (btrfs_should_end_transaction(trans) ||
6172 		    (!for_reloc && btrfs_need_cleaner_sleep(fs_info))) {
6173 			ret = btrfs_update_root(trans, tree_root,
6174 						&root->root_key,
6175 						root_item);
6176 			if (ret) {
6177 				btrfs_abort_transaction(trans, ret);
6178 				goto out_end_trans;
6179 			}
6180 
6181 			if (!is_reloc_root)
6182 				btrfs_set_last_root_drop_gen(fs_info, trans->transid);
6183 
6184 			btrfs_end_transaction_throttle(trans);
6185 			if (!for_reloc && btrfs_need_cleaner_sleep(fs_info)) {
6186 				btrfs_debug(fs_info,
6187 					    "drop snapshot early exit");
6188 				ret = -EAGAIN;
6189 				goto out_free;
6190 			}
6191 
6192 			/*
6193 			 * Use join to avoid potential EINTR from transaction
6194 			 * start. See wait_reserve_ticket and the whole
6195 			 * reservation callchain.
6196 			 */
6197 			if (for_reloc)
6198 				trans = btrfs_join_transaction(tree_root);
6199 			else
6200 				trans = btrfs_start_transaction(tree_root, 0);
6201 			if (IS_ERR(trans)) {
6202 				ret = PTR_ERR(trans);
6203 				goto out_free;
6204 			}
6205 		}
6206 	}
6207 	btrfs_release_path(path);
6208 	if (ret)
6209 		goto out_end_trans;
6210 
6211 	ret = btrfs_del_root(trans, &root->root_key);
6212 	if (ret) {
6213 		btrfs_abort_transaction(trans, ret);
6214 		goto out_end_trans;
6215 	}
6216 
6217 	if (!is_reloc_root) {
6218 		ret = btrfs_find_root(tree_root, &root->root_key, path,
6219 				      NULL, NULL);
6220 		if (ret < 0) {
6221 			btrfs_abort_transaction(trans, ret);
6222 			goto out_end_trans;
6223 		} else if (ret > 0) {
6224 			ret = 0;
6225 			/*
6226 			 * If we fail to delete the orphan item this time
6227 			 * around, it'll get picked up the next time.
6228 			 *
6229 			 * The most common failure here is just -ENOENT.
6230 			 */
6231 			btrfs_del_orphan_item(trans, tree_root, btrfs_root_id(root));
6232 		}
6233 	}
6234 
6235 	/*
6236 	 * This subvolume is going to be completely dropped, and won't be
6237 	 * recorded as dirty roots, thus pertrans meta rsv will not be freed at
6238 	 * commit transaction time.  So free it here manually.
6239 	 */
6240 	btrfs_qgroup_convert_reserved_meta(root, INT_MAX);
6241 	btrfs_qgroup_free_meta_all_pertrans(root);
6242 
6243 	if (test_bit(BTRFS_ROOT_IN_RADIX, &root->state))
6244 		btrfs_add_dropped_root(trans, root);
6245 	else
6246 		btrfs_put_root(root);
6247 	root_dropped = true;
6248 out_end_trans:
6249 	if (!is_reloc_root)
6250 		btrfs_set_last_root_drop_gen(fs_info, trans->transid);
6251 
6252 	btrfs_end_transaction_throttle(trans);
6253 out_free:
6254 	kfree(wc);
6255 	btrfs_free_path(path);
6256 out:
6257 	if (!ret && root_dropped) {
6258 		ret = btrfs_qgroup_cleanup_dropped_subvolume(fs_info, rootid);
6259 		if (ret < 0)
6260 			btrfs_warn_rl(fs_info,
6261 				      "failed to cleanup qgroup 0/%llu: %d",
6262 				      rootid, ret);
6263 		ret = 0;
6264 	}
6265 	/*
6266 	 * We were an unfinished drop root, check to see if there are any
6267 	 * pending, and if not clear and wake up any waiters.
6268 	 */
6269 	if (!ret && unfinished_drop)
6270 		btrfs_maybe_wake_unfinished_drop(fs_info);
6271 
6272 	/*
6273 	 * So if we need to stop dropping the snapshot for whatever reason we
6274 	 * need to make sure to add it back to the dead root list so that we
6275 	 * keep trying to do the work later.  This also cleans up roots if we
6276 	 * don't have it in the radix (like when we recover after a power fail
6277 	 * or unmount) so we don't leak memory.
6278 	 */
6279 	if (!for_reloc && !root_dropped)
6280 		btrfs_add_dead_root(root);
6281 	return ret;
6282 }
6283 
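/*
 * Editor's sketch (hypothetical caller, compiled out): roughly how the
 * cleaner thread drives btrfs_drop_snapshot() for a dead root.  -EAGAIN only
 * means the cleaner was asked to yield; the function has already re-queued
 * the root on the dead roots list, so the caller simply retries on a later
 * pass.
 */
#if 0
static int example_clean_one_root(struct btrfs_root *root)
{
	/* update_ref = 0: plain deletion; for_reloc = 0: may exit early */
	int ret = btrfs_drop_snapshot(root, 0, 0);

	if (ret == -EAGAIN)
		return 0;	/* re-queued internally, try again later */
	return ret;
}
#endif
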
6284 /*
6285  * drop subtree rooted at tree block 'node'.
6286  *
6287  * NOTE: this function will unlock and release tree block 'node'
6288  * only used by relocation code
6289  */
6290 int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
6291 			struct btrfs_root *root,
6292 			struct extent_buffer *node,
6293 			struct extent_buffer *parent)
6294 {
6295 	struct btrfs_fs_info *fs_info = root->fs_info;
6296 	struct btrfs_path *path;
6297 	struct walk_control *wc;
6298 	int level;
6299 	int parent_level;
6300 	int ret = 0;
6301 
6302 	BUG_ON(btrfs_root_id(root) != BTRFS_TREE_RELOC_OBJECTID);
6303 
6304 	path = btrfs_alloc_path();
6305 	if (!path)
6306 		return -ENOMEM;
6307 
6308 	wc = kzalloc(sizeof(*wc), GFP_NOFS);
6309 	if (!wc) {
6310 		btrfs_free_path(path);
6311 		return -ENOMEM;
6312 	}
6313 
6314 	btrfs_assert_tree_write_locked(parent);
6315 	parent_level = btrfs_header_level(parent);
6316 	atomic_inc(&parent->refs);
6317 	path->nodes[parent_level] = parent;
6318 	path->slots[parent_level] = btrfs_header_nritems(parent);
6319 
6320 	btrfs_assert_tree_write_locked(node);
6321 	level = btrfs_header_level(node);
6322 	path->nodes[level] = node;
6323 	path->slots[level] = 0;
6324 	path->locks[level] = BTRFS_WRITE_LOCK;
6325 
6326 	wc->refs[parent_level] = 1;
6327 	wc->flags[parent_level] = BTRFS_BLOCK_FLAG_FULL_BACKREF;
6328 	wc->level = level;
6329 	wc->shared_level = -1;
6330 	wc->stage = DROP_REFERENCE;
6331 	wc->update_ref = 0;
6332 	wc->keep_locks = 1;
6333 	wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(fs_info);
6334 
6335 	while (1) {
6336 		ret = walk_down_tree(trans, root, path, wc);
6337 		if (ret < 0)
6338 			break;
6339 
6340 		ret = walk_up_tree(trans, root, path, wc, parent_level);
6341 		if (ret) {
6342 			if (ret > 0)
6343 				ret = 0;
6344 			break;
6345 		}
6346 	}
6347 
6348 	kfree(wc);
6349 	btrfs_free_path(path);
6350 	return ret;
6351 }
6352 
6353 /*
6354  * Unpin the extent range in an error context and don't add the space back.
6355  * Errors are not propagated further.
6356  */
6357 void btrfs_error_unpin_extent_range(struct btrfs_fs_info *fs_info, u64 start, u64 end)
6358 {
6359 	unpin_extent_range(fs_info, start, end, false);
6360 }
6361 
6362 /*
6363  * It used to be that old block groups would be left around forever.
6364  * Iterating over them would be enough to trim unused space.  Since we
6365  * now automatically remove them, we also need to iterate over unallocated
6366  * space.
6367  *
6368  * We don't want a transaction for this since the discard may take a
6369  * substantial amount of time.  We don't require that a transaction be
6370  * running, but we do need to take a running transaction into account
6371  * to ensure that we're not discarding chunks that were released or
6372  * allocated in the current transaction.
6373  *
6374  * Holding the chunks lock will prevent other threads from allocating
6375  * or releasing chunks, but it won't prevent a running transaction
6376  * from committing and releasing the memory that the pending chunks
6377  * list head uses.  For that, we need to take a reference to the
6378  * transaction and hold the commit root sem.  We only need to hold
6379  * it while performing the free space search since we have already
6380  * held back allocations.
6381  */
6382 static int btrfs_trim_free_extents(struct btrfs_device *device, u64 *trimmed)
6383 {
6384 	u64 start = BTRFS_DEVICE_RANGE_RESERVED, len = 0, end = 0;
6385 	int ret;
6386 
6387 	*trimmed = 0;
6388 
6389 	/* Discard not supported = nothing to do. */
6390 	if (!bdev_max_discard_sectors(device->bdev))
6391 		return 0;
6392 
6393 	/* Not writable = nothing to do. */
6394 	if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state))
6395 		return 0;
6396 
6397 	/* No free space = nothing to do. */
6398 	if (device->total_bytes <= device->bytes_used)
6399 		return 0;
6400 
6401 	ret = 0;
6402 
6403 	while (1) {
6404 		struct btrfs_fs_info *fs_info = device->fs_info;
6405 		u64 bytes;
6406 
6407 		ret = mutex_lock_interruptible(&fs_info->chunk_mutex);
6408 		if (ret)
6409 			break;
6410 
6411 		find_first_clear_extent_bit(&device->alloc_state, start,
6412 					    &start, &end,
6413 					    CHUNK_TRIMMED | CHUNK_ALLOCATED);
6414 
6415 		/* Check if there are any CHUNK_* bits left */
6416 		if (start > device->total_bytes) {
6417 			WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG));
6418 			btrfs_warn_in_rcu(fs_info,
6419 "ignoring attempt to trim beyond device size: offset %llu length %llu device %s device size %llu",
6420 					  start, end - start + 1,
6421 					  btrfs_dev_name(device),
6422 					  device->total_bytes);
6423 			mutex_unlock(&fs_info->chunk_mutex);
6424 			ret = 0;
6425 			break;
6426 		}
6427 
6428 		/* Ensure we skip the reserved space on each device. */
6429 		start = max_t(u64, start, BTRFS_DEVICE_RANGE_RESERVED);
6430 
6431 		/*
6432 		 * If find_first_clear_extent_bit finds a range that spans the
6433 		 * end of the device it will set end to -1, in this case it's up
6434 		 * to the caller to trim the value to the size of the device.
6435 		 */
6436 		end = min(end, device->total_bytes - 1);
6437 
6438 		len = end - start + 1;
6439 
6440 		/* We didn't find any extents */
6441 		if (!len) {
6442 			mutex_unlock(&fs_info->chunk_mutex);
6443 			ret = 0;
6444 			break;
6445 		}
6446 
6447 		ret = btrfs_issue_discard(device->bdev, start, len,
6448 					  &bytes);
6449 		if (!ret)
6450 			set_extent_bit(&device->alloc_state, start,
6451 				       start + bytes - 1, CHUNK_TRIMMED, NULL);
6452 		mutex_unlock(&fs_info->chunk_mutex);
6453 
6454 		if (ret)
6455 			break;
6456 
6457 		start += len;
6458 		*trimmed += bytes;
6459 
6460 		if (btrfs_trim_interrupted()) {
6461 			ret = -ERESTARTSYS;
6462 			break;
6463 		}
6464 
6465 		cond_resched();
6466 	}
6467 
6468 	return ret;
6469 }
6470 
6471 /*
6472  * Trim the whole filesystem by:
6473  * 1) trimming the free space in each block group
6474  * 2) trimming the unallocated space on each device
6475  *
6476  * This will also continue trimming even if a block group or device encounters
6477  * an error.  The return value will be the last error, or 0 if nothing bad
6478  * happens.
6479  */
6480 int btrfs_trim_fs(struct btrfs_fs_info *fs_info, struct fstrim_range *range)
6481 {
6482 	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
6483 	struct btrfs_block_group *cache = NULL;
6484 	struct btrfs_device *device;
6485 	u64 group_trimmed;
6486 	u64 range_end = U64_MAX;
6487 	u64 start;
6488 	u64 end;
6489 	u64 trimmed = 0;
6490 	u64 bg_failed = 0;
6491 	u64 dev_failed = 0;
6492 	int bg_ret = 0;
6493 	int dev_ret = 0;
6494 	int ret = 0;
6495 
6496 	if (range->start == U64_MAX)
6497 		return -EINVAL;
6498 
6499 	/*
6500 	 * Check range overflow if range->len is set.
6501 	 * The default range->len is U64_MAX.
6502 	 */
6503 	if (range->len != U64_MAX &&
6504 	    check_add_overflow(range->start, range->len, &range_end))
6505 		return -EINVAL;
6506 
6507 	cache = btrfs_lookup_first_block_group(fs_info, range->start);
6508 	for (; cache; cache = btrfs_next_block_group(cache)) {
6509 		if (cache->start >= range_end) {
6510 			btrfs_put_block_group(cache);
6511 			break;
6512 		}
6513 
6514 		start = max(range->start, cache->start);
6515 		end = min(range_end, cache->start + cache->length);
6516 
6517 		if (end - start >= range->minlen) {
6518 			if (!btrfs_block_group_done(cache)) {
6519 				ret = btrfs_cache_block_group(cache, true);
6520 				if (ret) {
6521 					bg_failed++;
6522 					bg_ret = ret;
6523 					continue;
6524 				}
6525 			}
6526 			ret = btrfs_trim_block_group(cache,
6527 						     &group_trimmed,
6528 						     start,
6529 						     end,
6530 						     range->minlen);
6531 
6532 			trimmed += group_trimmed;
6533 			if (ret) {
6534 				bg_failed++;
6535 				bg_ret = ret;
6536 				continue;
6537 			}
6538 		}
6539 	}
6540 
6541 	if (bg_failed)
6542 		btrfs_warn(fs_info,
6543 			"failed to trim %llu block group(s), last error %d",
6544 			bg_failed, bg_ret);
6545 
6546 	mutex_lock(&fs_devices->device_list_mutex);
6547 	list_for_each_entry(device, &fs_devices->devices, dev_list) {
6548 		if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state))
6549 			continue;
6550 
6551 		ret = btrfs_trim_free_extents(device, &group_trimmed);
6552 
6553 		trimmed += group_trimmed;
6554 		if (ret) {
6555 			dev_failed++;
6556 			dev_ret = ret;
6557 			break;
6558 		}
6559 	}
6560 	mutex_unlock(&fs_devices->device_list_mutex);
6561 
6562 	if (dev_failed)
6563 		btrfs_warn(fs_info,
6564 			"failed to trim %llu device(s), last error %d",
6565 			dev_failed, dev_ret);
6566 	range->len = trimmed;
6567 	if (bg_ret)
6568 		return bg_ret;
6569 	return dev_ret;
6570 }
6571
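/*
 * Editor's sketch (hypothetical caller, compiled out): btrfs_trim_fs() backs
 * the FITRIM ioctl, so a caller fills in a struct fstrim_range and reads the
 * total number of trimmed bytes back out of range->len.  The 1MiB minlen is
 * an arbitrary example value.
 */
#if 0
static int example_trim_whole_fs(struct btrfs_fs_info *fs_info)
{
	struct fstrim_range range = {
		.start = 0,
		.len = U64_MAX,		/* no upper bound */
		.minlen = SZ_1M,	/* skip free extents smaller than 1MiB */
	};
	int ret = btrfs_trim_fs(fs_info, &range);

	/* range.len now holds the number of bytes actually discarded */
	return ret;
}
#endif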