xref: /aosp_15_r20/external/erofs-utils/lib/cache.c (revision 33b1fccf6a0fada2c2875d400ed01119b7676ee5)
1 // SPDX-License-Identifier: GPL-2.0+ OR Apache-2.0
2 /*
3  * Copyright (C) 2018-2019 HUAWEI, Inc.
4  *             http://www.huawei.com/
5  * Created by Miao Xie <[email protected]>
6  * with heavy changes by Gao Xiang <[email protected]>
7  */
8 #include <stdlib.h>
9 #include <erofs/cache.h>
10 #include "erofs/print.h"
11 
/* Default flush hook: detach and release the bh without writing any data. */
static int erofs_bh_flush_drop_directly(struct erofs_buffer_head *bh)
{
	return erofs_bh_flush_generic_end(bh);
}
16 
/* bh ops for buffers whose contents are discarded at flush time */
const struct erofs_bhops erofs_drop_directly_bhops = {
	.flush = erofs_bh_flush_drop_directly,
};
20 
/*
 * Flush hook that refuses to write: -EBUSY makes erofs_bflush() keep the
 * whole buffer block around (see its `skip` handling) instead of freeing it.
 */
static int erofs_bh_flush_skip_write(struct erofs_buffer_head *bh)
{
	return -EBUSY;
}
25 
/* bh ops for buffers that must survive a flush pass untouched */
const struct erofs_bhops erofs_skip_write_bhops = {
	.flush = erofs_bh_flush_skip_write,
};
29 
/*
 * Allocate and initialize a buffer manager.
 *
 * @sbi:      superblock info the buffers belong to
 * @startblk: first block address available for allocation
 *
 * Returns the new manager or NULL on allocation failure; release it
 * with erofs_buffer_exit().
 */
struct erofs_bufmgr *erofs_buffer_init(struct erofs_sb_info *sbi,
				       erofs_blk_t startblk)
{
	struct erofs_bufmgr *bufmgr;
	unsigned int i, j;

	/*
	 * Zero the whole structure: fields never assigned below (notably
	 * metablkcnt, which erofs_bflush()/erofs_bdrop() only ever add to
	 * and erofs_total_metablocks() reads back) would otherwise start
	 * from indeterminate heap contents.
	 */
	bufmgr = calloc(1, sizeof(*bufmgr));
	if (!bufmgr)
		return NULL;

	init_list_head(&bufmgr->blkh.list);
	/* the list head itself never maps to a real block */
	bufmgr->blkh.blkaddr = NULL_ADDR;
	bufmgr->last_mapped_block = &bufmgr->blkh;

	for (i = 0; i < ARRAY_SIZE(bufmgr->mapped_buckets); i++)
		for (j = 0; j < ARRAY_SIZE(bufmgr->mapped_buckets[0]); j++)
			init_list_head(&bufmgr->mapped_buckets[i][j]);
	bufmgr->tail_blkaddr = startblk;
	bufmgr->sbi = sbi;
	return bufmgr;
}
51 
erofs_bupdate_mapped(struct erofs_buffer_block * bb)52 static void erofs_bupdate_mapped(struct erofs_buffer_block *bb)
53 {
54 	struct erofs_bufmgr *bmgr = bb->buffers.fsprivate;
55 	struct erofs_sb_info *sbi = bmgr->sbi;
56 	struct list_head *bkt;
57 
58 	if (bb->blkaddr == NULL_ADDR)
59 		return;
60 
61 	bkt = bmgr->mapped_buckets[bb->type] +
62 		(bb->buffers.off & (erofs_blksiz(sbi) - 1));
63 	list_del(&bb->mapped_list);
64 	list_add_tail(&bb->mapped_list, bkt);
65 }
66 
67 /* return occupied bytes in specific buffer block if succeed */
/*
 * Attach @incr bytes (aligned to @alignsize, with @extrasize inline bytes
 * that must share the final block) to buffer block @bb, optionally hooking
 * up buffer head @bh.  With @dryrun only feasibility is checked; no state
 * is modified.
 *
 * return occupied bytes in specific buffer block if succeed
 */
static int __erofs_battach(struct erofs_buffer_block *bb,
			   struct erofs_buffer_head *bh,
			   erofs_off_t incr,
			   unsigned int alignsize,
			   unsigned int extrasize,
			   bool dryrun)
{
	struct erofs_bufmgr *bmgr = bb->buffers.fsprivate;
	struct erofs_sb_info *sbi = bmgr->sbi;
	const unsigned int blksiz = erofs_blksiz(sbi);
	const unsigned int blkmask = blksiz - 1;
	erofs_off_t boff = bb->buffers.off;
	const erofs_off_t alignedoffset = roundup(boff, alignsize);
	/* oob: <0 fits in the current block, 0 fills it exactly, >0 spills over */
	const int oob = cmpsgn(roundup(((boff - 1) & blkmask) + 1, alignsize) +
					incr + extrasize, blksiz);
	bool tailupdate = false;
	erofs_blk_t blkaddr;

	if (oob >= 0) {
		/* the next buffer block should be NULL_ADDR all the time */
		if (oob && list_next_entry(bb, list)->blkaddr != NULL_ADDR)
			return -EINVAL;

		blkaddr = bb->blkaddr;
		if (blkaddr != NULL_ADDR) {
			/* a mapped block may only grow if it still ends at the tail */
			tailupdate = (bmgr->tail_blkaddr == blkaddr +
				      BLK_ROUND_UP(sbi, boff));
			if (oob && !tailupdate)
				return -EINVAL;
		}
	}

	if (!dryrun) {
		if (bh) {
			bh->off = alignedoffset;
			bh->block = bb;
			list_add_tail(&bh->list, &bb->buffers.list);
		}
		boff = alignedoffset + incr;
		bb->buffers.off = boff;
		/* need to update the tail_blkaddr */
		if (tailupdate)
			bmgr->tail_blkaddr = blkaddr +
						BLK_ROUND_UP(sbi, boff);
		erofs_bupdate_mapped(bb);
	}
	/* bytes occupied in the last block after attaching (1..blksiz) */
	return ((alignedoffset + incr - 1) & blkmask) + 1;
}
116 
/*
 * Grow the extent owned by @bh by @incr bytes (no alignment, no extra
 * inline bytes).  Only the last buffer head of its block can be expanded.
 */
int erofs_bh_balloon(struct erofs_buffer_head *bh, erofs_off_t incr)
{
	struct erofs_buffer_block *const blkb = bh->block;

	/* should be the tail bh in the corresponding buffer block */
	if (&blkb->buffers.list != bh->list.next)
		return -EINVAL;

	return __erofs_battach(blkb, NULL, incr, 1, 0, false);
}
127 
/*
 * Look for an existing buffer block that can absorb @size bytes (plus
 * @required_ext and @inline_ext trailing bytes) aligned to @alignsize.
 * On success *@bbp holds the chosen block, or NULL if a brand-new block
 * should be allocated instead.  Returns 0 or -ENOSPC if the inline part
 * cannot fit in one fs block at all.
 */
static int erofs_bfind_for_attach(struct erofs_bufmgr *bmgr,
				  int type, erofs_off_t size,
				  unsigned int required_ext,
				  unsigned int inline_ext,
				  unsigned int alignsize,
				  struct erofs_buffer_block **bbp)
{
	const unsigned int blksiz = erofs_blksiz(bmgr->sbi);
	struct erofs_buffer_block *cur, *bb;
	unsigned int used0, used_before, usedmax, used;
	int ret;

	/* bytes the final block would hold if a fresh block were used */
	used0 = ((size + required_ext) & (blksiz - 1)) + inline_ext;
	/* inline data should be in the same fs block */
	if (used0 > blksiz)
		return -ENOSPC;

	/* nothing to pack tightly: let the caller start a new block */
	if (!used0 || alignsize == blksiz)
		{
		*bbp = NULL;
		return 0;
	}

	usedmax = 0;
	bb = NULL;

	/* try to find a most-fit mapped buffer block first */
	if (size + required_ext + inline_ext >= blksiz)
		goto skip_mapped;

	/* walk buckets from the fullest usable occupancy downwards */
	used_before = rounddown(blksiz -
				(size + required_ext + inline_ext), alignsize);
	for (; used_before; --used_before) {
		struct list_head *bt = bmgr->mapped_buckets[type] + used_before;

		if (list_empty(bt))
			continue;
		cur = list_first_entry(bt, struct erofs_buffer_block,
				       mapped_list);

		/* last mapped block can be expended, don't handle it here */
		if (list_next_entry(cur, list)->blkaddr == NULL_ADDR) {
			DBG_BUGON(cur != bmgr->last_mapped_block);
			continue;
		}

		DBG_BUGON(cur->type != type);
		DBG_BUGON(cur->blkaddr == NULL_ADDR);
		DBG_BUGON(used_before != (cur->buffers.off & (blksiz - 1)));

		/* dryrun attach to verify the candidate really fits */
		ret = __erofs_battach(cur, NULL, size, alignsize,
				      required_ext + inline_ext, true);
		if (ret < 0) {
			DBG_BUGON(1);
			continue;
		}

		/* should contain all data in the current block */
		used = ret + required_ext + inline_ext;
		DBG_BUGON(used > blksiz);

		bb = cur;
		usedmax = used;
		break;
	}

skip_mapped:
	/* try to start from the last mapped one, which can be expended */
	cur = bmgr->last_mapped_block;
	if (cur == &bmgr->blkh)
		cur = list_next_entry(cur, list);
	for (; cur != &bmgr->blkh; cur = list_next_entry(cur, list)) {
		used_before = cur->buffers.off & (blksiz - 1);

		/* skip if buffer block is just full */
		if (!used_before)
			continue;

		/* skip if the entry which has different type */
		if (cur->type != type)
			continue;

		ret = __erofs_battach(cur, NULL, size, alignsize,
				      required_ext + inline_ext, true);
		if (ret < 0)
			continue;

		used = ((ret + required_ext) & (blksiz - 1)) + inline_ext;

		/* should contain inline data in current block */
		if (used > blksiz)
			continue;

		/*
		 * remaining should be smaller than before or
		 * larger than allocating a new buffer block
		 */
		if (used < used_before && used < used0)
			continue;

		/* keep the candidate whose final block ends up fullest */
		if (usedmax < used) {
			bb = cur;
			usedmax = used;
		}
	}
	*bbp = bb;
	return 0;
}
235 
/*
 * Allocate a buffer head of @size bytes of @type, reusing an existing
 * buffer block when possible or creating a new one.
 *
 * @required_ext: bytes that must land in the same final block as the data
 * @inline_ext:   additional inline bytes with the same constraint
 *
 * Returns the new buffer head or an ERR_PTR() on failure.
 *
 * Fixes over the previous version: the duplicated bh allocation in both
 * branches is merged, and a freshly created buffer block is now unlinked
 * and freed if the final __erofs_battach() fails — previously it was
 * leaked while still linked into the block list.
 */
struct erofs_buffer_head *erofs_balloc(struct erofs_bufmgr *bmgr,
				       int type, erofs_off_t size,
				       unsigned int required_ext,
				       unsigned int inline_ext)
{
	struct erofs_buffer_block *bb;
	struct erofs_buffer_head *bh;
	unsigned int alignsize;
	bool newbb = false;
	int ret;

	ret = get_alignsize(bmgr->sbi, type, &type);
	if (ret < 0)
		return ERR_PTR(ret);

	DBG_BUGON(type < 0 || type > META);
	alignsize = ret;

	/* try to find if we could reuse an allocated buffer block */
	ret = erofs_bfind_for_attach(bmgr, type, size, required_ext, inline_ext,
				     alignsize, &bb);
	if (ret)
		return ERR_PTR(ret);

	if (!bb) {
		/* get a new buffer block instead */
		bb = malloc(sizeof(*bb));
		if (!bb)
			return ERR_PTR(-ENOMEM);

		bb->type = type;
		bb->blkaddr = NULL_ADDR;
		bb->buffers.off = 0;
		bb->buffers.fsprivate = bmgr;
		init_list_head(&bb->buffers.list);
		/* DATA blocks go right after the last mapped block */
		if (type == DATA)
			list_add(&bb->list,
				 &bmgr->last_mapped_block->list);
		else
			list_add_tail(&bb->list, &bmgr->blkh.list);
		init_list_head(&bb->mapped_list);
		newbb = true;
	}

	bh = malloc(sizeof(*bh));
	if (!bh) {
		ret = -ENOMEM;
		goto err;
	}

	ret = __erofs_battach(bb, bh, size, alignsize,
			      required_ext + inline_ext, false);
	if (ret < 0) {
		free(bh);
		goto err;
	}
	return bh;

err:
	/* don't leak a freshly created (still unmapped) buffer block */
	if (newbb) {
		list_del(&bb->list);
		free(bb);
	}
	return ERR_PTR(ret);
}
296 
erofs_battach(struct erofs_buffer_head * bh,int type,unsigned int size)297 struct erofs_buffer_head *erofs_battach(struct erofs_buffer_head *bh,
298 					int type, unsigned int size)
299 {
300 	struct erofs_buffer_block *const bb = bh->block;
301 	struct erofs_bufmgr *bmgr = bb->buffers.fsprivate;
302 	struct erofs_buffer_head *nbh;
303 	unsigned int alignsize;
304 	int ret = get_alignsize(bmgr->sbi, type, &type);
305 
306 	if (ret < 0)
307 		return ERR_PTR(ret);
308 	alignsize = ret;
309 
310 	/* should be the tail bh in the corresponding buffer block */
311 	if (bh->list.next != &bb->buffers.list)
312 		return ERR_PTR(-EINVAL);
313 
314 	nbh = malloc(sizeof(*nbh));
315 	if (!nbh)
316 		return ERR_PTR(-ENOMEM);
317 
318 	ret = __erofs_battach(bb, nbh, size, alignsize, 0, false);
319 	if (ret < 0) {
320 		free(nbh);
321 		return ERR_PTR(ret);
322 	}
323 	return nbh;
324 }
325 
__erofs_mapbh(struct erofs_buffer_block * bb)326 static erofs_blk_t __erofs_mapbh(struct erofs_buffer_block *bb)
327 {
328 	struct erofs_bufmgr *bmgr = bb->buffers.fsprivate;
329 	erofs_blk_t blkaddr;
330 
331 	if (bb->blkaddr == NULL_ADDR) {
332 		bb->blkaddr = bmgr->tail_blkaddr;
333 		bmgr->last_mapped_block = bb;
334 		erofs_bupdate_mapped(bb);
335 	}
336 
337 	blkaddr = bb->blkaddr + BLK_ROUND_UP(bmgr->sbi, bb->buffers.off);
338 	if (blkaddr > bmgr->tail_blkaddr)
339 		bmgr->tail_blkaddr = blkaddr;
340 	return blkaddr;
341 }
342 
/*
 * Map every still-unmapped buffer block up to and including @bb (all of
 * them when @bb is NULL), starting from the last mapped block cursor.
 * Returns @bb's block address if it was already mapped, otherwise the
 * updated tail block address.
 */
erofs_blk_t erofs_mapbh(struct erofs_bufmgr *bmgr,
			struct erofs_buffer_block *bb)
{
	struct erofs_buffer_block *t;

	/* the manager can be recovered from the block itself */
	if (!bmgr)
		bmgr = bb->buffers.fsprivate;
	t = bmgr->last_mapped_block;

	/* already mapped: nothing to walk */
	if (bb && bb->blkaddr != NULL_ADDR)
		return bb->blkaddr;
	do {
		t = list_next_entry(t, list);
		if (t == &bmgr->blkh)
			break;

		/* everything after the cursor must still be unmapped */
		DBG_BUGON(t->blkaddr != NULL_ADDR);
		(void)__erofs_mapbh(t);
	} while (t != bb);
	return bmgr->tail_blkaddr;
}
364 
erofs_bfree(struct erofs_buffer_block * bb)365 static void erofs_bfree(struct erofs_buffer_block *bb)
366 {
367 	struct erofs_bufmgr *bmgr = bb->buffers.fsprivate;
368 
369 	DBG_BUGON(!list_empty(&bb->buffers.list));
370 
371 	if (bb == bmgr->last_mapped_block)
372 		bmgr->last_mapped_block = list_prev_entry(bb, list);
373 
374 	list_del(&bb->mapped_list);
375 	list_del(&bb->list);
376 	free(bb);
377 }
378 
/*
 * Flush every buffer block before @bb (all of them when @bb is NULL):
 * map it, run each bh's flush op, pad the final partial block with
 * zeroes on disk, account metadata blocks and free the block.  Blocks
 * containing a skip-write bh are left intact for a later pass.
 * Returns 0 or the first negative error from a flush op.
 */
int erofs_bflush(struct erofs_bufmgr *bmgr,
		 struct erofs_buffer_block *bb)
{
	struct erofs_sb_info *sbi = bmgr->sbi;
	const unsigned int blksiz = erofs_blksiz(sbi);
	struct erofs_buffer_block *p, *n;
	erofs_blk_t blkaddr;

	list_for_each_entry_safe(p, n, &bmgr->blkh.list, list) {
		struct erofs_buffer_head *bh, *nbh;
		unsigned int padding;
		bool skip = false;
		int ret;

		if (p == bb)
			break;

		/* ensure the block has an on-disk address before writing */
		blkaddr = __erofs_mapbh(p);

		list_for_each_entry_safe(bh, nbh, &p->buffers.list, list) {
			if (bh->op == &erofs_skip_write_bhops) {
				/* keep this whole block; flush it later */
				skip = true;
				continue;
			}

			/* flush and remove bh */
			ret = bh->op->flush(bh);
			if (ret < 0)
				return ret;
		}

		if (skip)
			continue;

		/* zero-fill the unused tail of the last block on disk */
		padding = blksiz - (p->buffers.off & (blksiz - 1));
		if (padding != blksiz)
			erofs_dev_fillzero(sbi, erofs_pos(sbi, blkaddr) - padding,
					   padding, true);

		/* only non-DATA blocks count towards the metadata total */
		if (p->type != DATA)
			bmgr->metablkcnt +=
				BLK_ROUND_UP(sbi, p->buffers.off);
		erofs_dbg("block %u to %u flushed", p->blkaddr, blkaddr - 1);
		erofs_bfree(p);
	}
	return 0;
}
426 
/*
 * Drop @bh without writing it.  If its buffer block becomes empty it is
 * freed too; with @tryrevoke, and when the block was the last mapped one
 * at the tail, the tail block address is rolled back to reclaim the
 * space on disk.
 */
void erofs_bdrop(struct erofs_buffer_head *bh, bool tryrevoke)
{
	struct erofs_buffer_block *const bb = bh->block;
	struct erofs_bufmgr *bmgr = bb->buffers.fsprivate;
	struct erofs_sb_info *sbi = bmgr->sbi;
	const erofs_blk_t blkaddr = bh->block->blkaddr;
	bool rollback = false;

	/* tail_blkaddr could be rolled back after revoking all bhs */
	if (tryrevoke && blkaddr != NULL_ADDR &&
	    bmgr->tail_blkaddr == blkaddr + BLK_ROUND_UP(sbi, bb->buffers.off))
		rollback = true;

	/* swap in the drop op so any later flush is a no-op, then detach */
	bh->op = &erofs_drop_directly_bhops;
	erofs_bh_flush_generic_end(bh);

	/* other bhs still reference the block: keep it alive */
	if (!list_empty(&bb->buffers.list))
		return;

	/* space is really kept on disk only when not rolled back */
	if (!rollback && bb->type != DATA)
		bmgr->metablkcnt += BLK_ROUND_UP(sbi, bb->buffers.off);
	erofs_bfree(bb);
	if (rollback)
		bmgr->tail_blkaddr = blkaddr;
}
452 
/* Total number of metadata blocks accounted by flush/drop so far. */
erofs_blk_t erofs_total_metablocks(struct erofs_bufmgr *bmgr)
{
	return bmgr->metablkcnt;
}
457 
/* Release the buffer manager allocated by erofs_buffer_init(). */
void erofs_buffer_exit(struct erofs_bufmgr *bmgr)
{
	free(bmgr);
}
462