// SPDX-License-Identifier: GPL-2.0+
/*
 * erofs-utils/lib/cache.c
 *
 * Copyright (C) 2018-2019 HUAWEI, Inc.
 *             http://www.huawei.com/
 * Created by Miao Xie <miaoxie@huawei.com>
 * with heavy changes by Gao Xiang <gaoxiang25@huawei.com>
 */
#include <stdlib.h>
#include <erofs/cache.h>
#include "erofs/io.h"
#include "erofs/print.h"

/*
 * List head of all buffer blocks; its sentinel blkaddr is NULL_ADDR so the
 * head is never mistaken for a block that has already been mapped.
 */
static struct erofs_buffer_block blkh = {
	.list = LIST_HEAD_INIT(blkh.list),
	.blkaddr = NULL_ADDR,
};
/* first block address right after the last mapped buffer block */
static erofs_blk_t tail_blkaddr;
20
/* flush hook that discards the buffer head without performing any I/O */
static bool erofs_bh_flush_drop_directly(struct erofs_buffer_head *bh)
{
	return erofs_bh_flush_generic_end(bh);
}
25
/* bhops for buffer heads whose content needs no write-out on flush */
struct erofs_bhops erofs_drop_directly_bhops = {
	.flush = erofs_bh_flush_drop_directly,
};
29
/* flush hook that always refuses to flush, keeping the buffer head alive */
static bool erofs_bh_flush_skip_write(struct erofs_buffer_head *bh)
{
	return false;
}
34
/*
 * bhops for buffer heads written out separately by their owner
 * (used e.g. for the superblock bh created by erofs_buffer_init())
 */
struct erofs_bhops erofs_skip_write_bhops = {
	.flush = erofs_bh_flush_skip_write,
};
38
erofs_bh_flush_generic_write(struct erofs_buffer_head * bh,void * buf)39 int erofs_bh_flush_generic_write(struct erofs_buffer_head *bh, void *buf)
40 {
41 struct erofs_buffer_head *nbh = list_next_entry(bh, list);
42 erofs_off_t offset = erofs_btell(bh, false);
43
44 DBG_BUGON(nbh->off < bh->off);
45 return dev_write(buf, offset, nbh->off - bh->off);
46 }
47
erofs_bh_flush_buf_write(struct erofs_buffer_head * bh)48 static bool erofs_bh_flush_buf_write(struct erofs_buffer_head *bh)
49 {
50 int err = erofs_bh_flush_generic_write(bh, bh->fsprivate);
51
52 if (err)
53 return false;
54 free(bh->fsprivate);
55 return erofs_bh_flush_generic_end(bh);
56 }
57
/* bhops that write bh->fsprivate to the device and free it on flush */
struct erofs_bhops erofs_buf_write_bhops = {
	.flush = erofs_bh_flush_buf_write,
};
61
62 /* return buffer_head of erofs super block (with size 0) */
erofs_buffer_init(void)63 struct erofs_buffer_head *erofs_buffer_init(void)
64 {
65 struct erofs_buffer_head *bh = erofs_balloc(META, 0, 0, 0);
66
67 if (IS_ERR(bh))
68 return bh;
69
70 bh->op = &erofs_skip_write_bhops;
71 return bh;
72 }
73
/* return occupied bytes in specific buffer block if succeed */
static int __erofs_battach(struct erofs_buffer_block *bb,
			   struct erofs_buffer_head *bh,
			   erofs_off_t incr,
			   unsigned int alignsize,
			   unsigned int extrasize,
			   bool dryrun)
{
	/* byte offset of the new allocation after alignment */
	const erofs_off_t alignedoffset = roundup(bb->buffers.off, alignsize);
	/*
	 * <0 / 0 / >0: the allocation (plus @extrasize tail data) ends
	 * below / exactly at / beyond the current EROFS block boundary
	 */
	const int oob = cmpsgn(roundup(bb->buffers.off % EROFS_BLKSIZ,
				       alignsize) + incr + extrasize,
			       EROFS_BLKSIZ);
	bool tailupdate = false;
	erofs_blk_t blkaddr;

	if (oob >= 0) {
		/* the next buffer block should be NULL_ADDR all the time */
		if (oob && list_next_entry(bb, list)->blkaddr != NULL_ADDR)
			return -EINVAL;

		blkaddr = bb->blkaddr;
		if (blkaddr != NULL_ADDR) {
			/*
			 * an already-mapped buffer block may only grow past
			 * its end if it is the last mapped block, i.e. it
			 * currently owns tail_blkaddr
			 */
			tailupdate = (tail_blkaddr == blkaddr +
				      BLK_ROUND_UP(bb->buffers.off));
			if (oob && !tailupdate)
				return -EINVAL;
		}
	}

	if (!dryrun) {
		if (bh) {
			bh->off = alignedoffset;
			bh->block = bb;
			list_add_tail(&bh->list, &bb->buffers.list);
		}
		bb->buffers.off = alignedoffset + incr;
		/* need to update the tail_blkaddr */
		if (tailupdate)
			tail_blkaddr = blkaddr + BLK_ROUND_UP(bb->buffers.off);
	}
	return (alignedoffset + incr) % EROFS_BLKSIZ;
}
116
/* expand the tail buffer head of its buffer block by @incr bytes */
int erofs_bh_balloon(struct erofs_buffer_head *bh, erofs_off_t incr)
{
	struct erofs_buffer_block *const bb = bh->block;

	/* only the last bh of a buffer block can be expanded in place */
	if (bh->list.next != &bb->buffers.list)
		return -EINVAL;

	/* alignsize 1: grow the existing allocation without realignment */
	return __erofs_battach(bb, NULL, incr, 1, 0, false);
}
127
/*
 * Allocate a buffer head of @size bytes of @type data, preferring the
 * most-used existing buffer block of the same type to reduce internal
 * fragmentation; fall back to a brand-new buffer block otherwise.
 *
 * @required_ext / @inline_ext are extra byte counts that must fit in the
 * same EROFS block as the allocation (e.g. inline data) — presumably; they
 * only participate in the fit/score arithmetic here.
 *
 * Returns the new buffer head or ERR_PTR(-errno).
 */
struct erofs_buffer_head *erofs_balloc(int type, erofs_off_t size,
				       unsigned int required_ext,
				       unsigned int inline_ext)
{
	struct erofs_buffer_block *cur, *bb;
	struct erofs_buffer_head *bh;
	unsigned int alignsize, used0, usedmax;

	int ret = get_alignsize(type, &type);

	if (ret < 0)
		return ERR_PTR(ret);
	alignsize = ret;

	/* bytes this allocation would occupy in a brand-new buffer block */
	used0 = (size + required_ext) % EROFS_BLKSIZ + inline_ext;
	usedmax = 0;
	bb = NULL;

	/* scan existing blocks, keeping the fullest one that still fits */
	list_for_each_entry(cur, &blkh.list, list) {
		unsigned int used_before, used;

		used_before = cur->buffers.off % EROFS_BLKSIZ;

		/* skip if buffer block is just full */
		if (!used_before)
			continue;

		/* skip if the entry which has different type */
		if (cur->type != type)
			continue;

		/* dry run to check whether the allocation fits at all */
		ret = __erofs_battach(cur, NULL, size, alignsize,
				      required_ext + inline_ext, true);
		if (ret < 0)
			continue;

		used = (ret + required_ext) % EROFS_BLKSIZ + inline_ext;

		/* should contain inline data in current block */
		if (used > EROFS_BLKSIZ)
			continue;

		/*
		 * remaining should be smaller than before or
		 * larger than allocating a new buffer block
		 */
		if (used < used_before && used < used0)
			continue;

		if (usedmax < used) {
			bb = cur;
			usedmax = used;
		}
	}

	if (bb) {
		bh = malloc(sizeof(struct erofs_buffer_head));
		if (!bh)
			return ERR_PTR(-ENOMEM);
		goto found;
	}

	/* allocate a new buffer block */
	if (used0 > EROFS_BLKSIZ)
		return ERR_PTR(-ENOSPC);

	bb = malloc(sizeof(struct erofs_buffer_block));
	if (!bb)
		return ERR_PTR(-ENOMEM);

	bb->type = type;
	bb->blkaddr = NULL_ADDR;
	bb->buffers.off = 0;
	init_list_head(&bb->buffers.list);

	bh = malloc(sizeof(struct erofs_buffer_head));
	if (!bh) {
		/*
		 * bb is intentionally not yet linked into blkh.list here:
		 * the original code freed a linked bb, leaving a dangling
		 * list node behind for later traversals to dereference.
		 */
		free(bb);
		return ERR_PTR(-ENOMEM);
	}
	/* link only once the whole allocation is guaranteed to succeed */
	list_add_tail(&bb->list, &blkh.list);
found:
	ret = __erofs_battach(bb, bh, size, alignsize,
			      required_ext + inline_ext, false);
	if (ret < 0) {
		/* don't leak the unattached buffer head on failure */
		free(bh);
		return ERR_PTR(ret);
	}
	return bh;
}
216
erofs_battach(struct erofs_buffer_head * bh,int type,unsigned int size)217 struct erofs_buffer_head *erofs_battach(struct erofs_buffer_head *bh,
218 int type, unsigned int size)
219 {
220 struct erofs_buffer_block *const bb = bh->block;
221 struct erofs_buffer_head *nbh;
222 unsigned int alignsize;
223 int ret = get_alignsize(type, &type);
224
225 if (ret < 0)
226 return ERR_PTR(ret);
227 alignsize = ret;
228
229 /* should be the tail bh in the corresponding buffer block */
230 if (bh->list.next != &bb->buffers.list)
231 return ERR_PTR(-EINVAL);
232
233 nbh = malloc(sizeof(*nbh));
234 if (!nbh)
235 return ERR_PTR(-ENOMEM);
236
237 ret = __erofs_battach(bb, nbh, size, alignsize, 0, false);
238 if (ret < 0) {
239 free(nbh);
240 return ERR_PTR(ret);
241 }
242 return nbh;
243
244 }
245
__erofs_mapbh(struct erofs_buffer_block * bb)246 static erofs_blk_t __erofs_mapbh(struct erofs_buffer_block *bb)
247 {
248 erofs_blk_t blkaddr;
249
250 if (bb->blkaddr == NULL_ADDR)
251 bb->blkaddr = tail_blkaddr;
252
253 blkaddr = bb->blkaddr + BLK_ROUND_UP(bb->buffers.off);
254 if (blkaddr > tail_blkaddr)
255 tail_blkaddr = blkaddr;
256
257 return blkaddr;
258 }
259
/*
 * Map pending buffer blocks to block addresses in list order.  With
 * @end, mapping includes @bb itself; without it, mapping stops just
 * before @bb.  A NULL @bb maps every buffer block.  Returns the
 * resulting tail block address.
 */
erofs_blk_t erofs_mapbh(struct erofs_buffer_block *bb, bool end)
{
	struct erofs_buffer_block *t, *nt;

	/* nothing to do if @bb was already mapped earlier */
	if (!bb || bb->blkaddr == NULL_ADDR) {
		list_for_each_entry_safe(t, nt, &blkh.list, list) {
			/* w/o @end: stop before @bb (or at the list head) */
			if (!end && (t == bb || nt == &blkh))
				break;
			(void)__erofs_mapbh(t);
			/* with @end: stop right after mapping @bb itself */
			if (end && t == bb)
				break;
		}
	}
	return tail_blkaddr;
}
275
/*
 * Flush all buffer blocks strictly before @bb (NULL flushes everything)
 * to the device, releasing those flushed in full.  Returns false as soon
 * as some bh's preflush hook refuses; blocks with any unflushed bh are
 * kept on the list to be retried by a later call.
 */
bool erofs_bflush(struct erofs_buffer_block *bb)
{
	struct erofs_buffer_block *p, *n;
	erofs_blk_t blkaddr;

	list_for_each_entry_safe(p, n, &blkh.list, list) {
		struct erofs_buffer_head *bh, *nbh;
		unsigned int padding;
		bool skip = false;

		if (p == bb)
			break;

		/* check if the buffer block can flush */
		list_for_each_entry(bh, &p->buffers.list, list)
			if (bh->op->preflush && !bh->op->preflush(bh))
				return false;

		/* make sure the block is mapped before it is written out */
		blkaddr = __erofs_mapbh(p);

		list_for_each_entry_safe(bh, nbh, &p->buffers.list, list) {
			/* flush and remove bh */
			if (!bh->op->flush(bh))
				skip = true;
		}

		/* some bh declined to flush: keep this block for a retry */
		if (skip)
			continue;

		/* zero-pad the unused tail of the last device block */
		padding = EROFS_BLKSIZ - p->buffers.off % EROFS_BLKSIZ;
		if (padding != EROFS_BLKSIZ)
			dev_fillzero(blknr_to_addr(blkaddr) - padding,
				     padding, true);

		/* every bh must have detached itself by now */
		DBG_BUGON(!list_empty(&p->buffers.list));

		erofs_dbg("block %u to %u flushed", p->blkaddr, blkaddr - 1);

		list_del(&p->list);
		free(p);
	}
	return true;
}
319
/*
 * Drop @bh without writing it out.  With @tryrevoke, if the containing
 * buffer block turns out to be the last mapped one, tail_blkaddr is
 * rolled back so its space can be reused by later allocations.
 */
void erofs_bdrop(struct erofs_buffer_head *bh, bool tryrevoke)
{
	struct erofs_buffer_block *const bb = bh->block;
	const erofs_blk_t blkaddr = bh->block->blkaddr;
	bool rollback = false;

	/* tail_blkaddr could be rolled back after revoking all bhs */
	if (tryrevoke && blkaddr != NULL_ADDR &&
	    tail_blkaddr == blkaddr + BLK_ROUND_UP(bb->buffers.off))
		rollback = true;

	/* detach @bh through the drop-directly path (no I/O performed) */
	bh->op = &erofs_drop_directly_bhops;
	erofs_bh_flush_generic_end(bh);

	if (!list_empty(&bb->buffers.list))
		return;

	/* the buffer block became empty: release it as well */
	list_del(&bb->list);
	free(bb);

	if (rollback)
		tail_blkaddr = blkaddr;
}