/* min(a, b): evaluates to the smaller of its two arguments.
 * NOTE(review): as with any such macro, an argument with side effects
 * is evaluated twice. */
8 #define min(a, b) (((b) < (a))?(b):(a))
/* btget: read the contents of logical block `bl` of the b-tree rooted at
 * `tree` into `buf` (at most `len` bytes), descending through the levels
 * of indirect blocks until a leaf is reached.  Returns the number of
 * bytes read, or a negative value on a store error.
 * NOTE(review): only a fragment of this function is visible here; `d` is
 * presumably the node's depth (taken from its `d` field) and the visible
 * statements sit inside a descent loop -- confirm against the full
 * source. */
10 ssize_t btget(struct store *st, struct btnode *tree, block_t bl, void *buf, size_t len)
/* One indirect block's worth of child node records. */
14 struct btnode indir[BT_INDSZ];
23 /* This check should really only be necessary on the first
24 * iteration, but I felt it was easier to put it in the
/* Range check: a block index that needs more than `d` levels of
 * indirection cannot exist in this (sub)tree. */
26 if((bl >> (d * BT_INDBITS)) > 0) {
/* Presumably the depth-0 case: this node points directly at the data. */
32 return(storeget(st, buf, len, &tree->a));
34 /* Luckily, this is tail recursive */
/* Fetch this level's indirect block from the store. */
35 if((sz = storeget(st, indir, BT_INDBSZ, &tree->a)) < 0)
/* Number of child entries actually present in the indirect block. */
37 c = sz / sizeof(struct btnode);
/* Child slot covering block `bl` at this depth. */
38 sel = bl >> ((d - 1) * BT_INDBITS);
/* Strip the index bits consumed by this level before descending. */
44 bl &= (1LL << ((d - 1) * BT_INDBITS)) - 1;
/* btputleaf: write the data for a single leaf operation `op` into the
 * store, updating `leaf` accordingly.  If the operation carries no
 * preset buffer, op->fillfn is invoked to produce the data first.
 * NOTE(review): fragment only -- the control flow between the visible
 * lines is not shown. */
49 static int btputleaf(struct store *st, struct btnode *leaf, struct btop *op, block_t bloff)
/* NOTE(review): malloc() result is not checked before use -- a NULL
 * return would be handed straight to fillfn; verify in the full source. */
57 buf = op->buf = malloc(op->len);
/* Caller-supplied callback fills the freshly allocated buffer. */
58 if(op->fillfn(buf, op->len, op->pdata))
/* Store the leaf data, obtaining its address in `na`. */
61 ret = storeput(st, op->buf, op->len, &na);
/* countops: count how many of the leading entries of `ops` fall inside
 * the subtree that starts at block offset `bloff` and spans `maxbl`
 * blocks.
 * NOTE(review): fragment -- relies on `ops` being sorted by block
 * number (see btsortops); presumably the loop exits at the first
 * out-of-range op and returns `i`. */
71 static int countops(struct btop *ops, int numops, block_t bloff, block_t maxbl)
75 for(i = 0; i < numops; i++) {
/* First op past the subtree's range ends the run. */
76 if(ops[i].blk - bloff >= maxbl)
83 * btputmany() in many ways makes the code uglier, but it saves a
84 * *lot* of space, since it doesn't need to store intermediary blocks.
/* btputmany2: recursive worker behind btputmany().  Applies the sorted
 * operations in `ops` to the subtree rooted at `tree`, whose first
 * logical block is `bloff`.
 * NOTE(review): large parts of this function are not visible in this
 * fragment; the comments below are inferred from the visible lines and
 * should be confirmed against the full source. */
86 static int btputmany2(struct store *st, struct btnode *tree, struct btop *ops, int numops, block_t bloff)
88 int i, subops, d, f, hasid;
89 block_t c, sel, bl, nextsz;
91 struct btnode indir[BT_INDSZ];
/* Walk the ops; `i` is advanced inside the body as whole sub-runs are
 * consumed by the recursive calls below. */
99 for(i = 0; i < numops; ) {
/* Block number relative to this subtree. */
100 bl = ops[i].blk - bloff;
/* Depth 0 and offset 0: this node is itself the target leaf. */
102 if((d == 0) && (bl == 0)) {
103 if(btputleaf(st, tree, ops, bloff))
/* Presumably `f` is the tree's "full" flag: writing one past the last
 * representable block grows the tree by a level of indirection. */
109 if(f && (bl == (1LL << (d * BT_INDBITS)))) {
110 /* New level of indirection */
112 if(storeput(st, indir, c * sizeof(struct btnode), &na))
127 /* Assume that numops == largest block number + 1 -- gaps
128 * will be detected as errors later */
/* NOTE(review): `bl <<= BT_INDBITS` looks like it should be
 * `bl >>= BT_INDBITS` for a depth computation -- as written the loop
 * cannot terminate by reaching zero; verify against the full source. */
129 for(bl = numops - 1; bl > 0; d++, bl <<= BT_INDBITS);
134 /* Get indirect block */
136 if((sz = storeget(st, indir, BT_INDBSZ, &tree->a)) < 0)
/* Entries actually present in the indirect block. */
138 c = sz / sizeof(struct btnode);
/* Child slot covering `bl` at this depth. */
143 sel = bl >> ((d - 1) * BT_INDBITS);
/* Last child is not marked full (bit 0x80 of `d`) or is shallower than
 * d-1: it can still absorb more blocks. */
151 if((c > 0) && (!(indir[c - 1].d & 0x80) || ((indir[c - 1].d & 0x7f) < (d - 1)))) {
/* Number of logical blocks spanned by each depth-(d-1) child. */
158 nextsz = 1LL << ((d - 1) * BT_INDBITS);
/* Batch every op that lands in the selected child and recurse once for
 * the whole run, instead of once per op. */
159 subops = countops(ops + i, numops - i, bloff + (sel * nextsz), nextsz);
160 if(btputmany2(st, &indir[sel], ops + i, subops, bloff + (sel * nextsz)))
/* Last slot filled with a full depth-(d-1) child: presumably marks this
 * level full as well. */
164 if((sel == BT_INDSZ - 1) && (indir[sel].d == ((d - 1) | 0x80))) {
170 if(storeput(st, indir, c * sizeof(struct btnode), &na))
/* btputmany: apply `numops` operations from `ops` (sorted by block
 * number; see btsortops) to the b-tree rooted at `tree`.  Thin public
 * wrapper around btputmany2() starting at block offset zero (the root).
 * Returns btputmany2()'s result: 0 on success, non-zero on error. */
int btputmany(struct store *st, struct btnode *tree, struct btop *ops, int numops)
{
    int rv;
    
    rv = btputmany2(st, tree, ops, numops, 0);
    return(rv);
}
/* btput: store `buf` (of `len` bytes) as logical block `bl` of the
 * tree, implemented as a single-element btputmany() call.
 * NOTE(review): the construction of the one-element `ops` array is not
 * visible in this fragment (presumably filled via btmkop()). */
182 int btput(struct store *st, struct btnode *tree, block_t bl, void *buf, size_t len)
189 return(btputmany(st, tree, ops, 1));
/* btmkop: initialize an operation record for writing `buf`/`len` as
 * logical block `bl`, for later use with btputmany().
 * NOTE(review): the field assignments following the memset are not
 * visible in this fragment. */
192 void btmkop(struct btop *op, block_t bl, void *buf, size_t len)
/* Clear everything first so unused fields (e.g. fillfn/pdata) are 0/NULL. */
194 memset(op, 0, sizeof(*op));
200 static int opcmp(const struct btop **op1, const struct btop **op2)
202 return((*op1)->blk - (*op2)->blk);
/* btsortops: sort an array of operations by ascending block number, as
 * btputmany() requires.
 * NOTE(review): qsort() hands the comparator pointers to the array
 * *elements* (struct btop *); verify that opcmp() dereferences its
 * arguments accordingly -- a `struct btop **` comparator would
 * misinterpret them. */
205 void btsortops(struct btop *ops, int numops)
207 qsort(ops, numops, sizeof(*ops), (int (*)(const void *, const void *))opcmp);
/* btappend: append `buf` (of `len` bytes) as the next logical block at
 * the end of the tree, recursing down the rightmost spine and growing
 * the depth when the current subtree is full.
 * NOTE(review): fragment only -- the branch structure between the
 * visible lines is inferred.  The `d` byte of a node appears to encode
 * depth in its low 7 bits and a "subtree full" marker in bit 0x80. */
213 int btappend(struct store *st, struct btnode *tree, void *buf, size_t len)
216 struct btnode indir[BT_INDSZ];
/* Presumably the empty-tree case: store the data block directly. */
225 if(storeput(st, buf, len, &na))
/* Write a fresh two-entry indirect block (old root + new block). */
230 if(storeput(st, indir, 2 * sizeof(*indir), &na))
238 if(storeput(st, buf, len, &na))
/* Read this node's indirect block to examine its last child. */
245 if((sz = storeget(st, indir, BT_INDBSZ, &tree->a)) < 0)
247 c = sz / sizeof(struct btnode);
/* Last child not marked full, or shallower than d-1: descend into it. */
248 if(!(indir[c - 1].d & 0x80) || ((indir[c - 1].d & 0x7f) < (d - 1))) {
249 if(btappend(st, &indir[c - 1], buf, len))
/* Rewrite the updated indirect block. */
251 if(storeput(st, indir, sz, &na))
254 if((indir[c - 1].d & 0x80) && ((indir[c - 1].d & 0x7f) == (d - 1)))
259 if(storeput(st, buf, len, &na))
/* Indirect block grows by one entry for the newly added child. */
263 if(storeput(st, indir, sz + sizeof(struct btnode), &na))
/* btcount: return the number of logical blocks stored in the tree.
 * Walks down the rightmost spine; every full subtree of depth d
 * contributes exactly 1 << (d * BT_INDBITS) blocks, so only the partial
 * last child at each level needs to be descended into.
 * NOTE(review): fragment only -- the enclosing loop and the
 * initialization of `d`/`f`/`ret` are inferred. */
270 block_t btcount(struct store *st, struct btnode *tree)
273 struct btnode indir[BT_INDSZ];
/* Presumably the node is marked full: it spans a whole power-of-two
 * subtree, so its size is known without any store reads. */
281 return(1LL << (d * BT_INDBITS));
288 if((sz = storeget(st, indir, BT_INDBSZ, &tree->a)) < 0)
290 c = sz / sizeof(struct btnode);
/* All children but the last are complete subtrees of depth d-1. */
291 ret += (c - 1) * (1LL << ((d - 1) * BT_INDBITS));
/* Unpack the last child's depth (low 7 bits) and full flag (bit 0x80). */
292 d = indir[c - 1].d & 0x7f;
293 f = indir[c - 1].d & 0x80;
/* Last child full: its contribution is exact; done. */
295 return(ret + (1LL << (d * BT_INDBITS)));
/* Otherwise descend into the partial last child and keep counting. */
296 tree = &indir[c - 1];