// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/hpfs/anode.c
 *
 *  Mikulas Patocka (mikulas@artax.karlin.mff.cuni.cz), 1998-1999
 *
 *  handling HPFS anode tree that contains file allocation info
 */

#include "hpfs_fn.h"

/* Find a sector in allocation tree */

secno hpfs_bplus_lookup(struct super_block *s, struct inode *inode,
		   struct bplus_header *btree, unsigned sec,
		   struct buffer_head *bh)
{
	anode_secno a = -1;
	struct anode *anode;
	int i;
	int c1, c2 = 0;
	go_down:
	if (hpfs_sb(s)->sb_chk) if (hpfs_stop_cycles(s, a, &c1, &c2, "hpfs_bplus_lookup")) return -1;
	if (bp_internal(btree)) {
		for (i = 0; i < btree->n_used_nodes; i++)
			if (le32_to_cpu(btree->u.internal[i].file_secno) > sec) {
				a = le32_to_cpu(btree->u.internal[i].down);
				brelse(bh);
				if (!(anode = hpfs_map_anode(s, a, &bh))) return -1;
				btree = GET_BTREE_PTR(&anode->btree);
				goto go_down;
			}
		hpfs_error(s, "sector %08x not found in internal anode %08x", sec, a);
		brelse(bh);
		return -1;
	}
	for (i = 0; i < btree->n_used_nodes; i++)
		if (le32_to_cpu(btree->u.external[i].file_secno) <= sec &&
		    le32_to_cpu(btree->u.external[i].file_secno) + le32_to_cpu(btree->u.external[i].length) > sec) {
			a = le32_to_cpu(btree->u.external[i].disk_secno) + sec - le32_to_cpu(btree->u.external[i].file_secno);
			if (hpfs_sb(s)->sb_chk) if (hpfs_chk_sectors(s, a, 1, "data")) {
				brelse(bh);
				return -1;
			}
			if (inode) {
				struct hpfs_inode_info *hpfs_inode = hpfs_i(inode);
				hpfs_inode->i_file_sec = le32_to_cpu(btree->u.external[i].file_secno);
				hpfs_inode->i_disk_sec = le32_to_cpu(btree->u.external[i].disk_secno);
				hpfs_inode->i_n_secs = le32_to_cpu(btree->u.external[i].length);
			}
			brelse(bh);
			return a;
		}
	hpfs_error(s, "sector %08x not found in external anode %08x", sec, a);
	brelse(bh);
	return -1;
}
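
/*
 * Layout note for the lookup above: external entries describe extents, so
 * a file sector 'sec' that falls inside entry i maps to disk sector
 *
 *	disk_secno[i] + (sec - file_secno[i])
 *
 * which is the arithmetic used to compute 'a'.  Internal entries are
 * (file_secno, down) pairs sorted by file_secno, and the walk descends
 * into the first child whose key is greater than 'sec'.
 */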

/* Add a sector to tree */

secno hpfs_add_sector_to_btree(struct super_block *s, secno node, int fnod, unsigned fsecno)
{
	struct bplus_header *btree;
	struct anode *anode = NULL, *ranode = NULL;
	struct fnode *fnode;
	anode_secno a, na = -1, ra, up = -1;
	secno se;
	struct buffer_head *bh, *bh1, *bh2;
	int n;
	unsigned fs;
	int c1, c2 = 0;

	if (fnod) {
		if (!(fnode = hpfs_map_fnode(s, node, &bh))) return -1;
		btree = GET_BTREE_PTR(&fnode->btree);
	} else {
		if (!(anode = hpfs_map_anode(s, node, &bh))) return -1;
		btree = GET_BTREE_PTR(&anode->btree);
	}
	a = node;
	go_down:
	if ((n = btree->n_used_nodes - 1) < -!!fnod) {
		hpfs_error(s, "anode %08x has no entries", a);
		brelse(bh);
		return -1;
	}
	if (bp_internal(btree)) {
		a = le32_to_cpu(btree->u.internal[n].down);
		btree->u.internal[n].file_secno = cpu_to_le32(-1);
		mark_buffer_dirty(bh);
		brelse(bh);
		if (hpfs_sb(s)->sb_chk)
			if (hpfs_stop_cycles(s, a, &c1, &c2, "hpfs_add_sector_to_btree #1")) return -1;
		if (!(anode = hpfs_map_anode(s, a, &bh))) return -1;
		btree = GET_BTREE_PTR(&anode->btree);
		goto go_down;
	}
	if (n >= 0) {
		if (le32_to_cpu(btree->u.external[n].file_secno) + le32_to_cpu(btree->u.external[n].length) != fsecno) {
			hpfs_error(s, "allocated size %08x, trying to add sector %08x, %cnode %08x",
				le32_to_cpu(btree->u.external[n].file_secno) + le32_to_cpu(btree->u.external[n].length), fsecno,
				fnod?'f':'a', node);
			brelse(bh);
			return -1;
		}
		if (hpfs_alloc_if_possible(s, se = le32_to_cpu(btree->u.external[n].disk_secno) + le32_to_cpu(btree->u.external[n].length))) {
			le32_add_cpu(&btree->u.external[n].length, 1);
			mark_buffer_dirty(bh);
			brelse(bh);
			return se;
		}
	} else {
		if (fsecno) {
			hpfs_error(s, "empty file %08x, trying to add sector %08x", node, fsecno);
			brelse(bh);
			return -1;
		}
		se = !fnod ? node : (node + 16384) & ~16383;
	}
	if (!(se = hpfs_alloc_sector(s, se, 1, fsecno*ALLOC_M>ALLOC_FWD_MAX ? ALLOC_FWD_MAX : fsecno*ALLOC_M<ALLOC_FWD_MIN ? ALLOC_FWD_MIN : fsecno*ALLOC_M))) {
		brelse(bh);
		return -1;
	}
	fs = n < 0 ? 0 : le32_to_cpu(btree->u.external[n].file_secno) + le32_to_cpu(btree->u.external[n].length);
	if (!btree->n_free_nodes) {
		up = a != node ? le32_to_cpu(anode->up) : -1;
		if (!(anode = hpfs_alloc_anode(s, a, &na, &bh1))) {
			brelse(bh);
			hpfs_free_sectors(s, se, 1);
			return -1;
		}
		if (a == node && fnod) {
			anode->up = cpu_to_le32(node);
			anode->btree.flags |= BP_fnode_parent;
			anode->btree.n_used_nodes = btree->n_used_nodes;
			anode->btree.first_free = btree->first_free;
			anode->btree.n_free_nodes = 40 - anode->btree.n_used_nodes;
			memcpy(&anode->u, &btree->u, btree->n_used_nodes * 12);
			btree->flags |= BP_internal;
			btree->n_free_nodes = 11;
			btree->n_used_nodes = 1;
			btree->first_free = cpu_to_le16((char *)&(btree->u.internal[1]) - (char *)btree);
			btree->u.internal[0].file_secno = cpu_to_le32(-1);
			btree->u.internal[0].down = cpu_to_le32(na);
			mark_buffer_dirty(bh);
		} else if (!(ranode = hpfs_alloc_anode(s, /*a*/0, &ra, &bh2))) {
			brelse(bh);
			brelse(bh1);
			hpfs_free_sectors(s, se, 1);
			hpfs_free_sectors(s, na, 1);
			return -1;
		}
		brelse(bh);
		bh = bh1;
		btree = GET_BTREE_PTR(&anode->btree);
	}
	btree->n_free_nodes--; n = btree->n_used_nodes++;
	le16_add_cpu(&btree->first_free, 12);
	btree->u.external[n].disk_secno = cpu_to_le32(se);
	btree->u.external[n].file_secno = cpu_to_le32(fs);
	btree->u.external[n].length = cpu_to_le32(1);
	mark_buffer_dirty(bh);
	brelse(bh);
	if ((a == node && fnod) || na == -1) return se;
	c2 = 0;
	while (up != (anode_secno)-1) {
		struct anode *new_anode;
		if (hpfs_sb(s)->sb_chk)
			if (hpfs_stop_cycles(s, up, &c1, &c2, "hpfs_add_sector_to_btree #2")) return -1;
		if (up != node || !fnod) {
			if (!(anode = hpfs_map_anode(s, up, &bh))) return -1;
			btree = GET_BTREE_PTR(&anode->btree);
		} else {
			if (!(fnode = hpfs_map_fnode(s, up, &bh))) return -1;
			btree = GET_BTREE_PTR(&fnode->btree);
		}
		if (btree->n_free_nodes) {
			btree->n_free_nodes--; n = btree->n_used_nodes++;
			le16_add_cpu(&btree->first_free, 8);
			btree->u.internal[n].file_secno = cpu_to_le32(-1);
			btree->u.internal[n].down = cpu_to_le32(na);
			btree->u.internal[n-1].file_secno = cpu_to_le32(fs);
			mark_buffer_dirty(bh);
			brelse(bh);
			brelse(bh2);
			hpfs_free_sectors(s, ra, 1);
			if ((anode = hpfs_map_anode(s, na, &bh))) {
				anode->up = cpu_to_le32(up);
				if (up == node && fnod)
					anode->btree.flags |= BP_fnode_parent;
				else
					anode->btree.flags &= ~BP_fnode_parent;
				mark_buffer_dirty(bh);
				brelse(bh);
			}
			return se;
		}
		up = up != node ? le32_to_cpu(anode->up) : -1;
		btree->u.internal[btree->n_used_nodes - 1].file_secno = cpu_to_le32(/*fs*/-1);
		mark_buffer_dirty(bh);
		brelse(bh);
		a = na;
		if ((new_anode = hpfs_alloc_anode(s, a, &na, &bh))) {
			anode = new_anode;
			/*anode->up = cpu_to_le32(up != -1 ? up : ra);*/
			anode->btree.flags |= BP_internal;
			anode->btree.n_used_nodes = 1;
			anode->btree.n_free_nodes = 59;
			anode->btree.first_free = cpu_to_le16(16);
			GET_BTREE_PTR(&anode->btree)->u.internal[0].down = cpu_to_le32(a);
			GET_BTREE_PTR(&anode->btree)->u.internal[0].file_secno = cpu_to_le32(-1);
			mark_buffer_dirty(bh);
			brelse(bh);
			if ((anode = hpfs_map_anode(s, a, &bh))) {
				anode->up = cpu_to_le32(na);
				mark_buffer_dirty(bh);
				brelse(bh);
			}
		} else na = a;
	}
	if ((anode = hpfs_map_anode(s, na, &bh))) {
		anode->up = cpu_to_le32(node);
		if (fnod)
			anode->btree.flags |= BP_fnode_parent;
		mark_buffer_dirty(bh);
		brelse(bh);
	}
	if (!fnod) {
		if (!(anode = hpfs_map_anode(s, node, &bh))) {
			brelse(bh2);
			return -1;
		}
		btree = GET_BTREE_PTR(&anode->btree);
	} else {
		if (!(fnode = hpfs_map_fnode(s, node, &bh))) {
			brelse(bh2);
			return -1;
		}
		btree = GET_BTREE_PTR(&fnode->btree);
	}
	ranode->up = cpu_to_le32(node);
	memcpy(&ranode->btree, btree, le16_to_cpu(btree->first_free));
	if (fnod)
		ranode->btree.flags |= BP_fnode_parent;
	GET_BTREE_PTR(&ranode->btree)->n_free_nodes = (bp_internal(GET_BTREE_PTR(&ranode->btree)) ? 60 : 40) - GET_BTREE_PTR(&ranode->btree)->n_used_nodes;
	if (bp_internal(GET_BTREE_PTR(&ranode->btree))) for (n = 0; n < GET_BTREE_PTR(&ranode->btree)->n_used_nodes; n++) {
		struct anode *unode;
		if ((unode = hpfs_map_anode(s, le32_to_cpu(ranode->u.internal[n].down), &bh1))) {
			unode->up = cpu_to_le32(ra);
			unode->btree.flags &= ~BP_fnode_parent;
			mark_buffer_dirty(bh1);
			brelse(bh1);
		}
	}
	btree->flags |= BP_internal;
	btree->n_free_nodes = fnod ? 10 : 58;
	btree->n_used_nodes = 2;
	btree->first_free = cpu_to_le16((char *)&btree->u.internal[2] - (char *)btree);
	btree->u.internal[0].file_secno = cpu_to_le32(fs);
	btree->u.internal[0].down = cpu_to_le32(ra);
	btree->u.internal[1].file_secno = cpu_to_le32(-1);
	btree->u.internal[1].down = cpu_to_le32(na);
	mark_buffer_dirty(bh);
	brelse(bh);
	mark_buffer_dirty(bh2);
	brelse(bh2);
	return se;
}
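
/*
 * The forward-allocation hint passed to hpfs_alloc_sector() above is
 * fsecno * ALLOC_M clamped to [ALLOC_FWD_MIN, ALLOC_FWD_MAX], i.e. the
 * larger the file already is, the more free space is requested after the
 * newly allocated sector.  A caller growing a file adds one sector at a
 * time, roughly (illustrative sketch only, not a prescribed calling
 * convention):
 *
 *	secno n = hpfs_add_sector_to_btree(s, fno, 1, current_length_in_secs);
 *	if (n == (secno)-1)
 *		goto error;
 */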

/*
 * Remove allocation tree. Recursion would look much nicer but
 * I want to avoid it because it can cause stack overflow.
 */

void hpfs_remove_btree(struct super_block *s, struct bplus_header *btree)
{
	struct bplus_header *btree1 = btree;
	struct anode *anode = NULL;
	anode_secno ano = 0, oano;
	struct buffer_head *bh;
	int level = 0;
	int pos = 0;
	int i;
	int c1, c2 = 0;
	int d1, d2;
	go_down:
	d2 = 0;
	while (bp_internal(btree1)) {
		ano = le32_to_cpu(btree1->u.internal[pos].down);
		if (level) brelse(bh);
		if (hpfs_sb(s)->sb_chk)
			if (hpfs_stop_cycles(s, ano, &d1, &d2, "hpfs_remove_btree #1"))
				return;
		if (!(anode = hpfs_map_anode(s, ano, &bh))) return;
		btree1 = GET_BTREE_PTR(&anode->btree);
		level++;
		pos = 0;
	}
	for (i = 0; i < btree1->n_used_nodes; i++)
		hpfs_free_sectors(s, le32_to_cpu(btree1->u.external[i].disk_secno), le32_to_cpu(btree1->u.external[i].length));
	go_up:
	if (!level) return;
	brelse(bh);
	if (hpfs_sb(s)->sb_chk)
		if (hpfs_stop_cycles(s, ano, &c1, &c2, "hpfs_remove_btree #2")) return;
	hpfs_free_sectors(s, ano, 1);
	oano = ano;
	ano = le32_to_cpu(anode->up);
	if (--level) {
		if (!(anode = hpfs_map_anode(s, ano, &bh))) return;
		btree1 = GET_BTREE_PTR(&anode->btree);
	} else btree1 = btree;
	for (i = 0; i < btree1->n_used_nodes; i++) {
		if (le32_to_cpu(btree1->u.internal[i].down) == oano) {
			if ((pos = i + 1) < btree1->n_used_nodes)
				goto go_down;
			else
				goto go_up;
		}
	}
	hpfs_error(s,
		   "reference to anode %08x not found in anode %08x "
		   "(probably bad up pointer)",
		   oano, level ? ano : -1);
	if (level)
		brelse(bh);
}
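
/*
 * The helpers below implement byte-addressed I/O on extended attribute
 * storage.  A byte offset 'pos' is split into a 512-byte sector index
 * (pos >> 9) and an offset within that sector (pos & 0x1ff); when the EA
 * data lives in an anode tree (ano != 0) the sector index is translated
 * through hpfs_bplus_lookup(), otherwise it is simply added to the
 * starting sector 'a'.  For example, pos == 1300 addresses byte 276 of
 * the third 512-byte sector (index 2).
 */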

/* Just a wrapper around hpfs_bplus_lookup .. used for reading eas */

static secno anode_lookup(struct super_block *s, anode_secno a, unsigned sec)
{
	struct anode *anode;
	struct buffer_head *bh;
	if (!(anode = hpfs_map_anode(s, a, &bh))) return -1;
	return hpfs_bplus_lookup(s, NULL, GET_BTREE_PTR(&anode->btree), sec, bh);
}

int hpfs_ea_read(struct super_block *s, secno a, int ano, unsigned pos,
	    unsigned len, char *buf)
{
	struct buffer_head *bh;
	char *data;
	secno sec;
	unsigned l;
	while (len) {
		if (ano) {
			if ((sec = anode_lookup(s, a, pos >> 9)) == -1)
				return -1;
		} else sec = a + (pos >> 9);
		if (hpfs_sb(s)->sb_chk) if (hpfs_chk_sectors(s, sec, 1, "ea #1")) return -1;
		if (!(data = hpfs_map_sector(s, sec, &bh, (len - 1) >> 9)))
			return -1;
		l = 0x200 - (pos & 0x1ff); if (l > len) l = len;
		memcpy(buf, data + (pos & 0x1ff), l);
		brelse(bh);
		buf += l; pos += l; len -= l;
	}
	return 0;
}

int hpfs_ea_write(struct super_block *s, secno a, int ano, unsigned pos,
	     unsigned len, const char *buf)
{
	struct buffer_head *bh;
	char *data;
	secno sec;
	unsigned l;
	while (len) {
		if (ano) {
			if ((sec = anode_lookup(s, a, pos >> 9)) == -1)
				return -1;
		} else sec = a + (pos >> 9);
		if (hpfs_sb(s)->sb_chk) if (hpfs_chk_sectors(s, sec, 1, "ea #2")) return -1;
		if (!(data = hpfs_map_sector(s, sec, &bh, (len - 1) >> 9)))
			return -1;
		l = 0x200 - (pos & 0x1ff); if (l > len) l = len;
		memcpy(data + (pos & 0x1ff), buf, l);
		mark_buffer_dirty(bh);
		brelse(bh);
		buf += l; pos += l; len -= l;
	}
	return 0;
}

void hpfs_ea_remove(struct super_block *s, secno a, int ano, unsigned len)
{
	struct anode *anode;
	struct buffer_head *bh;
	if (ano) {
		if (!(anode = hpfs_map_anode(s, a, &bh))) return;
		hpfs_remove_btree(s, GET_BTREE_PTR(&anode->btree));
		brelse(bh);
		hpfs_free_sectors(s, a, 1);
	} else hpfs_free_sectors(s, a, (len + 511) >> 9);
}
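
/*
 * In hpfs_truncate_btree() below, 'secs' is the new length in sectors:
 * the extent containing the new last sector is kept (and shortened if it
 * runs past the new end), every later extent is freed, and internal
 * entries past the cut point have their whole subtrees released.  With
 * secs == 0 the entire tree is removed and the header is reset (fnode
 * case) or the anode itself is freed.
 */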

/* Truncate allocation tree. Doesn't join anodes - I hope it doesn't matter */

void hpfs_truncate_btree(struct super_block *s, secno f, int fno, unsigned secs)
{
	struct fnode *fnode;
	struct anode *anode;
	struct buffer_head *bh;
	struct bplus_header *btree;
	anode_secno node = f;
	int i, j, nodes;
	int c1, c2 = 0;
	if (fno) {
		if (!(fnode = hpfs_map_fnode(s, f, &bh))) return;
		btree = GET_BTREE_PTR(&fnode->btree);
	} else {
		if (!(anode = hpfs_map_anode(s, f, &bh))) return;
		btree = GET_BTREE_PTR(&anode->btree);
	}
	if (!secs) {
		hpfs_remove_btree(s, btree);
		if (fno) {
			btree->n_free_nodes = 8;
			btree->n_used_nodes = 0;
			btree->first_free = cpu_to_le16(8);
			btree->flags &= ~BP_internal;
			mark_buffer_dirty(bh);
		} else hpfs_free_sectors(s, f, 1);
		brelse(bh);
		return;
	}
	while (bp_internal(btree)) {
		nodes = btree->n_used_nodes + btree->n_free_nodes;
		for (i = 0; i < btree->n_used_nodes; i++)
			if (le32_to_cpu(btree->u.internal[i].file_secno) >= secs) goto f;
		brelse(bh);
		hpfs_error(s, "internal btree %08x doesn't end with -1", node);
		return;
		f:
		for (j = i + 1; j < btree->n_used_nodes; j++)
			hpfs_ea_remove(s, le32_to_cpu(btree->u.internal[j].down), 1, 0);
		btree->n_used_nodes = i + 1;
		btree->n_free_nodes = nodes - btree->n_used_nodes;
		btree->first_free = cpu_to_le16(8 + 8 * btree->n_used_nodes);
		mark_buffer_dirty(bh);
		if (btree->u.internal[i].file_secno == cpu_to_le32(secs)) {
			brelse(bh);
			return;
		}
		node = le32_to_cpu(btree->u.internal[i].down);
		brelse(bh);
		if (hpfs_sb(s)->sb_chk)
			if (hpfs_stop_cycles(s, node, &c1, &c2, "hpfs_truncate_btree"))
				return;
		if (!(anode = hpfs_map_anode(s, node, &bh))) return;
		btree = GET_BTREE_PTR(&anode->btree);
	}
	nodes = btree->n_used_nodes + btree->n_free_nodes;
	for (i = 0; i < btree->n_used_nodes; i++)
		if (le32_to_cpu(btree->u.external[i].file_secno) + le32_to_cpu(btree->u.external[i].length) >= secs) goto ff;
	brelse(bh);
	return;
	ff:
	if (secs <= le32_to_cpu(btree->u.external[i].file_secno)) {
		hpfs_error(s, "there is an allocation error in file %08x, sector %08x", f, secs);
		if (i) i--;
	}
	else if (le32_to_cpu(btree->u.external[i].file_secno) + le32_to_cpu(btree->u.external[i].length) > secs) {
		hpfs_free_sectors(s, le32_to_cpu(btree->u.external[i].disk_secno) + secs -
			le32_to_cpu(btree->u.external[i].file_secno), le32_to_cpu(btree->u.external[i].length)
			- secs + le32_to_cpu(btree->u.external[i].file_secno)); /* I hope gcc optimizes this :-) */
		btree->u.external[i].length = cpu_to_le32(secs - le32_to_cpu(btree->u.external[i].file_secno));
	}
	for (j = i + 1; j < btree->n_used_nodes; j++)
		hpfs_free_sectors(s, le32_to_cpu(btree->u.external[j].disk_secno), le32_to_cpu(btree->u.external[j].length));
	btree->n_used_nodes = i + 1;
	btree->n_free_nodes = nodes - btree->n_used_nodes;
	btree->first_free = cpu_to_le16(8 + 12 * btree->n_used_nodes);
	mark_buffer_dirty(bh);
	brelse(bh);
}
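
/*
 * hpfs_remove_fnode() below releases everything reachable from an fnode:
 * the allocation tree of a file (or the dnode tree of a directory), any
 * indirect or externally stored extended attributes, and finally the
 * fnode sector itself.
 */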

/* Remove file or directory and its eas - note that directory must
   be empty when this is called. */

void hpfs_remove_fnode(struct super_block *s, fnode_secno fno)
{
	struct buffer_head *bh;
	struct fnode *fnode;
	struct extended_attribute *ea;
	struct extended_attribute *ea_end;
	if (!(fnode = hpfs_map_fnode(s, fno, &bh))) return;
	if (!fnode_is_dir(fnode)) hpfs_remove_btree(s, GET_BTREE_PTR(&fnode->btree));
	else hpfs_remove_dtree(s, le32_to_cpu(fnode->u.external[0].disk_secno));
	ea_end = fnode_end_ea(fnode);
	for (ea = fnode_ea(fnode); ea < ea_end; ea = next_ea(ea))
		if (ea_indirect(ea))
			hpfs_ea_remove(s, ea_sec(ea), ea_in_anode(ea), ea_len(ea));
	hpfs_ea_ext_remove(s, le32_to_cpu(fnode->ea_secno), fnode_in_anode(fnode), le32_to_cpu(fnode->ea_size_l));
	brelse(bh);
	hpfs_free_sectors(s, fno, 1);
}