Lines Matching defs:cpos

62 				      u32 cpos, u32 old_cluster,
877 u64 cpos, unsigned int len,
890 le32_to_cpu(rec->r_clusters) <= cpos)
892 else if (le64_to_cpu(rec->r_cpos) > cpos)
895 /* ok, cpos falls in this rec. Just return. */
903 ret_rec->r_cpos = cpu_to_le64(cpos);
906 le64_to_cpu(rec->r_cpos) < cpos + len)
908 cpu_to_le32(le64_to_cpu(rec->r_cpos) - cpos);
964 u32 cpos;
982 * We are the last extent rec, so any high cpos should
1004 cpos = le32_to_cpu(eb->h_list.l_recs[index].e_cpos);
1005 ret = ocfs2_find_path(ci, left_path, cpos);
1018 ret = ocfs2_find_cpos_for_right_leaf(sb, left_path, &cpos);
1024 ret = ocfs2_find_path(ci, right_path, cpos);
1051 * Given a cpos and len, try to find the refcount record which contains cpos.
1052 * 1. If cpos can be found in one refcount record, return the record.
1053 * 2. If cpos can't be found, return a fake record which starts at cpos
1054 * and ends at the smaller of cpos+len and the start of the next record.
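A minimal standalone model of the lookup rule in the comment above (rule 2 in particular). The struct and function names below are made up for illustration, the on-disk little-endian handling is dropped, and the record list is assumed sorted by r_cpos; this is a sketch of the behaviour, not the ocfs2 implementation.

#include <stdint.h>
#include <stdio.h>

struct simple_rec {
        uint64_t r_cpos;      /* first cluster covered by this record */
        uint32_t r_clusters;  /* number of clusters covered */
        uint32_t r_refcount;  /* reference count (0 marks a gap/"fake" record) */
};

/* Return the record containing cpos, or a synthesized gap record that
 * starts at cpos and ends at min(cpos + len, start of the next record). */
static struct simple_rec find_refcount_rec(const struct simple_rec *rl,
                                           int count, uint64_t cpos,
                                           uint32_t len)
{
        struct simple_rec fake = { cpos, len, 0 };

        for (int i = 0; i < count; i++) {
                const struct simple_rec *rec = &rl[i];

                if (rec->r_cpos + rec->r_clusters <= cpos)
                        continue;                    /* ends before cpos */
                if (rec->r_cpos <= cpos)
                        return *rec;                 /* cpos falls in this rec */
                /* rec starts after cpos: cap the fake record at its start */
                if (rec->r_cpos < cpos + len)
                        fake.r_clusters = (uint32_t)(rec->r_cpos - cpos);
                break;
        }
        return fake;
}

int main(void)
{
        struct simple_rec rl[] = { { 0, 4, 2 }, { 10, 6, 1 } };
        struct simple_rec r = find_refcount_rec(rl, 2, 6, 8);

        /* A gap: prints cpos 6, clusters 4 (capped by the record at 10), refcount 0. */
        printf("cpos %llu clusters %u refcount %u\n",
               (unsigned long long)r.r_cpos, r.r_clusters, r.r_refcount);
        return 0;
}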
1059 u64 cpos, unsigned int len,
1075 ocfs2_find_refcount_rec_in_rl(ci, ref_root_bh, cpos, len,
1083 low_cpos = cpos & OCFS2_32BIT_POS_MASK;
1133 ocfs2_find_refcount_rec_in_rl(ci, ref_leaf_bh, cpos, len,
1397 * The refcount records are ordered by their 64-bit cpos,
1401 * Note: The refcount records in the block are already sorted by their low 32-bit cpos,
1445 u32 cpos = 0;
1459 * If all the high 32-bit cpos values are the same, there is no need to sort.
1462 * 1. sort the entries by their low 32-bit cpos first so that we can
1463 * find the split cpos easily.
1466 * 4. sort the entries by their 64-bit cpos.
1473 ret = ocfs2_find_refcount_split_pos(rl, &cpos, &split_index);
1479 new_rb->rf_cpos = cpu_to_le32(cpos);
1502 *split_cpos = cpos;
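Steps 1 and 4 above describe two different orderings of the same records. The comparators below restate them against a simplified record type; the middle-entry split choice is only a rough stand-in for what ocfs2_find_refcount_split_pos really does, and all names here are hypothetical.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct simple_rec {
        uint64_t r_cpos;
        uint32_t r_clusters;
        uint32_t r_refcount;
};

#define LOW32(x) ((uint32_t)((x) & 0xffffffffULL))

static int cmp_low_cpos(const void *a, const void *b)
{
        uint32_t la = LOW32(((const struct simple_rec *)a)->r_cpos);
        uint32_t lb = LOW32(((const struct simple_rec *)b)->r_cpos);

        return (la > lb) - (la < lb);
}

static int cmp_full_cpos(const void *a, const void *b)
{
        uint64_t ca = ((const struct simple_rec *)a)->r_cpos;
        uint64_t cb = ((const struct simple_rec *)b)->r_cpos;

        return (ca > cb) - (ca < cb);
}

/* Sort by low 32-bit cpos, take the middle entry's low cpos as a naive
 * split point, then restore the full 64-bit ordering (steps 1 and 4). */
static uint32_t naive_split_cpos(struct simple_rec *rl, int count)
{
        uint32_t split;

        qsort(rl, count, sizeof(*rl), cmp_low_cpos);
        split = LOW32(rl[count / 2].r_cpos);
        qsort(rl, count, sizeof(*rl), cmp_full_cpos);
        return split;
}

int main(void)
{
        struct simple_rec rl[] = {
                { 0x100000002ULL, 4, 1 },    /* high 1, low 2 */
                { 0x000000005ULL, 4, 1 },    /* high 0, low 5 */
                { 0x100000007ULL, 4, 2 },    /* high 1, low 7 */
        };

        printf("split at low cpos %u\n", naive_split_cpos(rl, 3));
        return 0;
}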
1591 /* Insert the new leaf block at the given offset cpos. */
1745 u64 cpos = le64_to_cpu(rec->r_cpos);
1756 cpos, len, NULL, &index,
1872 u64 cpos = le64_to_cpu(orig_rec->r_cpos);
1882 * We have to re-get it since cpos may now have moved to
1886 cpos, len, &tmp_rec, &index,
1975 u64 cpos, u32 len, int merge,
1986 (unsigned long long)cpos, len);
1990 cpos, len, &rec, &index,
2009 if (rec.r_refcount && le64_to_cpu(rec.r_cpos) == cpos &&
2012 (unsigned long long)cpos, set_len,
2036 set_len = min((u64)(cpos + len),
2037 le64_to_cpu(rec.r_cpos) + set_len) - cpos;
2038 rec.r_cpos = cpu_to_le64(cpos);
2055 cpos += set_len;
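The min()/advance pattern at 2036-2037 and 2055 (and again at 2246-2247/2268 and 2414-2415/2452) is the usual "consume the request one refcount record at a time" loop. A self-contained restatement with simplified, made-up types and none of the journaling or endian conversion:

#include <stdint.h>
#include <stdio.h>

struct simple_rec {
        uint64_t r_cpos;
        uint32_t r_clusters;
};

/* Part of the request [cpos, cpos + len) covered by rec, assuming the
 * lookup already guaranteed cpos >= rec->r_cpos. */
static uint32_t covered_len(uint64_t cpos, uint32_t len,
                            const struct simple_rec *rec)
{
        uint64_t req_end = cpos + len;
        uint64_t rec_end = rec->r_cpos + rec->r_clusters;

        return (uint32_t)((req_end < rec_end ? req_end : rec_end) - cpos);
}

int main(void)
{
        /* Two back-to-back records covering clusters [0, 8) and [8, 20). */
        struct simple_rec recs[] = { { 0, 8 }, { 8, 12 } };
        uint64_t cpos = 5;
        uint32_t len = 10;
        int i = 0;

        while (len) {
                uint32_t chunk = covered_len(cpos, len, &recs[i]);

                printf("handle clusters [%llu, %llu) against rec %d\n",
                       (unsigned long long)cpos,
                       (unsigned long long)(cpos + chunk), i);
                cpos += chunk;    /* mirrors "cpos += set_len" above */
                len -= chunk;
                i++;
        }
        return 0;
}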
2149 u64 cpos, u32 len,
2154 cpos, len, 1,
2162 int index, u64 cpos, unsigned int len,
2171 BUG_ON(cpos < le64_to_cpu(rec->r_cpos));
2172 BUG_ON(cpos + len >
2177 (unsigned long long)cpos, len);
2179 if (cpos == le64_to_cpu(rec->r_cpos) &&
2185 split.r_cpos = cpu_to_le64(cpos);
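The BUG_ON()s at 2171-2172 pin down the precondition for a record split: the range [cpos, cpos+len) must lie inside the record being split. Below is a sketch of that interval split with hypothetical types; the real ocfs2_split_refcount_rec also edits the on-disk refcount list, journals the buffers, and merges adjacent records.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

struct simple_rec {
        uint64_t r_cpos;
        uint32_t r_clusters;
        uint32_t r_refcount;
};

/* Carve [cpos, cpos + len) out of a record covering
 * [r_cpos, r_cpos + r_clusters); returns the number of pieces (1, 2 or 3). */
static int split_rec(const struct simple_rec *rec, uint64_t cpos,
                     uint32_t len, uint32_t new_refcount,
                     struct simple_rec out[3])
{
        uint64_t rec_end = rec->r_cpos + rec->r_clusters;
        int n = 0;

        assert(cpos >= rec->r_cpos);            /* mirrors the BUG_ON()s */
        assert(cpos + len <= rec_end);

        if (cpos > rec->r_cpos)                 /* untouched left part */
                out[n++] = (struct simple_rec){ rec->r_cpos,
                                                (uint32_t)(cpos - rec->r_cpos),
                                                rec->r_refcount };

        out[n++] = (struct simple_rec){ cpos, len, new_refcount };

        if (cpos + len < rec_end)               /* untouched right part */
                out[n++] = (struct simple_rec){ cpos + len,
                                                (uint32_t)(rec_end - cpos - len),
                                                rec->r_refcount };
        return n;
}

int main(void)
{
        struct simple_rec rec = { 100, 50, 3 }, out[3];
        int n = split_rec(&rec, 110, 20, 4, out);

        /* Expect [100,110) refcount 3, [110,130) refcount 4, [130,150) refcount 3. */
        for (int i = 0; i < n; i++)
                printf("[%llu, %llu) refcount %u\n",
                       (unsigned long long)out[i].r_cpos,
                       (unsigned long long)(out[i].r_cpos + out[i].r_clusters),
                       out[i].r_refcount);
        return 0;
}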
2217 u64 cpos, u32 len,
2230 (unsigned long long)cpos, len, delete);
2234 cpos, len, &rec, &index,
2246 r_len = min((u64)(cpos + len), le64_to_cpu(rec.r_cpos) +
2247 le32_to_cpu(rec.r_clusters)) - cpos;
2251 cpos, r_len,
2260 ocfs2_clusters_to_blocks(sb, cpos),
2268 cpos += r_len;
2281 handle_t *handle, u32 cpos, u32 len,
2313 cpos, len, meta_ac, dealloc, delete);
2322 * Mark the already-existing extent at cpos as refcounted for len clusters.
2332 handle_t *handle, u32 cpos,
2340 cpos, len, phys);
2348 ret = ocfs2_change_extent_flag(handle, et, cpos,
2371 u64 cpos = start_cpos;
2379 cpos, clusters, &rec,
2409 recs_add, (unsigned long long)cpos, clusters,
2414 len = min((u64)cpos + clusters, le64_to_cpu(rec.r_cpos) +
2415 le32_to_cpu(rec.r_clusters)) - cpos;
2438 if (cpos == start_cpos &&
2439 cpos != le64_to_cpu(rec.r_cpos))
2443 if (cpos + clusters < le64_to_cpu(rec.r_cpos) +
2452 cpos += len;
2577 * Given an extent that starts at 'start' and an I/O that starts at 'cpos',
2578 * find an offset (start + (n * contig_clusters)) that is closest to cpos
2585 unsigned int cpos)
2587 BUG_ON(start > cpos);
2589 return start + ((cpos - start) & ocfs2_cow_contig_mask(sb));
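The expression at 2589 rounds the distance (cpos - start) down to a multiple of the CoW contiguity unit, so CoW begins on a contig_clusters boundary measured from the start of the extent. The same arithmetic spelled out below, assuming contig_clusters is a power of two (which is what makes the mask trick valid); cow_align_start here is an illustrative stand-in, not the kernel helper.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Largest offset of the form start + n * contig that does not exceed
 * cpos; contig must be a power of two, matching the mask above. */
static uint32_t cow_align_start(uint32_t start, uint32_t cpos,
                                uint32_t contig)
{
        assert(start <= cpos);
        assert(contig && (contig & (contig - 1)) == 0);
        return start + ((cpos - start) & ~(contig - 1));
}

int main(void)
{
        /* Extent starts at cluster 10, I/O starts at cluster 27,
         * CoW granularity is 8 clusters: CoW is aligned to cluster 26. */
        printf("%u\n", cow_align_start(10, 27, 8));
        return 0;
}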
2613 * cpos is the virtual start cluster position at which we want to do CoW in a
2617 * Normally we start CoW at the beginning of the extent record containing cpos.
2623 u32 cpos,
2638 BUG_ON(cpos + write_len > max_cpos);
2641 ret = ocfs2_find_leaf(INODE_CACHE(inode), el, cpos, &eb_bh);
2670 le16_to_cpu(rec->e_leaf_clusters) <= cpos)
2707 want_clusters = (cpos + write_len) -
2722 else if (*cow_len || (*cow_start == cpos)) {
2738 (cpos + write_len)) {
2745 } else if ((rec_end - cpos) <= contig_clusters) {
2748 * this extent will cover cpos.
2752 } else if ((rec_end - cpos) <= want_clusters) {
2755 * extent, we know that the write goes from cpos
2759 * Failing that (ie, cpos is within
2764 *cow_start, cpos);
2776 *cow_start, cpos);
2778 want_clusters = (cpos + write_len) - *cow_start;
2788 if ((*cow_start + *cow_len) >= (cpos + write_len))
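A deliberately simplified model of the range selection described in the comments around 2613-2788: pick a CoW window inside one extent that covers the write, is aligned to the contiguity unit relative to the extent start, and never leaves the extent. The real ocfs2_refcount_cal_cow_clusters handles more cases (multiple records, the max_cpos cap, short tail extents), so treat this, with its made-up names, only as a sketch of the idea.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t round_down_p2(uint32_t x, uint32_t p2)
{
        return x & ~(p2 - 1);
}

static uint32_t round_up_p2(uint32_t x, uint32_t p2)
{
        return (x + p2 - 1) & ~(p2 - 1);
}

static void cal_cow_range(uint32_t rec_start, uint32_t rec_end,
                          uint32_t cpos, uint32_t write_len,
                          uint32_t contig, /* power of two */
                          uint32_t *cow_start, uint32_t *cow_len)
{
        uint32_t start, end;

        assert(rec_start <= cpos && cpos + write_len <= rec_end);

        /* Align the start of CoW to a contig boundary within the extent. */
        start = rec_start + round_down_p2(cpos - rec_start, contig);

        /* Round the end up to a contig boundary, but never past the extent. */
        end = rec_start + round_up_p2(cpos + write_len - rec_start, contig);
        if (end > rec_end)
                end = rec_end;

        *cow_start = start;
        *cow_len = end - start;
}

int main(void)
{
        uint32_t cs, cl;

        /* Extent [0, 100), a 3-cluster write at 37, 16-cluster CoW units:
         * the chosen window is clusters [32, 48). */
        cal_cow_range(0, 100, 37, 3, 16, &cs, &cl);
        printf("cow_start=%u cow_len=%u\n", cs, cl);
        return 0;
}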
2899 u32 cpos, u32 old_cluster,
2910 trace_ocfs2_duplicate_clusters_by_page(cpos, old_cluster,
2913 offset = ((loff_t)cpos) << OCFS2_SB(sb)->s_clustersize_bits;
2994 u32 cpos, u32 old_cluster,
3007 trace_ocfs2_duplicate_clusters_by_page(cpos, old_cluster,
3049 u32 cpos, u32 p_cluster, u32 len,
3062 cpos, len, p_cluster, ext_flags);
3065 replace_rec.e_cpos = cpu_to_le32(cpos);
3079 ret = ocfs2_find_path(et->et_ci, path, cpos);
3087 index = ocfs2_search_extent_list(el, cpos);
3090 "Inode %llu has an extent at cpos %u which can no longer be found\n",
3091 (unsigned long long)ino, cpos);
3107 u32 cpos, u32 old,
3116 cpos, old, new, len, ext_flags);
3121 cpos, old, new, len);
3129 cpos, new, len, ext_flags,
3139 u32 cpos, u32 num_clusters)
3147 start = ((loff_t)cpos) << OCFS2_SB(sb)->s_clustersize_bits;
3168 u32 cpos, u32 p_cluster,
3180 trace_ocfs2_make_clusters_writable(cpos, p_cluster,
3232 cpos, p_cluster,
3253 cpos, p_cluster, new_bit,
3272 cpos += set_len;
3294 ret = ocfs2_cow_sync_writeback(sb, context->inode, cpos,
3367 * Starting at cpos, try to CoW write_len clusters. Don't CoW
3373 u32 cpos, u32 write_len, u32 max_cpos)
3386 cpos, write_len, max_cpos,
3394 cpos, write_len, max_cpos,
3443 * CoW any and all clusters between cpos and cpos+write_len.
3445 * clusters between cpos and cpos+write_len are safe to modify.
3449 u32 cpos, u32 write_len, u32 max_cpos)
3456 ret = ocfs2_get_clusters(inode, cpos, &p_cluster,
3467 ret = ocfs2_refcount_cow_hunk(inode, di_bh, cpos,
3476 cpos += num_clusters;
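The loop at 3456-3476 follows the same shape as the later walks at 3512-3521, 3799-3819 and 3980-4000: look up the extent mapping at cpos, act on it, then advance cpos by the length of that mapping. A generic skeleton of the walk, with a hypothetical lookup callback standing in for ocfs2_get_clusters()/ocfs2_xattr_get_clusters() and a stub handler; none of these names are from ocfs2.

#include <stdint.h>
#include <stdio.h>

struct extent_map {
        uint32_t p_cluster;     /* physical start cluster (0 => hole) */
        uint32_t num_clusters;  /* length of this mapping */
        unsigned int flags;     /* e.g. a "refcounted" flag */
};

typedef int (*lookup_fn)(void *ctx, uint32_t cpos, struct extent_map *em);
typedef int (*handle_fn)(void *ctx, uint32_t cpos,
                         const struct extent_map *em);

static int walk_clusters(void *ctx, uint32_t clusters,
                         lookup_fn lookup, handle_fn handle)
{
        uint32_t cpos = 0;
        int ret;

        while (cpos < clusters) {
                struct extent_map em;

                ret = lookup(ctx, cpos, &em);
                if (ret)
                        return ret;

                if (em.p_cluster) {          /* skip holes */
                        ret = handle(ctx, cpos, &em);
                        if (ret)
                                return ret;
                }

                cpos += em.num_clusters;     /* mirrors "cpos += num_clusters" */
        }
        return 0;
}

/* Stub lookup: one 8-cluster hole followed by 8-cluster mapped extents. */
static int demo_lookup(void *ctx, uint32_t cpos, struct extent_map *em)
{
        (void)ctx;
        em->p_cluster = cpos ? 1000 + cpos : 0;
        em->num_clusters = 8;
        em->flags = 0;
        return 0;
}

static int demo_handle(void *ctx, uint32_t cpos, const struct extent_map *em)
{
        (void)ctx;
        printf("clusters [%u, %u) -> phys %u\n", cpos,
               cpos + em->num_clusters, em->p_cluster);
        return 0;
}

int main(void)
{
        return walk_clusters(NULL, 24, demo_lookup, demo_handle);
}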
3507 u32 cpos = 0, clusters = le32_to_cpu(xv->xr_clusters);
3512 while (cpos < clusters) {
3513 ret = ocfs2_xattr_get_clusters(inode, cpos, &p_cluster,
3521 cpos += num_clusters;
3588 u32 cpos, u32 write_len,
3599 cpos, write_len, UINT_MAX,
3649 u32 cpos, u32 p_cluster, u32 num_clusters,
3693 cpos, num_clusters, p_cluster,
3767 u32 cpos, num_clusters, clusters, p_cluster;
3798 cpos = 0;
3799 while (cpos < clusters) {
3800 ret = ocfs2_get_clusters(inode, cpos, &p_cluster,
3809 ref_root_bh, cpos,
3819 cpos += num_clusters;
3862 u32 cpos, u32 p_cluster, u32 num_clusters,
3889 ret = ocfs2_insert_extent(handle, et, cpos,
3969 u32 p_cluster, num_clusters, clusters, cpos;
3979 cpos = 0;
3980 while (cpos < clusters) {
3981 ret = ocfs2_get_clusters(s_inode, cpos, &p_cluster,
3990 cpos, p_cluster,
4000 cpos += num_clusters;