| /linux/drivers/block/drbd/ |
| drbd_interval.c |
|    16  #define NODE_END(node) ((node)->sector + ((node)->size >> 9))
|    28  sector_t this_end = this->sector + (this->size >> 9);  in drbd_insert_interval()
|    39  if (this->sector < here->sector)  in drbd_insert_interval()
|    41  else if (this->sector > here->sector)  in drbd_insert_interval()
|    69  drbd_contains_interval(struct rb_root *root, sector_t sector,  in drbd_contains_interval() argument
|    78  if (sector < here->sector)  in drbd_contains_interval()
|    80  else if (sector > here->sector)  in drbd_contains_interval()
|   118  drbd_find_overlap(struct rb_root *root, sector_t sector, unsigned int size)  in drbd_find_overlap() argument
|   122  sector_t end = sector + (size >> 9);  in drbd_find_overlap()
|   131  sector < interval_end(node->rb_left)) {  in drbd_find_overlap()
|   [all …]
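
The drbd_interval matches above all reduce to half-open sector intervals: the size is kept in bytes, so the exclusive end sector is sector + (size >> 9). A minimal userspace sketch of that arithmetic and of the overlap test drbd_find_overlap() applies while walking the rb-tree; struct interval is a hypothetical stand-in for struct drbd_interval:

    #include <stdbool.h>
    #include <stdint.h>

    /* Hypothetical stand-in for DRBD's interval: start sector plus size in bytes. */
    struct interval {
            uint64_t sector;        /* start, in 512-byte sectors */
            unsigned int size;      /* length, in bytes */
    };

    /* End sector (exclusive): byte size converted to sectors with >> 9. */
    static uint64_t interval_end(const struct interval *i)
    {
            return i->sector + (i->size >> 9);
    }

    /* Two half-open intervals [start, end) overlap iff each starts before
     * the other ends -- the test drbd_find_overlap() performs per node. */
    static bool intervals_overlap(const struct interval *a, const struct interval *b)
    {
            return a->sector < interval_end(b) && b->sector < interval_end(a);
    }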
|
| drbd_actlog.c |
|   127  sector_t sector, enum req_op op)  in _drbd_md_sync_page_io() argument
|   144  bio->bi_iter.bi_sector = sector;  in _drbd_md_sync_page_io()
|   178  sector_t sector, enum req_op op)  in drbd_md_sync_page_io() argument
|   187  (unsigned long long)sector, (op == REQ_OP_WRITE) ? "WRITE" : "READ",  in drbd_md_sync_page_io()
|   190  if (sector < drbd_md_first_sector(bdev) ||  in drbd_md_sync_page_io()
|   191  sector + 7 > drbd_md_last_sector(bdev))  in drbd_md_sync_page_io()
|   194  (unsigned long long)sector,  in drbd_md_sync_page_io()
|   197  err = _drbd_md_sync_page_io(device, bdev, sector, op);  in drbd_md_sync_page_io()
|   200  (unsigned long long)sector,  in drbd_md_sync_page_io()
|   245  unsigned first = i->sector >> (AL_EXTENT_SHIFT-9);  in drbd_al_begin_io_fastpath()
|   [all …]
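
The check at lines 190-191 bounds a metadata page I/O: DRBD metadata is transferred in 4 KiB units, i.e. eight 512-byte sectors, so the last sector touched is sector + 7. A hedged sketch of that validation; first and last are hypothetical stand-ins for drbd_md_first_sector()/drbd_md_last_sector():

    #include <stdbool.h>
    #include <stdint.h>

    /* A 4 KiB metadata page spans 8 sectors, so the last sector written
     * is sector + 7; both ends must lie inside the metadata area. */
    static bool md_io_in_bounds(uint64_t sector, uint64_t first, uint64_t last)
    {
            return sector >= first && sector + 7 <= last;
    }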
|
| drbd_worker.c |
|   127  drbd_set_out_of_sync(peer_device, peer_req->i.sector, peer_req->i.size);  in drbd_endio_write_sec_final()
|   157  drbd_rs_complete_io(device, i.sector);  in drbd_endio_write_sec_final()
|   183  (unsigned long long)peer_req->i.sector);  in drbd_peer_request_endio()
|   356  sector_t sector = peer_req->i.sector;  in w_e_send_csum() local
|   367  err = drbd_send_drequest_csum(peer_device, sector, size,  in w_e_send_csum()
|   387  static int read_for_csum(struct drbd_peer_device *peer_device, sector_t sector, int size)  in read_for_csum() argument
|   397  peer_req = drbd_alloc_peer_req(peer_device, ID_SYNCER /* unused */, sector,  in read_for_csum()
|   591  sector_t sector;  in make_resync_request() local
|   657  sector = BM_BIT_TO_SECT(bit);  in make_resync_request()
|   659  if (drbd_try_rs_begin_io(peer_device, sector)) {  in make_resync_request()
|   [all …]
|
| drbd_interval.h |
|    10  sector_t sector; /* start sector of the interval */  member
|    38  #define drbd_for_each_overlap(i, root, sector, size) \  argument
|    39  for (i = drbd_find_overlap(root, sector, size); \
|    41  i = drbd_next_overlap(i, sector, size))
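
drbd_for_each_overlap() packages a find-first/find-next pair into a for loop. The same iterator-macro shape, sketched with hypothetical find_overlap()/next_overlap() helpers in place of DRBD's:

    /* Iterator macro in the drbd_for_each_overlap() shape: seed the cursor
     * with the first overlapping node, advance with a "next" helper, stop
     * at NULL.  find_overlap() and next_overlap() are hypothetical. */
    #define for_each_overlap(i, root, sector, size)         \
            for (i = find_overlap(root, sector, size);      \
                 i;                                         \
                 i = next_overlap(i, sector, size))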
|
| /linux/block/ |
| blk-lib.c |
|    13  static sector_t bio_discard_limit(struct block_device *bdev, sector_t sector)  in bio_discard_limit() argument
|    19  sector += bdev->bd_start_sect;  in bio_discard_limit()
|    22  round_up(sector, discard_granularity >> SECTOR_SHIFT);  in bio_discard_limit()
|    28  if (granularity_aligned_sector != sector)  in bio_discard_limit()
|    29  return granularity_aligned_sector - sector;  in bio_discard_limit()
|    39  sector_t *sector, sector_t *nr_sects, gfp_t gfp_mask)  in blk_alloc_discard_bio() argument
|    41  sector_t bio_sects = min(*nr_sects, bio_discard_limit(bdev, *sector));  in blk_alloc_discard_bio()
|    50  bio->bi_iter.bi_sector = *sector;  in blk_alloc_discard_bio()
|    52  *sector += bio_sects;  in blk_alloc_discard_bio()
|    63  int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,  in __blkdev_issue_discard() argument
|   [all …]
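
bio_discard_limit() caps a discard bio so the next one begins on a discard-granularity boundary. A userspace sketch of that capping; generic rounding is used in place of the kernel's power-of-two round_up(), the partition-offset adjustment at line 19 is omitted, and the granularity is assumed to be at least one sector:

    #include <stdint.h>

    #define SECTOR_SHIFT 9

    /* If 'sector' is not on a granularity boundary, allow only enough
     * sectors to reach the next boundary; otherwise the full device
     * discard limit applies. */
    static uint64_t discard_limit(uint64_t sector, uint32_t granularity_bytes,
                                  uint64_t max_discard_sectors)
    {
            uint64_t gran = granularity_bytes >> SECTOR_SHIFT;
            uint64_t aligned = (sector + gran - 1) / gran * gran;

            if (aligned != sector)
                    return aligned - sector;
            return max_discard_sectors;
    }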
|
| blk-zoned.c |
|   156  static void disk_zone_set_cond(struct gendisk *disk, sector_t sector,  in disk_zone_set_cond() argument
|   164  unsigned int zno = disk_zone_no(disk, sector);  in disk_zone_set_cond()
|   191  bool bdev_zone_is_seq(struct block_device *bdev, sector_t sector)  in bdev_zone_is_seq() argument
|   194  unsigned int zno = disk_zone_no(disk, sector);  in bdev_zone_is_seq()
|   222  static int blkdev_do_report_zones(struct block_device *bdev, sector_t sector,  in blkdev_do_report_zones() argument
|   231  if (!nr_zones || sector >= get_capacity(disk))  in blkdev_do_report_zones()
|   234  return disk->fops->report_zones(disk, sector, nr_zones, args);  in blkdev_do_report_zones()
|   256  int blkdev_report_zones(struct block_device *bdev, sector_t sector,  in blkdev_report_zones() argument
|   264  return blkdev_do_report_zones(bdev, sector, nr_zones, &args);  in blkdev_report_zones()
|   293  sector_t sector, sector_t nr_sectors)  in blkdev_zone_mgmt() argument
|   [all …]
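
disk_zone_no(), used throughout these matches, maps a sector to the index of the zone containing it. A sketch assuming a fixed zone size; when the zone size is a power of two (which recent kernels require for zoned devices), the division reduces to a shift:

    #include <stdint.h>

    /* Zone index of a sector on a device with fixed-size zones. */
    static inline unsigned int zone_no(uint64_t sector, uint64_t zone_sectors)
    {
            return (unsigned int)(sector / zone_sectors);
    }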
|
| blk-ia-ranges.c |
|    18  return sprintf(buf, "%llu\n", iar->sector);  in blk_ia_range_sector_show()
|   176  sector_t sector)  in disk_find_ia_range() argument
|   183  if (sector >= iar->sector &&  in disk_find_ia_range()
|   184  sector < iar->sector + iar->nr_sectors)  in disk_find_ia_range()
|   196  sector_t sector = 0;  in disk_check_ia_ranges() local
|   208  tmp = disk_find_ia_range(iars, sector);  in disk_check_ia_ranges()
|   209  if (!tmp || tmp->sector != sector) {  in disk_check_ia_ranges()
|   216  swap(iar->sector, tmp->sector);  in disk_check_ia_ranges()
|   220  sector += iar->nr_sectors;  in disk_check_ia_ranges()
|   223  if (sector != capacity) {  in disk_check_ia_ranges()
|   [all …]
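
disk_check_ia_ranges() verifies that the reported independent access ranges tile the disk exactly, walking from sector 0 and sorting the ranges in place as it goes (the swap() at line 216). A simplified sketch over an already-sorted array:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    struct ia_range {
            uint64_t sector;        /* first sector of the range */
            uint64_t nr_sectors;    /* length in sectors */
    };

    /* Ranges tile the device iff, walking from sector 0, each position is
     * the start of exactly one range and the walk ends at the capacity.
     * Assumes 'ranges' is already sorted by start sector. */
    static bool ranges_cover_disk(const struct ia_range *ranges, size_t n,
                                  uint64_t capacity)
    {
            uint64_t sector = 0;

            for (size_t i = 0; i < n; i++) {
                    if (ranges[i].sector != sector)
                            return false;   /* gap or overlap */
                    sector += ranges[i].nr_sectors;
            }
            return sector == capacity;
    }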
|
| /linux/include/trace/events/ |
| block.h |
|    32  __field( sector_t, sector )
|    38  __entry->sector = bh->b_blocknr;
|    44  (unsigned long long)__entry->sector, __entry->size
|    91  __field( sector_t, sector )
|   100  __entry->sector = blk_rq_trace_sector(rq);
|   111  (unsigned long long)__entry->sector, __entry->nr_sector,
|   126  __field( sector_t, sector )
|   136  __entry->sector = blk_rq_pos(rq);
|   148  (unsigned long long)__entry->sector, __entry->nr_sector,
|   198  __field( sector_t, sector )
|   [all …]
|
| bcache.h |
|    18  __field(sector_t, sector )
|    28  __entry->sector = bio->bi_iter.bi_sector;
|    36  __entry->rwbs, (unsigned long long)__entry->sector,
|    96  __field(sector_t, sector )
|   103  __entry->sector = bio->bi_iter.bi_sector;
|   110  (unsigned long long)__entry->sector, __entry->nr_sector)
|   129  __field(sector_t, sector )
|   138  __entry->sector = bio->bi_iter.bi_sector;
|   147  __entry->rwbs, (unsigned long long)__entry->sector,
|   159  __field(sector_t, sector )
|   [all …]
|
| /linux/drivers/scsi/ |
| sr_vendor.c |
|   174  unsigned long sector;  in sr_cd_check() local
|   186  sector = 0; /* the multisession sector offset goes here */  in sr_cd_check()
|   212  sector = buffer[11] + (buffer[10] << 8) +  in sr_cd_check()
|   216  sector = 0;  in sr_cd_check()
|   243  sector = min * CD_SECS * CD_FRAMES + sec * CD_FRAMES + frame;  in sr_cd_check()
|   271  sector = min * CD_SECS * CD_FRAMES + sec * CD_FRAMES + frame;  in sr_cd_check()
|   272  if (sector)  in sr_cd_check()
|   273  sector -= CD_MSF_OFFSET;  in sr_cd_check()
|   309  sector = buffer[11] + (buffer[10] << 8) +  in sr_cd_check()
|   318  sector = 0;  in sr_cd_check()
|   [all …]
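
Lines 243 and 271-273 convert a CD minute/second/frame address to a sector number: 75 frames per second, 60 seconds per minute, and logical block 0 sits at MSF 00:02:00, hence the 150-frame offset subtracted at the end. The same conversion as a standalone helper, using the constant values from linux/cdrom.h:

    /* MSF (minute/second/frame) to logical block address, as in sr_cd_check(). */
    #define CD_SECS       60    /* seconds per minute */
    #define CD_FRAMES     75    /* frames per second */
    #define CD_MSF_OFFSET 150   /* LBA 0 is at MSF 00:02:00 */

    static unsigned long msf_to_lba(int min, int sec, int frame)
    {
            unsigned long sector = min * CD_SECS * CD_FRAMES
                                 + sec * CD_FRAMES + frame;

            return sector ? sector - CD_MSF_OFFSET : 0;
    }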
|
| /linux/fs/fat/ |
| cache.c |
|   305  int fat_get_mapped_cluster(struct inode *inode, sector_t sector,  in fat_get_mapped_cluster() argument
|   313  cluster = sector >> (sbi->cluster_bits - sb->s_blocksize_bits);  in fat_get_mapped_cluster()
|   314  offset = sector & (sbi->sec_per_clus - 1);  in fat_get_mapped_cluster()
|   321  if (*mapped_blocks > last_block - sector)  in fat_get_mapped_cluster()
|   322  *mapped_blocks = last_block - sector;  in fat_get_mapped_cluster()
|   328  static int is_exceed_eof(struct inode *inode, sector_t sector,  in is_exceed_eof() argument
|   336  if (sector >= *last_block) {  in is_exceed_eof()
|   346  if (sector >= *last_block)  in is_exceed_eof()
|   353  int fat_bmap(struct inode *inode, sector_t sector, sector_t *phys,  in fat_bmap() argument
|   362  if (sector < (sbi->dir_entries >> sbi->dir_per_block_bits)) {  in fat_bmap()
|   [all …]
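
Lines 313-314 split a file-relative sector into a cluster index and an offset inside the cluster with a shift and a mask, which works because the cluster size is a power of two. A sketch taking log2(sectors per cluster) directly (the kernel derives that shift as cluster_bits - s_blocksize_bits):

    #include <stdint.h>

    /* Split a file-relative sector into (cluster, offset-in-cluster).
     * cluster_shift is log2 of the sectors-per-cluster count. */
    static void sector_to_cluster(uint64_t sector, unsigned int cluster_shift,
                                  uint64_t *cluster, uint64_t *offset)
    {
            *cluster = sector >> cluster_shift;
            *offset  = sector & (((uint64_t)1 << cluster_shift) - 1);
    }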
|
| /linux/samples/bpf/ |
| tracex3.bpf.c |
|    15  sector_t sector;  member
|    32  .sector = ctx->sector  in bpf_prog1()
|    63  .sector = ctx->sector  in bpf_prog2()
|
| /linux/fs/zonefs/ |
| trace.h |
|    30  __field(sector_t, sector)
|    38  __entry->sector = z->z_sector;
|    43  blk_op_str(__entry->op), __entry->sector,
|    54  __field(sector_t, sector)
|    62  __entry->sector = zonefs_inode_zone(inode)->z_sector;
|    70  __entry->sector, __entry->size, __entry->wpoffset,
|
| /linux/drivers/vdpa/vdpa_sim/ |
| vdpa_sim_blk.c |
|   118  u64 sector;  in vdpasim_blk_handle_req() local
|   156  sector = vdpasim64_to_cpu(vdpasim, hdr.sector);  in vdpasim_blk_handle_req()
|   157  offset = sector << SECTOR_SHIFT;  in vdpasim_blk_handle_req()
|   161  sector != 0) {  in vdpasim_blk_handle_req()
|   164  type, sector);  in vdpasim_blk_handle_req()
|   171  if (!vdpasim_blk_check_range(vdpasim, sector,  in vdpasim_blk_handle_req()
|   194  if (!vdpasim_blk_check_range(vdpasim, sector,  in vdpasim_blk_handle_req()
|   255  sector = le64_to_cpu(range.sector);  in vdpasim_blk_handle_req()
|   256  offset = sector << SECTOR_SHIFT;  in vdpasim_blk_handle_req()
|   277  if (!vdpasim_blk_check_range(vdpasim, sector, num_sectors,  in vdpasim_blk_handle_req()
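
The simulator converts the guest-supplied sector to a byte offset with SECTOR_SHIFT (9) and range-checks every request via vdpasim_blk_check_range() before touching the backing store. A sketch of the same pattern, written in the subtraction form so that sector + num_sectors cannot overflow:

    #include <stdbool.h>
    #include <stdint.h>

    #define SECTOR_SHIFT 9

    /* Validate an untrusted (sector, num_sectors) request against the
     * device capacity, then convert the start to a byte offset. */
    static bool blk_check_range(uint64_t sector, uint64_t num_sectors,
                                uint64_t capacity_sectors, uint64_t *offset)
    {
            if (sector > capacity_sectors ||
                num_sectors > capacity_sectors - sector)
                    return false;
            *offset = sector << SECTOR_SHIFT;
            return true;
    }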
|
| /linux/drivers/md/ |
| dm-log-writes.c |
|    97  __le64 sector;  member
|   126  sector_t sector;  member
|   214  sector_t sector)  in write_metadata() argument
|   223  bio->bi_iter.bi_sector = sector;  in write_metadata()
|   224  bio->bi_end_io = (sector == WRITE_LOG_SUPER_SECTOR) ?  in write_metadata()
|   260  sector_t sector)  in write_inline_data() argument
|   276  bio->bi_iter.bi_sector = sector;  in write_inline_data()
|   308  sector += bio_pages * PAGE_SECTORS;  in write_inline_data()
|   319  struct pending_block *block, sector_t sector)  in log_one_block() argument
|   326  entry.sector = cpu_to_le64(block->sector);  in log_one_block()
|   [all …]
|
| raid5-ppl.c |
|   166  pr_debug("%s: stripe %llu\n", __func__, (unsigned long long)sh->sector);  in ops_run_partial_parity()
|   277  pr_debug("%s: stripe: %llu\n", __func__, (unsigned long long)sh->sector);  in ppl_log_stripe()
|   303  if (!data_disks || dev->sector < data_sector)  in ppl_log_stripe()
|   304  data_sector = dev->sector;  in ppl_log_stripe()
|   328  if ((sh->sector == sh_last->sector + RAID5_STRIPE_SECTORS(conf)) &&  in ppl_log_stripe()
|   461  log->rdev->ppl.sector + log->rdev->ppl.size - log->next_io_sector <  in ppl_submit_iounit()
|   463  log->next_io_sector = log->rdev->ppl.sector;  in ppl_submit_iounit()
|   857  sector_t sector;  in ppl_recover_entry() local
|   876  sector = raid5_compute_sector(conf, r_sector, 0,  in ppl_recover_entry()
|   881  (unsigned long long)sector);  in ppl_recover_entry()
|   [all …]
|
| dm-ebs-target.c |
|    37  static inline sector_t __sector_to_block(struct ebs_c *ec, sector_t sector)  in __sector_to_block() argument
|    39  return sector >> ec->block_shift;  in __sector_to_block()
|    42  static inline sector_t __block_mod(sector_t sector, unsigned int bs)  in __block_mod() argument
|    44  return sector & (bs - 1);  in __block_mod()
|   145  sector_t block, blocks, sector = bio->bi_iter.bi_sector;  in __ebs_discard_bio() local
|   147  block = __sector_to_block(ec, sector);  in __ebs_discard_bio()
|   154  if (__block_mod(sector, ec->u_bs)) {  in __ebs_discard_bio()
|   169  sector_t blocks, sector = bio->bi_iter.bi_sector;  in __ebs_forget_bio() local
|   173  dm_bufio_forget_buffers(ec->bufio, __sector_to_block(ec, sector), blocks);  in __ebs_forget_bio()
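
__sector_to_block() and __block_mod() are the canonical power-of-two tricks: a shift replaces division, a mask replaces modulo. A standalone sketch; block_sectors must be a power of two for the mask form to hold:

    #include <stdint.h>

    /* Sector -> emulated-block index; block_shift = log2(block_sectors). */
    static inline uint64_t sector_to_block(uint64_t sector,
                                           unsigned int block_shift)
    {
            return sector >> block_shift;
    }

    /* Sector offset within its block: sector % block_sectors, done as a
     * mask because block_sectors is a power of two. */
    static inline uint64_t block_mod(uint64_t sector, uint64_t block_sectors)
    {
            return sector & (block_sectors - 1);
    }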
|
| /linux/fs/exfat/ |
| nls.c |
|   648  sector_t sector, unsigned long long num_sectors,  in exfat_load_upcase_table() argument
|   663  num_sectors += sector;  in exfat_load_upcase_table()
|   665  while (sector < num_sectors) {  in exfat_load_upcase_table()
|   668  bh = sb_bread(sb, sector);  in exfat_load_upcase_table()
|   671  (unsigned long long)sector);  in exfat_load_upcase_table()
|   674  sector++;  in exfat_load_upcase_table()
|   742  sector_t sector;  in exfat_create_upcase_table() local
|   773  sector = exfat_cluster_to_sector(sbi, tbl_clu);  in exfat_create_upcase_table()
|   775  ret = exfat_load_upcase_table(sb, sector, num_sectors,  in exfat_create_upcase_table()
|
| /linux/drivers/char/ |
| ps3flash.c |
|    98  u64 size, sector, offset;  in ps3flash_read() local
|   118  sector = *pos / dev->bounce_size * priv->chunk_sectors;  in ps3flash_read()
|   128  res = ps3flash_fetch(dev, sector);  in ps3flash_read()
|   151  sector += priv->chunk_sectors;  in ps3flash_read()
|   167  u64 size, sector, offset;  in ps3flash_write() local
|   187  sector = *pos / dev->bounce_size * priv->chunk_sectors;  in ps3flash_write()
|   198  res = ps3flash_fetch(dev, sector);  in ps3flash_write()
|   199  else if (sector != priv->tag)  in ps3flash_write()
|   219  priv->tag = sector;  in ps3flash_write()
|   226  sector += priv->chunk_sectors;  in ps3flash_write()
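
Lines 118 and 187 turn a byte position into the first sector of the bounce-buffer-sized chunk that contains it: integer division finds the chunk index, which is then rescaled to sectors. A sketch of that mapping with the in-chunk byte offset alongside:

    #include <stdint.h>

    /* Byte position -> first sector of the containing chunk, plus the
     * byte offset inside that chunk.  Truncating division finds the
     * chunk index before rescaling to sectors. */
    static uint64_t pos_to_chunk_sector(uint64_t pos, uint64_t bounce_size,
                                        uint64_t chunk_sectors,
                                        uint64_t *offset)
    {
            *offset = pos % bounce_size;
            return pos / bounce_size * chunk_sectors;
    }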
|
| /linux/drivers/block/null_blk/ |
| main.c |
|   890  static void null_free_sector(struct nullb *nullb, sector_t sector,  in null_free_sector() argument
|   899  idx = sector >> PAGE_SECTORS_SHIFT;  in null_free_sector()
|   900  sector_bit = (sector & SECTOR_MASK);  in null_free_sector()
|   963  sector_t sector, bool for_write, bool is_cache)  in __null_lookup_page() argument
|   970  idx = sector >> PAGE_SECTORS_SHIFT;  in __null_lookup_page()
|   971  sector_bit = (sector & SECTOR_MASK);  in __null_lookup_page()
|   984  sector_t sector, bool for_write, bool ignore_cache)  in null_lookup_page() argument
|   989  page = __null_lookup_page(nullb, sector, for_write, true);  in null_lookup_page()
|   992  return __null_lookup_page(nullb, sector, for_write, false);  in null_lookup_page()
|   996  sector_t sector, bool ignore_cache)  in null_insert_page() argument
|   [all …]
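
null_blk's memory backing store is a tree of pages, each covering PAGE_SECTORS sectors, with a per-page bitmap of valid sectors. Lines 899-900 and 970-971 locate a sector inside it; a sketch assuming 4 KiB pages, so eight 512-byte sectors per page:

    #include <stdint.h>

    #define SECTOR_SHIFT       9
    #define PAGE_SHIFT_        12                           /* 4 KiB pages assumed */
    #define PAGE_SECTORS_SHIFT (PAGE_SHIFT_ - SECTOR_SHIFT) /* = 3 */
    #define PAGE_SECTORS       (1u << PAGE_SECTORS_SHIFT)
    #define SECTOR_MASK_       (PAGE_SECTORS - 1)

    /* Page index from the high bits of the sector, bitmap bit inside the
     * page from the low bits. */
    static void sector_to_page_slot(uint64_t sector, uint64_t *page_idx,
                                    unsigned int *sector_bit)
    {
            *page_idx   = sector >> PAGE_SECTORS_SHIFT;
            *sector_bit = (unsigned int)(sector & SECTOR_MASK_);
    }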
|
| /linux/tools/testing/selftests/ublk/trace/ |
| seq_io.bt |
|    14  if ((uint64)args.sector != $last) {
|    16  args.sector, $last);
|    18  @last_rw[$dev, str($2)] = (args.sector + args.nr_sector);
|
| /linux/drivers/mtd/nand/raw/ |
| sh_flctl.c |
|   484  (struct sh_flctl *flctl, uint8_t *buff, int sector)  in read_ecfiforeg() argument
|   490  res = wait_recfifo_ready(flctl , sector);  in read_ecfiforeg()
|   624  int sector, page_sectors;  in execmd_read_page_sector() local
|   640  for (sector = 0; sector < page_sectors; sector++) {  in execmd_read_page_sector()
|   641  read_fiforeg(flctl, 512, 512 * sector);  in execmd_read_page_sector()
|   644  &flctl->done_buff[mtd->writesize + 16 * sector],  in execmd_read_page_sector()
|   645  sector);  in execmd_read_page_sector()
|   695  int sector, page_sectors;  in execmd_write_page_sector() local
|   708  for (sector = 0; sector < page_sectors; sector++) {  in execmd_write_page_sector()
|   709  write_fiforeg(flctl, 512, 512 * sector);  in execmd_write_page_sector()
|   [all …]
|
| /linux/fs/iomap/ |
| bio.c |
|    37  sector_t sector;  in iomap_bio_read_folio_range() local
|    40  sector = iomap_sector(iomap, pos);  in iomap_bio_read_folio_range()
|    41  if (!bio || bio_end_sector(bio) != sector ||  in iomap_bio_read_folio_range()
|    63  bio->bi_iter.bi_sector = sector;  in iomap_bio_read_folio_range()
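
The test at line 41 decides whether a new range can join the in-flight read bio: only if it begins at the sector just past the bio's current end, which is what bio_end_sector() reports; otherwise a fresh bio is started at the computed sector. The contiguity test in isolation:

    #include <stdbool.h>
    #include <stdint.h>

    /* A pending I/O covering [bio_start, bio_start + bio_sectors) can
     * absorb the next range only if that range is exactly contiguous. */
    static bool bio_can_append(uint64_t bio_start, uint64_t bio_sectors,
                               uint64_t next_sector)
    {
            return bio_start + bio_sectors == next_sector;
    }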
|
| /linux/drivers/mtd/devices/ |
| docg3.c |
|   418  static void doc_setup_addr_sector(struct docg3 *docg3, int sector)  in doc_setup_addr_sector() argument
|   421  doc_flash_address(docg3, sector & 0xff);  in doc_setup_addr_sector()
|   422  doc_flash_address(docg3, (sector >> 8) & 0xff);  in doc_setup_addr_sector()
|   423  doc_flash_address(docg3, (sector >> 16) & 0xff);  in doc_setup_addr_sector()
|   433  static void doc_setup_writeaddr_sector(struct docg3 *docg3, int sector, int ofs)  in doc_setup_writeaddr_sector() argument
|   438  doc_flash_address(docg3, sector & 0xff);  in doc_setup_writeaddr_sector()
|   439  doc_flash_address(docg3, (sector >> 8) & 0xff);  in doc_setup_writeaddr_sector()
|   440  doc_flash_address(docg3, (sector >> 16) & 0xff);  in doc_setup_writeaddr_sector()
|   459  int sector, ret = 0;  in doc_read_seek() local
|   481  sector = (block0 << DOC_ADDR_BLOCK_SHIFT) + (page & DOC_ADDR_PAGE_MASK);  in doc_read_seek()
|   [all …]
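
doc_setup_addr_sector() issues the 24-bit sector address to the chip one byte at a time, least-significant byte first. The same byte split, with a hypothetical write_addr_byte() callback standing in for doc_flash_address():

    #include <stdint.h>

    /* Split a 24-bit sector address into three bytes, LSB first, the
     * order the docg3 address cycle expects. */
    static void setup_addr_sector(int sector, void (*write_addr_byte)(uint8_t))
    {
            write_addr_byte(sector & 0xff);
            write_addr_byte((sector >> 8) & 0xff);
            write_addr_byte((sector >> 16) & 0xff);
    }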
|
| /linux/Documentation/admin-guide/device-mapper/ |
| dm-integrity.rst |
|     6  per-sector tags that can be used for storing integrity information.
|     8  A general problem with storing integrity tags with every sector is that
|     9  writing the sector and the integrity tag must be atomic - i.e. in case of
|    10  crash, either both sector and integrity tag or none of them is written.
|    13  writes sector data and integrity tags into a journal, commits the journal
|    53  2. load the dm-integrity target with one-sector size, the kernel driver
|    67  2. the number of reserved sector at the beginning of the device - the
|   160  an attacker reading the journal could see the last sector numbers
|   161  that were written. From the sector numbers, the attacker can infer
|   166  Protect sector numbers in the journal from accidental or malicious
|   [all …]
|