| /linux/drivers/block/drbd/ |
| H A D | drbd_interval.c | 16 #define NODE_END(node) ((node)->sector + ((node)->size >> 9)) 28 sector_t this_end = this->sector + (this->size >> 9); in drbd_insert_interval() 39 if (this->sector < here->sector) in drbd_insert_interval() 41 else if (this->sector > here->sector) in drbd_insert_interval() 60 * @sector: start sector of @interval 63 * Returns if the tree contains the node @interval with start sector @start. 66 * sector number. 69 drbd_contains_interval(struct rb_root *root, sector_t sector, in drbd_contains_interval() argument 78 if (sector < here->sector) in drbd_contains_interval() 80 else if (sector > here->sector) in drbd_contains_interval() [all …]
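The drbd_interval.c hits above all revolve around one convention: an interval is keyed by its start sector, and its end is the start plus the byte size shifted down by 9 (512-byte sectors), as in the NODE_END() macro. A minimal user-space sketch of that arithmetic, with hypothetical names rather than DRBD's real types:

```c
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for DRBD's interval: start in 512 B sectors,
 * length in bytes, mirroring the NODE_END() macro from the excerpt. */
struct interval {
	uint64_t sector;	/* start sector */
	unsigned int size;	/* length in bytes */
};

static uint64_t interval_end(const struct interval *i)
{
	return i->sector + (i->size >> 9);	/* bytes -> 512 B sectors */
}

/* Two intervals overlap iff each one starts before the other ends. */
static int intervals_overlap(const struct interval *a, const struct interval *b)
{
	return a->sector < interval_end(b) && b->sector < interval_end(a);
}

int main(void)
{
	struct interval a = { .sector = 2048, .size = 4096 };	/* 8 sectors */
	struct interval b = { .sector = 2052, .size = 8192 };	/* 16 sectors */

	printf("a ends at %llu, overlap: %d\n",
	       (unsigned long long)interval_end(&a), intervals_overlap(&a, &b));
	return 0;
}
```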
|
| H A D | drbd_actlog.c | 127 sector_t sector, enum req_op op) in _drbd_md_sync_page_io() argument 144 bio->bi_iter.bi_sector = sector; in _drbd_md_sync_page_io() 178 sector_t sector, enum req_op op) in drbd_md_sync_page_io() argument 187 (unsigned long long)sector, (op == REQ_OP_WRITE) ? "WRITE" : "READ", in drbd_md_sync_page_io() 190 if (sector < drbd_md_first_sector(bdev) || in drbd_md_sync_page_io() 191 sector + 7 > drbd_md_last_sector(bdev)) in drbd_md_sync_page_io() 194 (unsigned long long)sector, in drbd_md_sync_page_io() 197 err = _drbd_md_sync_page_io(device, bdev, sector, op); in drbd_md_sync_page_io() 200 (unsigned long long)sector, in drbd_md_sync_page_io() 245 unsigned first = i->sector >> (AL_EXTENT_SHIFT-9); in drbd_al_begin_io_fastpath() [all …]
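The bounds check in drbd_md_sync_page_io() above ("sector + 7 > drbd_md_last_sector(bdev)") suggests each metadata I/O spans 8 sectors, i.e. one 4 KiB block. A hedged sketch of that range check; the 4 KiB block size is inferred from the excerpt and the concrete boundaries are made up for illustration:

```c
#include <stdint.h>
#include <stdio.h>

/* Sketch only: validate that one 8-sector (4 KiB) metadata block starting
 * at @sector lies entirely inside the metadata area [first, last]. */
static int md_sector_in_range(uint64_t sector, uint64_t first, uint64_t last)
{
	return sector >= first && sector + 7 <= last;
}

int main(void)
{
	uint64_t md_first = 1000000, md_last = 1008191;	/* assumed layout */

	printf("%d %d\n",
	       md_sector_in_range(1000000, md_first, md_last),	/* 1: fits */
	       md_sector_in_range(1008188, md_first, md_last));	/* 0: runs past the end */
	return 0;
}
```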
|
| H A D | drbd_interval.h | 10 sector_t sector; /* start sector of the interval */ member 38 #define drbd_for_each_overlap(i, root, sector, size) \ argument 39 for (i = drbd_find_overlap(root, sector, size); \ 41 i = drbd_next_overlap(i, sector, size))
|
| H A D | drbd_worker.c | 127 drbd_set_out_of_sync(peer_device, peer_req->i.sector, peer_req->i.size); in drbd_endio_write_sec_final() 157 drbd_rs_complete_io(device, i.sector); in drbd_endio_write_sec_final() 183 (unsigned long long)peer_req->i.sector); in drbd_peer_request_endio() 356 sector_t sector = peer_req->i.sector; in w_e_send_csum() local 367 err = drbd_send_drequest_csum(peer_device, sector, size, in w_e_send_csum() 387 static int read_for_csum(struct drbd_peer_device *peer_device, sector_t sector, int size) in read_for_csum() argument 397 peer_req = drbd_alloc_peer_req(peer_device, ID_SYNCER /* unused */, sector, in read_for_csum() 591 sector_t sector; in make_resync_request() local 657 sector = BM_BIT_TO_SECT(bit); in make_resync_request() 659 if (drbd_try_rs_begin_io(peer_device, sector)) { in make_resync_request() [all …]
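make_resync_request() above converts a resync-bitmap bit into a sector with BM_BIT_TO_SECT(). Assuming DRBD's usual 4 KiB-per-bit bitmap granularity (an assumption here, not visible in the excerpt), that conversion is a shift by 12 - 9 = 3; a small sketch:

```c
#include <stdint.h>
#include <stdio.h>

#define BM_BLOCK_SHIFT	12	/* assumed: one bitmap bit covers 4 KiB */
#define SECTOR_SHIFT	9	/* 512 B sectors */

/* Bit number in the resync bitmap -> first sector of the chunk it covers. */
static uint64_t bit_to_sector(uint64_t bit)
{
	return bit << (BM_BLOCK_SHIFT - SECTOR_SHIFT);
}

int main(void)
{
	/* bit 3 covers bytes [12 KiB, 16 KiB), i.e. sectors 24..31 */
	printf("bit 3 starts at sector %llu\n",
	       (unsigned long long)bit_to_sector(3));
	return 0;
}
```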
|
| H A D | drbd_receiver.c | 177 drbd_alloc_peer_req(struct drbd_peer_device *peer_device, u64 id, sector_t sector, in drbd_alloc_peer_req() argument 208 peer_req->i.sector = sector; in drbd_alloc_peer_req() 1336 /* Zero-sector (unknown) and one-sector granularities are the same. */ in drbd_issue_discard_or_zero_out() 1414 if (drbd_issue_discard_or_zero_out(device, peer_req->i.sector, in drbd_issue_peer_discard_or_zero_out() 1452 sector_t sector = peer_req->i.sector; in drbd_submit_peer_request() local 1505 /* > peer_req->i.sector, unless this is the first bio */ in drbd_submit_peer_request() 1506 bio->bi_iter.bi_sector = sector; in drbd_submit_peer_request() 1519 sector += len >> 9; in drbd_submit_peer_request() 1659 read_in_block(struct drbd_peer_device *peer_device, u64 id, sector_t sector, in read_in_block() argument 1709 if (sector + (ds>>9) > capacity) { in read_in_block() [all …]
|
| /linux/block/ |
| H A D | blk-lib.c | 13 static sector_t bio_discard_limit(struct block_device *bdev, sector_t sector) in bio_discard_limit() argument 19 sector += bdev->bd_start_sect; in bio_discard_limit() 22 round_up(sector, discard_granularity >> SECTOR_SHIFT); in bio_discard_limit() 28 if (granularity_aligned_sector != sector) in bio_discard_limit() 29 return granularity_aligned_sector - sector; in bio_discard_limit() 39 sector_t *sector, sector_t *nr_sects, gfp_t gfp_mask) in blk_alloc_discard_bio() argument 41 sector_t bio_sects = min(*nr_sects, bio_discard_limit(bdev, *sector)); in blk_alloc_discard_bio() 50 bio->bi_iter.bi_sector = *sector; in blk_alloc_discard_bio() 52 *sector += bio_sects; in blk_alloc_discard_bio() 63 int __blkdev_issue_discard(struct block_device *bdev, sector_t sector, in __blkdev_issue_discard() argument [all …]
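bio_discard_limit() above caps the first discard bio so that the next bio starts on a discard-granularity boundary: it rounds the (partition-adjusted) sector up to the granularity and, if that moved it, returns only the distance to the boundary. A user-space sketch of that alignment step, with the granularity expressed in sectors and a made-up device limit:

```c
#include <stdint.h>
#include <stdio.h>

/* Round @sector up to the next multiple of @gran (both in 512 B sectors). */
static uint64_t round_up_sectors(uint64_t sector, uint64_t gran)
{
	return ((sector + gran - 1) / gran) * gran;
}

/*
 * How many sectors may the first discard bio cover?  If @sector is not
 * aligned to the granularity, go only up to the next boundary so every
 * later bio is aligned; otherwise the device's own max discard size
 * applies.
 */
static uint64_t first_discard_limit(uint64_t sector, uint64_t gran,
				    uint64_t max_discard_sectors)
{
	uint64_t aligned = round_up_sectors(sector, gran);

	if (aligned != sector)
		return aligned - sector;
	return max_discard_sectors;
}

int main(void)
{
	/* granularity 2048 sectors (1 MiB), request starts at sector 3000 */
	printf("first bio: %llu sectors\n",
	       (unsigned long long)first_discard_limit(3000, 2048, 65536));
	return 0;
}
```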
|
| H A D | blk-ia-ranges.c | 18 return sprintf(buf, "%llu\n", iar->sector); in blk_ia_range_sector_show() 34 .attr = { .name = "sector", .mode = 0444 }, 120 * At this point, iars is the new set of sector access ranges that needs in disk_register_independent_access_ranges() 176 sector_t sector) in disk_find_ia_range() argument 183 if (sector >= iar->sector && in disk_find_ia_range() 184 sector < iar->sector + iar->nr_sectors) in disk_find_ia_range() 196 sector_t sector = 0; in disk_check_ia_ranges() local 204 * ranges do not overlap, that there are no sector holes and that all in disk_check_ia_ranges() 208 tmp = disk_find_ia_range(iars, sector); in disk_check_ia_ranges() 209 if (!tmp || tmp->sector != sector) { in disk_check_ia_ranges() [all …]
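disk_check_ia_ranges() above validates that the independent access ranges cover the whole disk with no overlaps and no sector holes: starting from sector 0 it repeatedly looks for a range that begins exactly at the current sector and advances by its length, and the walk must land exactly on the capacity. A compact user-space sketch of the same idea over a hypothetical range table:

```c
#include <stdint.h>
#include <stdio.h>

struct ia_range {
	uint64_t sector;	/* first sector of the range */
	uint64_t nr_sectors;	/* length in sectors */
};

/* Return 1 if the ranges tile [0, capacity) exactly: no holes, no overlap. */
static int ranges_cover_disk(const struct ia_range *r, int nr, uint64_t capacity)
{
	uint64_t sector = 0, total = 0;
	int i, found;

	for (i = 0; i < nr; i++)
		total += r[i].nr_sectors;
	if (total != capacity)		/* overlap or missing sectors */
		return 0;

	while (sector < capacity) {
		found = 0;
		for (i = 0; i < nr; i++) {
			if (r[i].sector == sector) {
				sector += r[i].nr_sectors;
				found = 1;
				break;
			}
		}
		if (!found)
			return 0;	/* sector hole */
	}
	return sector == capacity;
}

int main(void)
{
	struct ia_range r[] = {
		{ .sector = 0,       .nr_sectors = 1 << 20 },
		{ .sector = 1 << 20, .nr_sectors = 1 << 20 },
	};

	printf("valid: %d\n", ranges_cover_disk(r, 2, 2ULL << 20));
	return 0;
}
```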
|
| H A D | blk-zoned.c | 139 * @sector: Sector from which to report zones 145 * Get zone information starting from the zone containing @sector for at most 147 * To report all zones in a device starting from @sector, the BLK_ALL_ZONES 155 int blkdev_report_zones(struct block_device *bdev, sector_t sector, in blkdev_report_zones() argument 169 if (!nr_zones || sector >= capacity) in blkdev_report_zones() 172 return disk->fops->report_zones(disk, sector, nr_zones, in blkdev_report_zones() 190 * @sector: Start sector of the first zone to operate on 196 * @sector..@sector+@nr_sectors. Specifying the entire disk sector range 202 sector_t sector, sector_t nr_sectors) in blkdev_zone_mgmt() argument 206 sector_t end_sector = sector + nr_sectors; in blkdev_zone_mgmt() [all …]
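blkdev_zone_mgmt() above operates on the range @sector..@sector+@nr_sectors and expects zone-aligned boundaries (zone sizes in the block layer are a power of two in sectors). A hedged user-space sketch of that kind of validation, with a made-up zone size and capacity; the exact corner cases of the kernel check are not reproduced here:

```c
#include <stdint.h>
#include <stdio.h>

/*
 * Sketch: check that a zone-management request covers whole zones and
 * stays inside the device.  Assumes a power-of-two zone size in sectors,
 * which lets alignment be tested with a mask.
 */
static int zone_range_valid(uint64_t sector, uint64_t nr_sectors,
			    uint64_t zone_sectors, uint64_t capacity)
{
	if (!nr_sectors || sector + nr_sectors > capacity)
		return 0;
	if (sector & (zone_sectors - 1))		/* start not zone-aligned */
		return 0;
	if ((nr_sectors & (zone_sectors - 1)) &&	/* length not whole zones... */
	    sector + nr_sectors != capacity)		/* ...unless it ends at the capacity */
		return 0;
	return 1;
}

int main(void)
{
	uint64_t zone = 524288;			/* 256 MiB zones, assumed */
	uint64_t cap = 16 * zone;

	printf("%d %d\n",
	       zone_range_valid(0, cap, zone, cap),		/* whole disk: ok */
	       zone_range_valid(zone / 2, zone, zone, cap));	/* misaligned: no */
	return 0;
}
```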
|
| /linux/include/trace/events/ |
| H A D | block.h | 32 __field( sector_t, sector ) 38 __entry->sector = bh->b_blocknr; 42 TP_printk("%d,%d sector=%llu size=%zu", 44 (unsigned long long)__entry->sector, __entry->size 91 __field( sector_t, sector ) 100 __entry->sector = blk_rq_trace_sector(rq); 111 (unsigned long long)__entry->sector, __entry->nr_sector, 126 __field( sector_t, sector ) 136 __entry->sector = blk_rq_pos(rq); 148 (unsigned long long)__entry->sector, __entry->nr_sector, [all …]
|
| H A D | bcache.h | 18 __field(sector_t, sector ) 28 __entry->sector = bio->bi_iter.bi_sector; 36 __entry->rwbs, (unsigned long long)__entry->sector, 96 __field(sector_t, sector ) 103 __entry->sector = bio->bi_iter.bi_sector; 110 (unsigned long long)__entry->sector, __entry->nr_sector) 129 __field(sector_t, sector ) 138 __entry->sector = bio->bi_iter.bi_sector; 147 __entry->rwbs, (unsigned long long)__entry->sector, 159 __field(sector_t, sector ) [all …]
|
| /linux/include/uapi/linux/ |
| H A D | blkzoned.h | 88 * @start: Zone start in 512 B sector units 89 * @len: Zone length in 512 B sector units 90 * @wp: Zone write pointer location in 512 B sector units 97 * @capacity: Zone usable capacity in 512 B sector units 101 * start, len, capacity and wp use the regular 512 B sector unit, regardless 107 __u64 start; /* Zone start sector */ 122 * @sector: starting sector of report 130 __u64 sector; member 140 * @sector: Starting sector of the first zone to operate on. 144 __u64 sector; member [all …]
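Since this is the UAPI header, the struct blk_zone_report above (its @sector member is the starting sector of the report) can be used directly from user space through the BLKREPORTZONE ioctl declared in the same header. A small example; the device path is an assumption and needs a real zoned block device (for instance null_blk in zoned mode):

```c
#include <fcntl.h>
#include <linux/blkzoned.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	const char *path = argc > 1 ? argv[1] : "/dev/nullb0";	/* assumed device */
	unsigned int nr = 4;
	struct blk_zone_report *rep;
	int fd, i;

	/* report header followed by room for @nr zone descriptors */
	rep = calloc(1, sizeof(*rep) + nr * sizeof(struct blk_zone));
	if (!rep)
		return 1;

	fd = open(path, O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	rep->sector = 0;	/* start reporting from the first zone */
	rep->nr_zones = nr;	/* at most this many zones */

	if (ioctl(fd, BLKREPORTZONE, rep) < 0) {
		perror("BLKREPORTZONE");
		return 1;
	}

	/* the kernel updates nr_zones to the number actually reported */
	for (i = 0; i < (int)rep->nr_zones; i++)
		printf("zone %d: start=%llu len=%llu wp=%llu\n", i,
		       (unsigned long long)rep->zones[i].start,
		       (unsigned long long)rep->zones[i].len,
		       (unsigned long long)rep->zones[i].wp);

	close(fd);
	free(rep);
	return 0;
}
```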
|
| /linux/drivers/block/null_blk/ |
| H A D | zoned.c | 56 sector_t sector = 0; in null_init_zoned_dev() local 134 zone->start = sector; in null_init_zoned_dev() 141 sector += dev->zone_size_sects; in null_init_zoned_dev() 148 zone->start = sector; in null_init_zoned_dev() 164 sector += dev->zone_size_sects; in null_init_zoned_dev() 193 int null_report_zones(struct gendisk *disk, sector_t sector, in null_report_zones() argument 203 first_zone = null_zone_no(dev, sector); in null_report_zones() 241 sector_t sector, unsigned int len) in null_zone_valid_read_len() argument 244 struct nullb_zone *zone = &dev->zones[null_zone_no(dev, sector)]; in null_zone_valid_read_len() 249 sector + nr_sectors <= zone->wp) in null_zone_valid_read_len() [all …]
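null_init_zoned_dev() above lays the zones out by walking a running sector: each zone's start is the current sector, which then advances by dev->zone_size_sects. A hedged sketch of that layout loop over a plain array, ignoring null_blk's distinction between conventional and sequential zones:

```c
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct zone {
	uint64_t start;		/* first sector of the zone */
	uint64_t len;		/* zone size in sectors */
	uint64_t wp;		/* write pointer (== start when empty) */
};

/* Lay out @nr zones of @zone_size_sects sectors back to back from sector 0. */
static struct zone *init_zones(unsigned int nr, uint64_t zone_size_sects)
{
	struct zone *z = calloc(nr, sizeof(*z));
	uint64_t sector = 0;
	unsigned int i;

	if (!z)
		return NULL;
	for (i = 0; i < nr; i++) {
		z[i].start = sector;
		z[i].len = zone_size_sects;
		z[i].wp = sector;	/* empty sequential zone */
		sector += zone_size_sects;
	}
	return z;
}

int main(void)
{
	struct zone *z = init_zones(4, 524288);	/* 4 zones of 256 MiB */

	if (!z)
		return 1;
	printf("zone 3 starts at sector %llu\n", (unsigned long long)z[3].start);
	free(z);
	return 0;
}
```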
|
| H A D | null_blk.h | 132 blk_status_t null_handle_discard(struct nullb_device *dev, sector_t sector, 135 sector_t sector, unsigned int nr_sectors); 136 blk_status_t null_handle_badblocks(struct nullb_cmd *cmd, sector_t sector, 139 sector_t sector, sector_t nr_sectors); 145 int null_report_zones(struct gendisk *disk, sector_t sector, 148 sector_t sector, sector_t nr_sectors); 150 sector_t sector, unsigned int len); 166 enum req_op op, sector_t sector, sector_t nr_sectors) in null_process_zoned_cmd() argument 171 sector_t sector, in null_zone_valid_read_len() argument
|
| /linux/fs/btrfs/ |
| H A D | raid56.c | 137 * A structure to present a sector inside a page, the length is fixed to 268 * Even if the sector is not covered by bio, if it is in cache_rbio_pages() 269 * a data sector it should still be uptodate as it is in cache_rbio_pages() 357 /* Also update the sector->uptodate bits. */ in steal_rbio_page() 372 * Thus if the first sector of the page belongs to data stripes, then in is_data_stripe_page() 690 /* Return a sector from rbio->stripe_sectors, not from the bio list */ 699 /* Grab a sector inside P stripe */ 706 /* Grab a sector inside Q stripe, return NULL if not RAID6 */ 947 * Get a sector pointer specified by its @stripe_nr and @sector_nr. 951 * @sector_nr: Sector number inside the stripe, [all …]
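The raid56.c hits deal with addressing a fixed-size sector inside the pages that back a stripe. Given a sector number within the stripe and the filesystem sector size, the backing page index and the offset inside that page fall out of simple divide/modulo arithmetic; a hedged, user-space sketch (names are illustrative, not btrfs's own):

```c
#include <stdint.h>
#include <stdio.h>

#define PAGE_BYTES	4096u	/* assumed page size for the example */

/* Map sector @sector_nr of a stripe onto (page index, offset in page). */
static void sector_to_page(unsigned int sector_nr, unsigned int sectorsize,
			   unsigned int *page_index, unsigned int *pgoff)
{
	unsigned int byte_off = sector_nr * sectorsize;

	*page_index = byte_off / PAGE_BYTES;
	*pgoff = byte_off % PAGE_BYTES;
}

int main(void)
{
	unsigned int idx, off;

	sector_to_page(9, 4096, &idx, &off);	/* 4 KiB sectors: one per page */
	printf("4K sectors: page %u offset %u\n", idx, off);

	sector_to_page(9, 512, &idx, &off);	/* subpage: 8 sectors per page */
	printf("512B sectors: page %u offset %u\n", idx, off);
	return 0;
}
```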
|
| /linux/fs/hpfs/ |
| H A D | hpfs.h | 29 typedef u32 secno; /* sector number, partition relative */ 31 typedef secno dnode_secno; /* sector number of a dnode */ 32 typedef secno fnode_secno; /* sector number of an fnode */ 33 typedef secno anode_secno; /* sector number of an anode */ 37 /* sector 0 */ 71 /* sector 16 */ 96 __le32 dir_band_start; /* first sector in dir band */ 97 __le32 dir_band_end; /* last sector in dir band */ 105 /* sector 17 */ 120 u8 bad_sector: 1; /* bad sector, corrupted disk (???) */ [all …]
|
| /linux/Documentation/admin-guide/device-mapper/ |
| H A D | dm-integrity.rst | 6 per-sector tags that can be used for storing integrity information. 8 A general problem with storing integrity tags with every sector is that 9 writing the sector and the integrity tag must be atomic - i.e. in case of 10 crash, either both sector and integrity tag or none of them is written. 13 writes sector data and integrity tags into a journal, commits the journal 53 2. load the dm-integrity target with one-sector size, the kernel driver 67 2. the number of reserved sector at the beginning of the device - the 160 an attacker reading the journal could see the last sector numbers 161 that were written. From the sector numbers, the attacker can infer 166 Protect sector numbers in the journal from accidental or malicious [all …]
|
| H A D | dm-crypt.rst | 78 then sectors are encrypted according to their offsets (sector 0 uses key0; 79 sector 1 uses key1 etc.). <keycount> must be a power of two. 82 The IV offset is a sector count that is added to the sector number 91 Starting sector within the device where the encrypted data begins. 138 The device requires additional <bytes> metadata per-sector stored 157 Virtual device will announce this size as a minimal IO and logical sector. 160 IV generators will use sector number counted in <sector_size> units 164 sector will be 8 (without flag) and 1 if iv_large_sectors is present.
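Two of the sector rules quoted above are easy to get wrong: with <keycount> keys (a power of two), sector 0 uses key0, sector 1 uses key1 and so on, and the IV offset is a sector count added to the sector number before the IV is generated, with iv_large_sectors switching the count to <sector_size> units. A hedged sketch of just that index arithmetic, not of the encryption itself; how the offset combines with large crypto sectors is an assumption here:

```c
#include <stdint.h>
#include <stdio.h>

/* Which key encrypts this 512 B sector?  keycount must be a power of two. */
static unsigned int key_index(uint64_t sector, unsigned int keycount)
{
	return (unsigned int)(sector & (keycount - 1));
}

/* Default IV sector: 512 B sector number plus the configured IV offset. */
static uint64_t iv_sector_plain(uint64_t sector512, uint64_t iv_offset)
{
	return sector512 + iv_offset;
}

/* With iv_large_sectors: count in crypto sectors of @sector_size bytes,
 * so eight 512 B sectors share one IV sector number when sector_size is 4096. */
static uint64_t iv_sector_large(uint64_t sector512, unsigned int sector_size)
{
	return sector512 / (sector_size / 512);
}

int main(void)
{
	printf("key for sector 13 of 8 keys: %u\n", key_index(13, 8));	/* 5 */
	printf("plain IV sector (offset 16): %llu\n",
	       (unsigned long long)iv_sector_plain(8, 16));		/* 24 */
	printf("large-sector IV for 512B sector 16, 4K sectors: %llu\n",
	       (unsigned long long)iv_sector_large(16, 4096));		/* 2 */
	return 0;
}
```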
|
| H A D | dm-dust.rst | 22 This emulates the "remapped sector" behavior of a drive with bad 286 specified sector (sector 0x1234, hardcoded in the source code), but 293 When a bad sector occurs on a hard disk drive, reads to that sector 296 the sector may succeed, and result in the sector becoming readable 298 sector (or after a reallocation of the sector). However, there may 303 of a bad sector at a known sector location, at a known time, based
|
| /linux/drivers/scsi/ |
| H A D | sr_vendor.c | 25 * Some XA-Sector tweaking, required for older drives. 90 is followed by a read for the same sector - aeb */ in sr_vendor_init() 174 unsigned long sector; in sr_cd_check() local 186 sector = 0; /* the multisession sector offset goes here */ in sr_cd_check() 212 sector = buffer[11] + (buffer[10] << 8) + in sr_cd_check() 215 /* ignore sector offsets from first track */ in sr_cd_check() 216 sector = 0; in sr_cd_check() 243 sector = min * CD_SECS * CD_FRAMES + sec * CD_FRAMES + frame; in sr_cd_check() 271 sector = min * CD_SECS * CD_FRAMES + sec * CD_FRAMES + frame; in sr_cd_check() 272 if (sector) in sr_cd_check() [all …]
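sr_cd_check() above converts the minute/second/frame address of the last session into a sector with min * CD_SECS * CD_FRAMES + sec * CD_FRAMES + frame. With the standard CD constants (60 seconds per minute, 75 frames per second, as defined in <linux/cdrom.h>), that is plain MSF-to-LBA arithmetic; a user-space sketch:

```c
#include <stdio.h>

#define CD_SECS		60	/* seconds per minute */
#define CD_FRAMES	75	/* frames (sectors) per second */

/* Minute/second/frame -> sector offset, as in the sr_cd_check() excerpt. */
static unsigned long msf_to_sector(unsigned int min, unsigned int sec,
				   unsigned int frame)
{
	return (unsigned long)min * CD_SECS * CD_FRAMES + sec * CD_FRAMES + frame;
}

int main(void)
{
	/* 2 minutes, 0 seconds, 0 frames = 9000 sectors */
	printf("sector offset: %lu\n", msf_to_sector(2, 0, 0));
	return 0;
}
```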
|
| /linux/fs/zonefs/ |
| H A D | trace.h | 30 __field(sector_t, sector) 38 __entry->sector = z->z_sector; 41 TP_printk("bdev=(%d,%d), ino=%lu op=%s, sector=%llu, nr_sectors=%llu", 43 blk_op_str(__entry->op), __entry->sector, 54 __field(sector_t, sector) 62 __entry->sector = zonefs_inode_zone(inode)->z_sector; 68 TP_printk("bdev=(%d, %d), ino=%lu, sector=%llu, size=%zu, wpoffset=%llu, ret=%zu", 70 __entry->sector, __entry->size, __entry->wpoffset,
|
| /linux/fs/fat/ |
| H A D | cache.c | 310 int fat_get_mapped_cluster(struct inode *inode, sector_t sector, in fat_get_mapped_cluster() argument 318 cluster = sector >> (sbi->cluster_bits - sb->s_blocksize_bits); in fat_get_mapped_cluster() 319 offset = sector & (sbi->sec_per_clus - 1); in fat_get_mapped_cluster() 326 if (*mapped_blocks > last_block - sector) in fat_get_mapped_cluster() 327 *mapped_blocks = last_block - sector; in fat_get_mapped_cluster() 333 static int is_exceed_eof(struct inode *inode, sector_t sector, in is_exceed_eof() argument 341 if (sector >= *last_block) { in is_exceed_eof() 351 if (sector >= *last_block) in is_exceed_eof() 358 int fat_bmap(struct inode *inode, sector_t sector, sector_t *phys, in fat_bmap() argument 367 if (sector < (sbi->dir_entries >> sbi->dir_per_block_bits)) { in fat_bmap() [all …]
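fat_get_mapped_cluster() above splits a file-relative sector into a cluster index and a sector offset within that cluster: the cluster is the sector shifted down by (cluster_bits - blocksize_bits), and the offset is the sector masked by (sectors per cluster - 1). A small sketch with concrete, assumed geometry (512 B blocks, 4 KiB clusters, so 8 sectors per cluster):

```c
#include <stdint.h>
#include <stdio.h>

#define BLOCKSIZE_BITS	9	/* 512 B logical blocks (assumed) */
#define CLUSTER_BITS	12	/* 4 KiB clusters -> 8 sectors per cluster */
#define SEC_PER_CLUS	(1u << (CLUSTER_BITS - BLOCKSIZE_BITS))

static void sector_to_cluster(uint64_t sector, uint32_t *cluster, uint32_t *offset)
{
	*cluster = (uint32_t)(sector >> (CLUSTER_BITS - BLOCKSIZE_BITS));
	*offset  = (uint32_t)(sector & (SEC_PER_CLUS - 1));
}

int main(void)
{
	uint32_t cluster, offset;

	sector_to_cluster(100, &cluster, &offset);
	printf("sector 100 -> cluster %u, sector %u within it\n",
	       cluster, offset);	/* cluster 12, sector 4 */
	return 0;
}
```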
|
| /linux/drivers/vdpa/vdpa_sim/ |
| H A D | vdpa_sim_blk.c | 84 "starting sector exceeds the capacity - start: 0x%llx capacity: 0x%x\n", in vdpasim_blk_check_range() 118 u64 sector; in vdpasim_blk_handle_req() local 156 sector = vdpasim64_to_cpu(vdpasim, hdr.sector); in vdpasim_blk_handle_req() 157 offset = sector << SECTOR_SHIFT; in vdpasim_blk_handle_req() 161 sector != 0) { in vdpasim_blk_handle_req() 163 "sector must be 0 for %u request - sector: 0x%llx\n", in vdpasim_blk_handle_req() 164 type, sector); in vdpasim_blk_handle_req() 171 if (!vdpasim_blk_check_range(vdpasim, sector, in vdpasim_blk_handle_req() 194 if (!vdpasim_blk_check_range(vdpasim, sector, in vdpasim_blk_handle_req() 255 sector = le64_to_cpu(range.sector); in vdpasim_blk_handle_req() [all …]
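vdpasim_blk_handle_req() above takes the 512 B sector from the virtio-blk request header, converts it to a byte offset with a shift by SECTOR_SHIFT, and rejects requests whose sector range would run past the backing store's capacity. A user-space sketch of that check and conversion, with an assumed capacity:

```c
#include <stdint.h>
#include <stdio.h>

#define SECTOR_SHIFT	9	/* 512 B sectors */

/* Accept a request of @nr_sectors starting at @sector only if it fits on a
 * device of @capacity sectors; mirrors the range check in the excerpt. */
static int blk_range_ok(uint64_t sector, uint64_t nr_sectors, uint64_t capacity)
{
	return sector < capacity && nr_sectors <= capacity - sector;
}

int main(void)
{
	uint64_t capacity = 262144;		/* 128 MiB backing store, assumed */
	uint64_t sector = 262100, nr = 64;	/* 32 KiB request near the end */

	if (!blk_range_ok(sector, nr, capacity)) {
		fprintf(stderr, "request exceeds capacity\n");
		return 1;
	}
	printf("byte offset: %llu\n", (unsigned long long)(sector << SECTOR_SHIFT));
	return 0;
}
```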
|
| /linux/block/partitions/ |
| H A D | msdos.c | 75 Sector sect; in aix_magic_present() 135 Sector sect; in parse_extended() 229 __le32 s_start; /* start sector no of partition */ 238 __le16 v_sectorsz; /* sector size in bytes */ 254 Sector sect; in parse_solaris_x86() 313 __u32 d_secsize; /* # of bytes per sector */ 323 __u16 d_interleave; /* hardware sector interleave */ 324 __u16 d_trackskew; /* sector 0 skew, per track */ 325 __u16 d_cylskew; /* sector 0 skew, per cylinder */ 342 __le32 p_offset; /* starting sector */ [all …]
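The msdos.c hits read partition start sectors out of on-disk little-endian fields (__le32 s_start, p_offset and so on). For the classic MBR itself the layout is well known: four 16-byte entries at byte offset 0x1BE of sector 0, with the starting LBA at offset 8 and the sector count at offset 12 of each entry. A hedged user-space reader, meant to be run against a disk image file:

```c
#include <stdint.h>
#include <stdio.h>

/* Decode a 32-bit little-endian value, as partition tables store sectors. */
static uint32_t le32(const unsigned char *p)
{
	return (uint32_t)p[0] | (uint32_t)p[1] << 8 |
	       (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
}

int main(int argc, char **argv)
{
	unsigned char mbr[512];
	FILE *f;
	int i;

	if (argc < 2 || !(f = fopen(argv[1], "rb")))
		return 1;
	if (fread(mbr, 1, sizeof(mbr), f) != sizeof(mbr))
		return 1;

	if (mbr[510] != 0x55 || mbr[511] != 0xaa) {	/* boot signature */
		fprintf(stderr, "no MBR signature\n");
		return 1;
	}

	for (i = 0; i < 4; i++) {
		const unsigned char *e = mbr + 0x1be + 16 * i;

		printf("part %d: type 0x%02x, start sector %u, %u sectors\n",
		       i, e[4], le32(e + 8), le32(e + 12));
	}
	fclose(f);
	return 0;
}
```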
|
| /linux/drivers/md/ |
| H A D | raid5-ppl.c | 40 * sh->sector dd0 dd1 dd2 ppl 48 * data_sector is the first raid sector of the modified data, data_size is the 166 pr_debug("%s: stripe %llu\n", __func__, (unsigned long long)sh->sector); in ops_run_partial_parity() 277 pr_debug("%s: stripe: %llu\n", __func__, (unsigned long long)sh->sector); in ppl_log_stripe() 303 if (!data_disks || dev->sector < data_sector) in ppl_log_stripe() 304 data_sector = dev->sector; in ppl_log_stripe() 328 if ((sh->sector == sh_last->sector + RAID5_STRIPE_SECTORS(conf)) && in ppl_log_stripe() 420 pr_debug("%s: seq: %llu size: %u sector: %llu dev: %pg\n", in ppl_submit_iounit_bio() 461 log->rdev->ppl.sector + log->rdev->ppl.size - log->next_io_sector < in ppl_submit_iounit() 463 log->next_io_sector = log->rdev->ppl.sector; in ppl_submit_iounit() [all …]
|
| /linux/include/linux/ |
| H A D | blkdev.h | 217 * Independent sector access ranges. This is always NULL for 436 int blkdev_report_zones(struct block_device *bdev, sector_t sector, 450 * and must include all sectors within the disk capacity (no sector holes 458 sector_t sector; member 733 static inline unsigned int disk_zone_no(struct gendisk *disk, sector_t sector) in disk_zone_no() argument 737 return sector >> ilog2(disk->queue->limits.chunk_sectors); in disk_zone_no() 903 * disk_zone_capacity - returns the zone capacity of zone containing @sector 905 * @sector: sector number within the querying zone 907 * Returns the zone capacity of a zone containing @sector. @sector can be any 908 * sector in the zone. [all …]
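disk_zone_no() above maps a sector to its zone number with a shift by ilog2(chunk_sectors), which works because the block layer keeps the zone size a power of two in sectors. A small user-space sketch of the same mapping, plus the sector's offset inside its zone; the zone size is assumed:

```c
#include <stdint.h>
#include <stdio.h>

/* ilog2 for a power-of-two value (no error handling, illustration only). */
static unsigned int ilog2_u64(uint64_t v)
{
	unsigned int r = 0;

	while (v >>= 1)
		r++;
	return r;
}

static uint64_t zone_no(uint64_t sector, uint64_t zone_sectors)
{
	return sector >> ilog2_u64(zone_sectors);
}

static uint64_t zone_offset(uint64_t sector, uint64_t zone_sectors)
{
	return sector & (zone_sectors - 1);
}

int main(void)
{
	uint64_t zone_sectors = 524288;		/* 256 MiB zones, assumed */
	uint64_t sector = 3 * zone_sectors + 100;	/* 100 sectors into zone 3 */

	printf("zone %llu, offset %llu\n",
	       (unsigned long long)zone_no(sector, zone_sectors),
	       (unsigned long long)zone_offset(sector, zone_sectors));
	return 0;
}
```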
|