Lines matching refs: bdev (NVMe target zoned namespace backend, drivers/nvme/target/zns.c)
37 u8 zasl = nvmet_zasl(bdev_max_zone_append_sectors(ns->bdev)); in nvmet_bdev_zns_enable()
38 struct gendisk *bd_disk = ns->bdev->bd_disk; in nvmet_bdev_zns_enable()
52 if (get_capacity(bd_disk) & (bdev_zone_sectors(ns->bdev) - 1)) in nvmet_bdev_zns_enable()
59 ret = blkdev_report_zones(ns->bdev, 0, bdev_nr_zones(ns->bdev), in nvmet_bdev_zns_enable()
64 ns->blksize_shift = blksize_bits(bdev_logical_block_size(ns->bdev)); in nvmet_bdev_zns_enable()
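The nvmet_bdev_zns_enable() matches above center on two checks before a zoned block device is exposed as a ZNS namespace: the disk capacity must be a whole number of zones, and the logical block size is cached as a shift. A minimal sketch of that pattern, using a hypothetical my_ns/my_zns_enable() rather than the real nvmet_ns plumbing:

#include <linux/blkdev.h>

/* Illustrative stand-in for the namespace state; not the nvmet_ns layout. */
struct my_ns {
        struct block_device *bdev;
        u8 blksize_shift;
};

static int my_zns_enable(struct my_ns *ns)
{
        struct gendisk *disk = ns->bdev->bd_disk;

        /* Capacity must be an integral number of zones (zone size is a power of two). */
        if (get_capacity(disk) & (bdev_zone_sectors(ns->bdev) - 1))
                return -EINVAL;

        /* Cache log2 of the logical block size for later LBA conversions. */
        ns->blksize_shift = blksize_bits(bdev_logical_block_size(ns->bdev));
        return 0;
}

The real enable path additionally derives a zone append size limit (line 37) and walks all zones with blkdev_report_zones() (line 59); both are omitted from this sketch.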
123 if (!bdev_is_zoned(req->ns->bdev)) { in nvmet_execute_identify_ns_zns()
129 zsze = (bdev_zone_sectors(req->ns->bdev) << 9) >> in nvmet_execute_identify_ns_zns()
133 mor = bdev_max_open_zones(req->ns->bdev); in nvmet_execute_identify_ns_zns()
140 mar = bdev_max_active_zones(req->ns->bdev); in nvmet_execute_identify_ns_zns()
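For the Identify Namespace (ZNS) data, the matches show block-layer geometry being converted into NVMe units: the zone size goes from 512-byte sectors to logical blocks, and the open/active zone limits come from bdev_max_open_zones()/bdev_max_active_zones(). A small sketch of the size conversion, assuming blksize_shift holds log2 of the logical block size:

#include <linux/blkdev.h>

/* Zone size: 512-byte sectors -> bytes -> logical blocks (SECTOR_SHIFT is 9, matching the << 9 above). */
static u64 my_zone_size_in_lbas(struct block_device *bdev, u8 blksize_shift)
{
        return (bdev_zone_sectors(bdev) << SECTOR_SHIFT) >> blksize_shift;
}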
159 if (sect >= get_capacity(req->ns->bdev->bd_disk)) { in nvmet_bdev_validate_zone_mgmt_recv()
255 return bdev_nr_zones(req->ns->bdev) - bdev_zone_no(req->ns->bdev, sect); in nvmet_req_nr_zones_from_slba()
294 ret = blkdev_report_zones(req->ns->bdev, start_sect, req_slba_nr_zones, in nvmet_bdev_zone_zmgmt_recv_work()
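The Zone Management Receive matches bound the report: a start sector past the disk capacity is rejected, and the zone count is clamped to the zones remaining from that sector, which then feeds blkdev_report_zones(). A sketch of that bounds logic with a hypothetical helper name:

#include <linux/blkdev.h>

/* How many zones lie from 'sect' to the end of the device? */
static int my_zones_from_sector(struct block_device *bdev, sector_t sect,
                                unsigned int *nr_zones)
{
        if (sect >= get_capacity(bdev->bd_disk))
                return -EINVAL;

        *nr_zones = bdev_nr_zones(bdev) - bdev_zone_no(bdev, sect);
        return 0;
}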
398 struct block_device *bdev = req->ns->bdev; in nvmet_bdev_zone_mgmt_emulate_all() local
399 unsigned int nr_zones = bdev_nr_zones(bdev); in nvmet_bdev_zone_mgmt_emulate_all()
408 GFP_NOIO, bdev->bd_disk->node_id); in nvmet_bdev_zone_mgmt_emulate_all()
415 ret = blkdev_report_zones(bdev, 0, nr_zones, zmgmt_send_scan_cb, &d); in nvmet_bdev_zone_mgmt_emulate_all()
425 while (sector < bdev_nr_sectors(bdev)) { in nvmet_bdev_zone_mgmt_emulate_all()
426 if (test_bit(disk_zone_no(bdev->bd_disk, sector), d.zbitmap)) { in nvmet_bdev_zone_mgmt_emulate_all()
427 bio = blk_next_bio(bio, bdev, 0, in nvmet_bdev_zone_mgmt_emulate_all()
434 sector += bdev_zone_sectors(bdev); in nvmet_bdev_zone_mgmt_emulate_all()
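nvmet_bdev_zone_mgmt_emulate_all() first fills a bitmap through a blkdev_report_zones() callback, then walks every zone start and issues a bio for each zone whose bit is set. A stripped-down sketch of the second half of that loop, with the bitmap assumed to be already populated and the bio plumbing omitted:

#include <linux/bitops.h>
#include <linux/blkdev.h>

static void my_walk_flagged_zones(struct block_device *bdev,
                                  unsigned long *zbitmap)
{
        sector_t sector = 0;

        /* Step through the device one zone at a time. */
        while (sector < bdev_nr_sectors(bdev)) {
                if (test_bit(disk_zone_no(bdev->bd_disk, sector), zbitmap)) {
                        /* a per-zone REQ_OP_ZONE_* bio would be chained here */
                }
                sector += bdev_zone_sectors(bdev);
        }
}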
454 ret = blkdev_zone_mgmt(req->ns->bdev, REQ_OP_ZONE_RESET, 0, in nvmet_bdev_execute_zmgmt_send_all()
455 get_capacity(req->ns->bdev->bd_disk)); in nvmet_bdev_execute_zmgmt_send_all()
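When no per-zone emulation is needed, a "manage all zones" request collapses into a single call spanning the whole disk, as in the matched blkdev_zone_mgmt() invocation. A one-function sketch:

#include <linux/blkdev.h>

/* Reset every zone with one range covering the full capacity. */
static int my_reset_all_zones(struct block_device *bdev)
{
        return blkdev_zone_mgmt(bdev, REQ_OP_ZONE_RESET, 0,
                                get_capacity(bdev->bd_disk));
}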
477 struct block_device *bdev = req->ns->bdev; in nvmet_bdev_zmgmt_send_work() local
478 sector_t zone_sectors = bdev_zone_sectors(bdev); in nvmet_bdev_zmgmt_send_work()
494 if (sect >= get_capacity(bdev->bd_disk)) { in nvmet_bdev_zmgmt_send_work()
506 ret = blkdev_zone_mgmt(bdev, op, sect, zone_sectors); in nvmet_bdev_zmgmt_send_work()
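The single-zone send path in nvmet_bdev_zmgmt_send_work() does the same at zone granularity: bounds-check the target sector, then operate on exactly one zone's worth of sectors. A hedged sketch, where op would be one of REQ_OP_ZONE_OPEN/CLOSE/FINISH/RESET:

#include <linux/blkdev.h>

static int my_one_zone_mgmt(struct block_device *bdev, enum req_op op,
                            sector_t sect)
{
        sector_t zone_sectors = bdev_zone_sectors(bdev);

        /* The target sector must fall inside the device. */
        if (sect >= get_capacity(bdev->bd_disk))
                return -EINVAL;

        return blkdev_zone_mgmt(bdev, op, sect, zone_sectors);
}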
549 bdev_max_zone_append_sectors(req->ns->bdev) << SECTOR_SHIFT) { in nvmet_bdev_execute_zone_append()
560 if (sect >= get_capacity(req->ns->bdev->bd_disk)) { in nvmet_bdev_execute_zone_append()
566 if (sect & (bdev_zone_sectors(req->ns->bdev) - 1)) { in nvmet_bdev_execute_zone_append()
574 bio_init(bio, req->ns->bdev, req->inline_bvec, in nvmet_bdev_execute_zone_append()
577 bio = bio_alloc(req->ns->bdev, req->sg_cnt, opf, GFP_KERNEL); in nvmet_bdev_execute_zone_append()
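Finally, the Zone Append matches show three bdev-derived constraints before the bio is built: the transfer must fit within bdev_max_zone_append_sectors(), the target sector must lie within the capacity, and it must sit on a zone boundary; the bio is then initialized or allocated against the same bdev. A compact sketch of those checks folded into a hypothetical prep helper:

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/err.h>

static struct bio *my_zone_append_prep(struct block_device *bdev,
                                       sector_t sect, size_t len,
                                       unsigned short nr_vecs, blk_opf_t opf)
{
        /* Payload must fit the device's zone append limit (in bytes). */
        if (len > bdev_max_zone_append_sectors(bdev) << SECTOR_SHIFT)
                return ERR_PTR(-EINVAL);

        /* Appends target a zone start and must lie within the disk. */
        if (sect >= get_capacity(bdev->bd_disk) ||
            (sect & (bdev_zone_sectors(bdev) - 1)))
                return ERR_PTR(-EINVAL);

        return bio_alloc(bdev, nr_vecs, opf, GFP_KERNEL);
}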