Searched refs:queue_max_hw_sectors (Results 1 – 14 of 14) sorted by relevance
276 unsigned int max_sectors = queue_max_hw_sectors(rq->q); in bio_map_user_iov()
769 if (len > (queue_max_hw_sectors(q) << 9)) in blk_rq_map_kern()
318 if (bytes >> SECTOR_SHIFT > queue_max_hw_sectors(q)) in bio_integrity_map_user()
445 QUEUE_RO_ENTRY(queue_max_hw_sectors, "max_hw_sectors_kb");
1063 queue_max_hw_sectors(q), &same_page); in bio_add_pc_page()
580 return sprintf(buf, "%u\n", queue_max_hw_sectors(sdev->request_queue)); in max_sectors_show()
424 if (hdr->dxfer_len > (queue_max_hw_sectors(sdev->request_queue) << 9)) in sg_io()
553 cpu_to_le32(queue_max_hw_sectors(bdev_get_queue(bdev))); in rnbd_srv_fill_msg_open_rsp()
132 dev->dev_attrib.hw_max_sectors = mult_frac(queue_max_hw_sectors(q), in iblock_configure_device()
1200 static inline unsigned int queue_max_hw_sectors(const struct request_queue *q) in queue_max_hw_sectors() function
542 queue_max_hw_sectors(q) << SECTOR_SHIFT); in virtblk_alloc_report_buffer()
299 min_t(size_t, bufsize, queue_max_hw_sectors(q) << SECTOR_SHIFT); in ublk_alloc_report_buffer()
622 max_bio_size = queue_max_hw_sectors(device->rq_queue) << 9; in make_resync_request()
1265 unsigned int now = queue_max_hw_sectors(q) << 9; in drbd_reconsider_queue_parameters()
1274 queue_max_hw_sectors(b) << SECTOR_SHIFT; in drbd_reconsider_queue_parameters()
925 max_bio_size = queue_max_hw_sectors(q) << 9; in drbd_send_sizes()
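Every hit above follows the same convention: queue_max_hw_sectors() returns the hardware transfer limit in 512-byte sectors, and callers shift left by SECTOR_SHIFT (9) to compare it against byte counts. Below is a minimal sketch: the definition itself (the hit marked "function" above, from include/linux/blkdev.h), plus a hypothetical helper, transfer_fits_hw_limit(), which is not a kernel function and only illustrates the "<< SECTOR_SHIFT" / "<< 9" conversion pattern seen in blk_rq_map_kern(), sg_io(), and the drbd hits.

    /* As defined in include/linux/blkdev.h (the hit marked "function"):
     * the limit lives in the queue limits, in units of 512-byte sectors.
     *
     *   static inline unsigned int
     *   queue_max_hw_sectors(const struct request_queue *q)
     *   {
     *           return q->limits.max_hw_sectors;
     *   }
     */

    #include <linux/blkdev.h>   /* struct request_queue, SECTOR_SHIFT */

    /* Hypothetical helper, for illustration only: convert the sector
     * limit to bytes and check a requested transfer length against it. */
    static bool transfer_fits_hw_limit(const struct request_queue *q, size_t len)
    {
            return len <= ((size_t)queue_max_hw_sectors(q) << SECTOR_SHIFT);
    }

One caveat on units: the QUEUE_RO_ENTRY hit exposes this limit through sysfs as max_hw_sectors_kb, so the value read from /sys/block/<dev>/queue/max_hw_sectors_kb is reported in kibibytes (sectors >> 1), not in sectors.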