Lines Matching full:sio
309 #define SIO_SET_OFFSET(sio, x)	DVA_SET_OFFSET(&(sio)->sio_dva[0], x)
310 #define SIO_SET_ASIZE(sio, x)	DVA_SET_ASIZE(&(sio)->sio_dva[0], x)
311 #define SIO_GET_OFFSET(sio)	DVA_GET_OFFSET(&(sio)->sio_dva[0])
312 #define SIO_GET_ASIZE(sio)	DVA_GET_ASIZE(&(sio)->sio_dva[0])
313 #define SIO_GET_END_OFFSET(sio) \
314 	(SIO_GET_OFFSET(sio) + SIO_GET_ASIZE(sio))
315 #define SIO_GET_MUSED(sio) \
316 	(sizeof (scan_io_t) + ((sio)->sio_nr_dvas * sizeof (dva_t)))
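
The SIO_* accessors pack a scan_io_t's location into its first DVA and size its memory charge by the number of trailing DVAs. Below is a minimal, self-contained sketch of that sizing idea; the dva_t and scan_io_t definitions are simplified stand-ins, not the real ZFS structures, and only model the point that a scan_io_t carries 1..SPA_DVAS_PER_BP DVAs, so SIO_GET_MUSED() depends on sio_nr_dvas.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define SPA_DVAS_PER_BP	3	/* a block pointer holds at most 3 DVAs */

typedef struct dva { uint64_t dva_word[2]; } dva_t;	/* simplified stand-in */

typedef struct scan_io {
	int8_t	sio_nr_dvas;	/* how many trailing DVAs are valid */
	dva_t	sio_dva[];	/* flexible array: 1..SPA_DVAS_PER_BP entries */
} scan_io_t;

/* Memory charged to the queue for one sio, mirroring SIO_GET_MUSED(). */
#define SIO_GET_MUSED(sio) \
	(sizeof (scan_io_t) + ((sio)->sio_nr_dvas * sizeof (dva_t)))

int
main(void)
{
	for (int ndvas = 1; ndvas <= SPA_DVAS_PER_BP; ndvas++) {
		scan_io_t *sio = malloc(sizeof (scan_io_t) +
		    ndvas * sizeof (dva_t));
		assert(sio != NULL);
		sio->sio_nr_dvas = ndvas;
		printf("%d DVA(s): %zu bytes charged\n", ndvas,
		    SIO_GET_MUSED(sio));
		free(sio);
	}
	return (0);
}
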
362 scan_io_t *sio);
369 /* sio->sio_nr_dvas must be set so we know which cache to free from */
371 sio_free(scan_io_t *sio)
373 	ASSERT3U(sio->sio_nr_dvas, >, 0);
374 	ASSERT3U(sio->sio_nr_dvas, <=, SPA_DVAS_PER_BP);
376 	kmem_cache_free(sio_cache[sio->sio_nr_dvas - 1], sio);
379 /* It is up to the caller to set sio->sio_nr_dvas for freeing */
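
sio_free() picks its kmem cache from sio_nr_dvas, so allocation and free must agree on the DVA count. Here is a hedged sketch of that pairing, using malloc()/free() as stand-ins for the per-size sio_cache[] kmem caches; sio_cache indexing and the assertions come from the fragments above, everything else is illustrative.

#include <assert.h>
#include <stdint.h>
#include <stdlib.h>

#define SPA_DVAS_PER_BP	3

typedef struct dva { uint64_t dva_word[2]; } dva_t;	/* simplified stand-in */

typedef struct scan_io {
	int8_t	sio_nr_dvas;
	dva_t	sio_dva[];
} scan_io_t;

/*
 * Stand-in for sio_alloc(): one allocation size per possible DVA count,
 * which is what the sio_cache[] kmem caches provide in the real code.
 */
static scan_io_t *
sio_alloc(unsigned short nr_dvas)
{
	assert(nr_dvas > 0 && nr_dvas <= SPA_DVAS_PER_BP);
	return (malloc(sizeof (scan_io_t) + nr_dvas * sizeof (dva_t)));
}

/* sio->sio_nr_dvas must already be set so we know which cache to free from. */
static void
sio_free(scan_io_t *sio)
{
	assert(sio->sio_nr_dvas > 0 && sio->sio_nr_dvas <= SPA_DVAS_PER_BP);
	free(sio);	/* real code: kmem_cache_free(sio_cache[sio->sio_nr_dvas - 1], sio) */
}

int
main(void)
{
	scan_io_t *sio = sio_alloc(2);
	sio->sio_nr_dvas = 2;	/* the caller sets this before the sio is freed */
	sio_free(sio);
	return (0);
}
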
434 sio2bp(const scan_io_t *sio, blkptr_t *bp)
437 	bp->blk_prop = sio->sio_blk_prop;
438 	BP_SET_PHYSICAL_BIRTH(bp, sio->sio_phys_birth);
439 	BP_SET_LOGICAL_BIRTH(bp, sio->sio_birth);
441 	bp->blk_cksum = sio->sio_cksum;
443 	ASSERT3U(sio->sio_nr_dvas, >, 0);
444 	ASSERT3U(sio->sio_nr_dvas, <=, SPA_DVAS_PER_BP);
446 	memcpy(bp->blk_dva, sio->sio_dva, sio->sio_nr_dvas * sizeof (dva_t));
450 bp2sio(const blkptr_t *bp, scan_io_t *sio, int dva_i)
452 	sio->sio_blk_prop = bp->blk_prop;
453 	sio->sio_phys_birth = BP_GET_PHYSICAL_BIRTH(bp);
454 	sio->sio_birth = BP_GET_LOGICAL_BIRTH(bp);
455 	sio->sio_cksum = bp->blk_cksum;
456 	sio->sio_nr_dvas = BP_GET_NDVAS(bp);
459 	 * Copy the DVAs to the sio. We need all copies of the block so
462 	 * in the sio since this is the primary one that we want to issue.
464 	for (int i = 0, j = dva_i; i < sio->sio_nr_dvas; i++, j++) {
465 		sio->sio_dva[i] = bp->blk_dva[j % sio->sio_nr_dvas];
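
bp2sio() copies every DVA of the block pointer but rotates them so the DVA selected by dva_i lands in sio_dva[0]; sio2bp() later copies them straight back when the I/O is issued. A standalone demo of just that rotation, with small integers standing in for DVAs:

#include <stdio.h>

int
main(void)
{
	/* Three integers standing in for the DVAs of one block pointer. */
	int blk_dva[3] = { 100, 200, 300 };
	int sio_dva[3];
	int nr_dvas = 3;

	for (int dva_i = 0; dva_i < nr_dvas; dva_i++) {
		/* The same rotation as bp2sio(): DVA dva_i becomes sio_dva[0]. */
		for (int i = 0, j = dva_i; i < nr_dvas; i++, j++)
			sio_dva[i] = blk_dva[j % nr_dvas];

		printf("dva_i=%d -> %d %d %d\n", dva_i,
		    sio_dva[0], sio_dva[1], sio_dva[2]);
	}
	return (0);
}
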
in dsl_scan_ddt_entry():
2985 	 * However, dsl_scan_sync() expects that no new sio's will
2988 	 * completed. This check ensures we do not issue new sio's
in scan_io_queue_issue():
3258 	scan_io_t *sio;
3261 	while ((sio = list_head(io_list)) != NULL) {
3269 		sio2bp(sio, &bp);
3270 		scan_exec_io(scn->scn_dp, &bp, sio->sio_flags,
3271 		    &sio->sio_zb, queue);
3274 		sio_free(sio);
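
These fragments show the issue path draining a gathered list: each sio is converted back into a block pointer with sio2bp(), handed to scan_exec_io(), and then freed. A rough self-contained sketch of that drain-and-free shape, using a plain singly linked list and a print statement in place of list_t and scan_exec_io(); all names here are illustrative.

#include <stdio.h>
#include <stdlib.h>

struct fake_sio {
	struct fake_sio	*next;
	long		offset;
};

/* Stand-in for sio2bp() + scan_exec_io(): just report the would-be I/O. */
static void
issue_io(const struct fake_sio *sio)
{
	printf("issue scan I/O at offset %ld\n", sio->offset);
}

int
main(void)
{
	/* Build a short "io_list" of queued sios. */
	struct fake_sio *head = NULL;
	for (long off = 3; off >= 1; off--) {
		struct fake_sio *sio = malloc(sizeof (*sio));
		sio->offset = off * 1024;
		sio->next = head;
		head = sio;
	}

	/* Drain the list the way scan_io_queue_issue() does: pop, issue, free. */
	while (head != NULL) {
		struct fake_sio *sio = head;
		head = sio->next;
		issue_io(sio);
		free(sio);
	}
	return (0);
}
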
in scan_io_queue_gather():
3290 	scan_io_t *srch_sio, *sio, *next_sio;
3306 	sio = avl_find(&queue->q_sios_by_addr, srch_sio, &idx);
3309 	if (sio == NULL)
3310 		sio = avl_nearest(&queue->q_sios_by_addr, idx, AVL_AFTER);
3312 	while (sio != NULL && SIO_GET_OFFSET(sio) < zfs_rs_get_end(rs,
3314 		ASSERT3U(SIO_GET_OFFSET(sio), >=, zfs_rs_get_start(rs,
3316 		ASSERT3U(SIO_GET_END_OFFSET(sio), <=, zfs_rs_get_end(rs,
3319 		next_sio = AVL_NEXT(&queue->q_sios_by_addr, sio);
3320 		avl_remove(&queue->q_sios_by_addr, sio);
3323 		queue->q_sio_memused -= SIO_GET_MUSED(sio);
3325 		bytes_issued += SIO_GET_ASIZE(sio);
3327 		list_insert_tail(list, sio);
3328 		sio = next_sio;
3337 	if (sio != NULL && SIO_GET_OFFSET(sio) < zfs_rs_get_end(rs,
3342 		    SIO_GET_OFFSET(sio), zfs_rs_get_end(rs,
3343 		    queue->q_exts_by_addr) - SIO_GET_OFFSET(sio));
3344 		queue->q_last_ext_addr = SIO_GET_OFFSET(sio);
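
The gather loop pulls the sios for one extent out of the offset-sorted AVL tree: find the first sio at or after the extent (avl_find(), falling back to avl_nearest(..., AVL_AFTER)), then walk forward with AVL_NEXT() while the sio still starts before the extent's end, moving each one to the issue list and adjusting q_sio_memused and bytes_issued. The fragments at 3337-3344 show that the walk can also stop partway through an extent, in which case q_last_ext_addr records where to resume. A self-contained sketch of that walk over a plain sorted array; the array, names, and sizes below are illustrative, not the real data structures.

#include <stdio.h>

/* Offset-sorted stand-ins for the sios in q_sios_by_addr. */
struct fake_sio {
	long	offset;		/* SIO_GET_OFFSET() */
	long	asize;		/* SIO_GET_ASIZE() */
};

int
main(void)
{
	struct fake_sio sios[] = {
		{ 1000, 512 }, { 2048, 512 }, { 4096, 1024 }, { 9000, 512 },
	};
	int nsios = sizeof (sios) / sizeof (sios[0]);

	/* One extent from q_exts_by_addr: gather everything in [start, end). */
	long start = 2000, end = 8192;
	long bytes_issued = 0;

	/* avl_find()/avl_nearest(AVL_AFTER): first sio at or after start. */
	int i = 0;
	while (i < nsios && sios[i].offset < start)
		i++;

	/* Walk forward (AVL_NEXT()) while the sio begins inside the extent. */
	while (i < nsios && sios[i].offset < end) {
		printf("gather sio at %ld (asize %ld)\n",
		    sios[i].offset, sios[i].asize);
		bytes_issued += sios[i].asize;
		i++;
	}
	printf("bytes_issued = %ld\n", bytes_issued);
	return (0);
}
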
in scan_io_queues_run_one():
3433 	scan_io_t *sio;
3482 	 * queue lock. The sio queue won't be updated by
3506 	while ((sio = list_remove_head(&sio_list)) != NULL)
3507 		scan_io_queue_insert_impl(queue, sio);
4713 scan_io_queue_insert_impl(dsl_scan_io_queue_t *queue, scan_io_t *sio)
4722 	if (avl_find(&queue->q_sios_by_addr, sio, &idx) != NULL) {
4724 		sio_free(sio);
4727 	avl_insert(&queue->q_sios_by_addr, sio, idx);
4728 	queue->q_sio_memused += SIO_GET_MUSED(sio);
4729 	zfs_range_tree_add(queue->q_exts_by_addr, SIO_GET_OFFSET(sio),
4730 	    SIO_GET_ASIZE(sio));
in scan_io_queue_insert():
4743 	scan_io_t *sio = sio_alloc(BP_GET_NDVAS(bp));
4748 	bp2sio(bp, sio, dva_i);
4749 	sio->sio_flags = zio_flags;
4750 	sio->sio_zb = *zb;
4753 	scan_io_queue_insert_impl(queue, sio);
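
Taken together, these two insert fragments show the enqueue path: scan_io_queue_insert() allocates a sio sized by BP_GET_NDVAS(bp), fills it with bp2sio(), and hands it to scan_io_queue_insert_impl(), which frees the sio instead of inserting it if one at the same address is already queued, and otherwise inserts it, charges SIO_GET_MUSED() to q_sio_memused, and adds the extent to q_exts_by_addr. A hedged sketch of that duplicate-checked insert, with a small fixed array standing in for the AVL tree and plain struct sizes standing in for the memory accounting:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct fake_sio { long offset; long asize; };

struct fake_queue {
	struct fake_sio	*sios[16];	/* stand-in for the q_sios_by_addr AVL tree */
	int		nsios;
	long		sio_memused;	/* stand-in for q_sio_memused */
};

/* avl_find() stand-in: is a sio at this offset already queued? */
static bool
queue_contains(const struct fake_queue *q, long offset)
{
	for (int i = 0; i < q->nsios; i++)
		if (q->sios[i]->offset == offset)
			return (true);
	return (false);
}

/* Duplicate offsets are dropped (freed); new offsets are inserted and charged. */
static void
queue_insert(struct fake_queue *q, struct fake_sio *sio)
{
	if (queue_contains(q, sio->offset)) {
		free(sio);			/* sio_free() in the real code */
		return;
	}
	q->sios[q->nsios++] = sio;
	q->sio_memused += (long)sizeof (*sio);	/* SIO_GET_MUSED() in the real code */
}

int
main(void)
{
	struct fake_queue q = { .nsios = 0, .sio_memused = 0 };

	for (int pass = 0; pass < 2; pass++) {	/* the second pass is all duplicates */
		for (long off = 0; off < 3; off++) {
			struct fake_sio *sio = malloc(sizeof (*sio));
			sio->offset = off * 4096;
			sio->asize = 4096;
			queue_insert(&q, sio);
		}
	}
	printf("%d sios queued, %ld bytes charged\n", q.nsios, q.sio_memused);

	for (int i = 0; i < q.nsios; i++)	/* tear down */
		free(q.sios[i]);
	return (0);
}
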
in dsl_scan_io_queue_destroy():
5097 	scan_io_t *sio;
5104 	while ((sio = avl_destroy_nodes(&queue->q_sios_by_addr, &cookie)) !=
5107 		    SIO_GET_OFFSET(sio), SIO_GET_ASIZE(sio)));
5108 		queue->q_sio_memused -= SIO_GET_MUSED(sio);
5109 		sio_free(sio);
in dsl_scan_freed_dva():
5166 	scan_io_t *srch_sio, *sio;
5204 	sio = avl_find(&queue->q_sios_by_addr, srch_sio, &idx);
5207 	if (sio != NULL) {
5211 		ASSERT3U(start, ==, SIO_GET_OFFSET(sio));
5212 		ASSERT3U(size, ==, SIO_GET_ASIZE(sio));
5213 		avl_remove(&queue->q_sios_by_addr, sio);
5216 		queue->q_sio_memused -= SIO_GET_MUSED(sio);
5223 		sio2bp(sio, &tmpbp);
5226 		sio_free(sio);
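
When a DVA is freed while the scan still has it queued, dsl_scan_freed_dva() builds a search sio (srch_sio) for that address, does an exact avl_find(), and, if a queued sio is found, asserts it covers the same offset and size, removes it from the tree, reduces q_sio_memused, converts it back to a block pointer (sio2bp() into tmpbp, presumably for accounting), and frees it. A small stand-alone sketch of the exact-match removal, with an array and a flag standing in for the AVL tree and for removal; the names and values are illustrative.

#include <stdio.h>

struct fake_sio {
	long	offset;		/* SIO_GET_OFFSET() */
	long	asize;		/* SIO_GET_ASIZE() */
	int	queued;		/* 1 while still in the queue */
};

int
main(void)
{
	struct fake_sio sios[] = {
		{ 4096, 512, 1 }, { 8192, 1024, 1 }, { 16384, 512, 1 },
	};
	int nsios = sizeof (sios) / sizeof (sios[0]);

	/* A block at offset 8192 (asize 1024) was just freed. */
	long start = 8192, size = 1024;

	/* Exact-match lookup, the way avl_find() is used with srch_sio. */
	for (int i = 0; i < nsios; i++) {
		if (sios[i].queued && sios[i].offset == start) {
			/* The queued sio must describe the same allocation. */
			if (sios[i].asize != size)
				return (1);
			sios[i].queued = 0;	/* avl_remove() + sio_free() */
			printf("dropped queued sio at %ld after its block "
			    "was freed\n", start);
		}
	}
	return (0);
}
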