Lines Matching full:sio
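
These matches appear to be drawn from OpenZFS's module/zfs/dsl_scan.c (the scan_io_t, or "sio", handling in the scrub/resilver code). The leading numbers are source-file line numbers, and only lines containing "sio" are listed, so multi-line statements and comments sometimes appear without their continuation lines.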
313 #define SIO_SET_OFFSET(sio, x) DVA_SET_OFFSET(&(sio)->sio_dva[0], x) argument
314 #define SIO_SET_ASIZE(sio, x) DVA_SET_ASIZE(&(sio)->sio_dva[0], x) argument
315 #define SIO_GET_OFFSET(sio) DVA_GET_OFFSET(&(sio)->sio_dva[0]) argument
316 #define SIO_GET_ASIZE(sio) DVA_GET_ASIZE(&(sio)->sio_dva[0]) argument
317 #define SIO_GET_END_OFFSET(sio) \ argument
318 (SIO_GET_OFFSET(sio) + SIO_GET_ASIZE(sio))
319 #define SIO_GET_MUSED(sio) \ argument
320 (sizeof (scan_io_t) + ((sio)->sio_nr_dvas * sizeof (dva_t)))
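
The SIO_* accessors above read the sio's location from the first entry of its DVA array, and SIO_GET_MUSED() charges the struct plus its variable-length DVA tail. A minimal user-space sketch of the same arithmetic, using simplified stand-in types (fake_dva_t and fake_sio_t are illustrative, not the real ZFS definitions):

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

typedef struct {
        uint64_t dva_offset;    /* stands in for DVA_GET_OFFSET() */
        uint64_t dva_asize;     /* stands in for DVA_GET_ASIZE() */
} fake_dva_t;

typedef struct {
        uint16_t   sio_nr_dvas;
        fake_dva_t sio_dva[];   /* variable-length tail, like scan_io_t */
} fake_sio_t;

/* Same shape as the SIO_GET_* macros above, on the simplified types. */
#define FAKE_SIO_GET_OFFSET(sio)        ((sio)->sio_dva[0].dva_offset)
#define FAKE_SIO_GET_ASIZE(sio)         ((sio)->sio_dva[0].dva_asize)
#define FAKE_SIO_GET_END_OFFSET(sio)    \
        (FAKE_SIO_GET_OFFSET(sio) + FAKE_SIO_GET_ASIZE(sio))
#define FAKE_SIO_GET_MUSED(sio)         \
        (sizeof (fake_sio_t) + ((sio)->sio_nr_dvas * sizeof (fake_dva_t)))

int
main(void)
{
        /* A sio carrying two DVAs; the size accounting covers both. */
        fake_sio_t *sio = malloc(sizeof (*sio) + 2 * sizeof (fake_dva_t));

        if (sio == NULL)
                return (1);
        sio->sio_nr_dvas = 2;
        sio->sio_dva[0].dva_offset = 0x10000;
        sio->sio_dva[0].dva_asize = 0x2000;

        printf("end offset 0x%llx, mem used %zu bytes\n",
            (unsigned long long)FAKE_SIO_GET_END_OFFSET(sio),
            (size_t)FAKE_SIO_GET_MUSED(sio));
        free(sio);
        return (0);
}

The end-offset and memory-used values are exactly what the queue code further down uses for its extent bookkeeping (q_exts_by_addr) and its q_sio_memused counter.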
366 scan_io_t *sio);
373 /* sio->sio_nr_dvas must be set so we know which cache to free from */
375 sio_free(scan_io_t *sio) in sio_free() argument
377 ASSERT3U(sio->sio_nr_dvas, >, 0); in sio_free()
378 ASSERT3U(sio->sio_nr_dvas, <=, SPA_DVAS_PER_BP); in sio_free()
380 kmem_cache_free(sio_cache[sio->sio_nr_dvas - 1], sio); in sio_free()
383 /* It is up to the caller to set sio->sio_nr_dvas for freeing */
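
sio_free() above returns the sio to sio_cache[sio_nr_dvas - 1], one cache per possible DVA count, which is why callers must keep sio_nr_dvas valid until the free; the matching sio_alloc() (line 4755) likewise sizes the allocation by the block's DVA count. A hedged user-space sketch of that size-class idea, reusing the simplified fake_sio_t/fake_dva_t from the sketch above and standing in calloc()/free() for the kmem caches (fake_sio_alloc and fake_sio_free are illustrative names):

#include <assert.h>
#include <stdlib.h>

#define FAKE_SPA_DVAS_PER_BP    3       /* stands in for SPA_DVAS_PER_BP */

fake_sio_t *
fake_sio_alloc(unsigned short nr_dvas)
{
        fake_sio_t *sio;

        /* The real sio_alloc() pulls from sio_cache[nr_dvas - 1]. */
        assert(nr_dvas > 0 && nr_dvas <= FAKE_SPA_DVAS_PER_BP);
        sio = calloc(1, sizeof (*sio) + nr_dvas * sizeof (fake_dva_t));
        if (sio != NULL)
                sio->sio_nr_dvas = nr_dvas;
        return (sio);
}

void
fake_sio_free(fake_sio_t *sio)
{
        /*
         * sio_nr_dvas must still be valid here: it is what tells the
         * real sio_free() which of the sio_cache[] slabs to return to.
         */
        assert(sio->sio_nr_dvas > 0 &&
            sio->sio_nr_dvas <= FAKE_SPA_DVAS_PER_BP);
        free(sio);
}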
438 sio2bp(const scan_io_t *sio, blkptr_t *bp) in sio2bp() argument
441 bp->blk_prop = sio->sio_blk_prop; in sio2bp()
442 BP_SET_PHYSICAL_BIRTH(bp, sio->sio_phys_birth); in sio2bp()
443 BP_SET_LOGICAL_BIRTH(bp, sio->sio_birth); in sio2bp()
445 bp->blk_cksum = sio->sio_cksum; in sio2bp()
447 ASSERT3U(sio->sio_nr_dvas, >, 0); in sio2bp()
448 ASSERT3U(sio->sio_nr_dvas, <=, SPA_DVAS_PER_BP); in sio2bp()
450 memcpy(bp->blk_dva, sio->sio_dva, sio->sio_nr_dvas * sizeof (dva_t)); in sio2bp()
454 bp2sio(const blkptr_t *bp, scan_io_t *sio, int dva_i) in bp2sio() argument
456 sio->sio_blk_prop = bp->blk_prop; in bp2sio()
457 sio->sio_phys_birth = BP_GET_RAW_PHYSICAL_BIRTH(bp); in bp2sio()
458 sio->sio_birth = BP_GET_LOGICAL_BIRTH(bp); in bp2sio()
459 sio->sio_cksum = bp->blk_cksum; in bp2sio()
460 sio->sio_nr_dvas = BP_GET_NDVAS(bp); in bp2sio()
463 * Copy the DVAs to the sio. We need all copies of the block so in bp2sio()
466 * in the sio since this is the primary one that we want to issue. in bp2sio()
468 for (int i = 0, j = dva_i; i < sio->sio_nr_dvas; i++, j++) { in bp2sio()
469 sio->sio_dva[i] = bp->blk_dva[j % sio->sio_nr_dvas]; in bp2sio()
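
The loop at lines 468-469 copies every DVA of the block but rotates the array so the copy at index dva_i lands in slot 0, the slot all the SIO_GET_* macros read. A self-contained sketch of that modular rotation on plain integers:

#include <stdio.h>

int
main(void)
{
        int blk_dva[3] = { 100, 200, 300 };     /* stands in for bp->blk_dva */
        int sio_dva[3];                         /* stands in for sio->sio_dva */
        int nr_dvas = 3, dva_i = 1;

        for (int i = 0, j = dva_i; i < nr_dvas; i++, j++)
                sio_dva[i] = blk_dva[j % nr_dvas];

        /* Prints: 200 300 100 -- the dva_i'th copy is now first. */
        for (int i = 0; i < nr_dvas; i++)
                printf("%d ", sio_dva[i]);
        printf("\n");
        return (0);
}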
2997 * However, dsl_scan_sync() expects that no new sio's will in dsl_scan_ddt_entry()
3000 * completed. This check ensures we do not issue new sio's in dsl_scan_ddt_entry()
3270 scan_io_t *sio; in scan_io_queue_issue() local
3273 while ((sio = list_head(io_list)) != NULL) { in scan_io_queue_issue()
3281 sio2bp(sio, &bp); in scan_io_queue_issue()
3282 scan_exec_io(scn->scn_dp, &bp, sio->sio_flags, in scan_io_queue_issue()
3283 &sio->sio_zb, queue); in scan_io_queue_issue()
3286 sio_free(sio); in scan_io_queue_issue()
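
scan_io_queue_issue() drains the gathered list head-first: each sio is turned back into a full blkptr_t with sio2bp(), handed to scan_exec_io() with its flags and bookmark, and then freed. A small self-contained sketch of that drain-convert-free pattern over a plain singly linked list (fake_io_t and fake_exec_io are stand-ins for the kernel list_t and the zio machinery):

#include <stdio.h>
#include <stdlib.h>

typedef struct fake_io {
        unsigned long long offset;      /* stands in for the sio contents */
        struct fake_io *next;
} fake_io_t;

static void
fake_exec_io(unsigned long long offset)
{
        /* stands in for sio2bp() followed by scan_exec_io() */
        printf("issuing I/O at offset 0x%llx\n", offset);
}

int
main(void)
{
        fake_io_t *head = NULL, *io;

        /*
         * Build a pretend queue, prepending high-to-low so the head
         * holds the lowest offset, as the tail-inserted gather list would.
         */
        for (unsigned long long off = 0x3000; off >= 0x1000; off -= 0x1000) {
                io = malloc(sizeof (*io));
                if (io == NULL)
                        return (1);
                io->offset = off;
                io->next = head;
                head = io;
        }

        /* The drain loop: take the head, issue it, free it. */
        while ((io = head) != NULL) {
                head = io->next;
                fake_exec_io(io->offset);
                free(io);       /* stands in for sio_free() */
        }
        return (0);
}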
3302 scan_io_t *srch_sio, *sio, *next_sio; in scan_io_queue_gather() local
3318 sio = avl_find(&queue->q_sios_by_addr, srch_sio, &idx); in scan_io_queue_gather()
3321 if (sio == NULL) in scan_io_queue_gather()
3322 sio = avl_nearest(&queue->q_sios_by_addr, idx, AVL_AFTER); in scan_io_queue_gather()
3324 while (sio != NULL && SIO_GET_OFFSET(sio) < zfs_rs_get_end(rs, in scan_io_queue_gather()
3326 ASSERT3U(SIO_GET_OFFSET(sio), >=, zfs_rs_get_start(rs, in scan_io_queue_gather()
3328 ASSERT3U(SIO_GET_END_OFFSET(sio), <=, zfs_rs_get_end(rs, in scan_io_queue_gather()
3331 next_sio = AVL_NEXT(&queue->q_sios_by_addr, sio); in scan_io_queue_gather()
3332 avl_remove(&queue->q_sios_by_addr, sio); in scan_io_queue_gather()
3335 queue->q_sio_memused -= SIO_GET_MUSED(sio); in scan_io_queue_gather()
3337 bytes_issued += SIO_GET_ASIZE(sio); in scan_io_queue_gather()
3339 list_insert_tail(list, sio); in scan_io_queue_gather()
3340 sio = next_sio; in scan_io_queue_gather()
3349 if (sio != NULL && SIO_GET_OFFSET(sio) < zfs_rs_get_end(rs, in scan_io_queue_gather()
3354 SIO_GET_OFFSET(sio), zfs_rs_get_end(rs, in scan_io_queue_gather()
3355 queue->q_exts_by_addr) - SIO_GET_OFFSET(sio)); in scan_io_queue_gather()
3356 queue->q_last_ext_addr = SIO_GET_OFFSET(sio); in scan_io_queue_gather()
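
scan_io_queue_gather() seeds the walk with avl_find() on the extent's start offset, falling back to avl_nearest(..., AVL_AFTER), then keeps taking sios while they still start inside the extent, moving each from q_sios_by_addr to the issue list and adjusting q_sio_memused and bytes_issued as it goes. A self-contained sketch of that gather walk, with a sorted array standing in for the AVL tree:

#include <stdio.h>

int
main(void)
{
        /* Offsets sorted by address, standing in for q_sios_by_addr. */
        unsigned long long offsets[] = { 0x100, 0x400, 0x900, 0x2000 };
        int noffsets = 4;
        /* The extent being gathered, [start, end). */
        unsigned long long start = 0x300, end = 0x1000;
        unsigned long long gathered = 0;
        int i = 0;

        /* Find the first entry at or after the extent start. */
        while (i < noffsets && offsets[i] < start)
                i++;

        /* Walk while the entry still starts inside the extent. */
        while (i < noffsets && offsets[i] < end) {
                /* In the real code: avl_remove() + list_insert_tail(). */
                printf("gathering sio at 0x%llx\n", offsets[i]);
                gathered++;
                i++;
        }
        printf("gathered %llu sios\n", gathered);
        return (0);
}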
3445 scan_io_t *sio; in scan_io_queues_run_one() local
3494 * queue lock. The sio queue won't be updated by in scan_io_queues_run_one()
3518 while ((sio = list_remove_head(&sio_list)) != NULL) in scan_io_queues_run_one()
3519 scan_io_queue_insert_impl(queue, sio); in scan_io_queues_run_one()
4725 scan_io_queue_insert_impl(dsl_scan_io_queue_t *queue, scan_io_t *sio) in scan_io_queue_insert_impl() argument
4734 if (avl_find(&queue->q_sios_by_addr, sio, &idx) != NULL) { in scan_io_queue_insert_impl()
4736 sio_free(sio); in scan_io_queue_insert_impl()
4739 avl_insert(&queue->q_sios_by_addr, sio, idx); in scan_io_queue_insert_impl()
4740 queue->q_sio_memused += SIO_GET_MUSED(sio); in scan_io_queue_insert_impl()
4741 zfs_range_tree_add(queue->q_exts_by_addr, SIO_GET_OFFSET(sio), in scan_io_queue_insert_impl()
4742 SIO_GET_ASIZE(sio)); in scan_io_queue_insert_impl()
4755 scan_io_t *sio = sio_alloc(BP_GET_NDVAS(bp)); in scan_io_queue_insert() local
4760 bp2sio(bp, sio, dva_i); in scan_io_queue_insert()
4761 sio->sio_flags = zio_flags; in scan_io_queue_insert()
4762 sio->sio_zb = *zb; in scan_io_queue_insert()
4765 scan_io_queue_insert_impl(queue, sio); in scan_io_queue_insert()
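
On insert, scan_io_queue_insert_impl() probes q_sios_by_addr first: a sio whose address is already queued is simply freed, otherwise it is inserted at the position the lookup returned, q_sio_memused grows by SIO_GET_MUSED(), and the sio's [offset, offset + asize) range is added to q_exts_by_addr. A self-contained sketch of that check-then-insert-and-account pattern (the sorted array and the flat 64-byte size are stand-ins for the AVL tree, the range tree, and the real SIO_GET_MUSED() value):

#include <stdio.h>
#include <string.h>

#define MAXQ    8

int
main(void)
{
        unsigned long long q[MAXQ] = { 0x100, 0x900 }; /* sorted offsets */
        int qlen = 2;
        unsigned long long memused = 2 * 64;    /* pretend 64 bytes per sio */
        unsigned long long incoming[] = { 0x400, 0x900 }; /* 0x900 is a dup */

        for (int n = 0; n < 2; n++) {
                unsigned long long off = incoming[n];
                int idx = 0, dup;

                /* Find the insertion point and detect duplicates. */
                while (idx < qlen && q[idx] < off)
                        idx++;
                dup = (idx < qlen && q[idx] == off);

                if (dup) {
                        /* The real code calls sio_free() and returns. */
                        printf("0x%llx already queued, dropping\n", off);
                        continue;
                }
                /* Shift the tail up and insert (avl_insert() equivalent). */
                memmove(&q[idx + 1], &q[idx], (qlen - idx) * sizeof (q[0]));
                q[idx] = off;
                qlen++;
                memused += 64;  /* q_sio_memused += SIO_GET_MUSED(sio) */
                printf("queued 0x%llx, memused now %llu\n", off, memused);
        }
        return (0);
}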
5109 scan_io_t *sio; in dsl_scan_io_queue_destroy() local
5116 while ((sio = avl_destroy_nodes(&queue->q_sios_by_addr, &cookie)) != in dsl_scan_io_queue_destroy()
5119 SIO_GET_OFFSET(sio), SIO_GET_ASIZE(sio))); in dsl_scan_io_queue_destroy()
5120 queue->q_sio_memused -= SIO_GET_MUSED(sio); in dsl_scan_io_queue_destroy()
5121 sio_free(sio); in dsl_scan_io_queue_destroy()
5178 scan_io_t *srch_sio, *sio; in dsl_scan_freed_dva() local
5216 sio = avl_find(&queue->q_sios_by_addr, srch_sio, &idx); in dsl_scan_freed_dva()
5219 if (sio != NULL) { in dsl_scan_freed_dva()
5223 ASSERT3U(start, ==, SIO_GET_OFFSET(sio)); in dsl_scan_freed_dva()
5224 ASSERT3U(size, ==, SIO_GET_ASIZE(sio)); in dsl_scan_freed_dva()
5225 avl_remove(&queue->q_sios_by_addr, sio); in dsl_scan_freed_dva()
5228 queue->q_sio_memused -= SIO_GET_MUSED(sio); in dsl_scan_freed_dva()
5235 sio2bp(sio, &tmpbp); in dsl_scan_freed_dva()
5238 sio_free(sio); in dsl_scan_freed_dva()
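
dsl_scan_freed_dva() covers the case where a block is freed while its sio is still queued: the sio is looked up by offset, the freed range is asserted to match the queued sio exactly, and the sio is removed from the tree, converted back to a blkptr_t along the way (line 5235), and freed, so the scan does not later issue a read for space that has since been freed. A short self-contained sketch of that exact-match removal (queued_io_t and fake_freed_dva are illustrative stand-ins):

#include <assert.h>
#include <stdio.h>

typedef struct {
        unsigned long long offset;
        unsigned long long asize;
        int queued;
} queued_io_t;

static void
fake_freed_dva(queued_io_t *io, unsigned long long start,
    unsigned long long size)
{
        if (!io->queued)
                return;         /* nothing queued at this address */

        /* A queued block can only be freed whole, never partially. */
        assert(start == io->offset);
        assert(size == io->asize);

        /* stands in for avl_remove(), the accounting, and sio_free() */
        io->queued = 0;
        printf("dropped queued I/O at 0x%llx (%llu bytes)\n", start, size);
}

int
main(void)
{
        queued_io_t io = { .offset = 0x8000, .asize = 0x1000, .queued = 1 };

        fake_freed_dva(&io, 0x8000, 0x1000);
        return (0);
}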