Lines Matching +full:user +full:- +full:visible

1 // SPDX-License-Identifier: GPL-2.0-or-later
27 * marshal_virt_to_resize() - translate uvirtual to resize structure
34 resize->hdr = virt->hdr; in marshal_virt_to_resize()
35 resize->context_id = virt->context_id; in marshal_virt_to_resize()
36 resize->rsrc_handle = virt->rsrc_handle; in marshal_virt_to_resize()
37 resize->req_size = virt->lun_size; in marshal_virt_to_resize()
38 resize->last_lba = virt->last_lba; in marshal_virt_to_resize()
42 * marshal_clone_to_rele() - translate clone to release structure
49 release->hdr = clone->hdr; in marshal_clone_to_rele()
50 release->context_id = clone->context_id_dst; in marshal_clone_to_rele()
54 * ba_init() - initializes a block allocator
57 * Return: 0 on success, -errno on failure
67 "ba_lun->lsize=%lx ba_lun->au_size=%lX\n", in ba_init()
68 __func__, ba_lun->lun_id, ba_lun->lsize, ba_lun->au_size); in ba_init()
71 lun_size_au = ba_lun->lsize / ba_lun->au_size; in ba_init()
74 return -EINVAL; in ba_init()
81 __func__, ba_lun->lun_id); in ba_init()
82 return -ENOMEM; in ba_init()
85 bali->total_aus = lun_size_au; in ba_init()
86 bali->lun_bmap_size = lun_size_au / BITS_PER_LONG; in ba_init()
89 bali->lun_bmap_size++; in ba_init()
92 bali->lun_alloc_map = kzalloc((bali->lun_bmap_size * sizeof(u64)), in ba_init()
94 if (unlikely(!bali->lun_alloc_map)) { in ba_init()
96 "lun_id=%016llx\n", __func__, ba_lun->lun_id); in ba_init()
98 return -ENOMEM; in ba_init()
102 bali->free_aun_cnt = lun_size_au; in ba_init()
104 for (i = 0; i < bali->lun_bmap_size; i++) in ba_init()
105 bali->lun_alloc_map[i] = 0xFFFFFFFFFFFFFFFFULL; in ba_init()
108 last_word_underflow = (bali->lun_bmap_size * BITS_PER_LONG); in ba_init()
109 last_word_underflow -= bali->free_aun_cnt; in ba_init()
111 lam = &bali->lun_alloc_map[bali->lun_bmap_size - 1]; in ba_init()
112 for (i = (HIBIT - last_word_underflow + 1); in ba_init()
119 bali->free_high_idx = bali->lun_bmap_size; in ba_init()
122 bali->aun_clone_map = kzalloc((bali->total_aus * sizeof(u8)), in ba_init()
124 if (unlikely(!bali->aun_clone_map)) { in ba_init()
126 __func__, ba_lun->lun_id); in ba_init()
127 kfree(bali->lun_alloc_map); in ba_init()
129 return -ENOMEM; in ba_init()
132 /* Pass the allocated LUN info as a handle to the user */ in ba_init()
133 ba_lun->ba_lun_handle = bali; in ba_init()
137 __func__, ba_lun->lun_id, bali->lun_bmap_size, in ba_init()
138 bali->free_aun_cnt); in ba_init()
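
ba_init() sizes the allocation bitmap by dividing the LUN into allocation units, rounding the word count up when the AU count is not a multiple of BITS_PER_LONG, and leaving the trailing bits of the last word cleared so they can never be handed out. A minimal user-space sketch of that sizing arithmetic; the names here are illustrative, not the driver's structures:

#include <stdint.h>

#define EXAMPLE_BITS_PER_LONG 64	/* stand-in for BITS_PER_LONG */

struct example_bmap_size {
	uint64_t total_aus;	/* allocation units covered by the bitmap */
	uint64_t words;		/* 64-bit words holding one bit per AU */
	uint64_t pad_bits;	/* trailing bits with no AU behind them */
};

static struct example_bmap_size size_bitmap(uint64_t lsize, uint64_t au_size)
{
	struct example_bmap_size s;

	s.total_aus = lsize / au_size;
	s.words = s.total_aus / EXAMPLE_BITS_PER_LONG;
	if (s.total_aus % EXAMPLE_BITS_PER_LONG)
		s.words++;			/* round up, as ba_init() does */
	/* ba_init() leaves these bits cleared, i.e. permanently "in use". */
	s.pad_bits = s.words * EXAMPLE_BITS_PER_LONG - s.total_aus;
	return s;
}
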
143 * find_free_range() - locates a free bit within the block allocator
149 * Return: The bit position within the passed back word, -1 on failure
156 u64 bit_pos = -1; in find_free_range()
160 if (bali->lun_alloc_map[i] != 0) { in find_free_range()
161 lam = (ulong *)&bali->lun_alloc_map[i]; in find_free_range()
167 __func__, bit_pos, bali->lun_alloc_map[i], i); in find_free_range()
170 bali->free_aun_cnt--; in find_free_range()
179 * ba_alloc() - allocates a block from the block allocator
182 * Return: The allocated block, -1 on failure
186 u64 bit_pos = -1; in ba_alloc()
190 bali = ba_lun->ba_lun_handle; in ba_alloc()
194 __func__, ba_lun->lun_id, bali->free_aun_cnt); in ba_alloc()
196 if (bali->free_aun_cnt == 0) { in ba_alloc()
198 __func__, ba_lun->lun_id); in ba_alloc()
199 return -1ULL; in ba_alloc()
202 /* Search to find a free entry, curr->high then low->curr */ in ba_alloc()
203 bit_pos = find_free_range(bali->free_curr_idx, in ba_alloc()
204 bali->free_high_idx, bali, &bit_word); in ba_alloc()
205 if (bit_pos == -1) { in ba_alloc()
206 bit_pos = find_free_range(bali->free_low_idx, in ba_alloc()
207 bali->free_curr_idx, in ba_alloc()
209 if (bit_pos == -1) { in ba_alloc()
211 " lun_id=%016llx\n", __func__, ba_lun->lun_id); in ba_alloc()
212 return -1ULL; in ba_alloc()
218 bali->free_curr_idx = bit_word + 1; in ba_alloc()
220 bali->free_curr_idx = bit_word; in ba_alloc()
224 ((bit_word * BITS_PER_LONG) + bit_pos), ba_lun->lun_id, in ba_alloc()
225 bali->free_aun_cnt); in ba_alloc()
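
ba_alloc() effectively wraps around the map by searching in two passes: from the current free index up to the high free index, then from the low free index back up to the current one. A standalone sketch of that pattern over a plain word array, where a set bit means "free"; the sketch_* helpers are hypothetical, not the driver's find_free_range():

#include <stdint.h>

/* Return the AU number of a free (set) bit and clear it, or -1 if none. */
static int64_t sketch_find_free(uint64_t *map, int low, int high)
{
	for (int i = low; i < high; i++) {
		if (map[i] != 0) {
			int bit = __builtin_ctzll(map[i]);  /* lowest set bit */
			map[i] &= ~(1ULL << bit);           /* mark it used   */
			return (int64_t)i * 64 + bit;
		}
	}
	return -1;
}

static int64_t sketch_alloc(uint64_t *map, int *curr, int low, int high)
{
	/* Pass 1: curr -> high; pass 2: low -> curr, as in ba_alloc(). */
	int64_t aun = sketch_find_free(map, *curr, high);

	if (aun < 0)
		aun = sketch_find_free(map, low, *curr);
	if (aun >= 0)
		*curr = (int)(aun / 64);    /* resume near the last hit */
	return aun;
}
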
231 * validate_alloc() - validates the specified block has been allocated
235 * Return: 0 on success, -1 on failure
244 if (test_bit(bit_pos, (ulong *)&bali->lun_alloc_map[idx])) in validate_alloc()
245 return -1; in validate_alloc()
251 * ba_free() - frees a block from the block allocator
255 * Return: 0 on success, -1 on failure
262 bali = ba_lun->ba_lun_handle; in ba_free()
266 __func__, to_free, ba_lun->lun_id); in ba_free()
267 return -1; in ba_free()
271 "free_aun_cnt=%llx\n", __func__, to_free, ba_lun->lun_id, in ba_free()
272 bali->free_aun_cnt); in ba_free()
274 if (bali->aun_clone_map[to_free] > 0) { in ba_free()
276 __func__, to_free, ba_lun->lun_id, in ba_free()
277 bali->aun_clone_map[to_free]); in ba_free()
278 bali->aun_clone_map[to_free]--; in ba_free()
285 set_bit(bit_pos, (ulong *)&bali->lun_alloc_map[idx]); in ba_free()
286 bali->free_aun_cnt++; in ba_free()
288 if (idx < bali->free_low_idx) in ba_free()
289 bali->free_low_idx = idx; in ba_free()
290 else if (idx > bali->free_high_idx) in ba_free()
291 bali->free_high_idx = idx; in ba_free()
295 ba_lun->lun_id, bali->free_aun_cnt); in ba_free()
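
validate_alloc() and ba_free() translate an AU number into a word index and a bit offset within that word; because a set bit means "free", validate_alloc() rejects an AU whose bit is still set, and ba_free() sets the bit again to return the AU to the pool. A small sketch of that mapping with hypothetical helpers:

#include <stdbool.h>
#include <stdint.h>

static inline int aun_word(uint64_t aun) { return (int)(aun / 64); }
static inline int aun_bit(uint64_t aun)  { return (int)(aun % 64); }

/* A set bit means "free", so an allocated AU reads back as 0. */
static bool sketch_is_allocated(const uint64_t *map, uint64_t aun)
{
	return !(map[aun_word(aun)] & (1ULL << aun_bit(aun)));
}

static void sketch_mark_free(uint64_t *map, uint64_t aun)
{
	map[aun_word(aun)] |= 1ULL << aun_bit(aun);
}
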
301 * ba_clone() - Clone a chunk of the block allocation table
305 * Return: 0 on success, -1 on failure
309 struct ba_lun_info *bali = ba_lun->ba_lun_handle; in ba_clone()
313 __func__, to_clone, ba_lun->lun_id); in ba_clone()
314 return -1; in ba_clone()
318 __func__, to_clone, ba_lun->lun_id); in ba_clone()
320 if (bali->aun_clone_map[to_clone] == MAX_AUN_CLONE_CNT) { in ba_clone()
322 __func__, to_clone, ba_lun->lun_id); in ba_clone()
323 return -1; in ba_clone()
326 bali->aun_clone_map[to_clone]++; in ba_clone()
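
ba_clone() never copies data; it only bumps a per-AU clone count, and ba_free() (above) decrements that count instead of returning the AU to the bitmap until the last reference is gone. A toy model of that interplay; SKETCH_MAX_CLONES stands in for MAX_AUN_CLONE_CNT, whose actual value is defined by the driver:

#include <stdint.h>

#define SKETCH_MAX_CLONES 255	/* assumption: the driver caps the clone count */

struct sketch_au {
	uint8_t clone_cnt;	/* outstanding clones of this AU */
	uint8_t free;		/* 1 = back in the free pool */
};

static int sketch_clone(struct sketch_au *au)
{
	if (au->clone_cnt == SKETCH_MAX_CLONES)
		return -1;		/* refuse, as ba_clone() does */
	au->clone_cnt++;
	return 0;
}

static void sketch_release(struct sketch_au *au)
{
	if (au->clone_cnt > 0)
		au->clone_cnt--;	/* another clone still references it */
	else
		au->free = 1;		/* last user gone: truly free the AU */
}
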
332 * ba_space() - returns the amount of free space left in the block allocator
339 struct ba_lun_info *bali = ba_lun->ba_lun_handle; in ba_space()
341 return bali->free_aun_cnt; in ba_space()
345 * cxlflash_ba_terminate() - frees resources associated with the block allocator
352 struct ba_lun_info *bali = ba_lun->ba_lun_handle; in cxlflash_ba_terminate()
355 kfree(bali->aun_clone_map); in cxlflash_ba_terminate()
356 kfree(bali->lun_alloc_map); in cxlflash_ba_terminate()
358 ba_lun->ba_lun_handle = NULL; in cxlflash_ba_terminate()
363 * init_vlun() - initializes a LUN for virtual use
366 * Return: 0 on success, -errno on failure
371 struct glun_info *gli = lli->parent; in init_vlun()
372 struct blka *blka = &gli->blka; in init_vlun()
375 mutex_init(&blka->mutex); in init_vlun()
378 blka->ba_lun.lun_id = lli->lun_index; in init_vlun()
379 blka->ba_lun.lsize = gli->max_lba + 1; in init_vlun()
380 blka->ba_lun.lba_size = gli->blk_len; in init_vlun()
382 blka->ba_lun.au_size = MC_CHUNK_SIZE; in init_vlun()
383 blka->nchunk = blka->ba_lun.lsize / MC_CHUNK_SIZE; in init_vlun()
385 rc = ba_init(&blka->ba_lun); in init_vlun()
394 * write_same16() - sends a SCSI WRITE_SAME16 (0) command to the specified LUN
417 * Return: 0 on success, -errno on failure
429 struct cxlflash_cfg *cfg = shost_priv(sdev->host); in write_same16()
430 struct device *dev = &cfg->dev->dev; in write_same16()
431 const u32 s = ilog2(sdev->sector_size) - 9; in write_same16()
432 const u32 to = sdev->request_queue->rq_timeout; in write_same16()
434 sdev->request_queue->limits.max_write_zeroes_sectors >> s; in write_same16()
439 rc = -ENOMEM; in write_same16()
446 scsi_cmd[1] = cfg->ws_unmap ? 0x8 : 0; in write_same16()
452 up_read(&cfg->ioctl_rwsem); in write_same16()
456 down_read(&cfg->ioctl_rwsem); in write_same16()
461 rc = -ENODEV; in write_same16()
469 rc = -EIO; in write_same16()
472 left -= ws_limit; in write_same16()
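
write_same16() scrubs the virtual LUN in pieces no larger than the device's write-zeroes limit, advancing the LBA offset and decrementing the remaining count on each pass (and dropping the ioctl semaphore around the request so device removal is not blocked). The chunking itself, sketched in user space with made-up names and an injected issue callback:

#include <stdint.h>

typedef int (*issue_ws_fn)(uint64_t lba, uint32_t nblks);

static int sketch_scrub(uint64_t lba, int64_t nblks, int64_t ws_limit,
			issue_ws_fn issue)
{
	int64_t left = nblks;
	uint64_t offset = lba;

	while (left > 0) {
		/* Never ask for more than the device's limit per command. */
		uint32_t chunk = (uint32_t)(ws_limit < left ? ws_limit : left);
		int rc = issue(offset, chunk);

		if (rc)
			return rc;
		left -= ws_limit;	/* may go negative on the final pass */
		offset += ws_limit;
	}
	return 0;
}
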
484 * grow_lxt() - expands the translation table associated with the specified RHTE
495 * amount of space. The user is made aware of this by returning the size
498 * Return: 0 on success, -errno on failure
507 struct cxlflash_cfg *cfg = shost_priv(sdev->host); in grow_lxt()
508 struct device *dev = &cfg->dev->dev; in grow_lxt()
510 struct llun_info *lli = sdev->hostdata; in grow_lxt()
511 struct glun_info *gli = lli->parent; in grow_lxt()
512 struct blka *blka = &gli->blka; in grow_lxt()
516 u64 delta = *new_size - rhte->lxt_cnt; in grow_lxt()
521 * Check what is available in the block allocator before re-allocating in grow_lxt()
525 mutex_lock(&blka->mutex); in grow_lxt()
526 av_size = ba_space(&blka->ba_lun); in grow_lxt()
530 mutex_unlock(&blka->mutex); in grow_lxt()
531 rc = -ENOSPC; in grow_lxt()
538 lxt_old = rhte->lxt_start; in grow_lxt()
539 ngrps_old = LXT_NUM_GROUPS(rhte->lxt_cnt); in grow_lxt()
540 ngrps = LXT_NUM_GROUPS(rhte->lxt_cnt + delta); in grow_lxt()
547 mutex_unlock(&blka->mutex); in grow_lxt()
548 rc = -ENOMEM; in grow_lxt()
553 memcpy(lxt, lxt_old, (sizeof(*lxt) * rhte->lxt_cnt)); in grow_lxt()
558 my_new_size = rhte->lxt_cnt + delta; in grow_lxt()
561 for (i = rhte->lxt_cnt; i < my_new_size; i++) { in grow_lxt()
565 * leave a rlba_base of -1u which will likely be a in grow_lxt()
568 aun = ba_alloc(&blka->ba_lun); in grow_lxt()
569 if ((aun == -1ULL) || (aun >= blka->nchunk)) in grow_lxt()
571 "max=%llu\n", __func__, aun, blka->nchunk - 1); in grow_lxt()
575 (lli->lun_index << LXT_LUNIDX_SHIFT) | in grow_lxt()
577 lli->port_sel)); in grow_lxt()
580 mutex_unlock(&blka->mutex); in grow_lxt()
586 dma_wmb(); /* Make LXT updates visible */ in grow_lxt()
588 rhte->lxt_start = lxt; in grow_lxt()
589 dma_wmb(); /* Make RHT entry's LXT table update visible */ in grow_lxt()
591 rhte->lxt_cnt = my_new_size; in grow_lxt()
592 dma_wmb(); /* Make RHT entry's LXT table size update visible */ in grow_lxt()
596 rc = -EAGAIN; in grow_lxt()
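
Per the grow_lxt() kernel-doc above, a grow request that exceeds what the block allocator has left does not fail outright: the path checks ba_space() under the allocator mutex, returns -ENOSPC only when nothing is free, and otherwise grows by whatever is available, reporting the size actually reached back to the caller. A sketch of that policy (names are illustrative):

#include <stdint.h>

static int sketch_grow_policy(int64_t available, uint64_t current_cnt,
			      uint64_t *requested_cnt)
{
	uint64_t delta = *requested_cnt - current_cnt;

	if (available <= 0)
		return -1;			/* -ENOSPC in the driver */
	if ((uint64_t)available < delta)
		delta = (uint64_t)available;	/* partial grow */
	*requested_cnt = current_cnt + delta;	/* tell the caller what they got */
	return 0;
}
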
608 * shrink_lxt() - reduces translation table associated with the specified RHTE
616 * Return: 0 on success, -errno on failure
625 struct cxlflash_cfg *cfg = shost_priv(sdev->host); in shrink_lxt()
626 struct device *dev = &cfg->dev->dev; in shrink_lxt()
628 struct llun_info *lli = sdev->hostdata; in shrink_lxt()
629 struct glun_info *gli = lli->parent; in shrink_lxt()
630 struct blka *blka = &gli->blka; in shrink_lxt()
631 ctx_hndl_t ctxid = DECODE_CTXID(ctxi->ctxid); in shrink_lxt()
632 bool needs_ws = ctxi->rht_needs_ws[rhndl]; in shrink_lxt()
633 bool needs_sync = !ctxi->err_recovery_active; in shrink_lxt()
636 u64 delta = rhte->lxt_cnt - *new_size; in shrink_lxt()
640 lxt_old = rhte->lxt_start; in shrink_lxt()
641 ngrps_old = LXT_NUM_GROUPS(rhte->lxt_cnt); in shrink_lxt()
642 ngrps = LXT_NUM_GROUPS(rhte->lxt_cnt - delta); in shrink_lxt()
650 rc = -ENOMEM; in shrink_lxt()
656 (sizeof(*lxt) * (rhte->lxt_cnt - delta))); in shrink_lxt()
663 my_new_size = rhte->lxt_cnt - delta; in shrink_lxt()
669 rhte->lxt_cnt = my_new_size; in shrink_lxt()
670 dma_wmb(); /* Make RHT entry's LXT table size update visible */ in shrink_lxt()
672 rhte->lxt_start = lxt; in shrink_lxt()
673 dma_wmb(); /* Make RHT entry's LXT table update visible */ in shrink_lxt()
678 rc = -EAGAIN; in shrink_lxt()
686 ctxi->unavail = true; in shrink_lxt()
687 mutex_unlock(&ctxi->mutex); in shrink_lxt()
691 mutex_lock(&blka->mutex); in shrink_lxt()
692 for (i = delta - 1; i >= 0; i--) { in shrink_lxt()
696 ba_free(&blka->ba_lun, aun); in shrink_lxt()
698 mutex_unlock(&blka->mutex); in shrink_lxt()
701 /* Make the context visible again */ in shrink_lxt()
702 mutex_lock(&ctxi->mutex); in shrink_lxt()
703 ctxi->unavail = false; in shrink_lxt()
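
Both resize paths order their updates so that a consumer walking (lxt_start, lxt_cnt) never sees a count larger than the table it points at: growth publishes the new, larger table before raising the count, while shrinking lowers the count before swapping in the smaller table, with write barriers between the steps. A single-writer user-space analogy using C11 release fences; it is illustrative only, since the driver needs dma_wmb() because the AFU observes these fields via DMA:

#include <stdatomic.h>
#include <stdint.h>

struct sketch_table {
	uint64_t *start;	/* analogous to rhte->lxt_start */
	uint64_t cnt;		/* analogous to rhte->lxt_cnt   */
};

static void sketch_publish_grow(struct sketch_table *t,
				uint64_t *bigger, uint64_t new_cnt)
{
	/* New entries are fully written before anything points at them. */
	atomic_thread_fence(memory_order_release);
	t->start = bigger;		/* pointer first ...          */
	atomic_thread_fence(memory_order_release);
	t->cnt = new_cnt;		/* ... then the larger count  */
}

static void sketch_publish_shrink(struct sketch_table *t,
				  uint64_t *smaller, uint64_t new_cnt)
{
	t->cnt = new_cnt;		/* smaller count first ...    */
	atomic_thread_fence(memory_order_release);
	t->start = smaller;		/* ... then the shorter table */
	atomic_thread_fence(memory_order_release);
}
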
716 * _cxlflash_vlun_resize() - changes the size of a virtual LUN
721 * On successful return, the user is informed of the new size (in blocks)
723 * LUN is zero, the last LBA is reflected as -1. See comment in the
727 * Return: 0 on success, -errno on failure
733 struct cxlflash_cfg *cfg = shost_priv(sdev->host); in _cxlflash_vlun_resize()
734 struct device *dev = &cfg->dev->dev; in _cxlflash_vlun_resize()
735 struct llun_info *lli = sdev->hostdata; in _cxlflash_vlun_resize()
736 struct glun_info *gli = lli->parent; in _cxlflash_vlun_resize()
737 struct afu *afu = cfg->afu; in _cxlflash_vlun_resize()
740 res_hndl_t rhndl = resize->rsrc_handle; in _cxlflash_vlun_resize()
743 u64 ctxid = DECODE_CTXID(resize->context_id), in _cxlflash_vlun_resize()
744 rctxid = resize->context_id; in _cxlflash_vlun_resize()
754 nsectors = (resize->req_size * CXLFLASH_BLOCK_SIZE) / gli->blk_len; in _cxlflash_vlun_resize()
758 __func__, ctxid, resize->rsrc_handle, resize->req_size, in _cxlflash_vlun_resize()
761 if (unlikely(gli->mode != MODE_VIRTUAL)) { in _cxlflash_vlun_resize()
763 __func__, gli->mode); in _cxlflash_vlun_resize()
764 rc = -EINVAL; in _cxlflash_vlun_resize()
774 rc = -EINVAL; in _cxlflash_vlun_resize()
785 rc = -EINVAL; in _cxlflash_vlun_resize()
789 if (new_size > rhte->lxt_cnt) in _cxlflash_vlun_resize()
791 else if (new_size < rhte->lxt_cnt) in _cxlflash_vlun_resize()
804 rc = -EAGAIN; in _cxlflash_vlun_resize()
809 resize->hdr.return_flags = 0; in _cxlflash_vlun_resize()
810 resize->last_lba = (new_size * MC_CHUNK_SIZE * gli->blk_len); in _cxlflash_vlun_resize()
811 resize->last_lba /= CXLFLASH_BLOCK_SIZE; in _cxlflash_vlun_resize()
812 resize->last_lba--; in _cxlflash_vlun_resize()
818 __func__, resize->last_lba, rc); in _cxlflash_vlun_resize()
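
The reply converts the new chunk count back into a last LBA expressed in CXLFLASH_BLOCK_SIZE units: chunks times blocks-per-chunk times the device block length, divided by CXLFLASH_BLOCK_SIZE, minus one, which is why a zero-size virtual LUN reports a last LBA of -1. A worked sketch of that arithmetic; the parameters are supplied by the caller rather than taken from the driver's constants:

#include <stdint.h>

static uint64_t sketch_last_lba(uint64_t new_size_chunks,
				uint64_t chunk_size_blocks,
				uint64_t dev_blk_len,
				uint64_t report_blk_size)
{
	uint64_t last_lba = new_size_chunks * chunk_size_blocks * dev_blk_len;

	last_lba /= report_blk_size;
	last_lba--;		/* 0 chunks -> (u64)-1, as documented above */
	return last_lba;
}
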
828 * cxlflash_restore_luntable() - Restore LUN table to prior state
836 struct device *dev = &cfg->dev->dev; in cxlflash_restore_luntable()
841 list_for_each_entry_safe(lli, temp, &cfg->lluns, list) { in cxlflash_restore_luntable()
842 if (!lli->in_table) in cxlflash_restore_luntable()
845 lind = lli->lun_index; in cxlflash_restore_luntable()
848 for (k = 0; k < cfg->num_fc_ports; k++) in cxlflash_restore_luntable()
849 if (lli->port_sel & (1 << k)) { in cxlflash_restore_luntable()
851 writeq_be(lli->lun_id[k], &fc_port_luns[lind]); in cxlflash_restore_luntable()
852 dev_dbg(dev, "\t%d=%llx\n", k, lli->lun_id[k]); in cxlflash_restore_luntable()
860 * get_num_ports() - compute number of ports from port selection mask
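
get_num_ports() only needs the population count of the port-selection mask, one bit per FC port. The sketch below uses a compiler builtin to the same effect; it is not necessarily how the driver computes it:

#include <stdint.h>

static inline int sketch_num_ports(uint32_t port_sel_mask)
{
	return __builtin_popcount(port_sel_mask);
}
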
874 * init_luntable() - write an entry in the LUN table
879 * - at the top for LUNs visible on multiple ports.
880 * - at the bottom for LUNs visible only on one port.
882 * Return: 0 on success, -errno on failure
891 struct device *dev = &cfg->dev->dev; in init_luntable()
896 if (lli->in_table) in init_luntable()
899 nports = get_num_ports(lli->port_sel); in init_luntable()
900 if (nports == 0 || nports > cfg->num_fc_ports) { in init_luntable()
902 rc = -EIO; in init_luntable()
908 * When LUN is visible from multiple ports, we will put in init_luntable()
911 for (k = 0; k < cfg->num_fc_ports; k++) { in init_luntable()
912 if (!(lli->port_sel & (1 << k))) in init_luntable()
915 if (cfg->promote_lun_index == cfg->last_lun_index[k]) { in init_luntable()
916 rc = -ENOSPC; in init_luntable()
921 lind = lli->lun_index = cfg->promote_lun_index; in init_luntable()
924 for (k = 0; k < cfg->num_fc_ports; k++) { in init_luntable()
925 if (!(lli->port_sel & (1 << k))) in init_luntable()
929 writeq_be(lli->lun_id[k], &fc_port_luns[lind]); in init_luntable()
930 dev_dbg(dev, "\t%d=%llx\n", k, lli->lun_id[k]); in init_luntable()
933 cfg->promote_lun_index++; in init_luntable()
936 * When LUN is visible only from one port, we will put in init_luntable()
939 chan = PORTMASK2CHAN(lli->port_sel); in init_luntable()
940 if (cfg->promote_lun_index == cfg->last_lun_index[chan]) { in init_luntable()
941 rc = -ENOSPC; in init_luntable()
945 lind = lli->lun_index = cfg->last_lun_index[chan]; in init_luntable()
947 writeq_be(lli->lun_id[chan], &fc_port_luns[lind]); in init_luntable()
948 cfg->last_lun_index[chan]--; in init_luntable()
950 __func__, lind, chan, lli->lun_id[chan]); in init_luntable()
953 lli->in_table = true; in init_luntable()
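
init_luntable() fills the per-port LUN tables from both ends: LUNs visible on multiple ports take the next slot from a shared index that counts up from the top of the table (the same slot on every selected port), LUNs visible on a single port take slots from a per-port index that counts down from the bottom, and the two fronts meeting means the table is full. A simplified, self-contained sketch of that scheme (per-port bookkeeping collapsed into one pair of indices; field names are illustrative):

#include <stdint.h>

struct sketch_luntable {
	uint32_t promote_idx;	/* next slot for multi-port LUNs, counts up    */
	uint32_t last_idx;	/* next slot for single-port LUNs, counts down */
};

/* Returns the slot used, or -1 when the two allocation fronts meet. */
static int64_t sketch_take_slot(struct sketch_luntable *t, int multi_port)
{
	if (t->promote_idx == t->last_idx)
		return -1;			/* -ENOSPC in the driver */

	if (multi_port)
		return t->promote_idx++;	/* top of the table    */
	return t->last_idx--;			/* bottom of the table */
}
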
961 * cxlflash_disk_virtual_open() - open a virtual disk of specified size
965 * On successful return, the user is informed of the resource handle
968 * is zero, the last LBA is reflected as -1.
970 * Return: 0 on success, -errno on failure
974 struct cxlflash_cfg *cfg = shost_priv(sdev->host); in cxlflash_disk_virtual_open()
975 struct device *dev = &cfg->dev->dev; in cxlflash_disk_virtual_open()
976 struct llun_info *lli = sdev->hostdata; in cxlflash_disk_virtual_open()
977 struct glun_info *gli = lli->parent; in cxlflash_disk_virtual_open()
982 u64 ctxid = DECODE_CTXID(virt->context_id), in cxlflash_disk_virtual_open()
983 rctxid = virt->context_id; in cxlflash_disk_virtual_open()
984 u64 lun_size = virt->lun_size; in cxlflash_disk_virtual_open()
986 u64 rsrc_handle = -1; in cxlflash_disk_virtual_open()
996 mutex_lock(&gli->mutex); in cxlflash_disk_virtual_open()
997 if (gli->mode == MODE_NONE) { in cxlflash_disk_virtual_open()
1002 rc = -ENOMEM; in cxlflash_disk_virtual_open()
1012 mutex_unlock(&gli->mutex); in cxlflash_disk_virtual_open()
1023 rc = -EINVAL; in cxlflash_disk_virtual_open()
1031 rc = -EMFILE; /* too many opens */ in cxlflash_disk_virtual_open()
1035 rsrc_handle = (rhte - ctxi->rht_start); in cxlflash_disk_virtual_open()
1038 rhte->nmask = MC_RHT_NMASK; in cxlflash_disk_virtual_open()
1039 rhte->fp = SISL_RHT_FP(0U, ctxi->rht_perms); in cxlflash_disk_virtual_open()
1051 if (virt->hdr.flags & DK_CXLFLASH_UVIRTUAL_NEED_WRITE_SAME) in cxlflash_disk_virtual_open()
1052 ctxi->rht_needs_ws[rsrc_handle] = true; in cxlflash_disk_virtual_open()
1054 virt->hdr.return_flags = 0; in cxlflash_disk_virtual_open()
1055 virt->last_lba = last_lba; in cxlflash_disk_virtual_open()
1056 virt->rsrc_handle = rsrc_handle; in cxlflash_disk_virtual_open()
1058 if (get_num_ports(lli->port_sel) > 1) in cxlflash_disk_virtual_open()
1059 virt->hdr.return_flags |= DK_CXLFLASH_ALL_PORTS_ACTIVE; in cxlflash_disk_virtual_open()
1074 cxlflash_ba_terminate(&gli->blka.ba_lun); in cxlflash_disk_virtual_open()
1075 mutex_unlock(&gli->mutex); in cxlflash_disk_virtual_open()
1080 * clone_lxt() - copies translation tables from source to destination RHTE
1088 * Return: 0 on success, -errno on failure
1097 struct cxlflash_cfg *cfg = afu->parent; in clone_lxt()
1098 struct device *dev = &cfg->dev->dev; in clone_lxt()
1107 ngrps = LXT_NUM_GROUPS(rhte_src->lxt_cnt); in clone_lxt()
1114 rc = -ENOMEM; in clone_lxt()
1119 memcpy(lxt, rhte_src->lxt_start, in clone_lxt()
1120 (sizeof(*lxt) * rhte_src->lxt_cnt)); in clone_lxt()
1127 mutex_lock(&blka->mutex); in clone_lxt()
1129 for (i = 0; i < rhte_src->lxt_cnt; i++) { in clone_lxt()
1131 if (ba_clone(&blka->ba_lun, aun) == -1ULL) { in clone_lxt()
1132 rc = -EIO; in clone_lxt()
1142 dma_wmb(); /* Make LXT updates visible */ in clone_lxt()
1144 rhte->lxt_start = lxt; in clone_lxt()
1145 dma_wmb(); /* Make RHT entry's LXT table update visible */ in clone_lxt()
1147 rhte->lxt_cnt = rhte_src->lxt_cnt; in clone_lxt()
1148 dma_wmb(); /* Make RHT entry's LXT table size update visible */ in clone_lxt()
1152 rc = -EAGAIN; in clone_lxt()
1158 mutex_unlock(&blka->mutex); in clone_lxt()
1163 rhte->lxt_cnt = 0; in clone_lxt()
1165 rhte->lxt_start = NULL; in clone_lxt()
1171 ba_free(&blka->ba_lun, aun); in clone_lxt()
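
If ba_clone() fails partway through, clone_lxt() resets the destination RHTE and walks back over the entries it already cloned, releasing each one, so a failed clone leaves the allocator's reference counts exactly as it found them. A sketch of that all-or-nothing unwind with hypothetical callbacks:

#include <stdint.h>

typedef int (*clone_fn)(uint64_t aun);
typedef void (*release_fn)(uint64_t aun);

static int sketch_clone_all(const uint64_t *auns, int cnt,
			    clone_fn clone, release_fn release)
{
	int i;

	for (i = 0; i < cnt; i++) {
		if (clone(auns[i]) != 0) {
			while (--i >= 0)	/* undo what already succeeded */
				release(auns[i]);
			return -1;
		}
	}
	return 0;
}
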
1178 * cxlflash_disk_clone() - clone a context by making snapshot of another
1183 * in-use virtual resource in the source context. Note that the destination
1187 * Return: 0 on success, -errno on failure
1192 struct cxlflash_cfg *cfg = shost_priv(sdev->host); in cxlflash_disk_clone()
1193 struct device *dev = &cfg->dev->dev; in cxlflash_disk_clone()
1194 struct llun_info *lli = sdev->hostdata; in cxlflash_disk_clone()
1195 struct glun_info *gli = lli->parent; in cxlflash_disk_clone()
1196 struct blka *blka = &gli->blka; in cxlflash_disk_clone()
1197 struct afu *afu = cfg->afu; in cxlflash_disk_clone()
1204 u64 ctxid_src = DECODE_CTXID(clone->context_id_src), in cxlflash_disk_clone()
1205 ctxid_dst = DECODE_CTXID(clone->context_id_dst), in cxlflash_disk_clone()
1206 rctxid_src = clone->context_id_src, in cxlflash_disk_clone()
1207 rctxid_dst = clone->context_id_dst; in cxlflash_disk_clone()
1218 rc = -EINVAL; in cxlflash_disk_clone()
1222 if (unlikely(gli->mode != MODE_VIRTUAL)) { in cxlflash_disk_clone()
1223 rc = -EINVAL; in cxlflash_disk_clone()
1225 __func__, gli->mode); in cxlflash_disk_clone()
1234 rc = -EINVAL; in cxlflash_disk_clone()
1240 if (ctxi_dst->rht_start[i].nmask != 0) { in cxlflash_disk_clone()
1241 rc = -EINVAL; in cxlflash_disk_clone()
1246 list_for_each_entry(lun_access_src, &ctxi_src->luns, list) { in cxlflash_disk_clone()
1248 list_for_each_entry(lun_access_dst, &ctxi_dst->luns, list) in cxlflash_disk_clone()
1249 if (lun_access_dst->sdev == lun_access_src->sdev) { in cxlflash_disk_clone()
1260 rc = -ENOMEM; in cxlflash_disk_clone()
1265 list_add(&lun_access_dst->list, &sidecar); in cxlflash_disk_clone()
1269 if (unlikely(!ctxi_src->rht_out)) { in cxlflash_disk_clone()
1274 /* User specified permission on attach */ in cxlflash_disk_clone()
1275 perms = ctxi_dst->rht_perms; in cxlflash_disk_clone()
1278 * Copy over checked-out RHT (and their associated LXT) entries by in cxlflash_disk_clone()
1289 if (ctxi_src->rht_out == ctxi_dst->rht_out) in cxlflash_disk_clone()
1291 if (ctxi_src->rht_start[i].nmask == 0) in cxlflash_disk_clone()
1295 ctxi_dst->rht_out++; in cxlflash_disk_clone()
1296 ctxi_dst->rht_start[i].nmask = ctxi_src->rht_start[i].nmask; in cxlflash_disk_clone()
1297 ctxi_dst->rht_start[i].fp = in cxlflash_disk_clone()
1298 SISL_RHT_FP_CLONE(ctxi_src->rht_start[i].fp, perms); in cxlflash_disk_clone()
1299 ctxi_dst->rht_lun[i] = ctxi_src->rht_lun[i]; in cxlflash_disk_clone()
1302 &ctxi_dst->rht_start[i], in cxlflash_disk_clone()
1303 &ctxi_src->rht_start[i]); in cxlflash_disk_clone()
1313 rhte_checkin(ctxi_dst, &ctxi_dst->rht_start[i]); in cxlflash_disk_clone()
1317 cxlflash_lun_attach(gli, gli->mode, false); in cxlflash_disk_clone()
1321 list_splice(&sidecar, &ctxi_dst->luns); in cxlflash_disk_clone()