Lines Matching refs:rb_key
(Each match below is a reference to the mkey cache rb-tree key, struct mlx5r_cache_rb_key, in the mlx5 InfiniBand driver's MR cache code; a line shows the source line number, the matched source line, and the enclosing function.)

246 	set_mkc_access_pd_addr_fields(mkc, ent->rb_key.access_flags, 0,  in set_cache_mkc()
250 MLX5_SET(mkc, mkc, access_mode_1_0, ent->rb_key.access_mode & 0x3); in set_cache_mkc()
252 (ent->rb_key.access_mode >> 2) & 0x7); in set_cache_mkc()
253 MLX5_SET(mkc, mkc, ma_translation_mode, !!ent->rb_key.ats); in set_cache_mkc()
256 get_mkc_octo_size(ent->rb_key.access_mode, in set_cache_mkc()
257 ent->rb_key.ndescs)); in set_cache_mkc()
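
The matches above are set_cache_mkc() translating a cache entry's rb_key into an mkey context: access_flags feeds the PD/access-rights fields, access_mode is split across two hardware fields, ats drives ma_translation_mode, and access_mode together with ndescs sizes the translation table through get_mkc_octo_size(). A minimal stand-alone sketch of just the access-mode bit split those two MLX5_SET() lines perform (not a driver helper):

#include <stdio.h>

/*
 * Sketch only: mirrors the bit split visible in the MLX5_SET() calls in
 * set_cache_mkc() above (access_mode_1_0 carries bits [1:0] of the access
 * mode, access_mode_4_2 carries bits [4:2]). Not a driver helper.
 */
static void split_access_mode(unsigned int access_mode,
                              unsigned int *mode_1_0, unsigned int *mode_4_2)
{
        *mode_1_0 = access_mode & 0x3;
        *mode_4_2 = (access_mode >> 2) & 0x7;
}

int main(void)
{
        unsigned int lo, hi;

        split_access_mode(0x5, &lo, &hi);       /* 0x5 is just an example value */
        printf("access_mode_1_0=0x%x access_mode_4_2=0x%x\n", lo, hi);
        return 0;
}
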
663 cmp = cache_ent_key_cmp(cur->rb_key, ent->rb_key); in mlx5_cache_ent_insert()
681 struct mlx5r_cache_rb_key rb_key) in mkey_cache_ent_from_rb_key() argument
693 cmp = cache_ent_key_cmp(cur->rb_key, rb_key); in mkey_cache_ent_from_rb_key()
708 ndescs_limit = max_t(u64, rb_key.ndescs * 2, in mkey_cache_ent_from_rb_key()
712 smallest->rb_key.access_mode == rb_key.access_mode && in mkey_cache_ent_from_rb_key()
713 smallest->rb_key.access_flags == rb_key.access_flags && in mkey_cache_ent_from_rb_key()
714 smallest->rb_key.ats == rb_key.ats && in mkey_cache_ent_from_rb_key()
715 smallest->rb_key.ndescs <= ndescs_limit) ? in mkey_cache_ent_from_rb_key()
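
mlx5_cache_ent_insert() keeps cache entries in an rb-tree ordered by cache_ent_key_cmp(), and mkey_cache_ent_from_rb_key() walks that tree looking for an exact key; failing that, it falls back to the smallest entry whose access_mode, access_flags and ats match and whose ndescs does not exceed a limit of about twice the requested count (the other argument of the max_t() is not visible in these matches). A self-contained sketch of that lookup rule, using a flat array instead of an rb-tree and assuming the key is exactly the four fields referenced above:

#include <stdio.h>
#include <stddef.h>
#include <stdbool.h>

/* Illustrative key layout; field types are assumptions, not the driver's. */
struct rb_key_sketch {
        bool ats;
        unsigned int access_mode;
        unsigned int access_flags;
        unsigned int ndescs;
};

/*
 * Pick an exact match, or else the smallest compatible entry whose ndescs is
 * no more than twice what was asked for. Linear scan instead of an rb-tree
 * purely to keep the sketch self-contained.
 */
static const struct rb_key_sketch *
find_entry(const struct rb_key_sketch *ents, size_t n,
           const struct rb_key_sketch *want)
{
        const struct rb_key_sketch *best = NULL;
        unsigned int limit = want->ndescs * 2;  /* the 2x fallback window */
        size_t i;

        for (i = 0; i < n; i++) {
                const struct rb_key_sketch *e = &ents[i];

                if (e->access_mode != want->access_mode ||
                    e->access_flags != want->access_flags ||
                    e->ats != want->ats)
                        continue;
                if (e->ndescs == want->ndescs)
                        return e;               /* exact hit */
                if (e->ndescs > want->ndescs && e->ndescs <= limit &&
                    (!best || e->ndescs < best->ndescs))
                        best = e;               /* smallest usable larger entry */
        }
        return best;
}

int main(void)
{
        const struct rb_key_sketch ents[] = {
                { .access_mode = 1, .ndescs = 8 },
                { .access_mode = 1, .ndescs = 16 },
                { .access_mode = 1, .ndescs = 64 },
        };
        const struct rb_key_sketch want = { .access_mode = 1, .ndescs = 10 };
        const struct rb_key_sketch *e = find_entry(ents, 3, &want);

        printf("picked ndescs=%u\n", e ? e->ndescs : 0);        /* picks 16 */
        return 0;
}
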
753 mr->mmkey.rb_key = ent->rb_key; in _mlx5_mr_cache_alloc()
787 struct mlx5r_cache_rb_key rb_key = { in mlx5_mr_cache_alloc() local
792 struct mlx5_cache_ent *ent = mkey_cache_ent_from_rb_key(dev, rb_key); in mlx5_mr_cache_alloc()
812 int order = order_base_2(ent->rb_key.ndescs); in mlx5_mkey_cache_debugfs_add_ent()
818 if (ent->rb_key.access_mode == MLX5_MKC_ACCESS_MODE_KSM) in mlx5_mkey_cache_debugfs_add_ent()
874 struct mlx5r_cache_rb_key rb_key, in mlx5r_cache_create_ent_locked() argument
888 ent->rb_key = rb_key; in mlx5r_cache_create_ent_locked()
899 if (rb_key.access_mode == MLX5_MKC_ACCESS_MODE_KSM) in mlx5r_cache_create_ent_locked()
902 order = order_base_2(rb_key.ndescs) - 2; in mlx5r_cache_create_ent_locked()
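
mlx5r_cache_create_ent_locked() copies the caller's key into the new entry and, for non-KSM keys, derives an order from the descriptor count; the KSM branch and the "- 2" adjustment are driver policy these matches only hint at. order_base_2() itself is the kernel's ceiling-log2 helper; a user-space stand-in, assuming only that semantic:

#include <stdio.h>

/*
 * Stand-in for the kernel's order_base_2(): the smallest order such that
 * (1UL << order) >= n, i.e. a ceiling log2. Sketch only.
 */
static unsigned int order_base_2_sketch(unsigned long n)
{
        unsigned int order = 0;

        while ((1UL << order) < n)
                order++;
        return order;
}

int main(void)
{
        unsigned long ndescs;

        for (ndescs = 4; ndescs <= 512; ndescs *= 2)
                printf("ndescs=%lu -> order=%u\n",
                       ndescs, order_base_2_sketch(ndescs));
        return 0;
}
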
926 struct mlx5r_cache_rb_key rb_key = { in mlx5_mkey_cache_init() local
948 rb_key.ndescs = MLX5_MR_CACHE_PERSISTENT_ENTRY_MIN_DESCS << i; in mlx5_mkey_cache_init()
949 ent = mlx5r_cache_create_ent_locked(dev, rb_key, true); in mlx5_mkey_cache_init()
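
mlx5_mkey_cache_init() pre-creates a ladder of persistent entries whose descriptor count doubles at each step, starting from MLX5_MR_CACHE_PERSISTENT_ENTRY_MIN_DESCS. A throwaway sketch of the resulting sizes; the base value and the loop bound below are placeholders, not the driver's constants:

#include <stdio.h>

/*
 * Placeholders: the real MLX5_MR_CACHE_PERSISTENT_ENTRY_MIN_DESCS and the
 * number of persistent entries come from the driver headers; the values
 * below are only for illustration.
 */
#define PERSISTENT_MIN_DESCS_SKETCH 4
#define NUM_PERSISTENT_ENTRIES_SKETCH 8

int main(void)
{
        int i;

        /* Mirrors the rb_key.ndescs = MIN_DESCS << i progression used when
         * mlx5_mkey_cache_init() creates its persistent entries. */
        for (i = 0; i < NUM_PERSISTENT_ENTRIES_SKETCH; i++)
                printf("persistent entry %d: ndescs=%d\n",
                       i, PERSISTENT_MIN_DESCS_SKETCH << i);
        return 0;
}
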
1115 struct mlx5r_cache_rb_key rb_key = {}; in alloc_cacheable_mr() local
1127 rb_key.access_mode = access_mode; in alloc_cacheable_mr()
1128 rb_key.ndescs = ib_umem_num_dma_blocks(umem, page_size); in alloc_cacheable_mr()
1129 rb_key.ats = mlx5_umem_needs_ats(dev, umem, access_flags); in alloc_cacheable_mr()
1130 rb_key.access_flags = get_unchangeable_access_flags(dev, access_flags); in alloc_cacheable_mr()
1131 ent = mkey_cache_ent_from_rb_key(dev, rb_key); in alloc_cacheable_mr()
1142 mr->mmkey.rb_key = rb_key; in alloc_cacheable_mr()
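
alloc_cacheable_mr() derives every field of the key from the registration request: the access mode, the number of DMA blocks the umem needs at the chosen page size, whether the umem wants ATS, and, going by the helper's name, only the access flags that cannot later change, so the key stays valid for the lifetime of a cached mkey. A self-contained sketch of that key-building step; the helper names and bodies below are stand-ins, not the driver's functions:

#include <stdio.h>
#include <stdbool.h>
#include <stddef.h>

/* Illustrative key layout; field types are assumptions, not the driver's. */
struct rb_key_sketch {
        bool ats;
        unsigned int access_mode;
        unsigned int access_flags;
        unsigned int ndescs;
};

/* Stand-ins for ib_umem_num_dma_blocks(), mlx5_umem_needs_ats() and
 * get_unchangeable_access_flags(); the real helpers take driver objects. */
static unsigned int num_dma_blocks(size_t length, size_t page_size)
{
        return (length + page_size - 1) / page_size;
}

static bool umem_needs_ats(unsigned int access_flags)
{
        (void)access_flags;
        return false;           /* placeholder policy */
}

static unsigned int unchangeable_access_flags(unsigned int access_flags)
{
        return access_flags;    /* placeholder: driver masks out changeable bits */
}

/* Mirrors the rb_key population visible in alloc_cacheable_mr() above. */
static struct rb_key_sketch build_key(unsigned int access_mode,
                                      unsigned int access_flags,
                                      size_t length, size_t page_size)
{
        struct rb_key_sketch key = {
                .access_mode  = access_mode,
                .ndescs       = num_dma_blocks(length, page_size),
                .ats          = umem_needs_ats(access_flags),
                .access_flags = unchangeable_access_flags(access_flags),
        };

        return key;
}

int main(void)
{
        struct rb_key_sketch key = build_key(1, 0x7, 1 << 20, 4096);

        printf("ndescs=%u access_mode=%u\n", key.ndescs, key.access_mode);
        return 0;
}
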
1750 return (mr->mmkey.cache_ent->rb_key.ndescs) >= in can_use_umr_rereg_pas()
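
can_use_umr_rereg_pas() appears to gate mkey reuse on the cached entry being large enough for the new mapping; only the left-hand side of the comparison is visible here, so the sketch below is an assumption about the rest of the check:

#include <stdio.h>
#include <stdbool.h>
#include <stddef.h>

/*
 * Sketch of the reuse test hinted at by can_use_umr_rereg_pas(): a cached
 * mkey can be re-pointed at a new mapping only if the entry it came from has
 * at least as many descriptors as the new mapping needs. The "blocks needed"
 * computation is a stand-in for the driver's umem accounting.
 */
static bool can_reuse_cached_mkey(unsigned int cached_ndescs,
                                  size_t new_length, size_t page_size)
{
        size_t needed = (new_length + page_size - 1) / page_size;

        return cached_ndescs >= needed;
}

int main(void)
{
        printf("%d\n", can_reuse_cached_mkey(256, 1 << 20, 4096)); /* 1: 256 >= 256 */
        return 0;
}
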
1965 ent = mkey_cache_ent_from_rb_key(dev, mr->mmkey.rb_key); in cache_ent_find_and_store()
1967 if (ent->rb_key.ndescs == mr->mmkey.rb_key.ndescs) { in cache_ent_find_and_store()
1979 ent = mlx5r_cache_create_ent_locked(dev, mr->mmkey.rb_key, false); in cache_ent_find_and_store()
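
cache_ent_find_and_store() is the return path: the MR's recorded key is looked up, the mkey is parked in the found entry only when its ndescs matches exactly, and otherwise a fresh entry is created for exactly that key so the next lookup of the same shape hits directly. A sketch of just that decision; the types and the NULL convention are assumptions:

#include <stdio.h>
#include <stdbool.h>

/* Illustrative key; only ndescs matters for this decision. */
struct key_sketch {
        unsigned int ndescs;
};

enum store_action { STORE_IN_FOUND_ENTRY, CREATE_NEW_ENTRY };

/*
 * Decision sketch for the return-to-cache path in cache_ent_find_and_store():
 * park the mkey in the looked-up entry only on an exact ndescs match,
 * otherwise create a new entry keyed on the MR's own key. A NULL 'found'
 * stands for "no entry for this key yet".
 */
static enum store_action choose_store(const struct key_sketch *found,
                                      const struct key_sketch *mr_key)
{
        if (found && found->ndescs == mr_key->ndescs)
                return STORE_IN_FOUND_ENTRY;
        return CREATE_NEW_ENTRY;
}

int main(void)
{
        struct key_sketch found = { .ndescs = 16 }, mine = { .ndescs = 10 };

        printf("%s\n", choose_store(&found, &mine) == CREATE_NEW_ENTRY ?
               "create new entry" : "store in found entry");
        return 0;
}
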