Lines Matching refs:channel

197 err = mlx5e_enable_sq(sq, param, &priv->channel[ix].bfreg, priv->rl.tisn); in mlx5e_rl_open_sq()
477 iq_channel = &sq->priv->channel[sq->sqn % sq->priv->params.num_channels]; in mlx5e_rl_remap_sq()
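
The remap fragment above folds the rate-limit SQ number onto the regular channel array with a modulo. A minimal sketch of that mapping; the helper name is made up, only the modulo expression comes from the listing:

/* Hedged sketch: pick a regular channel index for a rate-limit SQ, as the
 * iq_channel assignment above does.  Helper name is illustrative. */
static unsigned int
rl_pick_iq_channel(unsigned int sqn, unsigned int num_channels)
{
	return (sqn % num_channels);
}
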
499 struct mlx5e_rl_channel *channel, uint64_t rate) in mlx5e_rlw_channel_set_rate_locked() argument
553 channel->last_rate != 0 && rate != 0; in mlx5e_rlw_channel_set_rate_locked()
556 temp = channel->last_rate; in mlx5e_rlw_channel_set_rate_locked()
557 channel->last_rate = rate; in mlx5e_rlw_channel_set_rate_locked()
561 temp = channel->last_burst; in mlx5e_rlw_channel_set_rate_locked()
562 channel->last_burst = burst; in mlx5e_rlw_channel_set_rate_locked()
573 sq = channel->sq; in mlx5e_rlw_channel_set_rate_locked()
575 if (!use_sq_remap || mlx5e_rl_remap_sq(sq, index, channel)) { in mlx5e_rlw_channel_set_rate_locked()
576 while (atomic_load_int(&channel->refcount) != 0 && in mlx5e_rlw_channel_set_rate_locked()
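
The set-rate fragments cache the previous rate and burst and gate the SQ remap on both the old and new rate being non-zero, otherwise spinning until the channel's refcount drains. A hedged sketch of that decision; only the non-zero test comes from the listing, the rest is illustrative:

#include <stdbool.h>
#include <stdint.h>

/* Remapping the SQ to another hardware rate index is only attempted when
 * the channel moves between two non-zero rates; transitions to or from an
 * unlimited (zero) rate fall back to the slower drain path. */
static bool
rl_can_remap_sq(uint64_t last_rate, uint64_t new_rate)
{
	return (last_rate != 0 && new_rate != 0);
}
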
597 struct mlx5e_rl_channel *channel; in mlx5e_rl_worker() local
621 struct mlx5e_rl_channel *channel = rlw->channels + x; in mlx5e_rl_worker() local
624 if (channel->state == MLX5E_RL_ST_FREE) in mlx5e_rl_worker()
631 &priv->rl.chan_param, &channel->sq); in mlx5e_rl_worker()
640 mlx5e_rlw_channel_set_rate_locked(rlw, channel, channel->init_rate); in mlx5e_rl_worker()
652 channel = STAILQ_FIRST(&rlw->process_head); in mlx5e_rl_worker()
653 if (channel != NULL) { in mlx5e_rl_worker()
656 switch (channel->state) { in mlx5e_rl_worker()
658 channel->state = MLX5E_RL_ST_USED; in mlx5e_rl_worker()
662 if (channel->sq == NULL) { in mlx5e_rl_worker()
665 &priv->rl.chan_param, &channel->sq); in mlx5e_rl_worker()
675 mlx5e_resume_sq(channel->sq); in mlx5e_rl_worker()
680 error = mlx5e_rlw_channel_set_rate_locked(rlw, channel, in mlx5e_rl_worker()
681 channel->new_rate * 8ULL); in mlx5e_rl_worker()
690 error = mlx5e_rlw_channel_set_rate_locked(rlw, channel, 0); in mlx5e_rl_worker()
696 if (channel->sq != NULL) { in mlx5e_rl_worker()
703 mlx5e_drain_sq(channel->sq); in mlx5e_rl_worker()
707 STAILQ_INSERT_HEAD(&rlw->index_list_head, channel, entry); in mlx5e_rl_worker()
708 channel->state = MLX5E_RL_ST_FREE; in mlx5e_rl_worker()
720 struct mlx5e_rl_channel *channel = rlw->channels + x; in mlx5e_rl_worker() local
723 channel->init_rate = channel->last_rate; in mlx5e_rl_worker()
726 mlx5e_rlw_channel_set_rate_locked(rlw, channel, 0); in mlx5e_rl_worker()
728 if (channel->sq != NULL) { in mlx5e_rl_worker()
730 mlx5e_rl_close_channel(&channel->sq); in mlx5e_rl_worker()
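
Taken together, the worker fragments outline a small per-channel state machine: channels start on index_list_head in MLX5E_RL_ST_FREE, requests are queued on process_head, and the worker pops the head and acts on its state, returning destroyed channels to the free list. A simplified userland model of that loop, assuming nothing beyond what the fragments show (locking, SQ creation/drain and the driver's real types are omitted; names other than the queues and states are illustrative):

#include <sys/queue.h>
#include <stdint.h>

/* Illustrative stand-ins for the driver's channel states. */
enum rl_state { RL_ST_FREE, RL_ST_USED, RL_ST_MODIFY, RL_ST_DESTROY };

struct rl_channel {
	enum rl_state state;
	uint64_t new_rate;		/* requested rate, set by the consumer */
	uint64_t last_rate;		/* rate currently applied */
	STAILQ_ENTRY(rl_channel) entry;
};

struct rl_worker {
	STAILQ_HEAD(, rl_channel) index_list_head;	/* free channels */
	STAILQ_HEAD(, rl_channel) process_head;		/* queued requests */
};

/* One pass of the worker loop: take the first queued channel and act on
 * its state, as the switch in the fragments above does. */
static void
rl_worker_step(struct rl_worker *rlw)
{
	struct rl_channel *c = STAILQ_FIRST(&rlw->process_head);

	if (c == NULL)
		return;
	STAILQ_REMOVE_HEAD(&rlw->process_head, entry);

	switch (c->state) {
	case RL_ST_MODIFY:
		c->last_rate = c->new_rate;	/* apply the queued change */
		c->state = RL_ST_USED;
		break;
	case RL_ST_DESTROY:
		c->last_rate = 0;		/* clear the rate */
		c->state = RL_ST_FREE;
		STAILQ_INSERT_HEAD(&rlw->index_list_head, c, entry);
		break;
	default:
		break;
	}
}
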
955 struct mlx5e_rl_channel *channel = rlw->channels + i; in mlx5e_rl_init() local
956 channel->worker = rlw; in mlx5e_rl_init()
957 STAILQ_INSERT_TAIL(&rlw->index_list_head, channel, entry); in mlx5e_rl_init()
1101 struct mlx5e_rl_channel *channel) in mlx5e_rlw_queue_channel_locked() argument
1103 STAILQ_INSERT_TAIL(&rlw->process_head, channel, entry); in mlx5e_rlw_queue_channel_locked()
1108 mlx5e_rl_free(struct mlx5e_rl_worker *rlw, struct mlx5e_rl_channel *channel) in mlx5e_rl_free() argument
1110 if (channel == NULL) in mlx5e_rl_free()
1114 switch (channel->state) { in mlx5e_rl_free()
1116 channel->state = MLX5E_RL_ST_DESTROY; in mlx5e_rl_free()
1119 channel->state = MLX5E_RL_ST_DESTROY; in mlx5e_rl_free()
1120 mlx5e_rlw_queue_channel_locked(rlw, channel); in mlx5e_rl_free()
1129 mlx5e_rl_modify(struct mlx5e_rl_worker *rlw, struct mlx5e_rl_channel *channel, uint64_t rate) in mlx5e_rl_modify() argument
1133 channel->new_rate = rate; in mlx5e_rl_modify()
1134 switch (channel->state) { in mlx5e_rl_modify()
1136 channel->state = MLX5E_RL_ST_MODIFY; in mlx5e_rl_modify()
1137 mlx5e_rlw_queue_channel_locked(rlw, channel); in mlx5e_rl_modify()
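
mlx5e_rl_free() and mlx5e_rl_modify() are the producers for that queue: modify records the new rate and queues a used channel as MLX5E_RL_ST_MODIFY; free marks the channel MLX5E_RL_ST_DESTROY and, judging by the fragments, only queues it when it is not already on process_head (a channel in the MODIFY state is queued already, so flipping its state is enough). A hedged sketch of the two entry points, continuing the model above; the real driver does this under the worker lock, which is omitted here:

static void
rl_free_channel(struct rl_worker *rlw, struct rl_channel *c)
{
	switch (c->state) {
	case RL_ST_MODIFY:
		/* already on process_head, just retarget the request */
		c->state = RL_ST_DESTROY;
		break;
	case RL_ST_USED:
		c->state = RL_ST_DESTROY;
		STAILQ_INSERT_TAIL(&rlw->process_head, c, entry);
		break;
	default:
		break;
	}
}

static void
rl_modify_channel(struct rl_worker *rlw, struct rl_channel *c, uint64_t rate)
{
	c->new_rate = rate;
	if (c->state == RL_ST_USED) {
		c->state = RL_ST_MODIFY;
		STAILQ_INSERT_TAIL(&rlw->process_head, c, entry);
	}
}
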
1148 mlx5e_rl_query(struct mlx5e_rl_worker *rlw, struct mlx5e_rl_channel *channel, in mlx5e_rl_query() argument
1154 switch (channel->state) { in mlx5e_rl_query()
1156 params->rate_limit.max_rate = channel->last_rate; in mlx5e_rl_query()
1157 params->rate_limit.queue_level = mlx5e_sq_queue_level(channel->sq); in mlx5e_rl_query()
1161 params->rate_limit.max_rate = channel->last_rate; in mlx5e_rl_query()
1162 params->rate_limit.queue_level = mlx5e_sq_queue_level(channel->sq); in mlx5e_rl_query()
1178 struct mlx5e_rl_channel *channel; in mlx5e_find_available_tx_ring_index() local
1183 if ((channel = STAILQ_FIRST(&rlw->index_list_head)) != NULL) { in mlx5e_find_available_tx_ring_index()
1187 channel->state = MLX5E_RL_ST_USED; in mlx5e_find_available_tx_ring_index()
1194 *pchannel = channel; in mlx5e_find_available_tx_ring_index()
1197 "Channel pointer for rate limit connection is %p\n", channel); in mlx5e_find_available_tx_ring_index()
1207 struct mlx5e_rl_channel *channel; in mlx5e_rl_snd_tag_alloc() local
1224 error = mlx5e_find_available_tx_ring_index(rlw, &channel); in mlx5e_rl_snd_tag_alloc()
1228 error = mlx5e_rl_modify(rlw, channel, params->rate_limit.max_rate); in mlx5e_rl_snd_tag_alloc()
1230 mlx5e_rl_free(rlw, channel); in mlx5e_rl_snd_tag_alloc()
1235 MPASS(channel->tag.refcount == 0); in mlx5e_rl_snd_tag_alloc()
1236 m_snd_tag_init(&channel->tag, ifp, &mlx5e_rl_snd_tag_sw); in mlx5e_rl_snd_tag_alloc()
1237 *ppmt = &channel->tag; in mlx5e_rl_snd_tag_alloc()
1246 struct mlx5e_rl_channel *channel = in mlx5e_rl_snd_tag_modify() local
1249 return (mlx5e_rl_modify(channel->worker, channel, params->rate_limit.max_rate)); in mlx5e_rl_snd_tag_modify()
1255 struct mlx5e_rl_channel *channel = in mlx5e_rl_snd_tag_query() local
1258 return (mlx5e_rl_query(channel->worker, channel, params)); in mlx5e_rl_snd_tag_query()
1264 struct mlx5e_rl_channel *channel = in mlx5e_rl_snd_tag_free() local
1267 mlx5e_rl_free(channel->worker, channel); in mlx5e_rl_snd_tag_free()
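
The snd_tag fragments are the glue to the network stack: allocation picks a channel, programs the requested rate, initializes the embedded tag with m_snd_tag_init() and returns its address, while the modify/query/free callbacks recover the channel from the tag pointer and delegate to the worker-level helpers. The usual way back from an embedded tag to its channel is a container_of-style cast; a hedged sketch with illustrative types (only the embedded 'tag' member mirrors the listing):

#include <stddef.h>
#include <stdint.h>

struct rl_tag { int refcount; };	/* stand-in for the stack's send tag */

struct rl_tagged_channel {
	struct rl_tag tag;		/* embedded tag handed to the stack */
	uint64_t last_rate;
};

/* Recover the channel from the tag pointer the stack passes back,
 * container_of style. */
static struct rl_tagged_channel *
rl_channel_from_tag(struct rl_tag *pmt)
{
	return ((struct rl_tagged_channel *)
	    ((char *)pmt - offsetof(struct rl_tagged_channel, tag)));
}
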
1325 struct mlx5e_rl_channel *channel; in mlx5e_rl_refresh_channel_params() local
1328 channel = rlw->channels + x; in mlx5e_rl_refresh_channel_params()
1329 sq = channel->sq; in mlx5e_rl_refresh_channel_params()
1359 struct mlx5e_rl_channel *channel; in mlx5e_rl_refresh_sq_inline() local
1362 channel = rlw->channels + x; in mlx5e_rl_refresh_sq_inline()
1363 sq = channel->sq; in mlx5e_rl_refresh_sq_inline()