Lines Matching refs:rl

Cross-reference listing of every line that touches the TX rate-limit state rl (a struct mlx5e_rl_priv_data, usually reached as &priv->rl) in the FreeBSD mlx5en(4) driver's rate-limit code. Each entry gives the source line number, the matching line, and the enclosing function; the trailing "argument" and "local" tags mark where rl is a function parameter or a local variable.

38 static void mlx5e_rl_sysctl_add_stats_u64_oid(struct mlx5e_rl_priv_data *rl, unsigned x,
54 mlx5e_rl_build_sq_param(struct mlx5e_rl_priv_data *rl, in mlx5e_rl_build_sq_param() argument
59 uint8_t log_sq_size = order_base_2(rl->param.tx_queue_size); in mlx5e_rl_build_sq_param()
63 MLX5_SET(wq, wq, pd, rl->priv->pdn); in mlx5e_rl_build_sq_param()
69 mlx5e_rl_build_cq_param(struct mlx5e_rl_priv_data *rl, in mlx5e_rl_build_cq_param() argument
73 uint8_t log_sq_size = order_base_2(rl->param.tx_queue_size); in mlx5e_rl_build_cq_param()
76 MLX5_SET(cqc, cqc, cq_period, rl->param.tx_coalesce_usecs); in mlx5e_rl_build_cq_param()
77 MLX5_SET(cqc, cqc, cq_max_count, rl->param.tx_coalesce_pkts); in mlx5e_rl_build_cq_param()
78 MLX5_SET(cqc, cqc, uar_page, rl->priv->mdev->priv.uar->index); in mlx5e_rl_build_cq_param()
80 switch (rl->param.tx_coalesce_mode) { in mlx5e_rl_build_cq_param()
85 if (MLX5_CAP_GEN(rl->priv->mdev, cq_period_start_from_cqe)) in mlx5e_rl_build_cq_param()
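The MLX5_SET() calls above program the completion queue's moderation timer (cq_period), packet budget (cq_max_count) and UAR page, and the switch on tx_coalesce_mode at line 80 only selects CQE-based moderation when the cq_period_start_from_cqe capability is present (line 85). A minimal userspace model of that selection logic; the names here are illustrative, not the driver's:

    #include <stdbool.h>
    #include <stdint.h>

    enum cq_period_mode { CQ_MODE_EQE, CQ_MODE_CQE };

    struct cq_moderation {
            uint16_t usecs;              /* cq_period: moderation timeout */
            uint16_t pkts;               /* cq_max_count: packet budget */
            enum cq_period_mode mode;
    };

    static void
    build_cq_moderation(struct cq_moderation *cm, uint16_t usecs,
        uint16_t pkts, bool want_cqe_mode, bool hw_supports_cqe_mode)
    {
            cm->usecs = usecs;
            cm->pkts = pkts;
            /* Fall back to EQE-based moderation when the cap is absent. */
            cm->mode = (want_cqe_mode && hw_supports_cqe_mode) ?
                CQ_MODE_CQE : CQ_MODE_EQE;
    }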
94 mlx5e_rl_build_channel_param(struct mlx5e_rl_priv_data *rl, in mlx5e_rl_build_channel_param() argument
99 mlx5e_rl_build_sq_param(rl, &cparam->sq); in mlx5e_rl_build_channel_param()
100 mlx5e_rl_build_cq_param(rl, &cparam->cq); in mlx5e_rl_build_channel_param()
197 err = mlx5e_enable_sq(sq, param, &priv->channel[ix].bfreg, priv->rl.tisn); in mlx5e_rl_open_sq()
235 sq->cev_factor = priv->rl.param.tx_completion_fact; in mlx5e_rl_chan_mtx_init()
282 atomic_add_64(&priv->rl.stats.tx_allocate_resource_failure, 1ULL); in mlx5e_rl_open_channel()
313 mlx5e_rl_sync_tx_completion_fact(struct mlx5e_rl_priv_data *rl) in mlx5e_rl_sync_tx_completion_fact() argument
324 uint64_t max = rl->param.tx_queue_size / in mlx5e_rl_sync_tx_completion_fact()
336 rl->param.tx_completion_fact_max = max; in mlx5e_rl_sync_tx_completion_fact()
342 if (rl->param.tx_completion_fact < 1) in mlx5e_rl_sync_tx_completion_fact()
343 rl->param.tx_completion_fact = 1; in mlx5e_rl_sync_tx_completion_fact()
344 else if (rl->param.tx_completion_fact > max) in mlx5e_rl_sync_tx_completion_fact()
345 rl->param.tx_completion_fact = max; in mlx5e_rl_sync_tx_completion_fact()
387 mlx5e_rl_find_best_rate_locked(struct mlx5e_rl_priv_data *rl, uint64_t user_rate) in mlx5e_rl_find_best_rate_locked() argument
395 for (x = 0; x != rl->param.tx_rates_def; x++) { in mlx5e_rl_find_best_rate_locked()
396 uint64_t rate = rl->rate_limit_table[x]; in mlx5e_rl_find_best_rate_locked()
413 if (user_rate > rl->param.tx_limit_max) in mlx5e_rl_find_best_rate_locked()
414 user_rate = rl->param.tx_limit_max; in mlx5e_rl_find_best_rate_locked()
418 rl->param.tx_allowed_deviation, 1000ULL)) in mlx5e_rl_find_best_rate_locked()
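mlx5e_rl_find_best_rate_locked() scans the rate table, clamps the request to tx_limit_max (lines 413-414) and accepts a candidate only within tx_allowed_deviation, expressed in parts per thousand (the 1000ULL divisor at line 418). A hedged standalone version of that search; the driver's exact rounding and tie-breaking may differ:

    #include <stdint.h>

    /*
     * Return the table rate closest to user_rate, or 0 if no entry lies
     * within +/- deviation/1000 of the (clamped) request.
     */
    static uint64_t
    find_best_rate(const uint64_t *table, unsigned nrates,
        uint64_t user_rate, uint64_t rate_max, uint64_t deviation)
    {
            uint64_t best = 0;
            uint64_t best_diff = UINT64_MAX;
            unsigned x;

            if (user_rate > rate_max)
                    user_rate = rate_max;

            for (x = 0; x != nrates; x++) {
                    uint64_t rate = table[x];
                    uint64_t diff;

                    if (rate == 0)          /* unused slot */
                            continue;
                    diff = (rate > user_rate) ? rate - user_rate :
                        user_rate - rate;
                    if (diff < best_diff) {
                            best_diff = diff;
                            best = rate;
                    }
            }
            /* Reject the best match if it deviates more than allowed. */
            if (best != 0 && best_diff > (user_rate * deviation) / 1000)
                    best = 0;
            return (best);
    }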
501 struct mlx5e_rl_priv_data *rl = &rlw->priv->rl; in mlx5e_rlw_channel_set_rate_locked() local
512 MLX5E_RL_RLOCK(rl); in mlx5e_rlw_channel_set_rate_locked()
515 temp = rl->param.tx_burst_size * in mlx5e_rlw_channel_set_rate_locked()
524 rate = mlx5e_rl_find_best_rate_locked(rl, rate); in mlx5e_rlw_channel_set_rate_locked()
526 MLX5E_RL_RUNLOCK(rl); in mlx5e_rlw_channel_set_rate_locked()
532 atomic_add_64(&rlw->priv->rl.stats.tx_modify_rate_failure, 1ULL); in mlx5e_rlw_channel_set_rate_locked()
542 atomic_add_64(&rlw->priv->rl.stats.tx_add_new_rate_failure, 1ULL); in mlx5e_rlw_channel_set_rate_locked()
582 atomic_add_64(&rlw->priv->rl.stats.tx_modify_rate_failure, 1ULL); in mlx5e_rlw_channel_set_rate_locked()
613 ix = (rlw - priv->rl.workers) % in mlx5e_rl_worker()
620 for (x = 0; x < priv->rl.param.tx_channels_per_worker_def; x++) { in mlx5e_rl_worker()
629 MLX5E_RL_RLOCK(&priv->rl); in mlx5e_rl_worker()
631 &priv->rl.chan_param, &channel->sq); in mlx5e_rl_worker()
632 MLX5E_RL_RUNLOCK(&priv->rl); in mlx5e_rl_worker()
663 MLX5E_RL_RLOCK(&priv->rl); in mlx5e_rl_worker()
665 &priv->rl.chan_param, &channel->sq); in mlx5e_rl_worker()
666 MLX5E_RL_RUNLOCK(&priv->rl); in mlx5e_rl_worker()
672 atomic_add_64(&rlw->priv->rl.stats.tx_open_queues, 1ULL); in mlx5e_rl_worker()
709 atomic_add_64(&priv->rl.stats.tx_active_connections, -1ULL); in mlx5e_rl_worker()
719 for (x = 0; x < priv->rl.param.tx_channels_per_worker_def; x++) { in mlx5e_rl_worker()
731 atomic_add_64(&rlw->priv->rl.stats.tx_open_queues, -1ULL); in mlx5e_rl_worker()
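At line 613 the worker recovers its own index by pointer arithmetic against the base of the workers array, a common idiom when a thread is handed only its per-worker structure. A small runnable illustration with simplified types:

    #include <stdio.h>

    struct worker { int dummy; };

    int
    main(void)
    {
            struct worker workers[4];
            struct worker *rlw = &workers[2];   /* as passed to the thread */

            /* Recover the index from the element and array base pointers. */
            unsigned ix = (unsigned)(rlw - workers) % 4;
            printf("worker index: %u\n", ix);   /* prints 2 */
            return (0);
    }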
755 return (mlx5_core_create_tis(mdev, in, sizeof(in), &priv->rl.tisn)); in mlx5e_rl_open_tis()
761 mlx5_core_destroy_tis(priv->mdev, priv->rl.tisn, 0); in mlx5e_rl_close_tis()
851 struct mlx5e_rl_priv_data *rl = &priv->rl; in mlx5e_rl_init() local
863 rl->priv = priv; in mlx5e_rl_init()
865 sysctl_ctx_init(&rl->ctx); in mlx5e_rl_init()
867 sx_init(&rl->rl_sxlock, "ratelimit-sxlock"); in mlx5e_rl_init()
875 mlx5e_rl_set_default_params(&rl->param, priv->mdev); in mlx5e_rl_init()
878 mlx5e_rl_sync_tx_completion_fact(rl); in mlx5e_rl_init()
881 node = SYSCTL_ADD_NODE(&rl->ctx, in mlx5e_rl_init()
888 mlx5e_rl_sysctl_add_u64_oid(rl, in mlx5e_rl_init()
894 stats = SYSCTL_ADD_NODE(&rl->ctx, SYSCTL_CHILDREN(node), in mlx5e_rl_init()
900 mlx5e_rl_sysctl_add_stats_u64_oid(rl, i, in mlx5e_rl_init()
908 rl->workers = malloc(sizeof(rl->workers[0]) * in mlx5e_rl_init()
909 rl->param.tx_worker_threads_def, M_MLX5EN, M_WAITOK | M_ZERO); in mlx5e_rl_init()
912 rl->rate_limit_table = malloc(sizeof(rl->rate_limit_table[0]) * in mlx5e_rl_init()
913 rl->param.tx_rates_def, M_MLX5EN, M_WAITOK | M_ZERO); in mlx5e_rl_init()
917 SYSCTL_ADD_PROC(&rl->ctx, SYSCTL_CHILDREN(node), OID_AUTO, in mlx5e_rl_init()
919 CTLFLAG_MPSAFE, rl, 0, &mlx5e_rl_sysctl_show_rate_table, in mlx5e_rl_init()
923 for (i = 0; i != rl->param.tx_rates_def; i++) { in mlx5e_rl_init()
928 mlx5e_rl_tx_limit_add(rl, j); in mlx5e_rl_init()
933 mlx5e_rl_sysctl_add_u64_oid(rl, in mlx5e_rl_init()
940 for (j = 0; j < rl->param.tx_worker_threads_def; j++) { in mlx5e_rl_init()
941 struct mlx5e_rl_worker *rlw = rl->workers + j; in mlx5e_rl_init()
951 rl->param.tx_channels_per_worker_def, M_MLX5EN, M_WAITOK | M_ZERO); in mlx5e_rl_init()
954 for (i = 0; i < rl->param.tx_channels_per_worker_def; i++) { in mlx5e_rl_init()
974 sysctl_ctx_free(&rl->ctx); in mlx5e_rl_init()
975 sx_destroy(&rl->rl_sxlock); in mlx5e_rl_init()
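mlx5e_rl_init() allocates the workers array and the rate-limit table zeroed with M_WAITOK | M_ZERO (lines 908-913) and, on a later failure, tears down the sysctl context and the sx lock it created first (lines 974-975). A userspace analog of that construct-in-order, unwind-in-reverse pattern, with calloc() standing in for the zeroing kernel allocator:

    #include <errno.h>
    #include <pthread.h>
    #include <stdint.h>
    #include <stdlib.h>

    struct rl_data {
            pthread_rwlock_t lock;              /* stands in for rl_sxlock */
            struct worker { int dummy; } *workers;
            uint64_t *rate_table;
            unsigned nworkers, nrates;
    };

    static int
    rl_init(struct rl_data *rl, unsigned nworkers, unsigned nrates)
    {
            if (pthread_rwlock_init(&rl->lock, NULL) != 0)
                    return (ENOMEM);
            rl->workers = calloc(nworkers, sizeof(rl->workers[0]));
            rl->rate_table = calloc(nrates, sizeof(rl->rate_table[0]));
            if (rl->workers == NULL || rl->rate_table == NULL) {
                    /* Unwind in reverse order of construction. */
                    free(rl->rate_table);
                    free(rl->workers);
                    pthread_rwlock_destroy(&rl->lock);
                    return (ENOMEM);
            }
            rl->nworkers = nworkers;
            rl->nrates = nrates;
            return (0);
    }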
982 struct mlx5e_rl_priv_data *rl = &priv->rl; in mlx5e_rl_open_workers() local
988 if (priv->gone || rl->opened) in mlx5e_rl_open_workers()
991 MLX5E_RL_WLOCK(rl); in mlx5e_rl_open_workers()
993 mlx5e_rl_build_channel_param(rl, &rl->chan_param); in mlx5e_rl_open_workers()
994 MLX5E_RL_WUNLOCK(rl); in mlx5e_rl_open_workers()
996 for (j = 0; j < rl->param.tx_worker_threads_def; j++) { in mlx5e_rl_open_workers()
997 struct mlx5e_rl_worker *rlw = rl->workers + j; in mlx5e_rl_open_workers()
1003 mlx5_en_err(rl->priv->ifp, in mlx5e_rl_open_workers()
1009 rl->opened = 1; in mlx5e_rl_open_workers()
1017 struct mlx5e_rl_priv_data *rl = &priv->rl; in mlx5e_rl_close_workers() local
1020 if (rl->opened == 0) in mlx5e_rl_close_workers()
1024 for (y = 0; y < rl->param.tx_worker_threads_def; y++) { in mlx5e_rl_close_workers()
1025 struct mlx5e_rl_worker *rlw = rl->workers + y; in mlx5e_rl_close_workers()
1040 for (y = 0; y < rl->param.tx_worker_threads_def; y++) { in mlx5e_rl_close_workers()
1041 struct mlx5e_rl_worker *rlw = rl->workers + y; in mlx5e_rl_close_workers()
1050 rl->opened = 0; in mlx5e_rl_close_workers()
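mlx5e_rl_close_workers() walks the worker array twice (lines 1024 and 1040), the shape of a two-phase shutdown: first ask every worker to stop, then wait for each to acknowledge, so no worker's exit delays another's stop request. The loop bodies are not shown above, so the following pthread sketch is an assumption about their intent, not the driver's code:

    #include <pthread.h>

    struct worker {
            pthread_mutex_t mtx;
            pthread_cond_t cv;
            int stop, stopped;   /* worker sets stopped and signals cv on exit */
    };

    static void
    close_workers(struct worker *w, unsigned n)
    {
            unsigned y;

            for (y = 0; y < n; y++) {           /* phase 1: request stop */
                    pthread_mutex_lock(&w[y].mtx);
                    w[y].stop = 1;
                    pthread_cond_broadcast(&w[y].cv);
                    pthread_mutex_unlock(&w[y].mtx);
            }
            for (y = 0; y < n; y++) {           /* phase 2: wait for exit */
                    pthread_mutex_lock(&w[y].mtx);
                    while (!w[y].stopped)
                            pthread_cond_wait(&w[y].cv, &w[y].mtx);
                    pthread_mutex_unlock(&w[y].mtx);
            }
    }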
1054 mlx5e_rl_reset_rates(struct mlx5e_rl_priv_data *rl) in mlx5e_rl_reset_rates() argument
1058 MLX5E_RL_WLOCK(rl); in mlx5e_rl_reset_rates()
1059 for (x = 0; x != rl->param.tx_rates_def; x++) in mlx5e_rl_reset_rates()
1060 rl->rate_limit_table[x] = 0; in mlx5e_rl_reset_rates()
1061 MLX5E_RL_WUNLOCK(rl); in mlx5e_rl_reset_rates()
1067 struct mlx5e_rl_priv_data *rl = &priv->rl; in mlx5e_rl_cleanup() local
1076 sysctl_ctx_free(&rl->ctx); in mlx5e_rl_cleanup()
1082 mlx5e_rl_reset_rates(rl); in mlx5e_rl_cleanup()
1087 for (y = 0; y < rl->param.tx_worker_threads_def; y++) { in mlx5e_rl_cleanup()
1088 struct mlx5e_rl_worker *rlw = rl->workers + y; in mlx5e_rl_cleanup()
1094 free(rl->rate_limit_table, M_MLX5EN); in mlx5e_rl_cleanup()
1095 free(rl->workers, M_MLX5EN); in mlx5e_rl_cleanup()
1096 sx_destroy(&rl->rl_sxlock); in mlx5e_rl_cleanup()
1188 atomic_add_64(&rlw->priv->rl.stats.tx_active_connections, 1ULL); in mlx5e_find_available_tx_ring_index()
1190 atomic_add_64(&rlw->priv->rl.stats.tx_available_resource_failure, 1ULL); in mlx5e_find_available_tx_ring_index()
1221 rlw = priv->rl.workers + ((params->rate_limit.hdr.flowid % 128) % in mlx5e_rl_snd_tag_alloc()
1222 priv->rl.param.tx_worker_threads_def); in mlx5e_rl_snd_tag_alloc()
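Send-tag allocation picks a worker by hashing the flow ID, first modulo 128 and then modulo the worker-thread count (lines 1221-1222), so a given flow always lands on the same worker. The mapping as a one-liner:

    #include <stdint.h>

    /* Map a flow ID onto one of nworkers rate-limit workers. */
    static unsigned
    flowid_to_worker(uint32_t flowid, unsigned nworkers)
    {
            return ((flowid % 128) % nworkers);
    }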
1273 struct mlx5e_rl_priv_data *rl = arg1; in mlx5e_rl_sysctl_show_rate_table() local
1274 struct mlx5e_priv *priv = rl->priv; in mlx5e_rl_sysctl_show_rate_table()
1285 sbuf_new_for_sysctl(&sbuf, NULL, 128 * rl->param.tx_rates_def, req); in mlx5e_rl_sysctl_show_rate_table()
1291 MLX5E_RL_RLOCK(rl); in mlx5e_rl_sysctl_show_rate_table()
1292 for (x = 0; x != rl->param.tx_rates_def; x++) { in mlx5e_rl_sysctl_show_rate_table()
1293 if (rl->rate_limit_table[x] == 0) in mlx5e_rl_sysctl_show_rate_table()
1297 x, (unsigned)rl->param.tx_burst_size, in mlx5e_rl_sysctl_show_rate_table()
1298 (long long)rl->rate_limit_table[x]); in mlx5e_rl_sysctl_show_rate_table()
1300 MLX5E_RL_RUNLOCK(rl); in mlx5e_rl_sysctl_show_rate_table()
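The rate-table sysctl (lines 1273-1300) uses FreeBSD's usual recipe for text-producing sysctls: size an sbuf from the table length, print every non-empty slot under the read lock, then finish and delete the sbuf. A userspace rendering with libsbuf (link with -lsbuf); locking and error handling are abbreviated, and the printed units are an assumption:

    #include <sys/sbuf.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Format the non-empty rate-table entries, one per line. */
    static void
    show_rate_table(const uint64_t *table, unsigned nrates,
        unsigned burst_size)
    {
            struct sbuf *sb = sbuf_new_auto();
            unsigned x;

            for (x = 0; x != nrates; x++) {
                    if (table[x] == 0)      /* skip unused slots */
                            continue;
                    sbuf_printf(sb, "%3u: burst %u, rate %lld\n",
                        x, burst_size, (long long)table[x]);
            }
            if (sbuf_finish(sb) == 0)
                    fputs(sbuf_data(sb), stdout);
            sbuf_delete(sb);
    }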
1311 mlx5e_rl_refresh_channel_params(struct mlx5e_rl_priv_data *rl) in mlx5e_rl_refresh_channel_params() argument
1316 MLX5E_RL_WLOCK(rl); in mlx5e_rl_refresh_channel_params()
1318 mlx5e_rl_build_channel_param(rl, &rl->chan_param); in mlx5e_rl_refresh_channel_params()
1319 MLX5E_RL_WUNLOCK(rl); in mlx5e_rl_refresh_channel_params()
1321 for (y = 0; y != rl->param.tx_worker_threads_def; y++) { in mlx5e_rl_refresh_channel_params()
1322 struct mlx5e_rl_worker *rlw = rl->workers + y; in mlx5e_rl_refresh_channel_params()
1324 for (x = 0; x != rl->param.tx_channels_per_worker_def; x++) { in mlx5e_rl_refresh_channel_params()
1334 if (MLX5_CAP_GEN(rl->priv->mdev, cq_period_mode_modify)) { in mlx5e_rl_refresh_channel_params()
1335 mlx5_core_modify_cq_moderation_mode(rl->priv->mdev, &sq->cq.mcq, in mlx5e_rl_refresh_channel_params()
1336 rl->param.tx_coalesce_usecs, in mlx5e_rl_refresh_channel_params()
1337 rl->param.tx_coalesce_pkts, in mlx5e_rl_refresh_channel_params()
1338 rl->param.tx_coalesce_mode); in mlx5e_rl_refresh_channel_params()
1340 mlx5_core_modify_cq_moderation(rl->priv->mdev, &sq->cq.mcq, in mlx5e_rl_refresh_channel_params()
1341 rl->param.tx_coalesce_usecs, in mlx5e_rl_refresh_channel_params()
1342 rl->param.tx_coalesce_pkts); in mlx5e_rl_refresh_channel_params()
1350 mlx5e_rl_refresh_sq_inline(struct mlx5e_rl_priv_data *rl) in mlx5e_rl_refresh_sq_inline() argument
1355 for (y = 0; y != rl->param.tx_worker_threads_def; y++) { in mlx5e_rl_refresh_sq_inline()
1356 struct mlx5e_rl_worker *rlw = rl->workers + y; in mlx5e_rl_refresh_sq_inline()
1358 for (x = 0; x != rl->param.tx_channels_per_worker_def; x++) { in mlx5e_rl_refresh_sq_inline()
1376 mlx5e_rl_tx_limit_add(struct mlx5e_rl_priv_data *rl, uint64_t value) in mlx5e_rl_tx_limit_add() argument
1382 mlx5_rl_is_in_range(rl->priv->mdev, howmany(value, 1000), 0) == 0) in mlx5e_rl_tx_limit_add()
1385 MLX5E_RL_WLOCK(rl); in mlx5e_rl_tx_limit_add()
1389 for (x = 0; x != rl->param.tx_rates_def; x++) { in mlx5e_rl_tx_limit_add()
1390 if (rl->rate_limit_table[x] != value) in mlx5e_rl_tx_limit_add()
1397 if (x == rl->param.tx_rates_def) { in mlx5e_rl_tx_limit_add()
1398 for (x = 0; x != rl->param.tx_rates_def; x++) { in mlx5e_rl_tx_limit_add()
1399 if (rl->rate_limit_table[x] != 0) in mlx5e_rl_tx_limit_add()
1401 rl->rate_limit_table[x] = value; in mlx5e_rl_tx_limit_add()
1406 MLX5E_RL_WUNLOCK(rl); in mlx5e_rl_tx_limit_add()
1412 mlx5e_rl_tx_limit_clr(struct mlx5e_rl_priv_data *rl, uint64_t value) in mlx5e_rl_tx_limit_clr() argument
1420 MLX5E_RL_WLOCK(rl); in mlx5e_rl_tx_limit_clr()
1423 for (x = 0; x != rl->param.tx_rates_def; x++) { in mlx5e_rl_tx_limit_clr()
1424 if (rl->rate_limit_table[x] != value) in mlx5e_rl_tx_limit_clr()
1427 rl->rate_limit_table[x] = 0; in mlx5e_rl_tx_limit_clr()
1432 if (x == rl->param.tx_rates_def) in mlx5e_rl_tx_limit_clr()
1436 MLX5E_RL_WUNLOCK(rl); in mlx5e_rl_tx_limit_clr()
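mlx5e_rl_tx_limit_add() and mlx5e_rl_tx_limit_clr() treat a zero slot as unused: add rejects duplicates (lines 1389-1390) and claims the first free slot (lines 1397-1401), while clear zeroes the matching entry and fails when none is found (lines 1423-1432). A standalone model of both operations; the error codes here are plausible choices rather than the driver's, and the driver additionally range-checks the value against device limits and holds the write lock:

    #include <errno.h>
    #include <stdint.h>

    static int
    rate_table_add(uint64_t *table, unsigned nrates, uint64_t value)
    {
            unsigned x;

            for (x = 0; x != nrates; x++)       /* reject duplicates */
                    if (table[x] == value)
                            return (EEXIST);
            for (x = 0; x != nrates; x++) {     /* claim a free slot */
                    if (table[x] != 0)
                            continue;
                    table[x] = value;
                    return (0);
            }
            return (ENOMEM);                    /* table full */
    }

    static int
    rate_table_clr(uint64_t *table, unsigned nrates, uint64_t value)
    {
            unsigned x;

            for (x = 0; x != nrates; x++) {
                    if (table[x] != value)
                            continue;
                    table[x] = 0;
                    return (0);
            }
            return (ENOENT);                    /* no such rate */
    }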
1444 struct mlx5e_rl_priv_data *rl = arg1; in mlx5e_rl_sysctl_handler() local
1445 struct mlx5e_priv *priv = rl->priv; in mlx5e_rl_sysctl_handler()
1453 MLX5E_RL_RLOCK(rl); in mlx5e_rl_sysctl_handler()
1454 value = rl->param.arg[arg2]; in mlx5e_rl_sysctl_handler()
1455 MLX5E_RL_RUNLOCK(rl); in mlx5e_rl_sysctl_handler()
1460 value == rl->param.arg[arg2]) in mlx5e_rl_sysctl_handler()
1471 was_opened = rl->opened; in mlx5e_rl_sysctl_handler()
1476 if (value > rl->param.tx_worker_threads_max) in mlx5e_rl_sysctl_handler()
1477 value = rl->param.tx_worker_threads_max; in mlx5e_rl_sysctl_handler()
1482 rl->param.arg[arg2] = value; in mlx5e_rl_sysctl_handler()
1486 if (value > rl->param.tx_channels_per_worker_max) in mlx5e_rl_sysctl_handler()
1487 value = rl->param.tx_channels_per_worker_max; in mlx5e_rl_sysctl_handler()
1492 rl->param.arg[arg2] = value; in mlx5e_rl_sysctl_handler()
1496 if (value > rl->param.tx_rates_max) in mlx5e_rl_sysctl_handler()
1497 value = rl->param.tx_rates_max; in mlx5e_rl_sysctl_handler()
1502 rl->param.arg[arg2] = value; in mlx5e_rl_sysctl_handler()
1513 rl->param.arg[arg2] = value; in mlx5e_rl_sysctl_handler()
1517 error = mlx5e_rl_refresh_channel_params(rl); in mlx5e_rl_sysctl_handler()
1528 rl->param.arg[arg2] = value; in mlx5e_rl_sysctl_handler()
1532 error = mlx5e_rl_refresh_channel_params(rl); in mlx5e_rl_sysctl_handler()
1545 rl->param.arg[arg2] = value; in mlx5e_rl_sysctl_handler()
1552 error = mlx5e_rl_refresh_channel_params(rl); in mlx5e_rl_sysctl_handler()
1571 rl->param.arg[arg2] = value; in mlx5e_rl_sysctl_handler()
1574 mlx5e_rl_sync_tx_completion_fact(rl); in mlx5e_rl_sysctl_handler()
1587 rl->param.arg[arg2] = value; in mlx5e_rl_sysctl_handler()
1590 mlx5e_rl_sync_tx_completion_fact(rl); in mlx5e_rl_sysctl_handler()
1598 error = mlx5e_rl_tx_limit_add(rl, value); in mlx5e_rl_sysctl_handler()
1602 error = mlx5e_rl_tx_limit_clr(rl, value); in mlx5e_rl_sysctl_handler()
1607 if (value > rl->param.tx_allowed_deviation_max) in mlx5e_rl_sysctl_handler()
1608 value = rl->param.tx_allowed_deviation_max; in mlx5e_rl_sysctl_handler()
1609 else if (value < rl->param.tx_allowed_deviation_min) in mlx5e_rl_sysctl_handler()
1610 value = rl->param.tx_allowed_deviation_min; in mlx5e_rl_sysctl_handler()
1612 MLX5E_RL_WLOCK(rl); in mlx5e_rl_sysctl_handler()
1613 rl->param.arg[arg2] = value; in mlx5e_rl_sysctl_handler()
1614 MLX5E_RL_WUNLOCK(rl); in mlx5e_rl_sysctl_handler()
1619 if (value > rl->param.tx_burst_size_max) in mlx5e_rl_sysctl_handler()
1620 value = rl->param.tx_burst_size_max; in mlx5e_rl_sysctl_handler()
1621 else if (value < rl->param.tx_burst_size_min) in mlx5e_rl_sysctl_handler()
1622 value = rl->param.tx_burst_size_min; in mlx5e_rl_sysctl_handler()
1624 MLX5E_RL_WLOCK(rl); in mlx5e_rl_sysctl_handler()
1625 rl->param.arg[arg2] = value; in mlx5e_rl_sysctl_handler()
1626 MLX5E_RL_WUNLOCK(rl); in mlx5e_rl_sysctl_handler()
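The handler at lines 1444-1626 applies one recipe to every tunable: snapshot the value under the read lock, let sysctl_handle_64() apply the user's write, return early when nothing changed (lines 1453-1463), clamp to the parameter's bounds, then store under the write lock and refresh dependent state. That read-modify-clamp-store cycle in compact userspace form, with a plain argument standing in for sysctl_handle_64():

    #include <pthread.h>
    #include <stdint.h>

    struct tunable {
            pthread_rwlock_t *lock;
            uint64_t value, min, max;
    };

    /* Apply a proposed new value the way the sysctl handler does. */
    static void
    tunable_set(struct tunable *t, uint64_t newval)
    {
            uint64_t cur;

            pthread_rwlock_rdlock(t->lock);
            cur = t->value;
            pthread_rwlock_unlock(t->lock);

            if (newval == cur)          /* unchanged: nothing to do */
                    return;
            if (newval < t->min)        /* clamp into the valid range */
                    newval = t->min;
            else if (newval > t->max)
                    newval = t->max;

            pthread_rwlock_wrlock(t->lock);
            t->value = newval;
            pthread_rwlock_unlock(t->lock);
            /* ...then refresh dependent state, e.g. channel parameters. */
    }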
1638 mlx5e_rl_sysctl_add_u64_oid(struct mlx5e_rl_priv_data *rl, unsigned x, in mlx5e_rl_sysctl_add_u64_oid() argument
1648 SYSCTL_ADD_PROC(&rl->ctx, SYSCTL_CHILDREN(node), OID_AUTO, in mlx5e_rl_sysctl_add_u64_oid()
1650 CTLFLAG_MPSAFE, rl, x, &mlx5e_rl_sysctl_handler, "QU", desc); in mlx5e_rl_sysctl_add_u64_oid()
1655 SYSCTL_ADD_PROC(&rl->ctx, SYSCTL_CHILDREN(node), OID_AUTO, in mlx5e_rl_sysctl_add_u64_oid()
1657 CTLFLAG_MPSAFE, rl, x, &mlx5e_rl_sysctl_handler, "QU", desc); in mlx5e_rl_sysctl_add_u64_oid()
1661 SYSCTL_ADD_PROC(&rl->ctx, SYSCTL_CHILDREN(node), OID_AUTO, in mlx5e_rl_sysctl_add_u64_oid()
1663 CTLFLAG_MPSAFE, rl, x, &mlx5e_rl_sysctl_handler, "QU", desc); in mlx5e_rl_sysctl_add_u64_oid()
1669 mlx5e_rl_sysctl_add_stats_u64_oid(struct mlx5e_rl_priv_data *rl, unsigned x, in mlx5e_rl_sysctl_add_stats_u64_oid() argument
1673 SYSCTL_ADD_U64(&rl->ctx, SYSCTL_CHILDREN(node), OID_AUTO, name, in mlx5e_rl_sysctl_add_stats_u64_oid()
1674 CTLFLAG_RD, &rl->stats.arg[x], 0, desc); in mlx5e_rl_sysctl_add_stats_u64_oid()
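The statistics registered at line 1673 are plain 64-bit counters in an array, exported read-only through sysctl and bumped lock-free with atomic_add_64() elsewhere in the file (lines 282, 1188-1190); adding -1ULL decrements by unsigned wraparound (lines 709 and 731). The equivalent pattern in portable C11:

    #include <stdatomic.h>
    #include <stdint.h>

    enum { ST_OPEN_QUEUES, ST_ACTIVE_CONNS, ST_ALLOC_FAIL, ST_MAX };

    static _Atomic uint64_t stats[ST_MAX];

    /*
     * Lock-free counter update, analogous to atomic_add_64(ptr, delta).
     * A negative delta decrements via two's-complement wraparound, just
     * like passing -1ULL in the driver.
     */
    static inline void
    stat_add(unsigned which, int64_t delta)
    {
            atomic_fetch_add_explicit(&stats[which], (uint64_t)delta,
                memory_order_relaxed);
    }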