// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <net/if.h>

#include <ynl.h>
#include "netdev-user.h"

#include "main.h"

struct pp_stat {
	unsigned int ifc;

	struct {
		unsigned int cnt;
		size_t refs, bytes;
	} live[2];

	size_t alloc_slow, alloc_fast, recycle_ring, recycle_cache;
};

struct pp_stats_array {
	unsigned int i, max;
	struct pp_stat *s;
};

static struct pp_stat *find_ifc(struct pp_stats_array *a, unsigned int ifindex)
{
	unsigned int i;

	for (i = 0; i < a->i; i++) {
		if (a->s[i].ifc == ifindex)
			return &a->s[i];
	}

	a->i++;
	if (a->i == a->max) {
		a->max *= 2;
		a->s = reallocarray(a->s, a->max, sizeof(*a->s));
		if (!a->s) {
			p_err("failed to grow stats array");
			exit(1);
		}
	}
	/* Slots past the initially calloc()'d region are not zeroed by
	 * reallocarray(), so clear the new entry before handing it out.
	 */
	memset(&a->s[i], 0, sizeof(a->s[i]));
	a->s[i].ifc = ifindex;
	return &a->s[i];
}

static void count_pool(struct pp_stat *s, unsigned int l,
		       struct netdev_page_pool_get_rsp *pp)
{
	s->live[l].cnt++;
	if (pp->_present.inflight)
		s->live[l].refs += pp->inflight;
	if (pp->_present.inflight_mem)
		s->live[l].bytes += pp->inflight_mem;
}

/* We don't know how many pages are sitting in the cache and ring,
 * so we will under-count the recycling rate a bit.
 */
static void print_json_recycling_stats(struct pp_stat *s)
{
	double recycle;

	if (s->alloc_fast + s->alloc_slow) {
		recycle = (double)(s->recycle_ring + s->recycle_cache) /
			  (s->alloc_fast + s->alloc_slow) * 100;
		jsonw_float_field(json_wtr, "recycling_pct", recycle);
	}

	jsonw_name(json_wtr, "alloc");
	jsonw_start_object(json_wtr);
	jsonw_uint_field(json_wtr, "slow", s->alloc_slow);
	jsonw_uint_field(json_wtr, "fast", s->alloc_fast);
	jsonw_end_object(json_wtr);

	jsonw_name(json_wtr, "recycle");
	jsonw_start_object(json_wtr);
	jsonw_uint_field(json_wtr, "ring", s->recycle_ring);
	jsonw_uint_field(json_wtr, "cache", s->recycle_cache);
	jsonw_end_object(json_wtr);
}

static void print_plain_recycling_stats(struct pp_stat *s)
{
	double recycle;

	if (s->alloc_fast + s->alloc_slow) {
		recycle = (double)(s->recycle_ring + s->recycle_cache) /
			  (s->alloc_fast + s->alloc_slow) * 100;
		printf("recycling: %.1lf%% (alloc: %zu:%zu recycle: %zu:%zu)",
		       recycle, s->alloc_slow, s->alloc_fast,
		       s->recycle_ring, s->recycle_cache);
	}
}

static void print_json_stats(struct pp_stats_array *a)
{
	jsonw_start_array(json_wtr);

	for (unsigned int i = 0; i < a->i; i++) {
		char ifname[IF_NAMESIZE];
		struct pp_stat *s = &a->s[i];
		const char *name;

		jsonw_start_object(json_wtr);

		if (!s->ifc) {
			jsonw_string_field(json_wtr, "ifname", "<orphan>");
			jsonw_uint_field(json_wtr, "ifindex", 0);
		} else {
			name = if_indextoname(s->ifc, ifname);
			if (name)
				jsonw_string_field(json_wtr, "ifname", name);
			jsonw_uint_field(json_wtr, "ifindex", s->ifc);
		}

		jsonw_uint_field(json_wtr, "page_pools", s->live[1].cnt);
		jsonw_uint_field(json_wtr, "zombies", s->live[0].cnt);

		jsonw_name(json_wtr, "live");
		jsonw_start_object(json_wtr);
		jsonw_uint_field(json_wtr, "refs", s->live[1].refs);
		jsonw_uint_field(json_wtr, "bytes", s->live[1].bytes);
		jsonw_end_object(json_wtr);

		jsonw_name(json_wtr, "zombie");
		jsonw_start_object(json_wtr);
		jsonw_uint_field(json_wtr, "refs", s->live[0].refs);
		jsonw_uint_field(json_wtr, "bytes", s->live[0].bytes);
		jsonw_end_object(json_wtr);

		if (s->alloc_fast || s->alloc_slow)
			print_json_recycling_stats(s);

		jsonw_end_object(json_wtr);
	}

	jsonw_end_array(json_wtr);
}

static void print_plain_stats(struct pp_stats_array *a)
{
	for (unsigned int i = 0; i < a->i; i++) {
		char ifname[IF_NAMESIZE];
		struct pp_stat *s = &a->s[i];
		const char *name;

		if (!s->ifc) {
			printf("<orphan>\t");
		} else {
			name = if_indextoname(s->ifc, ifname);
			if (name)
				printf("%8s", name);
			printf("[%u]\t", s->ifc);
		}

		printf("page pools: %u (zombies: %u)\n",
		       s->live[1].cnt, s->live[0].cnt);
		printf("\t\trefs: %zu bytes: %zu (refs: %zu bytes: %zu)\n",
		       s->live[1].refs, s->live[1].bytes,
		       s->live[0].refs, s->live[0].bytes);

		if (s->alloc_fast || s->alloc_slow) {
			printf("\t\t");
			print_plain_recycling_stats(s);
			printf("\n");
		}
	}
}

static bool
find_pool_stat_in_list(struct netdev_page_pool_stats_get_list *pp_stats,
		       __u64 pool_id, struct pp_stat *pstat)
{
	ynl_dump_foreach(pp_stats, pp) {
		if (!pp->_present.info || !pp->info._present.id)
			continue;
		if (pp->info.id != pool_id)
			continue;

		memset(pstat, 0, sizeof(*pstat));
		if (pp->_present.alloc_fast)
			pstat->alloc_fast = pp->alloc_fast;
		if (pp->_present.alloc_refill)
			pstat->alloc_fast += pp->alloc_refill;
		if (pp->_present.alloc_slow)
			pstat->alloc_slow = pp->alloc_slow;
		if (pp->_present.recycle_ring)
			pstat->recycle_ring = pp->recycle_ring;
		if (pp->_present.recycle_cached)
			pstat->recycle_cache = pp->recycle_cached;
		return true;
	}
	return false;
}

static void
print_json_pool_list(struct netdev_page_pool_get_list *pools,
		     struct netdev_page_pool_stats_get_list *pp_stats,
		     bool zombies_only)
{
	jsonw_start_array(json_wtr);

	ynl_dump_foreach(pools, pp) {
		char ifname[IF_NAMESIZE];
		struct pp_stat pstat;
		const char *name;

		if (zombies_only && !pp->_present.detach_time)
			continue;

		jsonw_start_object(json_wtr);

		jsonw_uint_field(json_wtr, "id", pp->id);

		if (pp->_present.ifindex) {
			name = if_indextoname(pp->ifindex, ifname);
			if (name)
				jsonw_string_field(json_wtr, "ifname", name);
			jsonw_uint_field(json_wtr, "ifindex", pp->ifindex);
		}

		if (pp->_present.napi_id)
			jsonw_uint_field(json_wtr, "napi_id", pp->napi_id);

		if (pp->_present.inflight)
			jsonw_uint_field(json_wtr, "refs", pp->inflight);

		if (pp->_present.inflight_mem)
			jsonw_uint_field(json_wtr, "bytes", pp->inflight_mem);

		if (pp->_present.detach_time)
			jsonw_uint_field(json_wtr, "detach_time", pp->detach_time);

		if (pp->_present.dmabuf)
			jsonw_uint_field(json_wtr, "dmabuf", pp->dmabuf);

		if (find_pool_stat_in_list(pp_stats, pp->id, &pstat) &&
		    (pstat.alloc_fast || pstat.alloc_slow))
			print_json_recycling_stats(&pstat);

		jsonw_end_object(json_wtr);
	}

	jsonw_end_array(json_wtr);
}

static void
print_plain_pool_list(struct netdev_page_pool_get_list *pools,
		      struct netdev_page_pool_stats_get_list *pp_stats,
		      bool zombies_only)
{
	ynl_dump_foreach(pools, pp) {
		char ifname[IF_NAMESIZE];
		struct pp_stat pstat;
		const char *name;

		if (zombies_only && !pp->_present.detach_time)
			continue;

		printf("pool id: %llu", pp->id);

		if (pp->_present.ifindex) {
			name = if_indextoname(pp->ifindex, ifname);
			if (name)
printf(" dev: %s", name); 268 printf("[%u]", pp->ifindex); 269 } 270 271 if (pp->_present.napi_id) 272 printf(" napi: %llu", pp->napi_id); 273 274 printf("\n"); 275 276 if (pp->_present.inflight || pp->_present.inflight_mem) { 277 printf(" inflight:"); 278 if (pp->_present.inflight) 279 printf(" %llu pages", pp->inflight); 280 if (pp->_present.inflight_mem) 281 printf(" %llu bytes", pp->inflight_mem); 282 printf("\n"); 283 } 284 285 if (pp->_present.detach_time) 286 printf(" detached: %llu\n", pp->detach_time); 287 288 if (pp->_present.dmabuf) 289 printf(" dmabuf: %u\n", pp->dmabuf); 290 291 if (find_pool_stat_in_list(pp_stats, pp->id, &pstat) && 292 (pstat.alloc_fast || pstat.alloc_slow)) { 293 printf(" "); 294 print_plain_recycling_stats(&pstat); 295 printf("\n"); 296 } 297 } 298 } 299 300 static void aggregate_device_stats(struct pp_stats_array *a, 301 struct netdev_page_pool_get_list *pools, 302 struct netdev_page_pool_stats_get_list *pp_stats) 303 { 304 ynl_dump_foreach(pools, pp) { 305 struct pp_stat *s = find_ifc(a, pp->ifindex); 306 307 count_pool(s, 1, pp); 308 if (pp->_present.detach_time) 309 count_pool(s, 0, pp); 310 } 311 312 ynl_dump_foreach(pp_stats, pp) { 313 struct pp_stat *s = find_ifc(a, pp->info.ifindex); 314 315 if (pp->_present.alloc_fast) 316 s->alloc_fast += pp->alloc_fast; 317 if (pp->_present.alloc_refill) 318 s->alloc_fast += pp->alloc_refill; 319 if (pp->_present.alloc_slow) 320 s->alloc_slow += pp->alloc_slow; 321 if (pp->_present.recycle_ring) 322 s->recycle_ring += pp->recycle_ring; 323 if (pp->_present.recycle_cached) 324 s->recycle_cache += pp->recycle_cached; 325 } 326 } 327 328 static int do_stats(int argc, char **argv) 329 { 330 struct netdev_page_pool_stats_get_list *pp_stats; 331 struct netdev_page_pool_get_list *pools; 332 enum { 333 GROUP_BY_DEVICE, 334 GROUP_BY_POOL, 335 } group_by = GROUP_BY_DEVICE; 336 bool zombies_only = false; 337 struct pp_stats_array a = {}; 338 struct ynl_error yerr; 339 struct ynl_sock *ys; 340 int ret = 0; 341 342 /* Parse options */ 343 while (argc > 0) { 344 if (is_prefix(*argv, "group-by")) { 345 NEXT_ARG(); 346 347 if (!REQ_ARGS(1)) 348 return -1; 349 350 if (is_prefix(*argv, "device")) { 351 group_by = GROUP_BY_DEVICE; 352 } else if (is_prefix(*argv, "pp") || 353 is_prefix(*argv, "page-pool") || 354 is_prefix(*argv, "none")) { 355 group_by = GROUP_BY_POOL; 356 } else { 357 p_err("invalid group-by value '%s'", *argv); 358 return -1; 359 } 360 NEXT_ARG(); 361 } else if (is_prefix(*argv, "zombies")) { 362 zombies_only = true; 363 group_by = GROUP_BY_POOL; 364 NEXT_ARG(); 365 } else { 366 p_err("unknown option '%s'", *argv); 367 return -1; 368 } 369 } 370 371 ys = ynl_sock_create(&ynl_netdev_family, &yerr); 372 if (!ys) { 373 p_err("YNL: %s", yerr.msg); 374 return -1; 375 } 376 377 pools = netdev_page_pool_get_dump(ys); 378 if (!pools) { 379 p_err("failed to get page pools: %s", ys->err.msg); 380 ret = -1; 381 goto exit_close; 382 } 383 384 pp_stats = netdev_page_pool_stats_get_dump(ys); 385 if (!pp_stats) { 386 p_err("failed to get page pool stats: %s", ys->err.msg); 387 ret = -1; 388 goto exit_free_pp_list; 389 } 390 391 /* If grouping by pool, print individual pools */ 392 if (group_by == GROUP_BY_POOL) { 393 if (json_output) 394 print_json_pool_list(pools, pp_stats, zombies_only); 395 else 396 print_plain_pool_list(pools, pp_stats, zombies_only); 397 } else { 398 /* Aggregated stats mode (group-by device) */ 399 a.max = 64; 400 a.s = calloc(a.max, sizeof(*a.s)); 401 if (!a.s) { 402 p_err("failed to allocate stats 
array"); 403 ret = -1; 404 goto exit_free_stats_list; 405 } 406 407 aggregate_device_stats(&a, pools, pp_stats); 408 409 if (json_output) 410 print_json_stats(&a); 411 else 412 print_plain_stats(&a); 413 414 free(a.s); 415 } 416 417 exit_free_stats_list: 418 netdev_page_pool_stats_get_list_free(pp_stats); 419 exit_free_pp_list: 420 netdev_page_pool_get_list_free(pools); 421 exit_close: 422 ynl_sock_destroy(ys); 423 return ret; 424 } 425 426 static int do_help(int argc __attribute__((unused)), 427 char **argv __attribute__((unused))) 428 { 429 if (json_output) { 430 jsonw_null(json_wtr); 431 return 0; 432 } 433 434 fprintf(stderr, 435 "Usage: %s page-pool { COMMAND | help }\n" 436 " %s page-pool stats [ OPTIONS ]\n" 437 "\n" 438 " OPTIONS := { group-by { device | page-pool | none } | zombies }\n" 439 "\n" 440 " stats - Display page pool statistics\n" 441 " stats group-by device - Group statistics by network device (default)\n" 442 " stats group-by page-pool | pp | none\n" 443 " - Show individual page pool details (no grouping)\n" 444 " stats zombies - Show only zombie page pools (detached but with\n" 445 " pages in flight). Implies group-by page-pool.\n" 446 "", 447 bin_name, bin_name); 448 449 return 0; 450 } 451 452 static const struct cmd page_pool_cmds[] = { 453 { "help", do_help }, 454 { "stats", do_stats }, 455 { 0 } 456 }; 457 458 int do_page_pool(int argc, char **argv) 459 { 460 return cmd_select(page_pool_cmds, argc, argv, do_help); 461 } 462