// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/core/gen_stats.c
 *
 * Authors:  Thomas Graf <tgraf@suug.ch>
 *           Jamal Hadi Salim
 *           Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * See Documentation/networking/gen_stats.rst
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/socket.h>
#include <linux/rtnetlink.h>
#include <linux/gen_stats.h>
#include <net/netlink.h>
#include <net/gen_stats.h>
#include <net/sch_generic.h>

static inline int
gnet_stats_copy(struct gnet_dump *d, int type, void *buf, int size, int padattr)
{
	if (nla_put_64bit(d->skb, type, size, buf, padattr))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	if (d->lock)
		spin_unlock_bh(d->lock);
	kfree(d->xstats);
	d->xstats = NULL;
	d->xstats_len = 0;
	return -1;
}

/**
 * gnet_stats_start_copy_compat - start dumping procedure in compatibility mode
 * @skb: socket buffer to put statistics TLVs into
 * @type: TLV type for top level statistic TLV
 * @tc_stats_type: TLV type for backward compatibility struct tc_stats TLV
 * @xstats_type: TLV type for backward compatibility xstats TLV
 * @lock: statistics lock
 * @d: dumping handle
 * @padattr: padding attribute
 *
 * Initializes the dumping handle, grabs the statistic lock and appends
 * an empty TLV header to the socket buffer for use as a container for all
 * other statistic TLVs.
 *
 * The dumping handle is marked to be in backward compatibility mode telling
 * all gnet_stats_copy_XXX() functions to fill a local copy of struct tc_stats.
 *
 * Returns 0 on success or -1 if the room in the socket buffer was not sufficient.
 */
int
gnet_stats_start_copy_compat(struct sk_buff *skb, int type, int tc_stats_type,
			     int xstats_type, spinlock_t *lock,
			     struct gnet_dump *d, int padattr)
	__acquires(lock)
{
	memset(d, 0, sizeof(*d));

	if (type)
		d->tail = (struct nlattr *)skb_tail_pointer(skb);
	d->skb = skb;
	d->compat_tc_stats = tc_stats_type;
	d->compat_xstats = xstats_type;
	d->padattr = padattr;
	if (lock) {
		d->lock = lock;
		spin_lock_bh(lock);
	}
	if (d->tail) {
		int ret = gnet_stats_copy(d, type, NULL, 0, padattr);

		/* The initial attribute added in gnet_stats_copy() may be
		 * preceded by a padding attribute, in which case d->tail will
		 * end up pointing at the padding instead of the real attribute.
		 * Fix this so gnet_stats_finish_copy() adjusts the length of
		 * the right attribute.
		 */
		if (ret == 0 && d->tail->nla_type == padattr)
			d->tail = (struct nlattr *)((char *)d->tail +
						    NLA_ALIGN(d->tail->nla_len));
		return ret;
	}

	return 0;
}
EXPORT_SYMBOL(gnet_stats_start_copy_compat);

/**
 * gnet_stats_start_copy - start dumping procedure
 * @skb: socket buffer to put statistics TLVs into
 * @type: TLV type for top level statistic TLV
 * @lock: statistics lock
 * @d: dumping handle
 * @padattr: padding attribute
 *
 * Initializes the dumping handle, grabs the statistic lock and appends
 * an empty TLV header to the socket buffer for use as a container for all
 * other statistic TLVs.
 *
 * Returns 0 on success or -1 if the room in the socket buffer was not sufficient.
 */
int
gnet_stats_start_copy(struct sk_buff *skb, int type, spinlock_t *lock,
		      struct gnet_dump *d, int padattr)
{
	return gnet_stats_start_copy_compat(skb, type, 0, 0, lock, d, padattr);
}
EXPORT_SYMBOL(gnet_stats_start_copy);
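/*
 * Illustrative sketch (not part of the original file): a typical dump
 * callback brackets the gnet_stats_copy_*() helpers between
 * gnet_stats_start_copy() and gnet_stats_finish_copy().  The qdisc field
 * names used below (q->cpu_bstats, q->bstats) are assumptions made for
 * the example only.
 *
 *	struct gnet_dump d;
 *
 *	if (gnet_stats_start_copy(skb, TCA_STATS2, NULL, &d, TCA_PAD) < 0)
 *		return -1;
 *	if (gnet_stats_copy_basic(NULL, &d, q->cpu_bstats, &q->bstats) < 0)
 *		return -1;
 *	return gnet_stats_finish_copy(&d);
 */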
/* Must not be inlined, due to u64_stats seqcount_t lockdep key */
void gnet_stats_basic_sync_init(struct gnet_stats_basic_sync *b)
{
	u64_stats_set(&b->bytes, 0);
	u64_stats_set(&b->packets, 0);
	u64_stats_init(&b->syncp);
}
EXPORT_SYMBOL(gnet_stats_basic_sync_init);

static void gnet_stats_add_basic_cpu(struct gnet_stats_basic_sync *bstats,
				     struct gnet_stats_basic_sync __percpu *cpu)
{
	u64 t_bytes = 0, t_packets = 0;
	int i;

	for_each_possible_cpu(i) {
		struct gnet_stats_basic_sync *bcpu = per_cpu_ptr(cpu, i);
		unsigned int start;
		u64 bytes, packets;

		do {
			start = u64_stats_fetch_begin_irq(&bcpu->syncp);
			bytes = u64_stats_read(&bcpu->bytes);
			packets = u64_stats_read(&bcpu->packets);
		} while (u64_stats_fetch_retry_irq(&bcpu->syncp, start));

		t_bytes += bytes;
		t_packets += packets;
	}
	_bstats_update(bstats, t_bytes, t_packets);
}

void gnet_stats_add_basic(const seqcount_t *running,
			  struct gnet_stats_basic_sync *bstats,
			  struct gnet_stats_basic_sync __percpu *cpu,
			  struct gnet_stats_basic_sync *b)
{
	unsigned int seq;
	u64 bytes = 0;
	u64 packets = 0;

	if (cpu) {
		gnet_stats_add_basic_cpu(bstats, cpu);
		return;
	}
	do {
		if (running)
			seq = read_seqcount_begin(running);
		bytes = u64_stats_read(&b->bytes);
		packets = u64_stats_read(&b->packets);
	} while (running && read_seqcount_retry(running, seq));

	_bstats_update(bstats, bytes, packets);
}
EXPORT_SYMBOL(gnet_stats_add_basic);

static int
___gnet_stats_copy_basic(const seqcount_t *running,
			 struct gnet_dump *d,
			 struct gnet_stats_basic_sync __percpu *cpu,
			 struct gnet_stats_basic_sync *b,
			 int type)
{
	struct gnet_stats_basic_sync bstats;
	u64 bstats_bytes, bstats_packets;

	gnet_stats_basic_sync_init(&bstats);
	gnet_stats_add_basic(running, &bstats, cpu, b);

	bstats_bytes = u64_stats_read(&bstats.bytes);
	bstats_packets = u64_stats_read(&bstats.packets);

	if (d->compat_tc_stats && type == TCA_STATS_BASIC) {
		d->tc_stats.bytes = bstats_bytes;
		d->tc_stats.packets = bstats_packets;
	}

	if (d->tail) {
		struct gnet_stats_basic sb;
		int res;

		memset(&sb, 0, sizeof(sb));
		sb.bytes = bstats_bytes;
		sb.packets = bstats_packets;
		res = gnet_stats_copy(d, type, &sb, sizeof(sb), TCA_STATS_PAD);
		if (res < 0 || sb.packets == bstats_packets)
			return res;
		/* emit 64bit stats only if needed */
		return gnet_stats_copy(d, TCA_STATS_PKT64, &bstats_packets,
				       sizeof(bstats_packets), TCA_STATS_PAD);
	}
	return 0;
}

/**
 * gnet_stats_copy_basic - copy basic statistics into statistic TLV
 * @running: seqcount_t pointer
 * @d: dumping handle
 * @cpu: copy statistic per cpu
 * @b: basic statistics
 *
 * Appends the basic statistics to the top level TLV created by
 * gnet_stats_start_copy().
 *
 * Returns 0 on success or -1 with the statistic lock released
 * if the room in the socket buffer was not sufficient.
 */
int
gnet_stats_copy_basic(const seqcount_t *running,
		      struct gnet_dump *d,
		      struct gnet_stats_basic_sync __percpu *cpu,
		      struct gnet_stats_basic_sync *b)
{
	return ___gnet_stats_copy_basic(running, d, cpu, b,
					TCA_STATS_BASIC);
}
EXPORT_SYMBOL(gnet_stats_copy_basic);
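/*
 * Illustrative sketch (assumption, not taken from this file): producers
 * update a gnet_stats_basic_sync pair under the u64_stats sequence counter
 * set up by gnet_stats_basic_sync_init(), so the readers above can fetch a
 * consistent bytes/packets snapshot.  The in-tree helpers for this are
 * _bstats_update()/bstats_update() in net/sch_generic.h; expanded, the
 * update is roughly:
 *
 *	u64_stats_update_begin(&b->syncp);
 *	u64_stats_add(&b->bytes, skb->len);
 *	u64_stats_inc(&b->packets);
 *	u64_stats_update_end(&b->syncp);
 */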
/**
 * gnet_stats_copy_basic_hw - copy basic hw statistics into statistic TLV
 * @running: seqcount_t pointer
 * @d: dumping handle
 * @cpu: copy statistic per cpu
 * @b: basic statistics
 *
 * Appends the basic statistics to the top level TLV created by
 * gnet_stats_start_copy().
 *
 * Returns 0 on success or -1 with the statistic lock released
 * if the room in the socket buffer was not sufficient.
 */
int
gnet_stats_copy_basic_hw(const seqcount_t *running,
			 struct gnet_dump *d,
			 struct gnet_stats_basic_sync __percpu *cpu,
			 struct gnet_stats_basic_sync *b)
{
	return ___gnet_stats_copy_basic(running, d, cpu, b,
					TCA_STATS_BASIC_HW);
}
EXPORT_SYMBOL(gnet_stats_copy_basic_hw);

/**
 * gnet_stats_copy_rate_est - copy rate estimator statistics into statistics TLV
 * @d: dumping handle
 * @rate_est: rate estimator
 *
 * Appends the rate estimator statistics to the top level TLV created by
 * gnet_stats_start_copy().
 *
 * Returns 0 on success or -1 with the statistic lock released
 * if the room in the socket buffer was not sufficient.
 */
int
gnet_stats_copy_rate_est(struct gnet_dump *d,
			 struct net_rate_estimator __rcu **rate_est)
{
	struct gnet_stats_rate_est64 sample;
	struct gnet_stats_rate_est est;
	int res;

	if (!gen_estimator_read(rate_est, &sample))
		return 0;
	est.bps = min_t(u64, UINT_MAX, sample.bps);
	/* we have some time before reaching 2^32 packets per second */
	est.pps = sample.pps;

	if (d->compat_tc_stats) {
		d->tc_stats.bps = est.bps;
		d->tc_stats.pps = est.pps;
	}

	if (d->tail) {
		res = gnet_stats_copy(d, TCA_STATS_RATE_EST, &est, sizeof(est),
				      TCA_STATS_PAD);
		if (res < 0 || est.bps == sample.bps)
			return res;
		/* emit 64bit stats only if needed */
		return gnet_stats_copy(d, TCA_STATS_RATE_EST64, &sample,
				       sizeof(sample), TCA_STATS_PAD);
	}

	return 0;
}
EXPORT_SYMBOL(gnet_stats_copy_rate_est);

static void gnet_stats_add_queue_cpu(struct gnet_stats_queue *qstats,
				     const struct gnet_stats_queue __percpu *q)
{
	int i;

	for_each_possible_cpu(i) {
		const struct gnet_stats_queue *qcpu = per_cpu_ptr(q, i);

		qstats->qlen += qcpu->backlog;
		qstats->backlog += qcpu->backlog;
		qstats->drops += qcpu->drops;
		qstats->requeues += qcpu->requeues;
		qstats->overlimits += qcpu->overlimits;
	}
}

void gnet_stats_add_queue(struct gnet_stats_queue *qstats,
			  const struct gnet_stats_queue __percpu *cpu,
			  const struct gnet_stats_queue *q)
{
	if (cpu) {
		gnet_stats_add_queue_cpu(qstats, cpu);
	} else {
		qstats->qlen += q->qlen;
		qstats->backlog += q->backlog;
		qstats->drops += q->drops;
		qstats->requeues += q->requeues;
		qstats->overlimits += q->overlimits;
	}
}
EXPORT_SYMBOL(gnet_stats_add_queue);

/**
 * gnet_stats_copy_queue - copy queue statistics into statistics TLV
 * @d: dumping handle
 * @cpu_q: per cpu queue statistics
 * @q: queue statistics
 * @qlen: queue length statistics
 *
 * Appends the queue statistics to the top level TLV created by
 * gnet_stats_start_copy(). Uses per cpu queue statistics if
 * they are available.
 *
 * Returns 0 on success or -1 with the statistic lock released
 * if the room in the socket buffer was not sufficient.
 */
int
gnet_stats_copy_queue(struct gnet_dump *d,
		      struct gnet_stats_queue __percpu *cpu_q,
		      struct gnet_stats_queue *q, __u32 qlen)
{
	struct gnet_stats_queue qstats = {0};

	gnet_stats_add_queue(&qstats, cpu_q, q);
	qstats.qlen = qlen;

	if (d->compat_tc_stats) {
		d->tc_stats.drops = qstats.drops;
		d->tc_stats.qlen = qstats.qlen;
		d->tc_stats.backlog = qstats.backlog;
		d->tc_stats.overlimits = qstats.overlimits;
	}

	if (d->tail)
		return gnet_stats_copy(d, TCA_STATS_QUEUE,
				       &qstats, sizeof(qstats),
				       TCA_STATS_PAD);

	return 0;
}
EXPORT_SYMBOL(gnet_stats_copy_queue);
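/*
 * Illustrative sketch (assumption, not taken from this file): a qdisc dump
 * would typically combine the rate estimator and queue helpers, passing the
 * queue length explicitly since gnet_stats_copy_queue() overwrites
 * qstats.qlen with the caller-supplied value.  The field names q->rate_est,
 * q->cpu_qstats and q->qstats are assumptions for the example;
 * qdisc_qlen_sum() comes from net/sch_generic.h.
 *
 *	if (gnet_stats_copy_rate_est(&d, &q->rate_est) < 0 ||
 *	    gnet_stats_copy_queue(&d, q->cpu_qstats, &q->qstats,
 *				  qdisc_qlen_sum(sch)) < 0)
 *		return -1;
 */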
/**
 * gnet_stats_copy_app - copy application specific statistics into statistics TLV
 * @d: dumping handle
 * @st: application specific statistics data
 * @len: length of data
 *
 * Appends the application specific statistics to the top level TLV created by
 * gnet_stats_start_copy() and remembers the data for XSTATS if the dumping
 * handle is in backward compatibility mode.
 *
 * Returns 0 on success or -1 with the statistic lock released
 * if the room in the socket buffer was not sufficient.
 */
int
gnet_stats_copy_app(struct gnet_dump *d, void *st, int len)
{
	if (d->compat_xstats) {
		d->xstats = kmemdup(st, len, GFP_ATOMIC);
		if (!d->xstats)
			goto err_out;
		d->xstats_len = len;
	}

	if (d->tail)
		return gnet_stats_copy(d, TCA_STATS_APP, st, len,
				       TCA_STATS_PAD);

	return 0;

err_out:
	if (d->lock)
		spin_unlock_bh(d->lock);
	d->xstats_len = 0;
	return -1;
}
EXPORT_SYMBOL(gnet_stats_copy_app);

/**
 * gnet_stats_finish_copy - finish dumping procedure
 * @d: dumping handle
 *
 * Corrects the length of the top level TLV to include all TLVs added
 * by gnet_stats_copy_XXX() calls. Adds the backward compatibility TLVs
 * if gnet_stats_start_copy_compat() was used and releases the statistics
 * lock.
 *
 * Returns 0 on success or -1 with the statistic lock released
 * if the room in the socket buffer was not sufficient.
 */
int
gnet_stats_finish_copy(struct gnet_dump *d)
{
	if (d->tail)
		d->tail->nla_len = skb_tail_pointer(d->skb) - (u8 *)d->tail;

	if (d->compat_tc_stats)
		if (gnet_stats_copy(d, d->compat_tc_stats, &d->tc_stats,
				    sizeof(d->tc_stats), d->padattr) < 0)
			return -1;

	if (d->compat_xstats && d->xstats) {
		if (gnet_stats_copy(d, d->compat_xstats, d->xstats,
				    d->xstats_len, d->padattr) < 0)
			return -1;
	}

	if (d->lock)
		spin_unlock_bh(d->lock);
	kfree(d->xstats);
	d->xstats = NULL;
	d->xstats_len = 0;
	return 0;
}
EXPORT_SYMBOL(gnet_stats_finish_copy);
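/*
 * Illustrative sketch (assumption, not taken from this file): a qdisc with
 * private statistics would typically end its dump with gnet_stats_copy_app()
 * followed by gnet_stats_finish_copy(); when the dump was started with
 * gnet_stats_start_copy_compat(), gnet_stats_finish_copy() re-emits the same
 * data as the legacy xstats TLV.  struct my_qdisc_xstats and q->stats are
 * assumptions for the example.
 *
 *	struct my_qdisc_xstats xst = q->stats;
 *
 *	if (gnet_stats_copy_app(&d, &xst, sizeof(xst)) < 0)
 *		return -1;
 *	return gnet_stats_finish_copy(&d);
 */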