// SPDX-License-Identifier: GPL-2.0
/*
 * Arm Statistical Profiling Extensions (SPE) support
 * Copyright (c) 2017-2018, Arm Ltd.
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/log2.h>
#include <linux/string.h>
#include <linux/zalloc.h>
#include <time.h>

#include "../../../util/cpumap.h"
#include "../../../util/event.h"
#include "../../../util/evsel.h"
#include "../../../util/evsel_config.h"
#include "../../../util/evlist.h"
#include "../../../util/session.h"
#include <internal/lib.h> // page_size
#include "../../../util/pmu.h"
#include "../../../util/debug.h"
#include "../../../util/auxtrace.h"
#include "../../../util/record.h"
#include "../../../util/header.h"
#include "../../../util/arm-spe.h"
#include <tools/libc_compat.h> // reallocarray

#define ARM_SPE_CPU_MAGIC		0x1010101010101010ULL

#define KiB(x) ((x) * 1024)
#define MiB(x) ((x) * 1024 * 1024)

struct arm_spe_recording {
	struct auxtrace_record	itr;
	struct perf_pmu		*arm_spe_pmu;
	struct evlist		*evlist;
	int			wrapped_cnt;
	bool			*wrapped;
};

/* Iterate the config list to detect whether the "freq" parameter is set */
static bool arm_spe_is_set_freq(struct evsel *evsel)
{
	struct evsel_config_term *term;

	list_for_each_entry(term, &evsel->config_terms, list) {
		if (term->type == EVSEL__CONFIG_TERM_FREQ)
			return true;
	}

	return false;
}
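
/*
 * Illustrative note (an assumption about command-line usage, not encoded in
 * this file): a per-event "freq" term is attached by a command such as
 *
 *	perf record -e arm_spe_0/freq=1000/ ...
 *
 * whereas a plain 'perf record -F 1000' is visible via opts->user_freq. Both
 * forms are rejected for SPE in arm_spe_recording_options() below.
 */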

/*
 * arm_spe_find_cpus() returns a new cpu map, and the caller should invoke
 * perf_cpu_map__put() to release the map after use.
 */
static struct perf_cpu_map *arm_spe_find_cpus(struct evlist *evlist)
{
	struct perf_cpu_map *event_cpus = evlist->core.user_requested_cpus;
	struct perf_cpu_map *online_cpus = perf_cpu_map__new_online_cpus();
	struct perf_cpu_map *intersect_cpus;

	/* The cpu map is not "any" CPU, so we have specific CPUs to work with */
	if (!perf_cpu_map__has_any_cpu(event_cpus)) {
		intersect_cpus = perf_cpu_map__intersect(event_cpus, online_cpus);
		perf_cpu_map__put(online_cpus);
	/* The event can be "any" CPU, so count all online CPUs. */
	} else {
		intersect_cpus = online_cpus;
	}

	return intersect_cpus;
}

static size_t
arm_spe_info_priv_size(struct auxtrace_record *itr __maybe_unused,
		       struct evlist *evlist)
{
	struct perf_cpu_map *cpu_map = arm_spe_find_cpus(evlist);
	size_t size;

	if (!cpu_map)
		return 0;

	size = ARM_SPE_AUXTRACE_PRIV_MAX +
	       ARM_SPE_CPU_PRIV_MAX * perf_cpu_map__nr(cpu_map);
	size *= sizeof(u64);

	perf_cpu_map__put(cpu_map);
	return size;
}

static int arm_spe_save_cpu_header(struct auxtrace_record *itr,
				   struct perf_cpu cpu, __u64 data[])
{
	struct arm_spe_recording *sper =
			container_of(itr, struct arm_spe_recording, itr);
	struct perf_pmu *pmu = NULL;
	char *cpuid = NULL;
	u64 val;

	/* Read CPU MIDR */
	cpuid = get_cpuid_allow_env_override(cpu);
	if (!cpuid)
		return -ENOMEM;
	val = strtol(cpuid, NULL, 16);

	data[ARM_SPE_MAGIC] = ARM_SPE_CPU_MAGIC;
	data[ARM_SPE_CPU] = cpu.cpu;
	data[ARM_SPE_CPU_NR_PARAMS] = ARM_SPE_CPU_PRIV_MAX - ARM_SPE_CPU_MIDR;
	data[ARM_SPE_CPU_MIDR] = val;

	/* Find the associated Arm SPE PMU for the CPU */
	if (perf_cpu_map__has(sper->arm_spe_pmu->cpus, cpu))
		pmu = sper->arm_spe_pmu;

	if (!pmu) {
		/* No Arm SPE PMU was found */
		data[ARM_SPE_CPU_PMU_TYPE] = ULLONG_MAX;
		data[ARM_SPE_CAP_MIN_IVAL] = 0;
	} else {
		data[ARM_SPE_CPU_PMU_TYPE] = pmu->type;

		if (perf_pmu__scan_file(pmu, "caps/min_interval", "%lu", &val) != 1)
			val = 0;
		data[ARM_SPE_CAP_MIN_IVAL] = val;
	}

	free(cpuid);
	return ARM_SPE_CPU_PRIV_MAX;
}

static int arm_spe_info_fill(struct auxtrace_record *itr,
			     struct perf_session *session,
			     struct perf_record_auxtrace_info *auxtrace_info,
			     size_t priv_size)
{
	int i, ret;
	size_t offset;
	struct arm_spe_recording *sper =
			container_of(itr, struct arm_spe_recording, itr);
	struct perf_pmu *arm_spe_pmu = sper->arm_spe_pmu;
	struct perf_cpu_map *cpu_map;
	struct perf_cpu cpu;
	__u64 *data;

	if (priv_size != arm_spe_info_priv_size(itr, session->evlist))
		return -EINVAL;

	if (!session->evlist->core.nr_mmaps)
		return -EINVAL;

	cpu_map = arm_spe_find_cpus(session->evlist);
	if (!cpu_map)
		return -EINVAL;

	auxtrace_info->type = PERF_AUXTRACE_ARM_SPE;
	auxtrace_info->priv[ARM_SPE_HEADER_VERSION] = ARM_SPE_HEADER_CURRENT_VERSION;
	auxtrace_info->priv[ARM_SPE_HEADER_SIZE] =
		ARM_SPE_AUXTRACE_PRIV_MAX - ARM_SPE_HEADER_VERSION;
	auxtrace_info->priv[ARM_SPE_PMU_TYPE_V2] = arm_spe_pmu->type;
	auxtrace_info->priv[ARM_SPE_CPUS_NUM] = perf_cpu_map__nr(cpu_map);

	offset = ARM_SPE_AUXTRACE_PRIV_MAX;
	perf_cpu_map__for_each_cpu(cpu, i, cpu_map) {
		assert(offset < priv_size);
		data = &auxtrace_info->priv[offset];
		ret = arm_spe_save_cpu_header(itr, cpu, data);
		if (ret < 0)
			goto out;
		offset += ret;
	}

	ret = 0;
out:
	perf_cpu_map__put(cpu_map);
	return ret;
}
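
/*
 * Illustrative layout of the auxtrace_info private data filled in above (a
 * reader's sketch, not used by the code; the index names come from the
 * included util/arm-spe.h). For a hypothetical session tracing two CPUs:
 *
 *	priv[ARM_SPE_HEADER_VERSION]	header version
 *	priv[ARM_SPE_HEADER_SIZE]	remaining global header size, in u64 words
 *	priv[ARM_SPE_PMU_TYPE_V2]	type of the arm_spe PMU
 *	priv[ARM_SPE_CPUS_NUM]		2
 *	-- then one block per CPU, written by arm_spe_save_cpu_header():
 *	ARM_SPE_MAGIC			ARM_SPE_CPU_MAGIC
 *	ARM_SPE_CPU			logical CPU number
 *	ARM_SPE_CPU_NR_PARAMS		number of per-CPU parameters that follow
 *	ARM_SPE_CPU_MIDR		MIDR value parsed from the cpuid string
 *	ARM_SPE_CPU_PMU_TYPE		PMU type, or ULLONG_MAX if no SPE PMU
 *	ARM_SPE_CAP_MIN_IVAL		caps/min_interval, or 0 if unavailable
 */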

static void
arm_spe_snapshot_resolve_auxtrace_defaults(struct record_opts *opts,
					   bool privileged)
{
	/*
	 * The default snapshot size is the auxtrace mmap size. If neither auxtrace mmap size nor
	 * snapshot size is specified, then the default is 4MiB for privileged users, 128KiB for
	 * unprivileged users.
	 *
	 * The default auxtrace mmap size is 4MiB/page_size for privileged users, 128KiB for
	 * unprivileged users. If an unprivileged user does not specify mmap pages, the mmap pages
	 * will be reduced from the default 512KiB/page_size to 256KiB/page_size, otherwise the
	 * user is likely to get an error as they exceed their mlock limit.
	 *
	 * For example, with a 4KiB page size an unprivileged user who gave no sizes ends up with
	 * auxtrace_mmap_pages = KiB(128) / 4096 = 32 and mmap_pages = KiB(256) / 4096 = 64.
	 */

	/*
	 * No size was given to '-S' or '-m,', so go with the default.
	 */
	if (!opts->auxtrace_snapshot_size && !opts->auxtrace_mmap_pages) {
		if (privileged) {
			opts->auxtrace_mmap_pages = MiB(4) / page_size;
		} else {
			opts->auxtrace_mmap_pages = KiB(128) / page_size;
			if (opts->mmap_pages == UINT_MAX)
				opts->mmap_pages = KiB(256) / page_size;
		}
	} else if (!opts->auxtrace_mmap_pages && !privileged && opts->mmap_pages == UINT_MAX) {
		opts->mmap_pages = KiB(256) / page_size;
	}

	/*
	 * '-m,xyz' was specified but no snapshot size, so make the snapshot size as big as the
	 * auxtrace mmap area.
	 */
	if (!opts->auxtrace_snapshot_size)
		opts->auxtrace_snapshot_size = opts->auxtrace_mmap_pages * (size_t)page_size;

	/*
	 * '-Sxyz' was specified but no auxtrace mmap area, so make the auxtrace mmap area big
	 * enough to fit the requested snapshot size.
	 */
	if (!opts->auxtrace_mmap_pages) {
		size_t sz = opts->auxtrace_snapshot_size;

		sz = round_up(sz, page_size) / page_size;
		opts->auxtrace_mmap_pages = roundup_pow_of_two(sz);
	}
}

static __u64 arm_spe_pmu__sample_period(const struct perf_pmu *arm_spe_pmu)
{
	static __u64 sample_period;

	if (sample_period)
		return sample_period;

	/*
	 * If the kernel driver doesn't advertise a minimum,
	 * use the maximum allowed by PMSIDR_EL1.INTERVAL.
	 */
	if (perf_pmu__scan_file(arm_spe_pmu, "caps/min_interval", "%llu",
				&sample_period) != 1) {
		pr_debug("arm_spe driver doesn't advertise a min. interval. Using 4096\n");
		sample_period = 4096;
	}
	return sample_period;
}

static void arm_spe_setup_evsel(struct evsel *evsel, struct perf_cpu_map *cpus)
{
	u64 bit;

	evsel->core.attr.freq = 0;
	evsel->core.attr.sample_period = arm_spe_pmu__sample_period(evsel->pmu);
	evsel->needs_auxtrace_mmap = true;

	/*
	 * To obtain the auxtrace buffer file descriptor, the auxtrace event
	 * must come first.
	 */
	evlist__to_front(evsel->evlist, evsel);

	/*
	 * In the case of per-cpu mmaps, sample the CPU for the AUX event;
	 * also enable timestamp tracing for sample correlation.
	 */
	if (!perf_cpu_map__is_any_cpu_or_is_empty(cpus)) {
		evsel__set_sample_bit(evsel, CPU);
		evsel__set_config_if_unset(evsel->pmu, evsel, "ts_enable", 1);
	}

	/*
	 * Set this only so that perf report knows that SPE generates memory info. It has no effect
	 * on the opening of the event or the SPE data produced.
	 */
	evsel__set_sample_bit(evsel, DATA_SRC);

	/*
	 * The PHYS_ADDR flag does not affect the driver behaviour, it is used to
	 * indicate that the resulting output's SPE samples contain physical addresses
	 * where applicable.
	 */
	bit = perf_pmu__format_bits(evsel->pmu, "pa_enable");
	if (evsel->core.attr.config & bit)
		evsel__set_sample_bit(evsel, PHYS_ADDR);
}

static int arm_spe_setup_aux_buffer(struct record_opts *opts)
{
	bool privileged = perf_event_paranoid_check(-1);

	/*
	 * We are in snapshot mode.
	 */
	if (opts->auxtrace_snapshot_mode) {
		/*
		 * Command arguments '-Sxyz' and/or '-m,xyz' are missing, so fill those in with
		 * default values.
		 */
		if (!opts->auxtrace_snapshot_size || !opts->auxtrace_mmap_pages)
			arm_spe_snapshot_resolve_auxtrace_defaults(opts, privileged);

		/*
		 * The snapshot size can't be bigger than the auxtrace area.
		 */
		if (opts->auxtrace_snapshot_size > opts->auxtrace_mmap_pages * (size_t)page_size) {
			pr_err("Snapshot size %zu must not be greater than AUX area tracing mmap size %zu\n",
			       opts->auxtrace_snapshot_size,
			       opts->auxtrace_mmap_pages * (size_t)page_size);
			return -EINVAL;
		}

		/*
		 * Something went wrong somewhere - this shouldn't happen.
		 */
		if (!opts->auxtrace_snapshot_size || !opts->auxtrace_mmap_pages) {
			pr_err("Failed to calculate default snapshot size and/or AUX area tracing mmap pages\n");
			return -EINVAL;
		}

		pr_debug2("%sx snapshot size: %zu\n", ARM_SPE_PMU_NAME,
			  opts->auxtrace_snapshot_size);
	}

	/* We are in full trace mode but '-m,xyz' wasn't specified */
	if (!opts->auxtrace_mmap_pages) {
		if (privileged) {
			opts->auxtrace_mmap_pages = MiB(4) / page_size;
		} else {
			opts->auxtrace_mmap_pages = KiB(128) / page_size;
			if (opts->mmap_pages == UINT_MAX)
				opts->mmap_pages = KiB(256) / page_size;
		}
	}

	/* Validate auxtrace_mmap_pages */
	if (opts->auxtrace_mmap_pages) {
		size_t sz = opts->auxtrace_mmap_pages * (size_t)page_size;
		size_t min_sz = KiB(8);

		if (sz < min_sz || !is_power_of_2(sz)) {
			pr_err("Invalid mmap size for ARM SPE: must be at least %zuKiB and a power of 2\n",
			       min_sz / 1024);
			return -EINVAL;
		}
	}

	return 0;
}

static int arm_spe_setup_tracking_event(struct evlist *evlist,
					struct record_opts *opts)
{
	int err;
	struct evsel *tracking_evsel;
	struct perf_cpu_map *cpus = evlist->core.user_requested_cpus;

	/* Add a dummy event to keep tracking */
	err = parse_event(evlist, "dummy:u");
	if (err)
		return err;

	tracking_evsel = evlist__last(evlist);
	evlist__set_tracking_event(evlist, tracking_evsel);

	tracking_evsel->core.attr.freq = 0;
	tracking_evsel->core.attr.sample_period = 1;

	/* In the per-cpu case, we always need the time of mmap events etc. */
	if (!perf_cpu_map__is_any_cpu_or_is_empty(cpus)) {
		evsel__set_sample_bit(tracking_evsel, TIME);
		evsel__set_sample_bit(tracking_evsel, CPU);

		/* also track task context switches */
		if (!record_opts__no_switch_events(opts))
			tracking_evsel->core.attr.context_switch = 1;
	}

	return 0;
}

static int arm_spe_recording_options(struct auxtrace_record *itr,
				     struct evlist *evlist,
				     struct record_opts *opts)
{
	struct arm_spe_recording *sper =
			container_of(itr, struct arm_spe_recording, itr);
	struct evsel *evsel, *tmp;
	struct perf_cpu_map *cpus = evlist->core.user_requested_cpus;
	bool discard = false;
	int err;

	sper->evlist = evlist;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel__is_aux_event(evsel)) {
			if (!strstarts(evsel->pmu->name, ARM_SPE_PMU_NAME)) {
				pr_err("Found unexpected auxtrace event: %s\n",
				       evsel->pmu->name);
				return -EINVAL;
			}
			opts->full_auxtrace = true;

			if (opts->user_freq != UINT_MAX ||
			    arm_spe_is_set_freq(evsel)) {
				pr_err("Arm SPE: Frequency is not supported. "
				       "Set period with -c option or PMU parameter (-e %s/period=NUM/).\n",
				       evsel->pmu->name);
				return -EINVAL;
			}
		}
	}

	if (!opts->full_auxtrace)
		return 0;

	evlist__for_each_entry_safe(evlist, tmp, evsel) {
		if (evsel__is_aux_event(evsel)) {
			arm_spe_setup_evsel(evsel, cpus);
			if (evsel->core.attr.config &
			    perf_pmu__format_bits(evsel->pmu, "discard"))
				discard = true;
		}
	}

	if (discard)
		return 0;

	err = arm_spe_setup_aux_buffer(opts);
	if (err)
		return err;

	return arm_spe_setup_tracking_event(evlist, opts);
}

static int arm_spe_parse_snapshot_options(struct auxtrace_record *itr __maybe_unused,
					  struct record_opts *opts,
					  const char *str)
{
	unsigned long long snapshot_size = 0;
	char *endptr;

	if (str) {
		snapshot_size = strtoull(str, &endptr, 0);
		if (*endptr || snapshot_size > SIZE_MAX)
			return -1;
	}

	opts->auxtrace_snapshot_mode = true;
	opts->auxtrace_snapshot_size = snapshot_size;

	return 0;
}

static int arm_spe_snapshot_start(struct auxtrace_record *itr)
{
	struct arm_spe_recording *ptr =
			container_of(itr, struct arm_spe_recording, itr);
	struct evsel *evsel;
	int ret = -EINVAL;

	evlist__for_each_entry(ptr->evlist, evsel) {
		if (evsel__is_aux_event(evsel)) {
			ret = evsel__disable(evsel);
			if (ret < 0)
				return ret;
		}
	}
	return ret;
}

static int arm_spe_snapshot_finish(struct auxtrace_record *itr)
{
	struct arm_spe_recording *ptr =
			container_of(itr, struct arm_spe_recording, itr);
	struct evsel *evsel;
	int ret = -EINVAL;

	evlist__for_each_entry(ptr->evlist, evsel) {
		if (evsel__is_aux_event(evsel)) {
			ret = evsel__enable(evsel);
			if (ret < 0)
				return ret;
		}
	}
	return ret;
}

static int arm_spe_alloc_wrapped_array(struct arm_spe_recording *ptr, int idx)
{
	bool *wrapped;
	int cnt = ptr->wrapped_cnt, new_cnt, i;

	/*
	 * No need to allocate, so return early.
	 */
	if (idx < cnt)
		return 0;

	/*
	 * Make ptr->wrapped as big as idx.
	 */
	new_cnt = idx + 1;

	/*
	 * Freed in arm_spe_recording_free().
	 */
	wrapped = reallocarray(ptr->wrapped, new_cnt, sizeof(bool));
	if (!wrapped)
		return -ENOMEM;

	/*
	 * Init the newly allocated values.
	 */
	for (i = cnt; i < new_cnt; i++)
		wrapped[i] = false;

	ptr->wrapped_cnt = new_cnt;
	ptr->wrapped = wrapped;

	return 0;
}

static bool arm_spe_buffer_has_wrapped(unsigned char *buffer,
				       size_t buffer_size, u64 head)
{
	u64 i, watermark;
	u64 *buf = (u64 *)buffer;
	size_t buf_size = buffer_size;

	/*
	 * Defensively handle the case where head might be continually increasing - if its value is
	 * equal or greater than the size of the ring buffer, then we can safely determine it has
	 * wrapped around. Otherwise, continue to detect if head might have wrapped.
	 */
	if (head >= buffer_size)
		return true;

	/*
	 * We want to look at the very last 512 bytes (chosen arbitrarily) in the ring buffer.
	 */
	watermark = buf_size - 512;

	/*
	 * The value of head is somewhere within the size of the ring buffer. This can mean that
	 * there hasn't been enough data to fill the ring buffer yet or that the trace time was so
	 * long that head has numerically wrapped around. To find out, we need to check if we have
	 * data at the very end of the ring buffer. We can reliably do this because mmap'ed pages
	 * are zeroed out and there is a fresh mapping with every new session.
	 */

	/*
	 * head is less than 512 bytes from the end of the ring buffer.
	 */
	if (head > watermark)
		watermark = head;

	/*
	 * Speed things up by using 64 bit transactions (see "u64 *buf" above).
	 */
	watermark /= sizeof(u64);
	buf_size /= sizeof(u64);

	/*
	 * If we find trace data at the end of the ring buffer, head has been there and has
	 * numerically wrapped around at least once.
	 */
	for (i = watermark; i < buf_size; i++)
		if (buf[i])
			return true;

	return false;
}

static int arm_spe_find_snapshot(struct auxtrace_record *itr, int idx,
				 struct auxtrace_mmap *mm, unsigned char *data,
				 u64 *head, u64 *old)
{
	int err;
	bool wrapped;
	struct arm_spe_recording *ptr =
			container_of(itr, struct arm_spe_recording, itr);

	/*
	 * Allocate memory to keep track of wrapping if this is the first
	 * time we deal with this *mm.
	 */
	if (idx >= ptr->wrapped_cnt) {
		err = arm_spe_alloc_wrapped_array(ptr, idx);
		if (err)
			return err;
	}

	/*
	 * Check to see if *head has wrapped around. If it hasn't, only the
	 * amount of data between *head and *old is snapshotted to avoid
	 * bloating the perf.data file with zeros. But as soon as *head has
	 * wrapped around, the entire size of the AUX ring buffer is taken.
	 */
	wrapped = ptr->wrapped[idx];
	if (!wrapped && arm_spe_buffer_has_wrapped(data, mm->len, *head)) {
		wrapped = true;
		ptr->wrapped[idx] = true;
	}

	pr_debug3("%s: mmap index %d old head %zu new head %zu size %zu\n",
		  __func__, idx, (size_t)*old, (size_t)*head, mm->len);

	/*
	 * No wrap has occurred, we can just use *head and *old.
	 */
	if (!wrapped)
		return 0;

	/*
	 * *head has wrapped around - adjust *head and *old to pick up the
	 * entire content of the AUX buffer. For example, with mm->len of 4MiB
	 * and *head at 1MiB, *old becomes 1MiB and *head becomes 5MiB, so the
	 * whole 4MiB buffer is copied out.
	 */
	if (*head >= mm->len) {
		*old = *head - mm->len;
	} else {
		*head += mm->len;
		*old = *head - mm->len;
	}

	return 0;
}

static u64 arm_spe_reference(struct auxtrace_record *itr __maybe_unused)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC_RAW, &ts);

	return ts.tv_sec ^ ts.tv_nsec;
}

static void arm_spe_recording_free(struct auxtrace_record *itr)
{
	struct arm_spe_recording *sper =
			container_of(itr, struct arm_spe_recording, itr);

	zfree(&sper->wrapped);
	free(sper);
}

struct auxtrace_record *arm_spe_recording_init(int *err,
					       struct perf_pmu *arm_spe_pmu)
{
	struct arm_spe_recording *sper;

	if (!arm_spe_pmu) {
		*err = -ENODEV;
		return NULL;
	}

	sper = zalloc(sizeof(struct arm_spe_recording));
	if (!sper) {
		*err = -ENOMEM;
		return NULL;
	}

	sper->arm_spe_pmu = arm_spe_pmu;
	sper->itr.snapshot_start = arm_spe_snapshot_start;
	sper->itr.snapshot_finish = arm_spe_snapshot_finish;
	sper->itr.find_snapshot = arm_spe_find_snapshot;
	sper->itr.parse_snapshot_options = arm_spe_parse_snapshot_options;
	sper->itr.recording_options = arm_spe_recording_options;
	sper->itr.info_priv_size = arm_spe_info_priv_size;
	sper->itr.info_fill = arm_spe_info_fill;
	sper->itr.free = arm_spe_recording_free;
	sper->itr.reference = arm_spe_reference;
	sper->itr.read_finish = auxtrace_record__read_finish;
	sper->itr.alignment = 0;

	*err = 0;
	return &sper->itr;
}

void
arm_spe_pmu_default_config(const struct perf_pmu *arm_spe_pmu, struct perf_event_attr *attr)
{
	attr->sample_period = arm_spe_pmu__sample_period(arm_spe_pmu);
}
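
/*
 * A rough sketch of how arm_spe_recording_init() is meant to be used
 * (illustrative only: the real call site is the arch-specific
 * auxtrace_record__init(), and the PMU lookup below is an assumption):
 *
 *	int err;
 *	struct perf_pmu *spe_pmu = perf_pmus__find("arm_spe_0");
 *	struct auxtrace_record *itr = arm_spe_recording_init(&err, spe_pmu);
 *
 *	if (!itr)
 *		return err;	// -ENODEV without an SPE PMU, -ENOMEM on failed allocation
 */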