Lines matching references to 'pg' in kernel/trace/ftrace.c
3 * Infrastructure for profiling code inserted by 'gcc -pg'.
460 struct ftrace_profile_page *pg;
462 pg = (struct ftrace_profile_page *)((unsigned long)rec & PAGE_MASK);
468 if ((void *)rec >= (void *)&pg->records[pg->index]) {
469 pg = pg->next;
470 if (!pg)
472 rec = &pg->records[0];
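
A note on the lookup at lines 460-472: each struct ftrace_profile_page is page-aligned, so masking a record pointer with PAGE_MASK recovers the header of the page holding it. A minimal sketch of the stepping logic, with simplified stand-ins for the structures defined in ftrace.c:

struct ftrace_profile {
	unsigned long ip;
	unsigned long counter;		/* sketch: the real struct has more */
};

struct ftrace_profile_page {
	struct ftrace_profile_page *next;
	unsigned long index;		/* records in use on this page */
	struct ftrace_profile records[];
};

/* Advance to the record after 'rec', crossing page boundaries as needed. */
static struct ftrace_profile *profile_next(struct ftrace_profile *rec)
{
	struct ftrace_profile_page *pg;

	/* pages are page-aligned, so masking finds the page header */
	pg = (struct ftrace_profile_page *)((unsigned long)rec & PAGE_MASK);
	rec++;

	/* walked past the last used slot: move to the next page */
	if ((void *)rec >= (void *)&pg->records[pg->index]) {
		pg = pg->next;
		if (!pg)
			return NULL;
		rec = &pg->records[0];
	}
	return rec;
}
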
617 struct ftrace_profile_page *pg;
619 pg = stat->pages = stat->start;
621 while (pg) {
622 memset(pg->records, 0, PROFILE_RECORDS_SIZE);
623 pg->index = 0;
624 pg = pg->next;
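
Resetting the profile (lines 617-624) keeps the chain allocated and only zeroes each page's record area and fill index. Roughly, assuming the same simplified types as above:

/* Sketch: clear all recorded samples but keep every page allocated. */
static void profile_reset(struct ftrace_profile_page *start)
{
	struct ftrace_profile_page *pg;

	for (pg = start; pg; pg = pg->next) {
		/* PROFILE_RECORDS_SIZE: bytes of a page left for records */
		memset(pg->records, 0, PROFILE_RECORDS_SIZE);
		pg->index = 0;		/* page is empty again */
	}
}
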
633 struct ftrace_profile_page *pg;
659 pg = stat->start = stat->pages;
664 pg->next = (void *)get_zeroed_page(GFP_KERNEL);
665 if (!pg->next)
667 pg = pg->next;
673 pg = stat->start;
674 while (pg) {
675 unsigned long tmp = (unsigned long)pg;
677 pg = pg->next;
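
The allocation path at lines 633-677 grows the chain one zeroed page at a time and, if any allocation fails, walks from the start freeing what was built so far. A sketch, assuming an illustrative stat container with the 'pages'/'start' pointers the listing shows:

struct profile_stat {				/* illustrative stand-in */
	struct ftrace_profile_page *pages;	/* first page, preallocated */
	struct ftrace_profile_page *start;	/* head of the chain */
};

/* Sketch: chain 'pages' zeroed pages behind the first; unwind on failure. */
static int profile_pages_alloc(struct profile_stat *stat, int pages)
{
	struct ftrace_profile_page *pg;

	pg = stat->start = stat->pages;
	while (--pages) {
		pg->next = (void *)get_zeroed_page(GFP_KERNEL);
		if (!pg->next)
			goto out_free;
		pg = pg->next;
	}
	return 0;

 out_free:
	pg = stat->start;
	while (pg) {
		/* save the address: the header lives inside the page being freed */
		unsigned long tmp = (unsigned long)pg;

		pg = pg->next;
		free_page(tmp);
	}
	return -ENOMEM;
}
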
1592 #define do_for_each_ftrace_rec(pg, rec) \
1593 for (pg = ftrace_pages_start; pg; pg = pg->next) { \
1595 for (_____i = 0; _____i < pg->index; _____i++) { \
1596 rec = &pg->records[_____i];
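
Lines 1592-1596 are only the opening half of the iteration macro; in ftrace.c it is paired with a closing macro that supplies the two matching braces, and the deliberately ugly _____i avoids colliding with the caller's variables:

#define do_for_each_ftrace_rec(pg, rec)					\
	for (pg = ftrace_pages_start; pg; pg = pg->next) {		\
		int _____i;						\
		for (_____i = 0; _____i < pg->index; _____i++) {	\
			rec = &pg->records[_____i];

#define while_for_each_ftrace_rec()					\
		}							\
	}

Callers bracket their body with the pair, which is the pattern behind every do_for_each_ftrace_rec() hit below:

	do_for_each_ftrace_rec(pg, rec) {
		/* inspect or modify rec */
	} while_for_each_ftrace_rec();
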
1617 struct ftrace_page *pg;
1624 for (pg = ftrace_pages_start; pg; pg = pg->next) {
1625 if (pg->index == 0 ||
1626 end < pg->records[0].ip ||
1627 start >= (pg->records[pg->index - 1].ip + MCOUNT_INSN_SIZE))
1629 rec = bsearch(&key, pg->records, pg->index,
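
The range lookup around lines 1617-1629 leans on two invariants: records inside a page are sorted by ip, and pages are ordered, so a page whose first/last ip cannot cover [start, end] is skipped outright and bsearch() runs only inside candidate pages. A sketch close to the file's own logic (ftrace.c overloads key.flags to carry the range end; helper names here are illustrative):

static int cmp_recs(const void *a, const void *b)
{
	const struct dyn_ftrace *key = a;	/* key->ip = start, key->flags = end */
	const struct dyn_ftrace *rec = b;

	if (key->flags < rec->ip)
		return -1;
	if (key->ip >= rec->ip + MCOUNT_INSN_SIZE)
		return 1;
	return 0;				/* [start, end] overlaps rec */
}

static struct dyn_ftrace *lookup_in_range(unsigned long start, unsigned long end)
{
	struct dyn_ftrace key = { .ip = start, .flags = end };
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;

	for (pg = ftrace_pages_start; pg; pg = pg->next) {
		/* cheap per-page range check before the binary search */
		if (pg->index == 0 ||
		    end < pg->records[0].ip ||
		    start >= (pg->records[pg->index - 1].ip + MCOUNT_INSN_SIZE))
			continue;
		rec = bsearch(&key, pg->records, pg->index,
			      sizeof(struct dyn_ftrace), cmp_recs);
		if (rec)
			return rec;
	}
	return NULL;
}
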
1765 struct ftrace_page *pg;
1784 do_for_each_ftrace_rec(pg, rec) {
2003 struct ftrace_page *pg;
2031 do_for_each_ftrace_rec(pg, rec) {
2089 do_for_each_ftrace_rec(pg, rec) {
2758 struct ftrace_page *pg;
2766 do_for_each_ftrace_rec(pg, rec) {
2783 struct ftrace_page *pg;
2805 iter->pg = ftrace_pages_start;
2809 while (iter->pg && !iter->pg->index)
2810 iter->pg = iter->pg->next;
2812 if (!iter->pg)
2828 if (iter->index >= iter->pg->index) {
2829 iter->pg = iter->pg->next;
2833 while (iter->pg && !iter->pg->index)
2834 iter->pg = iter->pg->next;
2837 if (!iter->pg)
2851 return &iter->pg->records[iter->index];
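
Lines 2805-2851 are a standard seq_file cursor over the page chain: a (pg, index) pair where the step bumps index and, on overflow, hops to the next page, skipping pages that were allocated but hold no used records. Condensed into one helper (the iterator layout is illustrative):

struct rec_iter {
	struct ftrace_page *pg;
	int index;
};

static struct dyn_ftrace *rec_iter_next(struct rec_iter *iter)
{
	iter->index++;
	if (iter->index >= iter->pg->index) {
		iter->pg = iter->pg->next;
		iter->index = 0;

		/* skip pages with no used records */
		while (iter->pg && !iter->pg->index)
			iter->pg = iter->pg->next;
	}
	if (!iter->pg)
		return NULL;

	return &iter->pg->records[iter->index];
}
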
3188 struct ftrace_page *pg;
3191 do_for_each_ftrace_rec(pg, rec) {
3781 struct ftrace_page *pg;
3804 for (pg = new_pgs; pg; pg = pg->next) {
3806 for (i = 0; i < pg->index; i++) {
3812 p = &pg->records[i];
3837 static int ftrace_allocate_records(struct ftrace_page *pg, int count)
3851 pg->records = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
3853 if (!pg->records) {
3865 pg->order = order;
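
The allocator at lines 3837-3865 sizes each block as the largest power-of-two page group the record count justifies, and falls back to smaller orders when memory is tight, reporting how many records the block can actually hold. Restructured as a loop for the sketch (the file itself uses a goto; ENTRY_SIZE and ENTRIES_PER_PAGE are its constants):

static int allocate_records(struct ftrace_page *pg, int count)
{
	int pages = DIV_ROUND_UP(count, ENTRIES_PER_PAGE);
	int order = fls(pages) - 1;	/* largest power of two that fits */

	while (order >= 0) {
		pg->records = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
						       order);
		if (pg->records) {
			pg->order = order;
			/* capacity of this block, capped at the request */
			return min(count,
				   (int)((PAGE_SIZE << order) / ENTRY_SIZE));
		}
		order--;		/* too big: retry with half the size */
	}
	return -ENOMEM;
}
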
3875 struct ftrace_page *pg = pages;
3877 while (pg) {
3878 if (pg->records) {
3879 free_pages((unsigned long)pg->records, pg->order);
3880 ftrace_number_of_pages -= 1 << pg->order;
3882 pages = pg->next;
3883 kfree(pg);
3884 pg = pages;
3893 struct ftrace_page *pg;
3899 start_pg = pg = kzalloc(sizeof(*pg), GFP_KERNEL);
3900 if (!pg)
3909 cnt = ftrace_allocate_records(pg, num_to_init);
3917 pg->next = kzalloc(sizeof(*pg), GFP_KERNEL);
3918 if (!pg->next)
3921 pg = pg->next;
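
Around lines 3893-3921 one struct ftrace_page descriptor is kzalloc'd per block and chained until enough record slots exist; any failure frees the partial chain the same way the walker at lines 3875-3884 does. A sketch reusing allocate_records() from above (names are illustrative):

static struct ftrace_page *allocate_record_pages(unsigned long num)
{
	struct ftrace_page *start_pg, *pg;
	int cnt;

	start_pg = pg = kzalloc(sizeof(*pg), GFP_KERNEL);
	if (!pg)
		return NULL;

	for (;;) {
		cnt = allocate_records(pg, num);
		if (cnt < 0)
			goto out_free;

		num -= cnt;
		if (!num)
			break;

		pg->next = kzalloc(sizeof(*pg), GFP_KERNEL);
		if (!pg->next)
			goto out_free;
		pg = pg->next;
	}
	return start_pg;

 out_free:
	while (start_pg) {
		if (start_pg->records)
			free_pages((unsigned long)start_pg->records,
				   start_pg->order);
		pg = start_pg->next;
		kfree(start_pg);
		start_pg = pg;
	}
	return NULL;
}
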
3938 struct ftrace_page *pg;
4168 if (iter->idx >= iter->pg->index) {
4169 if (iter->pg->next) {
4170 iter->pg = iter->pg->next;
4175 rec = &iter->pg->records[iter->idx++];
4277 iter->pg = ftrace_pages_start;
4347 struct ftrace_page *pg;
4351 do_for_each_ftrace_rec(pg, rec) {
4558 iter->pg = ftrace_pages_start;
4582 iter->pg = ftrace_pages_start;
4607 iter->pg = ftrace_pages_start;
4631 iter->pg = ftrace_pages_start;
4721 iter->pg = ftrace_pages_start;
4849 struct ftrace_page *pg;
4856 do_for_each_ftrace_rec(pg, rec) {
4857 if (pg->index <= index) {
4858 index -= pg->index;
4862 rec = &pg->records[index];
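
Lines 4849-4862 show how the Nth record overall is found without any flat array: whole pages are skipped by subtracting their fill count from the target index until the index lands inside a page. As a stand-alone helper:

/* Sketch: return the record at global position 'index', or NULL. */
static struct dyn_ftrace *rec_by_index(unsigned long index)
{
	struct ftrace_page *pg;

	for (pg = ftrace_pages_start; pg; pg = pg->next) {
		if (pg->index <= index) {
			index -= pg->index;	/* skip this whole page */
			continue;
		}
		return &pg->records[index];
	}
	return NULL;
}
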
4933 struct ftrace_page *pg;
4963 do_for_each_ftrace_rec(pg, rec) {
6985 struct ftrace_page *pg;
7001 do_for_each_ftrace_rec(pg, rec) {
7180 struct ftrace_page *pg;
7238 pg = start_pg;
7268 end_offset = (pg->index+1) * sizeof(pg->records[0]);
7269 if (end_offset > PAGE_SIZE << pg->order) {
7271 if (WARN_ON(!pg->next))
7273 pg = pg->next;
7276 rec = &pg->records[pg->index++];
7280 if (pg->next) {
7281 pg_unuse = pg->next;
7282 pg->next = NULL;
7286 ftrace_pages = pg;
7311 pg_remaining = (ENTRIES_PER_PAGE << pg->order) - pg->index;
7317 for (pg = pg_unuse; pg; pg = pg->next)
7318 remaining += 1 << pg->order;
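
Lines 7238-7318 fill preallocated pages with newly discovered call sites: before each append, the byte offset the next record would need is checked against the block size (PAGE_SIZE << order), and on overflow the cursor moves to the next preallocated page; pages left over at the end are detached and their capacity counted as remaining. The append step, roughly (helper is illustrative):

/* Sketch: append one call-site ip, spilling to the next page when full. */
static struct dyn_ftrace *record_append(struct ftrace_page **pgp,
					unsigned long ip)
{
	struct ftrace_page *pg = *pgp;
	unsigned long end_offset;

	/* would one more record run past this block? */
	end_offset = (pg->index + 1) * sizeof(pg->records[0]);
	if (end_offset > PAGE_SIZE << pg->order) {
		if (WARN_ON(!pg->next))
			return NULL;
		pg = *pgp = pg->next;
	}

	pg->records[pg->index].ip = ip;
	return &pg->records[pg->index++];
}
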
7446 clear_mod_from_hash(struct ftrace_page *pg, struct ftrace_hash *hash)
7455 for (i = 0; i < pg->index; i++) {
7456 rec = &pg->records[i];
7469 static void clear_mod_from_hashes(struct ftrace_page *pg)
7478 clear_mod_from_hash(pg, tr->ops->func_hash->filter_hash);
7479 clear_mod_from_hash(pg, tr->ops->func_hash->notrace_hash);
7508 struct ftrace_page *pg;
7532 for (pg = ftrace_pages_start; pg; pg = *last_pg) {
7533 rec = &pg->records[0];
7539 if (WARN_ON(pg == ftrace_pages_start))
7543 if (pg == ftrace_pages)
7546 ftrace_update_tot_cnt -= pg->index;
7547 *last_pg = pg->next;
7549 pg->next = tmp_page;
7550 tmp_page = pg;
7552 last_pg = &pg->next;
7560 for (pg = tmp_page; pg; pg = tmp_page) {
7563 clear_mod_from_hashes(pg);
7565 if (pg->records) {
7566 free_pages((unsigned long)pg->records, pg->order);
7567 ftrace_number_of_pages -= 1 << pg->order;
7569 tmp_page = pg->next;
7570 kfree(pg);
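
The module-unload path (lines 7508-7570) never frees a live page in place: it walks the list with a last_pg double pointer, splices out every page whose records belong to the dying module (a page never mixes modules, per the comment around line 7605), collects them on tmp_page, and only then clears hash references and frees. The splice in isolation, as a sketch (the real code tests within_module() rather than an explicit range):

static struct ftrace_page *
unlink_module_pages(unsigned long mod_start, unsigned long mod_end)
{
	struct ftrace_page **last_pg = &ftrace_pages_start;
	struct ftrace_page *tmp_page = NULL;
	struct ftrace_page *pg;

	for (pg = ftrace_pages_start; pg; pg = *last_pg) {
		struct dyn_ftrace *rec = &pg->records[0];

		if (rec->ip >= mod_start && rec->ip < mod_end) {
			*last_pg = pg->next;	/* unlink from the live list */
			pg->next = tmp_page;	/* push onto the free list */
			tmp_page = pg;
		} else {
			last_pg = &pg->next;
		}
	}
	return tmp_page;
}
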
7578 struct ftrace_page *pg;
7601 do_for_each_ftrace_rec(pg, rec) {
7605 * module text shares the pg. If a record is
7606 * not part of this module, then skip this pg,
7896 struct ftrace_page *pg;
7916 for (pg = ftrace_pages_start; pg; last_pg = &pg->next, pg = *last_pg) {
7917 if (end < pg->records[0].ip ||
7918 start >= (pg->records[pg->index - 1].ip + MCOUNT_INSN_SIZE))
7921 rec = bsearch(&key, pg->records, pg->index,
7933 pg->index--;
7935 if (!pg->index) {
7936 *last_pg = pg->next;
7937 pg->next = tmp_page;
7938 tmp_page = pg;
7939 pg = container_of(last_pg, struct ftrace_page, next);
7941 ftrace_pages = pg;
7945 (pg->index - (rec - pg->records)) * sizeof(*rec));
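
Record removal (lines 7896-7945, for functions whose text is gone) finds the record with the same per-page range check plus bsearch() as above, then closes the hole with memmove() so the page stays sorted and dense; a page whose index drops to zero is spliced out of the list, as at 7935-7941. The deletion step:

/* Sketch: delete 'rec' from its page, keeping the array sorted and dense. */
static void record_delete(struct ftrace_page *pg, struct dyn_ftrace *rec)
{
	pg->index--;
	/* slide everything after 'rec' down over the hole */
	memmove(rec, rec + 1,
		(pg->index - (rec - pg->records)) * sizeof(*rec));
}
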