Lines Matching defs:bnode
1473 struct kvfree_rcu_bulk_data *bnode)
1479 llist_add((struct llist_node *) bnode, &krcp->bkvcache);
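
Throughout this listing, bnode is a local struct kvfree_rcu_bulk_data *: a page-sized block that batches pointers queued by kvfree_rcu() until a grace period has elapsed. The two matches at 1473 and 1479 are from put_cached_bnode(), which parks a spent block on the per-CPU bkvcache llist as long as the cache is below its limit, and tells the caller to free the page otherwise. A minimal userspace sketch of that idea, assuming simplified stand-in types (bulk_data, krc) and a hard-coded MIN_CACHED_OBJS in place of rcu_min_cached_objs:

#include <stdbool.h>
#include <stdlib.h>

#define BLOCK_SIZE       4096       /* one page-sized block */
#define MIN_CACHED_OBJS  5          /* stand-in for rcu_min_cached_objs */

struct bulk_data {                  /* stand-in for kvfree_rcu_bulk_data */
        struct bulk_data *next;     /* cache linkage, like the llist_node cast */
        unsigned long nr_records;
        void *records[];            /* pointer array filling the rest of the page */
};

struct krc {                        /* stand-in for struct kfree_rcu_cpu */
        struct bulk_data *bkvcache; /* singly linked cache of spare blocks */
        int nr_bkv_objs;
};

/* Mirrors put_cached_bnode(): keep the block only while under the limit. */
static bool put_cached_bnode(struct krc *krcp, struct bulk_data *bnode)
{
        if (krcp->nr_bkv_objs >= MIN_CACHED_OBJS)
                return false;

        bnode->next = krcp->bkvcache;   /* llist_add() equivalent */
        krcp->bkvcache = bnode;
        krcp->nr_bkv_objs++;
        return true;
}

int main(void)
{
        struct krc krc = { 0 };
        struct bulk_data *bnode = calloc(1, BLOCK_SIZE);

        if (bnode && !put_cached_bnode(&krc, bnode))
                free(bnode);            /* cache full: hand the page back */
        /* a successfully cached block is intentionally left in the cache here */
        return 0;
}

Keeping a few spare blocks cached avoids a trip to the page allocator on the hot enqueue/free paths; the cache is refilled from a work item (see the matches at 1818-1842 below).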
1509 struct kvfree_rcu_bulk_data *bnode, int idx)
1514 if (!WARN_ON_ONCE(!poll_state_synchronize_rcu_full(&bnode->gp_snap))) {
1515 debug_rcu_bhead_unqueue(bnode);
1519 "slab", bnode->nr_records,
1520 bnode->records);
1522 kfree_bulk(bnode->nr_records, bnode->records);
1524 for (i = 0; i < bnode->nr_records; i++) {
1526 "slab", bnode->records[i], 0);
1528 vfree(bnode->records[i]);
1535 if (put_cached_bnode(krcp, bnode))
1536 bnode = NULL;
1539 if (bnode)
1540 free_page((unsigned long) bnode);
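
Lines 1509-1540 are kvfree_rcu_bulk(), which frees one block's worth of pointers: the records are released only if the grace-period snapshot taken at enqueue time (bnode->gp_snap) has completed, and the page-sized block is then recycled through the cache above or handed back with free_page(). A compact userspace sketch of that control flow, assuming a gp_completed flag in place of poll_state_synchronize_rcu_full(), plain free() in place of kfree_bulk()/vfree(), and a cache_put() stub for put_cached_bnode():

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_ENTR 8                  /* stand-in for KVFREE_BULK_MAX_ENTR */

struct bulk_data {
        bool gp_completed;          /* stand-in for the gp_snap poll */
        unsigned long nr_records;
        void *records[MAX_ENTR];
};

/* Stand-in for put_cached_bnode(); pretend the cache is already full. */
static bool cache_put(struct bulk_data *bnode)
{
        (void)bnode;
        return false;
}

/* Mirrors the control flow of kvfree_rcu_bulk(). */
static void bulk_free(struct bulk_data *bnode)
{
        if (bnode->gp_completed) {
                /*
                 * In the kernel, idx 0 (slab) hands the whole array to
                 * kfree_bulk() while idx 1 (vmalloc) vfree()s entries one
                 * by one; plain free() stands in for both here.
                 */
                for (unsigned long i = 0; i < bnode->nr_records; i++)
                        free(bnode->records[i]);
        } else {
                /* The WARN_ON_ONCE() path: leak rather than free too early. */
                fprintf(stderr, "grace period not complete, skipping free\n");
        }

        /* Recycle the page-sized block through the cache, or release it. */
        if (cache_put(bnode))
                bnode = NULL;
        if (bnode)
                free(bnode);        /* free_page() in the kernel */
}

int main(void)
{
        struct bulk_data *bnode = calloc(1, sizeof(*bnode));

        if (!bnode)
                return 1;
        bnode->gp_completed = true;
        bnode->records[bnode->nr_records++] = malloc(32);
        bulk_free(bnode);
        return 0;
}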
1573 struct kvfree_rcu_bulk_data *bnode, *n;
1599 list_for_each_entry_safe(bnode, n, &bulk_head[i], list)
1600 kvfree_rcu_bulk(krcp, bnode, i);
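
Lines 1573-1600 are the deferred-work side (kfree_rcu_work()), which walks the detached bulk lists with list_for_each_entry_safe() and hands every block to kvfree_rcu_bulk(); the _safe variant matters because the node being visited is freed or recycled during the walk. A small sketch of that pattern, assuming a plain singly linked list in place of the kernel's list_head:

#include <stdlib.h>

struct bulk_data {
        struct bulk_data *next;     /* list linkage (list_head in the kernel) */
        unsigned long nr_records;
        void *records[8];
};

/* Stand-in for kvfree_rcu_bulk(): consumes and releases the block. */
static void bulk_free(struct bulk_data *bnode)
{
        for (unsigned long i = 0; i < bnode->nr_records; i++)
                free(bnode->records[i]);
        free(bnode);
}

/*
 * Mirrors list_for_each_entry_safe(bnode, n, head, list): grab the next
 * node before the current one is freed, so the walk survives removal.
 */
static void drain(struct bulk_data *head)
{
        struct bulk_data *bnode, *n;

        for (bnode = head; bnode; bnode = n) {
                n = bnode->next;    /* the "safe" part of the iteration */
                bulk_free(bnode);
        }
}

int main(void)
{
        struct bulk_data *head = NULL;

        for (int i = 0; i < 3; i++) {
                struct bulk_data *b = calloc(1, sizeof(*b));
                if (!b)
                        break;
                b->records[b->nr_records++] = malloc(16);
                b->next = head;
                head = b;
        }
        drain(head);
        return 0;
}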
1702 struct kvfree_rcu_bulk_data *bnode, *n;
1711 list_for_each_entry_safe_reverse(bnode, n, &krcp->bulk_head[i], list) {
1712 if (!poll_state_synchronize_rcu_full(&bnode->gp_snap))
1715 atomic_sub(bnode->nr_records, &krcp->bulk_count[i]);
1716 list_move(&bnode->list, &bulk_ready[i]);
1728 list_for_each_entry_safe(bnode, n, &bulk_ready[i], list)
1729 kvfree_rcu_bulk(krcp, bnode, i);
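
Lines 1702-1729 are kvfree_rcu_drain_ready(), called from the monitor path: each bulk list is walked in reverse, i.e. oldest block first, and any block whose gp_snap has already completed is moved onto a local bulk_ready list (with bulk_count adjusted) so it can be freed without waiting for another grace period; the walk stops at the first block that is not ready, since newer blocks carry newer snapshots and cannot be ready either. A sketch of that early-exit scan, assuming an array ordered oldest to newest and a boolean in place of the snapshot poll:

#include <stdbool.h>
#include <stdio.h>

#define NBLOCKS 4

struct bulk_data {
        bool gp_completed;          /* stand-in for poll_state_synchronize_rcu_full() */
        unsigned long nr_records;
};

/*
 * Mirrors the reverse scan: blocks[] is ordered oldest to newest, and
 * snapshots only get newer toward the end, so the first block whose grace
 * period is still pending ends the scan.
 */
static int count_ready(struct bulk_data blocks[], int n)
{
        int ready = 0;

        for (int i = 0; i < n; i++) {
                if (!blocks[i].gp_completed)
                        break;      /* everything newer cannot be ready either */
                ready++;            /* kernel: list_move() onto bulk_ready[] */
        }
        return ready;
}

int main(void)
{
        struct bulk_data blocks[NBLOCKS] = {
                { .gp_completed = true,  .nr_records = 5 },
                { .gp_completed = true,  .nr_records = 3 },
                { .gp_completed = false, .nr_records = 7 },  /* scan stops here */
                { .gp_completed = false, .nr_records = 1 },
        };

        printf("%d blocks ready to free\n", count_ready(blocks, NBLOCKS));
        return 0;
}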
1818 struct kvfree_rcu_bulk_data *bnode;
1831 bnode = (struct kvfree_rcu_bulk_data *)
1834 if (!bnode)
1838 pushed = put_cached_bnode(krcp, bnode);
1842 free_page((unsigned long) bnode);
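
Lines 1818-1842 are fill_page_cache_func(), the work item that pre-fills the per-CPU block cache: it keeps allocating page-sized blocks with best-effort GFP flags and parking them via put_cached_bnode(), stopping (and freeing the rejected page) as soon as either the allocator or the cache says no. A userspace sketch of that loop, assuming malloc()/free() in place of __get_free_page()/free_page() and a tiny fixed-size cache in place of the bkvcache llist:

#include <stdbool.h>
#include <stdlib.h>

#define BLOCK_SIZE  4096
#define CACHE_MAX   5               /* stand-in for the per-CPU cache limit */

static void *cache[CACHE_MAX];      /* stand-in for krcp->bkvcache */
static int nr_cached;

/* Stand-in for put_cached_bnode(): false once the cache is full. */
static bool cache_put(void *page)
{
        if (nr_cached >= CACHE_MAX)
                return false;
        cache[nr_cached++] = page;
        return true;
}

/*
 * Mirrors fill_page_cache_func(): keep allocating page-sized blocks and
 * parking them until either the allocator or the cache refuses.
 */
static void fill_page_cache(int nr_pages)
{
        for (int i = 0; i < nr_pages; i++) {
                /* kernel: __get_free_page(GFP_KERNEL | __GFP_NORETRY | ...) */
                void *page = malloc(BLOCK_SIZE);

                if (!page)
                        break;
                if (!cache_put(page)) {
                        free(page); /* cache refused it: free_page() in the kernel */
                        break;
                }
        }
}

int main(void)
{
        fill_page_cache(8);         /* only CACHE_MAX blocks end up cached */
        while (nr_cached > 0)
                free(cache[--nr_cached]);
        return 0;
}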
1861 struct kvfree_rcu_bulk_data *bnode;
1869 bnode = list_first_entry_or_null(&(*krcp)->bulk_head[idx],
1873 if (!bnode || bnode->nr_records == KVFREE_BULK_MAX_ENTR) {
1874 bnode = get_cached_bnode(*krcp);
1875 if (!bnode && can_alloc) {
1889 bnode = (struct kvfree_rcu_bulk_data *)
1894 if (!bnode)
1898 bnode->nr_records = 0;
1899 list_add(&bnode->list, &(*krcp)->bulk_head[idx]);
1903 bnode->nr_records++;
1904 bnode->records[bnode->nr_records - 1] = ptr;
1905 get_state_synchronize_rcu_full(&bnode->gp_snap);
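
Lines 1861-1905 are add_ptr_to_bulk_krc_lock(), the enqueue path: under the per-CPU lock it takes the block at the head of bulk_head[idx]; if there is none or it already holds KVFREE_BULK_MAX_ENTR pointers, a fresh block comes from the cache or, when can_alloc allows, from the page allocator after dropping and re-taking the lock; the pointer is then stored at records[nr_records - 1] and gp_snap is refreshed with get_state_synchronize_rcu_full() so the snapshot always covers the newest record. A userspace sketch of that append logic, assuming a simplified add_ptr() that skips the lock dance and the cache lookup, and a plain counter in place of the RCU grace-period cookie:

#include <stdbool.h>
#include <stdlib.h>

#define MAX_ENTR 8                  /* stand-in for KVFREE_BULK_MAX_ENTR */

struct bulk_data {
        struct bulk_data *next;     /* bulk_head list linkage */
        unsigned long gp_snap;      /* stand-in for the rcu_gp_oldstate snapshot */
        unsigned long nr_records;
        void *records[MAX_ENTR];
};

static unsigned long gp_seq;        /* stand-in for the RCU grace-period counter */

/*
 * Mirrors add_ptr_to_bulk_krc_lock(): append ptr to the newest block,
 * opening a fresh block when the head one is missing or full, and refresh
 * the grace-period snapshot so it covers the record just added.
 */
static bool add_ptr(struct bulk_data **head, void *ptr, bool can_alloc)
{
        struct bulk_data *bnode = *head;

        if (!bnode || bnode->nr_records == MAX_ENTR) {
                /* kernel: try get_cached_bnode(), then __get_free_page() */
                if (!can_alloc)
                        return false;
                bnode = calloc(1, sizeof(*bnode));
                if (!bnode)
                        return false;
                bnode->nr_records = 0;
                bnode->next = *head;    /* list_add(): newest block at the head */
                *head = bnode;
        }

        bnode->records[bnode->nr_records++] = ptr;
        bnode->gp_snap = ++gp_seq;  /* get_state_synchronize_rcu_full() stand-in */
        return true;
}

int main(void)
{
        struct bulk_data *head = NULL;

        for (int i = 0; i < 2 * MAX_ENTR; i++)
                if (!add_ptr(&head, malloc(8), true))
                        break;
        /* cleanup omitted: the real blocks are drained by the monitor/work path */
        return 0;
}

Because the snapshot is refreshed on every append, a block only counts as ready once a grace period has elapsed since its most recent record, which is exactly what the early-exit reverse scan at 1711-1716 relies on.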