Lines Matching +full:block +full:- +full:offset
1 // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Struct-field fragment (original line 27): base counter id of a block.
 * Grounded in later uses: ids are translated to block-local array indices
 * via `id - block->offset`, and the IDR is seeded with this base. */
27 u32 offset;
/* Fragment (orig. 45/47; braces missing from this excerpt): true when the
 * counter's per-block flag slot equals COUNTER_FLAG_READY. `id` is a global
 * counter id; `id - block->offset` converts it to a block-local index. */
45 prestera_counter_is_ready(struct prestera_counter_block *block, u32 id)
47 return block->counter_flag[id - block->offset] == COUNTER_FLAG_READY;
/* Fragments (orig. 52 and 57): acquire/release of the top-level
 * prestera_counter mutex guarding the block list. The two enclosing
 * function bodies are not visible in this excerpt — presumably
 * prestera_counter_lock()/unlock() helpers; confirm against the full file. */
52 mutex_lock(&counter->mtx);
57 mutex_unlock(&counter->mtx);
/* Acquire the per-block mutex (serializes flag/stats/IDR updates within one
 * counter block). Braces missing from this excerpt. */
60 static void prestera_counter_block_lock(struct prestera_counter_block *block)
62 mutex_lock(&block->mtx);
/* Release the per-block mutex. Braces missing from this excerpt. */
65 static void prestera_counter_block_unlock(struct prestera_counter_block *block)
67 mutex_unlock(&block->mtx);
/* Take a reference on the block only if it is still live: per the kernel
 * refcount_t API, refcount_inc_not_zero() returns false (and does not
 * increment) when the count is already zero — i.e. the block is being torn
 * down. Braces missing from this excerpt. */
70 static bool prestera_counter_block_incref(struct prestera_counter_block *block)
72 return refcount_inc_not_zero(&block->refcnt);
/* Drop a reference; per the kernel refcount_t API, refcount_dec_and_test()
 * returns true exactly when this was the last reference, signalling the
 * caller to free the block. Braces missing from this excerpt. */
75 static bool prestera_counter_block_decref(struct prestera_counter_block *block)
77 return refcount_dec_and_test(&block->refcnt);
/* Zero a single stats entry, indexed block-locally (counter_id -
 * block->offset). NOTE(review): signature line 82/83 with the counter_id
 * parameter is missing from this excerpt; locking is presumably the
 * caller's responsibility — visible callers hold the block lock. */
81 static void prestera_counter_stats_clear(struct prestera_counter_block *block,
84 memset(&block->stats[counter_id - block->offset], 0,
85 sizeof(*block->stats));
/* Fragment of a block-list scan: return the first live block that belongs
 * to `client`, is not marked full, and whose refcount could be taken
 * (incref fails for a block already being destroyed). List slots may be
 * NULL — freed blocks leave holes (see the put path). The function header,
 * locking, and the not-found return are missing from this excerpt. */
95 for (i = 0; i < counter->block_list_len; i++) {
96 if (counter->block_list[i] &&
97 counter->block_list[i]->client == client &&
98 !counter->block_list[i]->full &&
99 prestera_counter_block_incref(counter->block_list[i])) {
101 return counter->block_list[i];
/* Fragment: add `block` to counter->block_list. First pass reuses a NULL
 * hole left by a freed block (orig. 117-121; the `continue`/assignment
 * control flow between 118 and 121 is missing from this excerpt). If no
 * hole exists, grow the array by one with krealloc — note the original
 * pointer stays valid on krealloc failure because the result is assigned
 * to `arr` first, so -ENOMEM leaks nothing. */
110 struct prestera_counter_block *block)
117 for (i = 0; i < counter->block_list_len; i++) {
118 if (counter->block_list[i])
121 counter->block_list[i] = block;
126 arr = krealloc(counter->block_list, (counter->block_list_len + 1) *
127 sizeof(*counter->block_list), GFP_KERNEL);
130 return -ENOMEM;
133 counter->block_list = arr;
134 counter->block_list[counter->block_list_len] = block;
135 counter->block_list_len++;
/* Fragment of the block-acquisition path. Order of operations (as visible):
 *  1. try to reuse an existing not-full block for this client;
 *  2. otherwise kzalloc a new one and ask the HW for a counter block
 *     (fills id/offset/num_counters);
 *  3. allocate the stats and counter_flag arrays sized num_counters;
 *  4. init per-block mutex, refcount=1, and an IDR whose ids start at
 *     block->offset (idr_init_base), so IDR ids ARE global counter ids;
 *  5. publish the block on the counter's list.
 * Lines 186-195 are the goto-cleanup unwind in reverse order (IDR, mutex,
 * flags, stats, HW block, struct) — the goto labels themselves and the
 * intervening `if (err)` checks are missing from this excerpt. */
143 struct prestera_counter_block *block;
146 block = prestera_counter_block_lookup_not_full(counter, client);
147 if (block)
148 return block;
150 block = kzalloc(sizeof(*block), GFP_KERNEL);
151 if (!block)
152 return ERR_PTR(-ENOMEM);
154 err = prestera_hw_counter_block_get(counter->sw, client,
155 &block->id, &block->offset,
156 &block->num_counters);
160 block->stats = kcalloc(block->num_counters,
161 sizeof(*block->stats), GFP_KERNEL);
162 if (!block->stats) {
163 err = -ENOMEM;
167 block->counter_flag = kcalloc(block->num_counters,
168 sizeof(*block->counter_flag),
170 if (!block->counter_flag) {
171 err = -ENOMEM;
175 block->client = client;
176 mutex_init(&block->mtx);
177 refcount_set(&block->refcnt, 1);
178 idr_init_base(&block->counter_idr, block->offset);
180 err = prestera_counter_block_list_add(counter, block);
184 return block;
187 idr_destroy(&block->counter_idr);
188 mutex_destroy(&block->mtx);
189 kfree(block->counter_flag);
191 kfree(block->stats);
193 prestera_hw_counter_block_release(counter->sw, block->id);
195 kfree(block);
/* Fragment of the release path: drop a reference and, when it was the last
 * one (decref true), unlink the block from the list by NULLing its slot
 * (matching the hole-reuse in list_add), then tear everything down. The
 * early return after decref, the list locking, and the loop `break` are
 * missing from this excerpt. The WARN_ON documents the invariant that no
 * counter ids remain allocated in a block being freed.
 * NOTE(review): counter_flag is not freed in the visible lines (187-195 of
 * the alloc path free it) — presumably freed on a missing line; confirm
 * against the full file. */
200 struct prestera_counter_block *block)
204 if (!prestera_counter_block_decref(block))
208 for (i = 0; i < counter->block_list_len; i++) {
209 if (counter->block_list[i] &&
210 counter->block_list[i]->id == block->id) {
211 counter->block_list[i] = NULL;
217 WARN_ON(!idr_is_empty(&block->counter_idr));
219 prestera_hw_counter_block_release(counter->sw, block->id);
220 idr_destroy(&block->counter_idr);
221 mutex_destroy(&block->mtx);
222 kfree(block->stats);
223 kfree(block);
/* Fragment: allocate a free counter id inside a block. Ids come from
 * idr_alloc_cyclic over [block->offset, block->offset + num_counters), so
 * the returned id is already global. -ENOSPC marks the block full so the
 * lookup path skips it. The `*id = free_id` success store and the out-param
 * declaration (orig. 227-228) are missing from this excerpt; the `full`
 * flag is only checked unlocked at entry — the locked idr_alloc is the
 * authoritative test. */
226 static int prestera_counter_get_vacant(struct prestera_counter_block *block,
231 if (block->full)
232 return -ENOSPC;
234 prestera_counter_block_lock(block);
235 free_id = idr_alloc_cyclic(&block->counter_idr, NULL, block->offset,
236 block->offset + block->num_counters,
239 if (free_id == -ENOSPC)
240 block->full = true;
242 prestera_counter_block_unlock(block);
246 prestera_counter_block_unlock(block);
/* Fragment of the public allocation entry point: get (or create) a block
 * for `client`, then grab a vacant id in it. On failure the block reference
 * is dropped; -ENOSPC presumably retries with another block (the retry
 * control flow after line 267 is missing from this excerpt — confirm).
 * If a HW readout is in flight (is_updating), the new slot is flagged
 * INVALID so stale in-flight stats for the reused id get discarded by the
 * stats worker rather than reported. Outputs: *bl (and presumably *id on a
 * missing line). */
254 struct prestera_counter_block *block;
259 block = prestera_counter_block_get(counter, client);
260 if (IS_ERR(block))
261 return PTR_ERR(block);
263 err = prestera_counter_get_vacant(block, &id);
265 prestera_counter_block_put(counter, block);
267 if (err == -ENOSPC)
273 prestera_counter_block_lock(block);
274 if (block->is_updating)
275 block->counter_flag[id - block->offset] = COUNTER_FLAG_INVALID;
276 prestera_counter_block_unlock(block);
279 *bl = block;
/* Fragment of the public release path: NULL block is a no-op. Under the
 * block lock: free the id from the IDR, clear `full` (a slot just opened),
 * and zero the cached stats entry. Then, outside the lock, ask HW to clear
 * the counter and drop the block reference (which may free the block). */
285 struct prestera_counter_block *block, u32 counter_id)
287 if (!block)
290 prestera_counter_block_lock(block);
291 idr_remove(&block->counter_idr, counter_id);
292 block->full = false;
293 prestera_counter_stats_clear(block, counter_id);
294 prestera_counter_block_unlock(block);
296 prestera_hw_counter_clear(counter->sw, block->id, counter_id);
297 prestera_counter_block_put(counter, block);
/* Fragment (orig. 306-308): circular scan of the block list starting after
 * `start`, skipping NULL holes — used by the stats worker to pick the next
 * block to poll. The body that returns the found index is missing from
 * this excerpt. */
306 for (i = 0; i < counter->block_list_len; i++) {
307 idx = (start + i) % counter->block_list_len;
308 if (!counter->block_list[idx])
/* Fragment (orig. 322-334): fetch the block at `idx` with a reference
 * taken, guarding against an out-of-range index, a NULL hole, or a block
 * mid-teardown (incref failure). The failure-branch body between 328 and
 * 334 is missing from this excerpt — presumably it advances/returns NULL;
 * confirm against the full file. */
322 if (idx >= counter->block_list_len)
327 if (!counter->block_list[idx] ||
328 !prestera_counter_block_incref(counter->block_list[idx])) {
334 return counter->block_list[idx];
/* Fragment of the delayed-work stats poller. Visible state machine:
 *  - Not fetching: trigger a HW readout for the current block, mark it
 *    is_updating (under the block lock), set is_fetching, reset total_read.
 *  - Fetching: pull `count` entries per pass into block->stats starting at
 *    total_read; keep rescheduling until HW reports `done` AND all
 *    num_counters entries have been read. (The `done`/`count` declarations
 *    and several error/goto branches, incl. what follows line 381, are
 *    missing from this excerpt.)
 *  - Completion: entries flagged INVALID while the readout was in flight
 *    (see prestera_counter_get) are zeroed and promoted to READY, then
 *    is_updating is cleared.
 *  - Abort path (orig. 401+): tell HW to abort, clear is_fetching, advance
 *    curr_idx to the next block, drop the block ref, and always reschedule
 *    the work with `resched_time`. */
342 struct prestera_counter_block *block;
349 block = prestera_counter_block_get_by_idx(counter, counter->curr_idx);
350 if (!block) {
351 if (counter->is_fetching)
357 if (!counter->is_fetching) {
358 err = prestera_hw_counter_trigger(counter->sw, block->id);
362 prestera_counter_block_lock(block);
363 block->is_updating = true;
364 prestera_counter_block_unlock(block);
366 counter->is_fetching = true;
367 counter->total_read = 0;
372 prestera_counter_block_lock(block);
373 err = prestera_hw_counters_get(counter->sw, counter->total_read,
375 &block->stats[counter->total_read]);
376 prestera_counter_block_unlock(block);
380 counter->total_read += count;
381 if (!done || counter->total_read < block->num_counters) {
386 for (i = 0; i < block->num_counters; i++) {
387 if (block->counter_flag[i] == COUNTER_FLAG_INVALID) {
388 prestera_counter_block_lock(block);
389 block->counter_flag[i] = COUNTER_FLAG_READY;
390 memset(&block->stats[i], 0, sizeof(*block->stats));
391 prestera_counter_block_unlock(block);
395 prestera_counter_block_lock(block);
396 block->is_updating = false;
397 prestera_counter_block_unlock(block);
401 prestera_hw_counter_abort(counter->sw);
403 counter->is_fetching = false;
404 counter->curr_idx =
405 prestera_counter_block_idx_next(counter, counter->curr_idx);
407 if (block)
408 prestera_counter_block_put(counter, block);
410 schedule_delayed_work(&counter->stats_dw, resched_time);
/* Fragment of the stats read API: bail out (error path between 420 and 426
 * missing from this excerpt) unless the block exists and the counter's flag
 * is READY. Under the block lock, copy packets/bytes out of the cached
 * stats entry (block-local index: counter_id - block->offset), then clear
 * the entry — so reads are consume-on-read deltas, not running totals. */
417 struct prestera_counter_block *block,
420 if (!block || !prestera_counter_is_ready(block, counter_id)) {
426 prestera_counter_block_lock(block);
427 *packets = block->stats[counter_id - block->offset].packets;
428 *bytes = block->stats[counter_id - block->offset].bytes;
430 prestera_counter_stats_clear(block, counter_id);
431 prestera_counter_block_unlock(block);
/* Fragment of module init: the counter struct allocation (and the kfree on
 * the line-447 failure path) are missing from this excerpt. Visible work:
 * allocate a one-slot block list, init the top-level mutex, link
 * counter<->sw both ways, and kick off the periodic stats worker with
 * COUNTER_POLL_TIME. */
442 return -ENOMEM;
444 counter->block_list = kzalloc(sizeof(*counter->block_list), GFP_KERNEL);
445 if (!counter->block_list) {
447 return -ENOMEM;
450 mutex_init(&counter->mtx);
451 counter->block_list_len = 1;
452 counter->sw = sw;
453 sw->counter = counter;
455 INIT_DELAYED_WORK(&counter->stats_dw, prestera_counter_stats_work);
456 schedule_delayed_work(&counter->stats_dw, COUNTER_POLL_TIME);
/* Fragment of module teardown: stop the poller synchronously first (so no
 * worker touches the list afterwards), warn if any block is still live
 * (all slots should be NULL by now — clients must have released their
 * counters), then destroy the mutex and free the list. The final
 * kfree(counter) is presumably on a missing line — confirm against the
 * full file. */
463 struct prestera_counter *counter = sw->counter;
466 cancel_delayed_work_sync(&counter->stats_dw);
468 for (i = 0; i < counter->block_list_len; i++)
469 WARN_ON(counter->block_list[i]);
471 mutex_destroy(&counter->mtx);
472 kfree(counter->block_list);