/linux/drivers/md/dm-vdo/

  funnel-queue.c
    27   queue->oldest = &queue->stub;  in vdo_make_funnel_queue()
    45   struct funnel_queue_entry *oldest = queue->oldest;  in get_oldest() (local)
    46   struct funnel_queue_entry *next = READ_ONCE(oldest->next);  in get_oldest()
    48   if (oldest == &queue->stub) {  in get_oldest()
    59   oldest = next;  in get_oldest()
    60   queue->oldest = oldest;  in get_oldest()
    61   next = READ_ONCE(oldest->next);  in get_oldest()
    71   if (oldest != newest) {  in get_oldest()
    86   next = READ_ONCE(oldest->next);  in get_oldest()
    96   return oldest;  in get_oldest()
    [all …]

  logical-zone.c
    218  sequence_number_t oldest =  in update_oldest_active_generation() (local)
    221  if (oldest == zone->oldest_active_generation)  in update_oldest_active_generation()
    224  WRITE_ONCE(zone->oldest_active_generation, oldest);  in update_oldest_active_generation()

  funnel-queue.h
    64   struct funnel_queue_entry *oldest __aligned(L1_CACHE_BYTES);  in __aligned()
/linux/net/rds/

  ib_ring.c
    156  u32 rds_ib_ring_completed(struct rds_ib_work_ring *ring, u32 wr_id, u32 oldest)  in rds_ib_ring_completed() (argument)
    160  if (oldest <= (unsigned long long)wr_id)  in rds_ib_ring_completed()
    161  ret = (unsigned long long)wr_id - oldest + 1;  in rds_ib_ring_completed()
    163  ret = ring->w_nr - oldest + (unsigned long long)wr_id + 1;  in rds_ib_ring_completed()
    166  wr_id, oldest);  in rds_ib_ring_completed()

  ib_send.c
    249  u32 oldest;  in rds_ib_send_cqe_handler() (local)
    267  oldest = rds_ib_ring_oldest(&ic->i_send_ring);  in rds_ib_send_cqe_handler()
    269  completed = rds_ib_ring_completed(&ic->i_send_ring, wc->wr_id, oldest);  in rds_ib_send_cqe_handler()
    272  send = &ic->i_sends[oldest];  in rds_ib_send_cqe_handler()
    292  oldest = (oldest + 1) % ic->i_send_ring.w_nr;  in rds_ib_send_cqe_handler()
/linux/drivers/net/xen-netback/

  hash.c
    38   struct xenvif_hash_cache_entry *new, *entry, *oldest;  in xenvif_add_hash() (local)
    53   oldest = NULL;  in xenvif_add_hash()
    60   if (!oldest || entry->seq < oldest->seq)  in xenvif_add_hash()
    61   oldest = entry;  in xenvif_add_hash()
    69   list_del_rcu(&oldest->link);  in xenvif_add_hash()
    71   kfree_rcu(oldest, rcu);  in xenvif_add_hash()
/linux/Documentation/userspace-api/media/v4l/

  dev-event.rst
    41   full, then the oldest event in that queue will be dropped.
    44   the oldest event that is about to be dropped will be merged with the
    45   payload of the next oldest event. Thus ensuring that no information
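The dev-event.rst lines above describe the V4L2 event queue policy: when a subscription's queue fills up, the oldest event is dropped, and for mergeable event types its payload is first folded into the next-oldest entry so no change information is lost. Below is a minimal sketch of that drop-oldest-and-merge policy; the names (struct ev_ring, ev_push, the changes bitmask) are hypothetical and do not reflect the actual v4l2_kevent code.

```c
/* Illustrative sketch of a drop-oldest-and-merge event ring. The types
 * and names here are made up for the example; the real V4L2 core keeps
 * events in a per-subscription v4l2_kevent ring and merging is defined
 * per event type.
 */
#define EV_RING_SIZE 8

struct ev {
	unsigned int changes;	/* bitmask payload, merged by ORing */
};

struct ev_ring {
	struct ev ev[EV_RING_SIZE];
	unsigned int first;	/* index of the oldest queued event */
	unsigned int count;	/* number of queued events */
};

static void ev_push(struct ev_ring *r, const struct ev *new_ev)
{
	if (r->count == EV_RING_SIZE) {
		/* Queue is full: drop the oldest event, but first fold its
		 * payload into the second-oldest so no change bits are lost.
		 */
		unsigned int oldest = r->first;
		unsigned int second = (r->first + 1) % EV_RING_SIZE;

		r->ev[second].changes |= r->ev[oldest].changes;
		r->first = second;
		r->count--;
	}
	r->ev[(r->first + r->count) % EV_RING_SIZE] = *new_ev;
	r->count++;
}
```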
  func-read.rst
    58   single or multiple buffers and discarding the oldest or newest frames
    70   depends on the discarding policy. A driver discarding the oldest frames

  vidioc-dqevent.rst
    147  second-oldest event is kept, but the ``changes`` field of the
    148  second-oldest event is ORed with the ``changes`` field of the
    149  oldest event.

  vidioc-g-enc-index.rst
    50   read the meta data in time the oldest entries will be lost. When the
/linux/net/netfilter/

  nf_conntrack_ftp.c
    348  unsigned int i, oldest;  in update_nl_seq() (local)
    360  oldest = 0;  in update_nl_seq()
    362  oldest = 1;  in update_nl_seq()
    364  if (after(nl_seq, info->seq_aft_nl[dir][oldest]))  in update_nl_seq()
    365  info->seq_aft_nl[dir][oldest] = nl_seq;  in update_nl_seq()
/linux/fs/ceph/

  addr.c
    725   struct ceph_snap_context *snapc, *oldest;  in write_folio_nounlock() (local)
    749   oldest = get_oldest_context(inode, &ceph_wbc, snapc);  in write_folio_nounlock()
    750   if (snapc->seq > oldest->seq) {  in write_folio_nounlock()
    755   ceph_put_snap_context(oldest);  in write_folio_nounlock()
    759   ceph_put_snap_context(oldest);  in write_folio_nounlock()
    854   oldest = folio_detach_private(folio);  in write_folio_nounlock()
    855   WARN_ON_ONCE(oldest != snapc);  in write_folio_nounlock()
    1768  struct ceph_snap_context *oldest = get_oldest_context(inode, NULL, NULL);  in context_is_writeable_or_written() (local)
    1769  int ret = !oldest || snapc->seq <= oldest->seq;  in context_is_writeable_or_written()
    1771  ceph_put_snap_context(oldest);  in context_is_writeable_or_written()
    [all …]
/linux/net/core/

  gro.c
    445  struct sk_buff *oldest;  in gro_flush_oldest() (local)
    447  oldest = list_last_entry(head, struct sk_buff, list);  in gro_flush_oldest()
    452  if (WARN_ON_ONCE(!oldest))  in gro_flush_oldest()
    458  skb_list_del_init(oldest);  in gro_flush_oldest()
    459  gro_complete(gro, oldest);  in gro_flush_oldest()
/linux/fs/smb/client/

  dfs_cache.c
    450  struct cache_entry *oldest = NULL;  in purge_cache() (local)
    461  else if (!oldest ||  in purge_cache()
    463  &oldest->etime) < 0)  in purge_cache()
    464  oldest = ce;  in purge_cache()
    468  if (atomic_read(&cache_count) >= CACHE_MAX_ENTRIES && oldest)  in purge_cache()
    469  flush_cache_ent(oldest);  in purge_cache()
/linux/Documentation/driver-api/media/

  v4l2-event.rst
    32   :c:type:`v4l2_kevent` ringbuffer, then the oldest event will be dropped
    68   The ``merge()`` callback allows you to merge the oldest event payload into
    69   that of the second-oldest event payload. It is called when
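The v4l2-event.rst lines document the ``merge()`` hook a driver can supply so that dropping the oldest queued event does not lose information. A sketch of such a callback for a bitmask-style payload follows; it assumes the merge() signature and the v4l2_subscribed_event_ops structure from include/media/v4l2-event.h, and the callback body itself is only illustrative.

```c
/* Sketch of a merge() implementation for an event whose payload is a
 * bitmask of changes. Assumes the struct v4l2_subscribed_event_ops
 * merge() signature (const struct v4l2_event *old, struct v4l2_event *new);
 * check include/media/v4l2-event.h for the kernel you build against.
 */
#include <media/v4l2-event.h>

static void my_event_merge(const struct v4l2_event *old, struct v4l2_event *new)
{
	/* Fold the changes reported by the event about to be dropped into
	 * the one that survives, so userspace still sees every change bit.
	 */
	new->u.src_change.changes |= old->u.src_change.changes;
}

static const struct v4l2_subscribed_event_ops my_event_ops = {
	.merge = my_event_merge,
};
```

For standard source-change events the V4L2 core normally provides equivalent merging itself, so a driver-specific callback like this would mostly matter for driver-private event types.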
/linux/drivers/staging/rtl8723bs/core/

  rtw_mlme.c
    403  struct wlan_network *oldest = NULL;  in rtw_get_oldest_wlan_network() (local)
    411  if (!oldest || time_after(oldest->last_scanned, pwlan->last_scanned))  in rtw_get_oldest_wlan_network()
    412  oldest = pwlan;  in rtw_get_oldest_wlan_network()
    415  return oldest;  in rtw_get_oldest_wlan_network()
    487  struct wlan_network *oldest = NULL;  in rtw_update_scanned_network() (local)
    507  if (!oldest || time_after(oldest->last_scanned, pnetwork->last_scanned))  in rtw_update_scanned_network()
    508  oldest = pnetwork;  in rtw_update_scanned_network()
    519  pnetwork = oldest;  in rtw_update_scanned_network()
/linux/drivers/hid/

  hid-asus.c
    229  struct input_mt_slot *oldest;  in asus_report_tool_width() (local)
    235  oldest = NULL;  in asus_report_tool_width()
    245  oldest = ps;  in asus_report_tool_width()
    250  if (oldest) {  in asus_report_tool_width()
    252  input_mt_get_value(oldest, ABS_MT_TOUCH_MAJOR));  in asus_report_tool_width()
/linux/drivers/net/wireless/intel/ipw2x00/

  libipw_rx.c
    1418  struct libipw_network *oldest = NULL;  in libipw_process_probe_response() (local)
    1469  if ((oldest == NULL) ||  in libipw_process_probe_response()
    1470  time_before(target->last_scanned, oldest->last_scanned)  in libipw_process_probe_response()
    1471  oldest = target;  in libipw_process_probe_response()
    1479  list_del(&oldest->list);  in libipw_process_probe_response()
    1480  target = oldest;  in libipw_process_probe_response()
/linux/Documentation/i2c/busses/

  i2c-viapro.rst
    72   Except for the oldest chips (VT82C596A/B, VT82C686A and most probably
/linux/Documentation/driver-api/usb/

  anchors.rst
    69   Returns the oldest anchored URB of an anchor. The URB is unanchored
/linux/Documentation/userspace-api/media/cec/

  cec-ioc-dqevent.rst
    88   filehandle. The messages lost are the oldest messages. So when a
    89   new message arrives and there is no more room, then the oldest
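The cec-ioc-dqevent.rst lines describe the same pressure-relief rule from the userspace side: if messages are not dequeued fast enough, the oldest received messages are discarded and their count is reported through an event. Below is a minimal userspace sketch, assuming an already-opened, already-configured CEC filehandle in non-blocking mode; error handling and the surrounding poll loop are omitted.

```c
/* Drain pending CEC events and report how many of the oldest messages
 * were overwritten. Assumes fd was opened O_NONBLOCK so the loop ends
 * when no events remain.
 */
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/cec.h>

static void drain_cec_events(int fd)
{
	struct cec_event ev;

	while (ioctl(fd, CEC_DQEVENT, &ev) == 0) {
		if (ev.event == CEC_EVENT_LOST_MSGS)
			fprintf(stderr, "dropped %u oldest message(s); dequeue faster\n",
				ev.lost_msgs.lost_msgs);
	}
}
```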
/linux/Documentation/arch/mips/

  ingenic-tcu.rst
    35   - On the oldest SoCs (up to JZ4740), all of the eight channels operate in
/linux/Documentation/userspace-api/gpio/

  gpio-v2-line-event-read.rst
    62   by userspace. If an overflow occurs then the oldest buffered event is
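The gpio-v2-line-event-read.rst line states that on overflow the oldest buffered edge event is discarded, so userspace should drain the line-request file descriptor promptly. A sketch of such a read loop follows; it assumes req_fd was obtained with GPIO_V2_GET_LINE_IOCTL and edge detection enabled, which is not shown here.

```c
/* Read edge events from a GPIO v2 line request. If events are not read
 * quickly enough the kernel overwrites the oldest buffered ones, so
 * drain the descriptor in a loop.
 */
#include <stdio.h>
#include <unistd.h>
#include <linux/gpio.h>

static void read_edge_events(int req_fd)
{
	struct gpio_v2_line_event event;

	while (read(req_fd, &event, sizeof(event)) == sizeof(event)) {
		printf("offset %u: %s edge at %llu ns\n",
		       event.offset,
		       event.id == GPIO_V2_LINE_EVENT_RISING_EDGE ?
				"rising" : "falling",
		       (unsigned long long)event.timestamp_ns);
	}
}
```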
/linux/Documentation/bpf/

  map_queue_stack.rst
    41   when the queue or stack is full, the oldest element will be removed to
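The map_queue_stack.rst line refers to pushing onto a queue or stack map with the BPF_EXIST flag: when the map is full, the oldest element is evicted instead of the update failing. A userspace sketch using libbpf follows; map_fd is assumed to refer to a BPF_MAP_TYPE_QUEUE map created elsewhere.

```c
/* Push and pop on a BPF queue map from userspace via libbpf. With
 * BPF_EXIST the push succeeds even when the queue is full by evicting
 * the oldest element; without it, a push into a full map is expected
 * to fail (typically with E2BIG).
 */
#include <bpf/bpf.h>
#include <linux/bpf.h>

static int push_value(int map_fd, __u32 value)
{
	/* Queue and stack maps take no key, so pass NULL. */
	return bpf_map_update_elem(map_fd, NULL, &value, BPF_EXIST);
}

static int pop_value(int map_fd, __u32 *value)
{
	/* Removes and returns the element at the head of the queue. */
	return bpf_map_lookup_and_delete_elem(map_fd, NULL, value);
}
```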
/linux/net/ipv4/

  route.c
    597  struct fib_nh_exception *fnhe, *oldest = NULL;  in fnhe_remove_oldest() (local)
    604  if (!oldest ||  in fnhe_remove_oldest()
    605  time_before(fnhe->fnhe_stamp, oldest->fnhe_stamp)) {  in fnhe_remove_oldest()
    606  oldest = fnhe;  in fnhe_remove_oldest()
    614  oldest->fnhe_daddr = 0;  in fnhe_remove_oldest()
    615  fnhe_flush_routes(oldest);  in fnhe_remove_oldest()
    616  *oldest_p = oldest->fnhe_next;  in fnhe_remove_oldest()
    617  kfree_rcu(oldest, rcu);  in fnhe_remove_oldest()