| /linux/rust/syn/gen/ |
| clone.rs |
      10  fn clone(&self) -> Self {    in clone() method
      12  extern_token: self.extern_token.clone(),    in clone()
      13  name: self.name.clone(),    in clone()
      20  fn clone(&self) -> Self {    in clone() method
      22  colon2_token: self.colon2_token.clone(),    in clone()
      23  lt_token: self.lt_token.clone(),    in clone()
      24  args: self.args.clone(),    in clone()
      25  gt_token: self.gt_token.clone(),    in clone()
      32  fn clone(&self) -> Self {    in clone() method
      34  attrs: self.attrs.clone(),    in clone()
      [all …]
|
| /linux/drivers/md/ |
| dm-rq.c |
      21  struct request *orig, *clone;    member
      78  static void end_clone_bio(struct bio *clone)    in end_clone_bio() argument
      81  container_of(clone, struct dm_rq_clone_bio_info, clone);    in end_clone_bio()
      84  blk_status_t error = clone->bi_status;    in end_clone_bio()
      85  bool is_last = !clone->bi_next;    in end_clone_bio()
      87  bio_put(clone);    in end_clone_bio()
     157  static void dm_end_request(struct request *clone, blk_status_t error)    in dm_end_request() argument
     159  struct dm_rq_target_io *tio = clone->end_io_data;    in dm_end_request()
     163  blk_rq_unprep_clone(clone);    in dm_end_request()
     164  tio->ti->type->release_clone_rq(clone, NULL);    in dm_end_request()
      [all …]
|
| dm.c |
      93  static inline struct dm_target_io *clone_to_tio(struct bio *clone)    in clone_to_tio() argument
      95  return container_of(clone, struct dm_target_io, clone);    in clone_to_tio()
     119  return container_of(bio, struct dm_target_io, clone)->target_bio_nr;    in dm_bio_get_target_bio_nr()
     540  static void dm_start_io_acct(struct dm_io *io, struct bio *clone)    in dm_start_io_acct() argument
     549  if (!clone || likely(dm_tio_is_normal(clone_to_tio(clone)))) {    in dm_start_io_acct()
     575  struct bio *clone;    in alloc_io() local
     577  clone = bio_alloc_clone(NULL, bio, gfp_mask, &md->mempools->io_bs);    in alloc_io()
     578  if (unlikely(!clone))    in alloc_io()
     580  tio = clone_to_tio(clone);    in alloc_io()
     610  bio_put(&io->tio.clone);    in free_io()
      [all …]
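
The clone_to_tio() hits above rely on the clone bio being embedded inside a larger per-I/O structure, so the wrapper can be recovered from the bio pointer alone with container_of(). A minimal sketch of that pattern, using an illustrative struct my_tio rather than the real struct dm_target_io layout:

    #include <linux/bio.h>
    #include <linux/container_of.h>

    /* Illustrative stand-in for struct dm_target_io: the clone bio is a
     * member of the per-target I/O state, not a separate allocation. */
    struct my_tio {
            unsigned int target_bio_nr;
            struct bio clone;
    };

    /* Recover the wrapper from the embedded bio, as clone_to_tio() does. */
    static inline struct my_tio *bio_to_my_tio(struct bio *clone)
    {
            return container_of(clone, struct my_tio, clone);
    }

Because the completion path only ever sees the struct bio pointer, embedding the bio this way lets the endio handler get back to the per-target state without any extra lookup.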
|
| dm-crypt.c |
     284  static void crypt_endio(struct bio *clone);
    1642  static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone);
    1667  struct bio *clone;    in crypt_alloc_buffer() local
    1677  clone = bio_alloc_bioset(cc->dev->bdev, nr_iovecs, io->base_bio->bi_opf,    in crypt_alloc_buffer()
    1679  clone->bi_private = io;    in crypt_alloc_buffer()
    1680  clone->bi_end_io = crypt_endio;    in crypt_alloc_buffer()
    1681  clone->bi_ioprio = io->base_bio->bi_ioprio;    in crypt_alloc_buffer()
    1682  clone->bi_iter.bi_sector = cc->start + io->sector;    in crypt_alloc_buffer()
    1709  crypt_free_buffer_pages(cc, clone);    in crypt_alloc_buffer()
    1710  bio_put(clone);    in crypt_alloc_buffer()
      [all …]
|
| dm-zoned-target.c |
     103  static void dmz_clone_endio(struct bio *clone)    in dmz_clone_endio() argument
     105  struct dmz_bioctx *bioctx = clone->bi_private;    in dmz_clone_endio()
     106  blk_status_t status = clone->bi_status;    in dmz_clone_endio()
     108  bio_put(clone);    in dmz_clone_endio()
     123  struct bio *clone;    in dmz_submit_bio() local
     128  clone = bio_alloc_clone(dev->bdev, bio, GFP_NOIO, &dmz->bio_set);    in dmz_submit_bio()
     129  if (!clone)    in dmz_submit_bio()
     133  clone->bi_iter.bi_sector =    in dmz_submit_bio()
     135  clone->bi_iter.bi_size = dmz_blk2sect(nr_blocks) << SECTOR_SHIFT;    in dmz_submit_bio()
     136  clone->bi_end_io = dmz_clone_endio;    in dmz_submit_bio()
      [all …]
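
dm-crypt and dm-zoned above share one clone-and-redirect shape: allocate a clone of the incoming bio from a private bio_set, stash per-I/O context in bi_private, point bi_end_io at a local completion handler, remap the sector, and submit. A hedged sketch of that shape; my_dev, my_io, my_endio, my_submit and my_complete are illustrative names, not kernel symbols, and the bio_set is assumed to have been set up with bioset_init() elsewhere:

    #include <linux/bio.h>
    #include <linux/blkdev.h>

    struct my_io;                                    /* per-I/O context, illustrative */
    void my_complete(struct my_io *io, blk_status_t status);

    struct my_dev {
            struct block_device *bdev;
            struct bio_set bio_set;                  /* private pool for clone bios */
    };

    static void my_endio(struct bio *clone)
    {
            struct my_io *io = clone->bi_private;    /* stashed at clone time */
            blk_status_t status = clone->bi_status;

            bio_put(clone);                          /* drop the clone */
            my_complete(io, status);                 /* finish the original I/O */
    }

    static int my_submit(struct my_dev *dev, struct my_io *io,
                         struct bio *bio, sector_t target_sector)
    {
            struct bio *clone;

            clone = bio_alloc_clone(dev->bdev, bio, GFP_NOIO, &dev->bio_set);
            if (!clone)
                    return -ENOMEM;

            clone->bi_private = io;
            clone->bi_end_io = my_endio;
            clone->bi_iter.bi_sector = target_sector;
            submit_bio_noacct(clone);
            return 0;
    }

The clone shares the data pages of the original bio, so the target only pays for a small bio structure while redirecting the I/O to a different device or sector.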
|
| /linux/Documentation/admin-guide/device-mapper/ |
| dm-clone.rst |
       4  dm-clone
      10  dm-clone is a device mapper target which produces a one-to-one copy of an
      15  The main use case of dm-clone is to clone a potentially remote, high-latency,
      26  When the cloning completes, the dm-clone table can be removed altogether and be
      29  The dm-clone target reuses the metadata library used by the thin-provisioning
      56  clone of the source device.
      68  dm-clone divides the source and destination devices in fixed sized regions.
      72  The region size is configurable when you first create the dm-clone device. The
      92  dm-clone interprets a discard request to a range that hasn't been hydrated yet
      97  If the destination device supports discards, then by default dm-clone will pass
      [all …]
|
| /linux/fs/f2fs/ |
| acl.c |
     296  struct posix_acl *clone = NULL;    in f2fs_acl_clone() local
     299  clone = kmemdup(acl, struct_size(acl, a_entries, acl->a_count),    in f2fs_acl_clone()
     301  if (clone)    in f2fs_acl_clone()
     302  refcount_set(&clone->a_refcount, 1);    in f2fs_acl_clone()
     304  return clone;    in f2fs_acl_clone()
     366  struct posix_acl *clone;    in f2fs_acl_create() local
     383  clone = f2fs_acl_clone(p, GFP_NOFS);    in f2fs_acl_create()
     384  if (!clone) {    in f2fs_acl_create()
     389  ret = f2fs_acl_create_masq(clone, mode);    in f2fs_acl_create()
     394  posix_acl_release(clone);    in f2fs_acl_create()
      [all …]
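
The f2fs_acl_clone() hits above show a small duplicate-and-reset-refcount idiom: copy the whole variable-sized object with kmemdup(), then give the copy its own reference count of one. A hedged sketch of the same idiom with an illustrative struct my_obj in place of struct posix_acl:

    #include <linux/overflow.h>
    #include <linux/refcount.h>
    #include <linux/slab.h>

    struct my_obj {
            refcount_t refcount;
            unsigned int count;
            int entries[];                          /* flexible array, like a_entries */
    };

    static struct my_obj *my_obj_clone(const struct my_obj *src, gfp_t gfp)
    {
            struct my_obj *clone;

            /* struct_size() accounts for the flexible-array tail. */
            clone = kmemdup(src, struct_size(src, entries, src->count), gfp);
            if (clone)
                    refcount_set(&clone->refcount, 1);  /* the copy starts with one owner */
            return clone;
    }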
|
| /linux/tools/testing/selftests/bpf/progs/ |
| dynptr_fail.c |
    1715  struct bpf_dynptr clone;    in global_call_bpf_dynptr()
    1719  bpf_ringbuf_reserve_dynptr(&ringbuf, 64, 0, &clone);    in global_call_bpf_dynptr()
    1722  bpf_dynptr_clone(&ptr1, &clone);    in global_call_bpf_dynptr()
    1724  bpf_ringbuf_submit_dynptr(&clone, 0);
    1734  struct bpf_dynptr clone;    in test_dynptr_reg_type()
    1740  bpf_dynptr_clone(&ptr, &clone);
    1745  bpf_dynptr_read(read_data, sizeof(read_data), &clone, 0, 0);
    1756  struct bpf_dynptr clone;
    1761  bpf_dynptr_clone(&ptr, &clone);
    1763  bpf_ringbuf_submit_dynptr(&clone,
    1480  struct bpf_dynptr clone;    in clone_invalid2() local
    1499  struct bpf_dynptr clone;    in clone_invalidate1() local
    1521  struct bpf_dynptr clone;    in clone_invalidate2() local
    1568  struct bpf_dynptr clone;    in clone_invalidate4() local
    1594  struct bpf_dynptr clone;    in clone_invalidate5() local
    1648  struct bpf_dynptr clone;    in clone_skb_packet_data() local
    1674  struct bpf_dynptr clone;    in clone_xdp_packet_data() local
      [all …]
| /linux/rust/quote/ |
| to_tokens.rs |
     237  tokens.append(self.clone());    in to_tokens()
     243  tokens.append(self.clone());    in to_tokens()
     249  tokens.append(self.clone());    in to_tokens()
     255  tokens.append(self.clone());    in to_tokens()
     261  tokens.append(self.clone());    in to_tokens()
     267  tokens.extend(iter::once(self.clone()));    in to_tokens()
|
| /linux/drivers/android/binder/ |
| transaction.rs |
      64  let to = node_ref.node.owner.clone();    in new()
      66  to.clone(),    in new()
      86  alloc.set_info_oneway_node(node_ref.node.clone());    in new()
      91  let target_node = node_ref.node.clone();    in new()
     100  from: from.clone(),    in new()
     123  let mut alloc = match from.copy_transaction_data(to.clone(), tr, debug_id, allow_fds, None)    in new_reply()
     140  from: from.clone(),    in new_reply()
     186  Some(self.from_parent.as_ref()?.clone())    in clone_next()
     196  return Some(transaction.from.clone());    in find_target_thread()
     245  let process = self.to.clone();    in submit()
      [all …]
|
| node.rs |
     359  let list_arc = ListArc::try_from_arc(self.clone()).ok().unwrap();    in inc_ref_done_locked()
     402  let list_arc = ListArc::try_from_arc(self.clone()).ok().unwrap();    in update_refcount_locked()
     434  let list_arc = ListArc::try_from_arc(self.clone()).ok().unwrap();    in incr_refcount_allow_zero2one()
     439  let list_arc = ListArc::try_from_arc(self.clone()).ok().unwrap();    in incr_refcount_allow_zero2one()
     466  Some(wrapper.init(self.clone()))    in incr_refcount_allow_zero2one_with_wrapper()
     672  inner.freeze_list.push_within_capacity(process.clone())?;    in add_freeze_listener()
     811  pub(crate) fn clone(&self, strong: bool) -> Result<NodeRef> {    in clone() method
     820  .new_node_ref(self.node.clone(), strong, None))    in clone()
    1032  let process = death.process.clone();    in set_dead()
    1088  let process = self.process.clone();    in do_work()
      [all …]
|
| /linux/rust/syn/ |
| punctuated.rs |
     392  fn clone(&self) -> Self {    in clone() function
     394  inner: self.inner.clone(),    in clone()
     395  last: self.last.clone(),    in clone()
     599  fn clone(&self) -> Self {    in clone() method
     601  inner: self.inner.clone(),    in clone()
     602  last: self.last.clone(),    in clone()
     692  fn clone(&self) -> Self {    in clone() function
     694  inner: self.inner.clone(),    in clone()
     695  last: self.last.clone(),    in clone()
     737  fn clone(&self) -> Self {    in clone() function
      [all …]
|
| buffer.rs |
     194  Entry::Ident(ident) => Some((ident.clone(), unsafe { self.bump_ignore_group() })),    in ident()
     205  Some((punct.clone(), unsafe { self.bump_ignore_group() }))    in punct()
     216  Entry::Literal(literal) => Some((literal.clone(), unsafe { self.bump_ignore_group() })),    in literal()
     281  return Some((group.clone(), after_group));    in any_group_token()
     308  Entry::Group(group, end_offset) => (group.clone().into(), *end_offset),    in token_tree()
     309  Entry::Literal(literal) => (literal.clone().into(), 1),    in token_tree()
     310  Entry::Ident(ident) => (ident.clone().into(), 1),    in token_tree()
     311  Entry::Punct(punct) => (punct.clone().into(), 1),    in token_tree()
     387  fn clone(&self) -> Self {    in clone() method
|
| error.rs |
     381  fn clone(&self) -> Self {    in clone() method
     383  messages: self.messages.clone(),    in clone()
     389  fn clone(&self) -> Self {    in clone() method
     392  message: self.message.clone(),    in clone()
     398  fn clone(&self) -> Self {    in clone() method
     458  messages: vec![self.messages.next()?.clone()],    in next()
|
| lit.rs |
     271  self.repr.token.clone()    in token()
     306  self.repr.token.clone()    in token()
     341  self.repr.token.clone()    in token()
     376  self.repr.token.clone()    in token()
     411  self.repr.token.clone()    in token()
     482  self.repr.token.clone()    in token()
     554  self.repr.token.clone()    in token()
     741  fn clone(&self) -> Self {    in clone() method
     743  token: self.token.clone(),    in clone()
     744  suffix: self.suffix.clone(),    in clone()
      [all …]
|
| tt.rs |
      87  let left = self.0.clone().into_iter().collect::<Vec<_>>();    in eq()
      88  let right = other.0.clone().into_iter().collect::<Vec<_>>();    in eq()
     103  let tts = self.0.clone().into_iter().collect::<Vec<_>>();    in hash()
|
| /linux/net/rds/ |
| tcp_recv.c |
     162  struct sk_buff *clone;    in rds_tcp_data_recv() local
     219  clone = pskb_extract(skb, offset, to_copy, arg->gfp);    in rds_tcp_data_recv()
     220  if (!clone) {    in rds_tcp_data_recv()
     225  skb_queue_tail(&tinc->ti_skb_list, clone);    in rds_tcp_data_recv()
     230  clone, clone->data, clone->len);    in rds_tcp_data_recv()
|
| /linux/include/linux/dsa/ |
| ocelot.h |
      16  struct sk_buff *clone;    member
     265  struct sk_buff *clone = OCELOT_SKB_CB(skb)->clone;    in ocelot_ptp_rew_op() local
     269  if (ptp_cmd == IFH_REW_OP_TWO_STEP_PTP && clone) {    in ocelot_ptp_rew_op()
     271  rew_op |= OCELOT_SKB_CB(clone)->ts_id << 3;    in ocelot_ptp_rew_op()
|
| /linux/fs/btrfs/ |
| fiemap.c |
     308  struct extent_buffer *clone = path->nodes[0];    in fiemap_next_leaf_item() local
     322  ASSERT(test_bit(EXTENT_BUFFER_UNMAPPED, &clone->bflags));    in fiemap_next_leaf_item()
     323  refcount_inc(&clone->refs);    in fiemap_next_leaf_item()
     349  clone->start = path->nodes[0]->start;    in fiemap_next_leaf_item()
     351  copy_extent_buffer_full(clone, path->nodes[0]);    in fiemap_next_leaf_item()
     355  path->nodes[0] = clone;    in fiemap_next_leaf_item()
     359  free_extent_buffer(clone);    in fiemap_next_leaf_item()
     374  struct extent_buffer *clone;    in fiemap_search_slot() local
     419  clone = btrfs_clone_extent_buffer(path->nodes[0]);    in fiemap_search_slot()
     420  if (!clone)    in fiemap_search_slot()
      [all …]
|
| /linux/drivers/net/usb/ |
| lg-vl600.c |
     103  struct sk_buff *clone;    in vl600_rx_fixup() local
     200  clone = skb_clone(buf, GFP_ATOMIC);    in vl600_rx_fixup()
     201  if (!clone)    in vl600_rx_fixup()
     204  skb_trim(clone, packet_len);    in vl600_rx_fixup()
     205  usbnet_skb_return(dev, clone);    in vl600_rx_fixup()
|
| /linux/net/netfilter/ |
| nft_set_pipapo.c |
    1213  static int pipapo_realloc_scratch(struct nft_pipapo_match *clone,    argument
    1235  pipapo_free_scratch(clone, i);
    1237  *per_cpu_ptr(clone->scratch, i) = scratch;
    1267  if (priv->clone)
    1268  return priv->clone;
    1272  priv->clone = pipapo_clone(m);
    1274  return priv->clone;
    1807  if (!priv->clone)
    1811  pipapo_gc(set, priv->clone);
    1813  old = rcu_replace_pointer(priv->match, priv->clone,
      [all …]
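
The pipapo hits above follow a clone-then-publish discipline: modifications go into priv->clone while readers keep dereferencing priv->match under RCU, and rcu_replace_pointer() swaps the two at commit time. A minimal sketch of that discipline, leaving out pipapo's scratch maps and GC; struct my_match, struct my_priv and my_match_free() are illustrative stand-ins:

    #include <linux/lockdep.h>
    #include <linux/mutex.h>
    #include <linux/rcupdate.h>

    struct my_match;                                /* opaque lookup structure, illustrative */
    void my_match_free(struct my_match *m);

    struct my_priv {
            struct my_match __rcu *match;           /* what lockless readers see */
            struct my_match *clone;                 /* private working copy */
            struct mutex lock;                      /* serializes updaters */
    };

    /* Publish the mutated clone, then free the old copy once every RCU
     * reader that might still hold it has finished. Caller holds priv->lock. */
    static void my_commit(struct my_priv *priv)
    {
            struct my_match *old;

            old = rcu_replace_pointer(priv->match, priv->clone,
                                      lockdep_is_held(&priv->lock));
            priv->clone = NULL;
            synchronize_rcu();
            my_match_free(old);
    }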
|
| /linux/drivers/gpu/drm/nouveau/nvkm/engine/dma/ |
| usernv04.c |
      35  bool clone;    member
      51  if (dmaobj->clone) {    in nv04_dmaobj_bind()
      99  dmaobj->clone = true;    in nv04_dmaobj_new()
|
| /linux/rust/proc-macro2/ |
| rcvec.rs |
      60  Vec::clone(&self.inner)    in make_owned()
     118  fn clone(&self) -> Self {    in clone() method
     120  inner: Rc::clone(&self.inner),    in clone()
|
| /linux/Documentation/userspace-api/ |
| unshare.rst |
      38  threads. On Linux, at the time of thread creation using the clone system
      58  when creating a new process using fork or clone, unshare() can benefit
      96  works on an active task (as opposed to clone/fork working on a newly
      98  changes to copy_* functions utilized by clone/fork system call.
     108  unshare() reverses sharing that was done using clone(2) system call,
     109  so unshare() should have a similar interface as clone(2). That is,
     110  since flags in clone(int flags, void \*stack) specifies what should
     113  the meaning of the flags from the way they are used in clone(2).
     140  using clone(2).
     182  clone(2), fork(2)
      [all …]
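
The excerpt's point is that unshare() undoes sharing that clone(2) established, using the same flag namespace. A hedged userspace sketch: detaching the calling process's mount namespace with the same CLONE_NEWNS bit that clone(2) would take at process creation (needs CAP_SYS_ADMIN):

    #define _GNU_SOURCE
    #include <sched.h>
    #include <stdio.h>

    int main(void)
    {
            /* The flag bit is the one clone(2) uses; here it un-shares the
             * mount namespace of an already running task instead. */
            if (unshare(CLONE_NEWNS) == -1) {
                    perror("unshare(CLONE_NEWNS)");
                    return 1;
            }
            /* This process now has its own copy of the mount namespace. */
            return 0;
    }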
|
| /linux/Documentation/translations/zh_CN/userspace-api/ |
| no_new_privs.rst |
      27  set ``no_new_privs``. Once the bit is set, it is inherited across fork, clone and execve
      55  In principle, when ``no_new_privs`` is set, several options of ``unshare(2)`` and ``clone(2)``
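
A hedged userspace sketch of setting the bit described above: once prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0) succeeds, the flag survives fork, clone and execve and cannot be cleared:

    #include <stdio.h>
    #include <sys/prctl.h>
    #include <unistd.h>

    int main(void)
    {
            if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0) == -1) {
                    perror("prctl(PR_SET_NO_NEW_PRIVS)");
                    return 1;
            }
            /* The exec'd program inherits no_new_privs, so set-user-ID and
             * file-capability bits on the binary grant nothing extra. */
            execlp("id", "id", (char *)NULL);
            perror("execlp");
            return 1;
    }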
|