
Searched refs:layer (Results 1 – 25 of 414) sorted by relevance


/linux/drivers/gpu/drm/xlnx/
zynqmp_disp.c
413 static bool zynqmp_disp_layer_is_video(const struct zynqmp_disp_layer *layer) in zynqmp_disp_layer_is_video() argument
415 return layer->id == ZYNQMP_DPSUB_LAYER_VID; in zynqmp_disp_layer_is_video()
427 struct zynqmp_disp_layer *layer, in zynqmp_disp_avbuf_set_format() argument
433 layer->disp_fmt = fmt; in zynqmp_disp_avbuf_set_format()
434 if (layer->mode == ZYNQMP_DPSUB_LAYER_NONLIVE) { in zynqmp_disp_avbuf_set_format()
437 val &= zynqmp_disp_layer_is_video(layer) in zynqmp_disp_avbuf_set_format()
443 reg = zynqmp_disp_layer_is_video(layer) in zynqmp_disp_avbuf_set_format()
451 reg = zynqmp_disp_layer_is_video(layer) in zynqmp_disp_avbuf_set_format()
572 struct zynqmp_disp_layer *layer) in zynqmp_disp_avbuf_enable_video() argument
577 if (zynqmp_disp_layer_is_video(layer)) { in zynqmp_disp_avbuf_enable_video()
[all …]
zynqmp_disp.h
53 u32 *zynqmp_disp_layer_drm_formats(struct zynqmp_disp_layer *layer,
55 u32 *zynqmp_disp_live_layer_formats(struct zynqmp_disp_layer *layer,
57 void zynqmp_disp_layer_enable(struct zynqmp_disp_layer *layer);
58 void zynqmp_disp_layer_disable(struct zynqmp_disp_layer *layer);
59 void zynqmp_disp_layer_set_format(struct zynqmp_disp_layer *layer,
61 void zynqmp_disp_layer_set_live_format(struct zynqmp_disp_layer *layer,
63 int zynqmp_disp_layer_update(struct zynqmp_disp_layer *layer,
/linux/drivers/media/dvb-frontends/
mb86a20s.c
377 unsigned layer) in mb86a20s_get_modulation() argument
386 if (layer >= ARRAY_SIZE(reg)) in mb86a20s_get_modulation()
388 rc = mb86a20s_writereg(state, 0x6d, reg[layer]); in mb86a20s_get_modulation()
409 unsigned layer) in mb86a20s_get_fec() argument
419 if (layer >= ARRAY_SIZE(reg)) in mb86a20s_get_fec()
421 rc = mb86a20s_writereg(state, 0x6d, reg[layer]); in mb86a20s_get_fec()
444 unsigned layer) in mb86a20s_get_interleaving() argument
457 if (layer >= ARRAY_SIZE(reg)) in mb86a20s_get_interleaving()
459 rc = mb86a20s_writereg(state, 0x6d, reg[layer]); in mb86a20s_get_interleaving()
470 unsigned layer) in mb86a20s_get_segment_count() argument
[all …]
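
The mb86a20s results above all follow one access pattern: the ISDB-T layer index is bounds-checked against a small per-layer register table and then written to a selector register before the per-layer value is read. A minimal sketch of that pattern, assuming illustrative register values, an assumed read-back register, and the mb86a20s_readreg() helper; none of these specifics are copied from the driver:

/*
 * Illustrative only: the table contents, the 0x6e read-back register and the
 * helper name are assumptions, not taken from the tree.
 */
static int mb86a20s_read_layer_value(struct mb86a20s_state *state,
				     unsigned int layer)
{
	static const u8 reg[] = { 0x86, 0x8a, 0x8e };	/* layers A, B, C (hypothetical) */
	int rc;

	if (layer >= ARRAY_SIZE(reg))
		return -EINVAL;

	/* Select this layer's entry via the demod's indirect-access register. */
	rc = mb86a20s_writereg(state, 0x6d, reg[layer]);
	if (rc < 0)
		return rc;

	/* Read the per-layer value back (register address assumed). */
	return mb86a20s_readreg(state, 0x6e);
}
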
tc90522.c
220 c->layer[0].fec = c->fec_inner; in tc90522s_get_frontend()
221 c->layer[0].modulation = c->modulation; in tc90522s_get_frontend()
222 c->layer[0].segment_count = val[3] & 0x3f; /* slots */ in tc90522s_get_frontend()
226 c->layer[1].fec = fec_conv_sat[v]; in tc90522s_get_frontend()
228 c->layer[1].segment_count = 0; in tc90522s_get_frontend()
230 c->layer[1].segment_count = val[4] & 0x3f; /* slots */ in tc90522s_get_frontend()
235 c->layer[1].modulation = QPSK; in tc90522s_get_frontend()
362 c->layer[0].segment_count = 0; in tc90522t_get_frontend()
365 c->layer[0].segment_count = v; in tc90522t_get_frontend()
366 c->layer[0].fec = fec_conv_ter[(val[1] & 0x1c) >> 2]; in tc90522t_get_frontend()
[all …]
/linux/drivers/gpu/drm/sun4i/
sun4i_layer.c
69 struct sun4i_layer *layer = plane_to_sun4i_layer(plane); in sun4i_backend_layer_atomic_disable() local
70 struct sun4i_backend *backend = layer->backend; in sun4i_backend_layer_atomic_disable()
72 sun4i_backend_layer_enable(backend, layer->id, false); in sun4i_backend_layer_atomic_disable()
89 struct sun4i_layer *layer = plane_to_sun4i_layer(plane); in sun4i_backend_layer_atomic_update() local
90 struct sun4i_backend *backend = layer->backend; in sun4i_backend_layer_atomic_update()
93 sun4i_backend_cleanup_layer(backend, layer->id); in sun4i_backend_layer_atomic_update()
101 sun4i_backend_update_layer_frontend(backend, layer->id, in sun4i_backend_layer_atomic_update()
105 sun4i_backend_update_layer_formats(backend, layer->id, plane); in sun4i_backend_layer_atomic_update()
106 sun4i_backend_update_layer_buffer(backend, layer->id, plane); in sun4i_backend_layer_atomic_update()
109 sun4i_backend_update_layer_coord(backend, layer->id, plane); in sun4i_backend_layer_atomic_update()
[all …]
sun4i_backend.h
196 int layer, bool enable);
199 int layer, struct drm_plane *plane);
201 int layer, struct drm_plane *plane);
203 int layer, struct drm_plane *plane);
205 int layer, uint32_t in_fmt);
207 int layer, struct drm_plane *plane);
209 int layer);
/linux/net/caif/
caif_dev.c
35 struct cflayer layer; member
161 caifd->layer.up-> in caif_flow_cb()
162 ctrlcmd(caifd->layer.up, in caif_flow_cb()
164 caifd->layer.id); in caif_flow_cb()
168 static int transmit(struct cflayer *layer, struct cfpkt *pkt) in transmit() argument
172 container_of(layer, struct caif_device_entry, layer); in transmit()
230 caifd->layer.up->ctrlcmd(caifd->layer.up, in transmit()
232 caifd->layer.id); in transmit()
259 if (!caifd || !caifd->layer.up || !caifd->layer.up->receive || in receive()
270 err = caifd->layer.up->receive(caifd->layer.up, pkt); in receive()
[all …]
cfserl.c
22 struct cflayer layer; member
34 void cfserl_release(struct cflayer *layer) in cfserl_release() argument
36 kfree(layer); in cfserl_release()
44 caif_assert(offsetof(struct cfserl, layer) == 0); in cfserl_create()
45 this->layer.receive = cfserl_receive; in cfserl_create()
46 this->layer.transmit = cfserl_transmit; in cfserl_create()
47 this->layer.ctrlcmd = cfserl_ctrlcmd; in cfserl_create()
50 snprintf(this->layer.name, CAIF_LAYER_NAME_SZ, "ser1"); in cfserl_create()
51 return &this->layer; in cfserl_create()
157 ret = layr->layer.up->receive(layr->layer.up, pkt); in cfserl_receive()
[all …]
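
The CAIF results in this directory all share one idiom: a protocol layer embeds struct cflayer as its first member, asserts that the embedded member sits at offset zero, wires up the receive/transmit/ctrlcmd callbacks, and recovers the outer object with container_of(). A minimal sketch of that idiom under those assumptions; the cfexample names are hypothetical and not from the tree:

struct cfexample {
	struct cflayer layer;		/* must remain the first member */
	int private_state;
};

static int cfexample_receive(struct cflayer *layr, struct cfpkt *pkt)
{
	struct cfexample *ex = container_of(layr, struct cfexample, layer);

	/* ...inspect ex->private_state, process pkt... then pass it up. */
	return layr->up->receive(layr->up, pkt);
}

struct cflayer *cfexample_create(void)
{
	struct cfexample *this = kzalloc(sizeof(*this), GFP_KERNEL);

	if (!this)
		return NULL;
	caif_assert(offsetof(struct cfexample, layer) == 0);
	this->layer.receive = cfexample_receive;
	snprintf(this->layer.name, CAIF_LAYER_NAME_SZ, "example");
	return &this->layer;
}
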
caif_usb.c
33 struct cflayer layer; member
55 struct cfusbl *usbl = container_of(layr, struct cfusbl, layer); in cfusbl_transmit()
93 caif_assert(offsetof(struct cfusbl, layer) == 0); in cfusbl_create()
95 memset(&this->layer, 0, sizeof(this->layer)); in cfusbl_create()
96 this->layer.receive = cfusbl_receive; in cfusbl_create()
97 this->layer.transmit = cfusbl_transmit; in cfusbl_create()
98 this->layer.ctrlcmd = cfusbl_ctrlcmd; in cfusbl_create()
99 snprintf(this->layer.name, CAIF_LAYER_NAME_SZ, "usb%d", phyid); in cfusbl_create()
100 this->layer.id = phyid; in cfusbl_create()
119 static void cfusbl_release(struct cflayer *layer) in cfusbl_release() argument
[all …]
cfmuxl.c
18 #define container_obj(layr) container_of(layr, struct cfmuxl, layer)
25 struct cflayer layer; member
54 this->layer.receive = cfmuxl_receive; in cfmuxl_create()
55 this->layer.transmit = cfmuxl_transmit; in cfmuxl_create()
56 this->layer.ctrlcmd = cfmuxl_ctrlcmd; in cfmuxl_create()
61 snprintf(this->layer.name, CAIF_LAYER_NAME_SZ, "mux"); in cfmuxl_create()
62 return &this->layer; in cfmuxl_create()
250 struct cflayer *layer; in cfmuxl_ctrlcmd() local
253 list_for_each_entry_rcu(layer, &muxl->srvl_list, node) { in cfmuxl_ctrlcmd()
255 if (cfsrvl_phyid_match(layer, phyid) && layer->ctrlcmd) { in cfmuxl_ctrlcmd()
[all …]
cfrfml.c
17 #define container_obj(layr) container_of(layr, struct cfrfml, serv.layer)
34 static void cfrfml_release(struct cflayer *layer) in cfrfml_release() argument
36 struct cfsrvl *srvl = container_of(layer, struct cfsrvl, layer); in cfrfml_release()
37 struct cfrfml *rfml = container_obj(&srvl->layer); in cfrfml_release()
56 this->serv.layer.receive = cfrfml_receive; in cfrfml_create()
57 this->serv.layer.transmit = cfrfml_transmit; in cfrfml_create()
65 snprintf(this->serv.layer.name, CAIF_LAYER_NAME_SZ, in cfrfml_create()
68 return &this->serv.layer; in cfrfml_create()
167 err = rfml->serv.layer.up->receive(rfml->serv.layer.up, pkt); in cfrfml_receive()
201 cfpkt_info(pkt)->channel_id = rfml->serv.layer.id; in cfrfml_transmit_segment()
[all …]
cffrml.c
20 #define container_obj(layr) container_of(layr, struct cffrml, layer)
23 struct cflayer layer; member
46 caif_assert(offsetof(struct cffrml, layer) == 0); in cffrml_create()
48 this->layer.receive = cffrml_receive; in cffrml_create()
49 this->layer.transmit = cffrml_transmit; in cffrml_create()
50 this->layer.ctrlcmd = cffrml_ctrlcmd; in cffrml_create()
51 snprintf(this->layer.name, CAIF_LAYER_NAME_SZ, "frm%d", phyid); in cffrml_create()
53 this->layer.id = phyid; in cffrml_create()
57 void cffrml_free(struct cflayer *layer) in cffrml_free() argument
59 struct cffrml *this = container_obj(layer); in cffrml_free()
[all …]
cfdbgl.c
25 caif_assert(offsetof(struct cfsrvl, layer) == 0); in cfdbgl_create()
27 dbg->layer.receive = cfdbgl_receive; in cfdbgl_create()
28 dbg->layer.transmit = cfdbgl_transmit; in cfdbgl_create()
29 snprintf(dbg->layer.name, CAIF_LAYER_NAME_SZ, "dbg%d", channel_id); in cfdbgl_create()
30 return &dbg->layer; in cfdbgl_create()
51 info->channel_id = service->layer.id; in cfdbgl_transmit()
cfveil.c
21 #define container_obj(layr) container_of(layr, struct cfsrvl, layer)
31 caif_assert(offsetof(struct cfsrvl, layer) == 0); in cfvei_create()
33 vei->layer.receive = cfvei_receive; in cfvei_create()
34 vei->layer.transmit = cfvei_transmit; in cfvei_create()
35 snprintf(vei->layer.name, CAIF_LAYER_NAME_SZ, "vei%d", channel_id); in cfvei_create()
36 return &vei->layer; in cfvei_create()
94 info->channel_id = service->layer.id; in cfvei_transmit()
cfvidl.c
27 caif_assert(offsetof(struct cfsrvl, layer) == 0); in cfvidl_create()
30 vid->layer.receive = cfvidl_receive; in cfvidl_create()
31 vid->layer.transmit = cfvidl_transmit; in cfvidl_create()
32 snprintf(vid->layer.name, CAIF_LAYER_NAME_SZ, "vid1"); in cfvidl_create()
33 return &vid->layer; in cfvidl_create()
62 info->channel_id = service->layer.id; in cfvidl_transmit()
cfdgml.c
32 caif_assert(offsetof(struct cfsrvl, layer) == 0); in cfdgml_create()
34 dgm->layer.receive = cfdgml_receive; in cfdgml_create()
35 dgm->layer.transmit = cfdgml_transmit; in cfdgml_create()
36 snprintf(dgm->layer.name, CAIF_LAYER_NAME_SZ, "dgm%d", channel_id); in cfdgml_create()
37 return &dgm->layer; in cfdgml_create()
106 info->channel_id = service->layer.id; in cfdgml_transmit()
cfutill.c
32 caif_assert(offsetof(struct cfsrvl, layer) == 0); in cfutill_create()
34 util->layer.receive = cfutill_receive; in cfutill_create()
35 util->layer.transmit = cfutill_transmit; in cfutill_create()
36 snprintf(util->layer.name, CAIF_LAYER_NAME_SZ, "util1"); in cfutill_create()
37 return &util->layer; in cfutill_create()
96 info->channel_id = service->layer.id; in cfutill_transmit()
/linux/Documentation/networking/caif/
linux_caif.rst
66 CAIF Core layer implements the CAIF protocol as defined by ST-Ericsson.
68 each layer described in the specification is implemented as a separate layer.
78 - Layered architecture (a la Streams), each layer in the CAIF
80 - Clients must call configuration function to add PHY layer.
81 - Clients must implement CAIF layer to consume/produce
84 Client layer.
100 - CFCNFG CAIF Configuration layer. Configures the CAIF Protocol
104 - CFCTRL CAIF Control layer. Encodes and Decodes control messages
111 - CFVEI CAIF VEI layer. Handles CAIF AT Channels on VEI (Virtual
112 External Interface). This layer encodes/decodes VEI frames.
[all …]
/linux/Documentation/hid/
amd-sfh-hid.rst
49 sensor data. The layer, which binds each device (AMD SFH HID driver) identifies the device type and
50 registers with the HID core. Transport layer attaches a constant "struct hid_ll_driver" object with
52 used by HID core to communicate with the device. AMD HID Transport layer implements the synchronous…
56 This layer is responsible to implement HID requests and descriptors. As firmware is OS agnostic, HID
57 client layer fills the HID request structure and descriptors. HID client layer is complex as it is
58 interface between MP2 PCIe layer and HID. HID client layer initializes the MP2 PCIe layer and holds
59 the instance of MP2 layer. It identifies the number of sensors connected using MP2-PCIe layer. Based
61 enumeration of each sensor, client layer fills the HID Descriptor structure and HID input report
65 AMD MP2 PCIe layer
76 interrupt to MP2. The client layer allocates the physical memory and the same is sent to MP2 via
[all …]
/linux/drivers/gpu/drm/arm/display/komeda/
komeda_plane.c
23 struct komeda_pipeline *pipe = kplane->layer->base.pipeline; in komeda_plane_init_data_flow()
57 komeda_complete_data_flow_cfg(kplane->layer, dflow, fb); in komeda_plane_init_data_flow()
78 struct komeda_layer *layer = kplane->layer; in komeda_plane_atomic_check() local
105 err = komeda_build_layer_split_data_flow(layer, in komeda_plane_atomic_check()
108 err = komeda_build_layer_data_flow(layer, in komeda_plane_atomic_check()
181 u32 layer_type = kplane->layer->layer_type; in komeda_plane_format_mod_supported()
241 struct komeda_layer *layer) in komeda_plane_add() argument
244 struct komeda_component *c = &layer->base; in komeda_plane_add()
255 kplane->layer = layer; in komeda_plane_add()
258 layer->layer_type, &n_formats); in komeda_plane_add()
[all …]
/linux/security/landlock/
audit.c
137 get_hierarchy(const struct landlock_ruleset *const domain, const size_t layer) in get_hierarchy() argument
142 if (WARN_ON_ONCE(layer >= domain->num_layers)) in get_hierarchy()
145 for (i = domain->num_layers - 1; i > layer; i--) { in get_hierarchy()
195 long layer; in get_denied_layer() local
201 layer = __fls(mask); in get_denied_layer()
202 if (layer > youngest_layer) { in get_denied_layer()
203 youngest_layer = layer; in get_denied_layer()
205 } else if (layer == youngest_layer) { in get_denied_layer()
291 const size_t layer = in get_layer_from_deny_masks() local
295 if (layer > youngest_layer) { in get_layer_from_deny_masks()
[all …]
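
The get_denied_layer() snippet above scans a per-access-right bitmask of denying layers and keeps the youngest one; __fls() returns the index of the most significant set bit, which here corresponds to the most recently added layer. A standalone sketch of that scan, with the mask array and function name invented for illustration:

/* Return the highest (youngest) layer index found in any non-empty mask. */
static long youngest_denying_layer(const unsigned long *masks, size_t nr_masks)
{
	long youngest_layer = -1;
	size_t i;

	for (i = 0; i < nr_masks; i++) {
		long layer;

		if (!masks[i])
			continue;	/* __fls() is undefined for 0 */
		layer = __fls(masks[i]);
		if (layer > youngest_layer)
			youngest_layer = layer;
	}
	return youngest_layer;
}
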
/linux/Documentation/ABI/stable/
sysfs-transport-srp
12 Description: Number of seconds the SCSI layer will wait after a transport
13 layer error has been observed before removing a target port.
21 Description: Number of seconds the SCSI layer will wait after a transport
22 layer error has been observed before failing I/O. Zero means
37 Description: Number of seconds the SCSI layer will wait after a reconnect
51 Description: State of the transport layer used for communication with the
52 remote port. "running" if the transport layer is operational;
53 "blocked" if a transport layer error has been encountered but
/linux/Documentation/userspace-api/media/dvb/
frontend.h.rst.exceptions
6 # Group layer A-C symbols together
7 replace define DTV_ISDBT_LAYERA_FEC dtv-isdbt-layer-fec
8 replace define DTV_ISDBT_LAYERB_FEC dtv-isdbt-layer-fec
9 replace define DTV_ISDBT_LAYERC_FEC dtv-isdbt-layer-fec
10 replace define DTV_ISDBT_LAYERA_MODULATION dtv-isdbt-layer-modulation
11 replace define DTV_ISDBT_LAYERB_MODULATION dtv-isdbt-layer-modulation
12 replace define DTV_ISDBT_LAYERC_MODULATION dtv-isdbt-layer-modulation
13 replace define DTV_ISDBT_LAYERA_SEGMENT_COUNT dtv-isdbt-layer-segment-count
14 replace define DTV_ISDBT_LAYERB_SEGMENT_COUNT dtv-isdbt-layer-segment-count
15 replace define DTV_ISDBT_LAYERC_SEGMENT_COUNT dtv-isdbt-layer-segment-count
[all …]
/linux/Documentation/driver-api/surface_aggregator/
internal.rst
63 Lower-level packet transport is implemented in the *packet transport layer
65 infrastructure of the kernel. As the name indicates, this layer deals with
70 Above this sits the *request transport layer (RTL)*. This layer is centered
76 The *controller* layer is building on top of this and essentially decides
81 ``RQID``). This layer basically provides a fundamental interface to the SAM
84 While the controller layer already provides an interface for other kernel
100 The packet transport layer is represented via |ssh_ptl| and is structured
107 managed by the packet transport layer, which is essentially the lowest layer
114 transport layer, as well as a reference to the buffer containing the data to
126 submitted to the packet transport layer, the ``complete()`` callback is
[all …]
/linux/fs/overlayfs/
params.c
277 enum ovl_opt layer, const char *name, bool upper) in ovl_mount_dir_check() argument
314 if (ctx->lowerdir_all && layer != Opt_lowerdir) in ovl_mount_dir_check()
316 if (ctx->nr_data && layer == Opt_lowerdir_add) in ovl_mount_dir_check()
345 static void ovl_add_layer(struct fs_context *fc, enum ovl_opt layer, in ovl_add_layer() argument
353 switch (layer) { in ovl_add_layer()
379 static inline bool is_upper_layer(enum ovl_opt layer) in is_upper_layer() argument
381 return layer == Opt_upperdir || layer == Opt_workdir; in is_upper_layer()
386 enum ovl_opt layer) in ovl_kern_path() argument
390 switch (layer) { in ovl_kern_path()
412 struct path *layer_path, enum ovl_opt layer) in ovl_do_parse_layer() argument
[all …]
