Searched full:bulk (Results 1 – 25 of 546) sorted by relevance

/linux/drivers/net/ethernet/mellanox/mlx5/core/lib/
crypto.c
13 * (for example, TLS) after last revalidation in a pool or a bulk.
20 #define MLX5_CRYPTO_DEK_BULK_CALC_FREED(bulk) MLX5_CRYPTO_DEK_CALC_FREED(bulk) argument
22 #define MLX5_CRYPTO_DEK_BULK_IDLE(bulk) \ argument
23 ({ typeof(bulk) _bulk = (bulk); \
59 int num_deks; /* the total number of keys in a bulk */
80 struct mlx5_crypto_dek_bulk *bulk; member
182 mlx5_core_dbg(mdev, "DEK objects created, bulk=%d, obj_id=%d\n", in mlx5_crypto_create_dek_bulk()
292 struct mlx5_crypto_dek_bulk *bulk; in mlx5_crypto_dek_bulk_create() local
296 bulk = kzalloc(sizeof(*bulk), GFP_KERNEL); in mlx5_crypto_dek_bulk_create()
297 if (!bulk) in mlx5_crypto_dek_bulk_create()
[all …]
/linux/include/net/libeth/
xsk.h
19 * @bq: XDP Tx bulk to queue the head frag to
27 bq->bulk[bq->count++] = (typeof(*bq->bulk)){ in libeth_xsk_tx_queue_head()
36 bq->bulk[bq->count - 1].flags |= LIBETH_XDP_TX_MULTI; in libeth_xsk_tx_queue_head()
43 * @bq: XDP Tx bulk to queue the frag to
49 bq->bulk[bq->count++] = (typeof(*bq->bulk)){ in libeth_xsk_tx_queue_frag()
57 * @bq: XDP Tx bulk to queue the frame to
59 * @flush_bulk: driver callback to flush the bulk to the HW queue
96 bq->bulk[bq->count - 1].flags |= LIBETH_XDP_TX_LAST; in libeth_xsk_tx_queue_bulk()
103 * @frm: XDP Tx frame from the bulk
140 * libeth_xsk_tx_flush_bulk - wrapper to define flush of XSk ``XDP_TX`` bulk
[all …]
xdp.h
278 * @LIBETH_XDP_TX_BULK: one bulk size at which it will be flushed to the queue
368 * struct libeth_xdp_tx_bulk - XDP Tx frame bulk for bulk sending
373 * @count: current number of frames in @bulk
374 * @bulk: array of queued frames for bulk Tx
376 * All XDP Tx operations except XSk xmit queue each frame to the bulk first
377 * and flush it when @count reaches the array end. Bulk is always placed on
378 * the stack for performance. One bulk element contains all the data necessary
381 * to &libeth_xdp_tx_frame as they are compatible and the bulk structur
391 struct libeth_xdp_tx_frame bulk[LIBETH_XDP_TX_BULK]; global() member
517 libeth_xdp_tx_xmit_bulk(const struct libeth_xdp_tx_frame *bulk, void *xdpsq, u32 n, bool unroll, u64 priv, u32 (*prep)(void *xdpsq, struct libeth_xdpsq *sq), struct libeth_xdp_tx_desc (*fill)(struct libeth_xdp_tx_frame frm, u32 i, const struct libeth_xdpsq *sq, u64 priv), void (*xmit)(struct libeth_xdp_tx_desc desc, u32 i, const struct libeth_xdpsq *sq, u64 priv)) libeth_xdp_tx_xmit_bulk() argument
1771 __libeth_xdp_complete_tx(struct libeth_sqe *sqe, struct libeth_cq_pp *cp, typeof(libeth_xdp_return_buff_bulk) bulk, typeof(libeth_xsk_buff_free_slow) xsk) __libeth_xdp_complete_tx() argument
[all...]
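
The xdp.h comments above describe the core pattern: every XDP Tx frame is first queued into a fixed-size, on-stack bulk and only flushed to the hardware queue once the array fills up. Below is a minimal stand-alone sketch of that queue-then-flush idea, not the actual libeth API; struct my_tx_bulk, MY_TX_BULK, my_flush() and my_queue() are invented stand-ins for libeth_xdp_tx_bulk, LIBETH_XDP_TX_BULK and the driver callbacks.

    /* Simplified stand-in for the queue-then-flush bulk pattern. */
    #include <stdbool.h>
    #include <stddef.h>

    #define MY_TX_BULK 32                           /* stand-in for LIBETH_XDP_TX_BULK */

    struct my_tx_frame { void *data; size_t len; };

    struct my_tx_bulk {
            size_t count;                            /* frames currently queued */
            struct my_tx_frame bulk[MY_TX_BULK];     /* queued frames, lives on the stack */
    };

    /* Driver-specific flush: hand bulk[0..count-1] to the HW queue. */
    static bool my_flush(struct my_tx_bulk *bq)
    {
            /* ... program Tx descriptors for bq->bulk[0 .. bq->count - 1] ... */
            bq->count = 0;
            return true;
    }

    static bool my_queue(struct my_tx_bulk *bq, void *data, size_t len)
    {
            if (bq->count == MY_TX_BULK && !my_flush(bq))
                    return false;                    /* ring full, caller drops the frame */

            bq->bulk[bq->count++] = (struct my_tx_frame){ .data = data, .len = len };
            return true;
    }
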
/linux/drivers/net/ethernet/mellanox/mlx5/core/
fs_counters.c
41 /* Max number of counters to query in bulk read is 32K */
105 * - bulk query (single thread workqueue context)
107 * around the same time as the current bulk cmd.
109 * destroyed during a bulk query command.
128 /* Start new bulk query. */ in mlx5_fc_stats_query_all_counters()
129 /* First id must be aligned to 4 when using bulk query. */ in mlx5_fc_stats_query_all_counters()
139 mlx5_core_err(dev, "Error doing bulk query: %d\n", err); in mlx5_fc_stats_query_all_counters()
145 /* Do not update counters added after bulk query was started. */ in mlx5_fc_stats_query_all_counters()
166 if (counter->bulk) in mlx5_fc_release()
183 "Can't increase flow counters bulk query buffer size, alloc failed, bulk_query_len(%d)\n", in mlx5_fc_stats_bulk_query_buf_realloc()
[all …]
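
Two constraints quoted above shape how the driver batches counters: at most 32K counters per bulk read, and a first id that is aligned to 4. A tiny illustrative sketch of the alignment step; the helper names are hypothetical, not the mlx5 code.

    #include <stdint.h>

    /* Round the bulk-query window so the first id is a multiple of 4; the
     * extra leading ids pulled in by the alignment are simply included in
     * the query length. Illustrative only. */
    static inline uint32_t bulk_first_id(uint32_t counter_id)
    {
            return counter_id & ~(uint32_t)3;
    }

    static inline uint32_t bulk_query_len(uint32_t counter_id, uint32_t num)
    {
            return num + (counter_id & 3);
    }
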
/linux/include/linux/usb/
serial.h
40 * @bulk_in_buffer: pointer to the bulk in buffer for this port.
42 * @read_urb: pointer to the bulk in struct urb for this port.
43 * @bulk_in_endpointAddress: endpoint address for the bulk in pipe for this
45 * @bulk_in_buffers: pointers to the bulk in buffers for this port
46 * @read_urbs: pointers to the bulk in urbs for this port
47 * @read_urbs_free: status bitmap the for bulk in urbs
48 * @bulk_out_buffer: pointer to the bulk out buffer for this port.
50 * @write_urb: pointer to the bulk out struct urb for this port.
52 * @bulk_out_buffers: pointers to the bulk out buffers for this port
53 * @write_urbs: pointers to the bulk out urbs for this port
[all …]
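
The struct usb_serial_port fields listed above (bulk_in_buffer, read_urb, bulk_in_endpointAddress, ...) are normally tied together with the generic URB helpers. A hedged sketch of submitting one bulk-in URB once the endpoint address is known; my_start_bulk_in() and my_read_bulk_callback() are illustrative names, not usb-serial core functions.

    #include <linux/slab.h>
    #include <linux/usb.h>

    static void my_read_bulk_callback(struct urb *urb)
    {
            /* inspect urb->status, consume urb->actual_length bytes of
             * urb->transfer_buffer, then resubmit if more data is expected */
    }

    static int my_start_bulk_in(struct usb_device *udev, u8 bulk_in_epaddr,
                                size_t bufsize, void *context)
    {
            struct urb *urb;
            void *buf;
            int ret;

            urb = usb_alloc_urb(0, GFP_KERNEL);
            if (!urb)
                    return -ENOMEM;

            buf = kmalloc(bufsize, GFP_KERNEL);
            if (!buf) {
                    usb_free_urb(urb);
                    return -ENOMEM;
            }

            usb_fill_bulk_urb(urb, udev, usb_rcvbulkpipe(udev, bulk_in_epaddr),
                              buf, bufsize, my_read_bulk_callback, context);

            ret = usb_submit_urb(urb, GFP_KERNEL);
            if (ret) {
                    kfree(buf);
                    usb_free_urb(urb);
            }
            return ret;
    }
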
/linux/drivers/media/usb/dvb-usb/
dibusb.h
25 * bulk msg to/from endpoint 0x01
36 * bulk write: 0x02 ((7bit i2c_addr << 1) | 0x01) register_bytes length_word
37 * bulk read: byte_buffer (length_word bytes)
43 * bulk write: 0x03 (7bit i2c_addr << 1) register_bytes value_bytes
49 * bulk write: 0x04
50 * bulk read: byte_buffer (5 bytes)
59 * bulk write: 0x05 mode_byte
/linux/Documentation/w1/masters/
ds2490.rst
49 - The ds2490 specification doesn't cover short bulk in reads in
51 available, the bulk read will return an error and the hardware will
52 clear the entire bulk in buffer. It would be possible to read the
63 most of the time one of the bulk out or in, and usually the bulk in
64 would fail. qemu sets a 50ms timeout and the bulk in would timeout
65 even when the status shows data available. A bulk out write would
/linux/drivers/staging/vc04_services/interface/vchiq_arm/
vchiq_core.c
458 struct vchiq_header *header, struct vchiq_bulk *bulk) in make_service_callback() argument
465 * If a bulk transfer is in progress, pass bulk->cb_*data to the in make_service_callback()
468 if (bulk) { in make_service_callback()
469 cb_data = bulk->cb_data; in make_service_callback()
470 cb_userdata = bulk->cb_userdata; in make_service_callback()
1308 get_bulk_reason(struct vchiq_bulk *bulk) in get_bulk_reason() argument
1310 if (bulk->dir == VCHIQ_BULK_TRANSMIT) { in get_bulk_reason()
1311 if (bulk->actual == VCHIQ_BULK_ACTUAL_ABORTED) in get_bulk_reason()
1317 if (bulk->actual == VCHIQ_BULK_ACTUAL_ABORTED) in get_bulk_reason()
1324 struct vchiq_bulk *bulk) in service_notify_bulk() argument
[all …]
vchiq_core.h
130 int local_insert; /* Where to insert the next local bulk */
131 int remote_insert; /* Where to insert the next remote bulk (master) */
132 int process; /* Bulk to transfer next */
133 int remote_notify; /* Bulk to notify the remote client of next (mstr) */
134 int remove; /* Bulk to notify the local client of, and remove, next */
445 struct vchiq_bulk *bulk; member
453 * is better to use a bulk transfer
506 struct vchiq_bulk *bulk);
510 struct vchiq_bulk *bulk);
/linux/Documentation/ABI/testing/
sysfs-driver-w1_therm
75 * If a bulk read has been triggered, it will directly
76 return the temperature computed when the bulk read
80 * If no bulk read has been triggered, it will trigger
115 (RW) trigger a bulk read conversion. read the status
124 no bulk operation. Reading temperature will
128 'trigger': trigger a bulk read on all supporting
131 Note that if a bulk read is sent but one sensor is not read
134 of the bulk read command (not the current temperature).
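
A small userspace sketch of the interface described above: write 'trigger' to the bus master's bulk-read attribute, then read the same attribute back for the status. The exact attribute path used here (w1_bus_master1/therm_bulk_read) is an assumption for illustration; the status values are the ones documented in the ABI entry quoted above.

    #include <stdio.h>

    /* Assumed path of the (RW) bulk-read attribute on the first bus master. */
    #define BULK_ATTR "/sys/bus/w1/devices/w1_bus_master1/therm_bulk_read"

    int main(void)
    {
            char status[16] = "";
            FILE *f;

            f = fopen(BULK_ATTR, "w");
            if (!f)
                    return 1;
            fputs("trigger", f);            /* start a bulk conversion on all sensors */
            if (fclose(f) != 0)
                    return 1;

            f = fopen(BULK_ATTR, "r");
            if (!f)
                    return 1;
            if (fgets(status, sizeof(status), f))
                    printf("bulk read status: %s", status);
            fclose(f);
            return 0;
    }
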
/linux/drivers/net/ethernet/intel/libeth/
tx.c
12 DEFINE_STATIC_CALL_NULL(bulk, libeth_xdp_return_buff_bulk);
27 __libeth_xdp_complete_tx(sqe, cp, static_call(bulk), in libeth_tx_complete_any()
38 static_call_update(bulk, ops ? ops->bulk : NULL); in libeth_attach_xdp()
xdp.c
115 * @bq: XDP Tx frame bulk
116 * @sent: number of frames sent successfully (from this bulk)
121 * the Tx bulk to try again later.
126 const struct libeth_xdp_tx_frame *pos = &bq->bulk[sent]; in libeth_xdp_tx_exception()
133 memmove(bq->bulk, pos, left * sizeof(*bq->bulk)); in libeth_xdp_tx_exception()
284 * @bq: XDP Tx bulk
325 * libeth_xdp_return_buff_bulk - free &xdp_buff as part of a bulk
327 * @bq: XDP frame bulk to store the buffer
431 .bulk = libeth_xdp_return_buff_bulk,
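
The libeth_xdp_tx_exception() snippet above shows what happens when a flush only partially succeeds: the unsent tail is memmove()d to the front of the bulk so a later flush can retry it. Reusing the hypothetical my_tx_bulk stand-in from the sketch after the xdp.h entry, that step looks roughly like:

    #include <string.h>

    /* After a partial send of 'sent' frames, keep the remaining frames at
     * the front of the bulk and shrink the count accordingly. */
    static void my_requeue_tail(struct my_tx_bulk *bq, size_t sent)
    {
            size_t left = bq->count - sent;

            memmove(bq->bulk, &bq->bulk[sent], left * sizeof(bq->bulk[0]));
            bq->count = left;
    }
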
/linux/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/
fs_hws_pools.c
128 pr_bulk->prs_data[i].bulk = pr_bulk; in mlx5_fs_hws_pr_bulk_create()
152 mlx5_core_err(dev, "Freeing bulk before all reformats were released\n"); in mlx5_fs_hws_pr_bulk_destroy()
225 struct mlx5_fs_bulk *fs_bulk = &pr_data->bulk->fs_bulk; in mlx5_fs_hws_pr_pool_release_pr()
237 return pr_data->bulk->hws_action; in mlx5_fs_hws_pr_get_action()
282 mh_bulk->mhs_data[i].bulk = mh_bulk; in mlx5_fs_hws_mh_bulk_create()
307 mlx5_core_err(dev, "Freeing bulk before all modify header were released\n"); in mlx5_fs_hws_mh_bulk_destroy()
374 struct mlx5_fs_bulk *fs_bulk = &mh_data->bulk->fs_bulk; in mlx5_fs_hws_mh_pool_release_mh()
409 struct mlx5_fc_bulk *fc_bulk = counter->bulk; in mlx5_fc_get_hws_action()
425 mlx5_fs_put_hws_action(&counter->bulk->hws_data); in mlx5_fc_put_hws_action()
/linux/Documentation/driver-api/usb/
bulk-streams.rst
1 USB bulk streams
7 Bulk endpoint streams were added in the USB 3.0 specification. Streams allow a
8 device driver to overload a bulk endpoint so that multiple transfers can be
41 ID for the bulk IN and OUT endpoints used in a Bi-directional command sequence.
46 declares how many stream IDs it can support, and each bulk endpoint on a
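
On the host side, the API behind this document is usb_alloc_streams()/usb_free_streams() plus the stream_id field of struct urb. A hedged sketch with a made-up function name and stream count; usable stream IDs typically start at 1, up to the count returned by usb_alloc_streams().

    #include <linux/gfp.h>
    #include <linux/kernel.h>
    #include <linux/usb.h>

    /* Sketch: reserve 16 stream IDs on a bulk IN/OUT endpoint pair, then tag
     * a bulk URB with one of them. Error handling is trimmed. */
    static int my_setup_streams(struct usb_interface *intf,
                                struct usb_host_endpoint *bulk_in,
                                struct usb_host_endpoint *bulk_out,
                                struct urb *urb)
    {
            struct usb_host_endpoint *eps[] = { bulk_in, bulk_out };
            int streams;

            streams = usb_alloc_streams(intf, eps, ARRAY_SIZE(eps), 16, GFP_KERNEL);
            if (streams < 1)
                    return streams < 0 ? streams : -EINVAL;

            urb->stream_id = 1;     /* pick one of the IDs granted above */
            return 0;
    }
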
/linux/Documentation/usb/
ehci.rst
58 At this writing the driver should comfortably handle all control, bulk,
125 and bulk transfers. Shows each active qh and the qtds
160 Bulk transfers are most often used where throughput is an issue. It's
161 good to keep in mind that bulk transfers are always in 512 byte packets,
165 So more than 50 MByte/sec is available for bulk transfers, when both
195 you issue a control or bulk request you can often expect to learn that
203 or using bulk queuing if a series of small requests needs to be issued.
213 I/O be efficient, it's better to just queue up several (bulk) requests
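
For context on the '50 MByte/sec' figure quoted above: a USB 2.0 high-speed bulk endpoint moves data in 512-byte packets with at most 13 packets per 125 µs microframe, so the theoretical ceiling is 13 × 512 B × 8000 microframes/s = 53,248,000 B/s, roughly 53 MByte/sec, which is why somewhat more than 50 MByte/sec is achievable when both host and device keep the queue full.
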
gadget_serial.rst
89 with bulk in and bulk out endpoints, and it is treated similarly
93 or any driver that can talk to a device with a simple bulk in/out
122 To load it as a vendor specific bulk in/out device, do this::
150 serial is loaded as a bulk in/out device, you will want to use the
209 E: Ad=81(I) Atr=02(Bulk) MxPS= 512 Ivl=0ms
210 E: Ad=02(O) Atr=02(Bulk) MxPS= 512 Ivl=0ms
239 E: Ad=81(I) Atr=02(Bulk) MxPS= 512 Ivl=0ms
240 E: Ad=02(O) Atr=02(Bulk) MxPS= 512 Ivl=0ms
mass-storage.rst
15 Its requirements are modest; only a bulk-in and a bulk-out endpoint
21 a single memory/DMA buffer will be usable for bulk-in and bulk-out
140 Specifies whether the gadget is allowed to halt bulk endpoints.
242 supported. MSG always uses SCSI protocol with bulk only
/linux/tools/usb/
hcd-tests.sh
7 # - out, in: out needs 'bulk sink' firmware, in needs 'bulk src'
9 # - halt: needs bulk sink+src, tests halt set/clear from host
10 # - unlink: needs bulk sink and/or src, test HCD unlink processing
17 # this default provides a steady test load for a bulk device
37 # NOTE: the 'in' and 'out' cases are usually bulk, but can be
/linux/drivers/usb/misc/
usblcd.c
48 bulk in endpoint */
50 bulk out endpoint */
140 /* do a blocking bulk read to get data from the device */ in lcd_read()
208 "nonzero write bulk status received: %d\n", status); in lcd_write_bulk_callback()
270 /* send the data out the bulk port */ in lcd_write()
345 /* use only the first bulk-in and bulk-out endpoints */ in lcd_probe()
350 "Could not find both bulk-in and bulk-out endpoints\n"); in lcd_probe()
/linux/fs/xfs/libxfs/
xfs_btree_staging.c
25 * use to construct a new btree index using the btree bulk loader code. The
26 * bulk loading code uses the staging btree cursor to abstract the details of
32 * construction by the bulk loading code. Staging btree cursors point to fake
36 * the _stage_cursor function for a specific btree type. When bulk loading is
42 * Bulk Loading for AG Btrees
96 * Bulk Loading for Inode-Rooted Btrees
165 * Bulk Loading of Staged Btrees
177 * "Bulk Loading for AG Btrees" and "Bulk Loading for Inode-Rooted Btrees" for
189 * save them for later use by ->claim_block(). Bulk loading requires all
234 * The btree blocks will be written to disk when bulk loading is finished.
[all …]
/linux/drivers/usb/serial/
ipw.c
90 #define IPW_SIO_RXCTL 0x00 /* control bulk rx channel transmissions, value=1/0 (on/off) */
159 /* reset the bulk pipes */ in ipw_open()
164 dev_dbg(dev, "%s: setting up bulk read callback\n", __func__); in ipw_open()
167 /*--3: Tell the modem to open the floodgates on the rx bulk channel */ in ipw_open()
178 dev_err(dev, "Enabling bulk RxRead failed (error = %d)\n", result); in ipw_open()
269 /* send RXBULK_off (tell modem to stop transmitting bulk data on in ipw_close()
281 dev_err(dev, "Disabling bulk RxRead failed (error = %d)\n", result); in ipw_close()
/linux/drivers/usb/
usb-skeleton.c
57 __u8 bulk_in_endpointAddr; /* the address of the bulk in endpoint */
58 __u8 bulk_out_endpointAddr; /* the address of the bulk out endpoint */
174 "%s - nonzero write bulk status received: %d\n", in skel_read_bulk_callback()
205 /* submit bulk in urb, which means no data to deliver */ in skel_do_read_io()
345 "%s - nonzero write bulk status received: %d\n", in skel_write_bulk_callback()
436 /* send the data out the bulk port */ in skel_write()
511 /* use only the first bulk-in and bulk-out endpoints */ in skel_probe()
516 "Could not find both bulk-in and bulk-out endpoints\n"); in skel_probe()
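
The skel_probe() snippet above ('use only the first bulk-in and bulk-out endpoints') corresponds to the usual probe-time lookup via usb_find_common_endpoints(). A minimal sketch; struct my_dev and my_find_bulk_endpoints() are placeholder names, not part of the skeleton driver itself.

    #include <linux/usb.h>

    struct my_dev {
            size_t  bulk_in_size;           /* wMaxPacketSize of the bulk-in endpoint */
            u8      bulk_in_endpointAddr;
            u8      bulk_out_endpointAddr;
    };

    /* Pick the first bulk-in and bulk-out endpoints of the probed setting. */
    static int my_find_bulk_endpoints(struct usb_interface *intf,
                                      struct my_dev *dev)
    {
            struct usb_endpoint_descriptor *bulk_in, *bulk_out;
            int ret;

            ret = usb_find_common_endpoints(intf->cur_altsetting,
                                            &bulk_in, &bulk_out, NULL, NULL);
            if (ret) {
                    dev_err(&intf->dev,
                            "Could not find both bulk-in and bulk-out endpoints\n");
                    return ret;
            }

            dev->bulk_in_size = usb_endpoint_maxp(bulk_in);
            dev->bulk_in_endpointAddr = bulk_in->bEndpointAddress;
            dev->bulk_out_endpointAddr = bulk_out->bEndpointAddress;
            return 0;
    }
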
/linux/drivers/media/usb/em28xx/
em28xx.h
187 * bulk transfers: transfer buffer size = packet size * packet multiplier
188 * USB 2.0 spec says bulk packet size is always 512 bytes
218 * @urb: urb for isoc/bulk transfers
219 * @buf: transfer buffers for isoc/bulk transfer
232 * @analog_bufs: isoc/bulk transfer buffers for analog mode
233 * @digital_bufs: isoc/bulk transfer buffers for digital mode
723 u8 analog_ep_bulk; // address of bulk endpoint for analog
725 u8 dvb_ep_bulk_ts2; // address of bulk endpoint for DVB TS2
727 u8 dvb_ep_bulk; // address of bulk endpoint for DVB
734 unsigned int analog_xfer_bulk:1; // use bulk instead of isoc
[all …]
/linux/drivers/media/usb/dvb-usb-v2/
usb_urb.c
8 * BULK and ISOC USB data transfers in a generic way.
28 __func__, ptype == PIPE_ISOCHRONOUS ? "isoc" : "bulk", in usb_urb_complete()
155 stream->props.u.bulk.buffersize, in usb_urb_alloc_bulk_urbs()
260 buf_size = stream->props.u.bulk.buffersize; in usb_urb_reconfig()
281 props->u.bulk.buffersize == in usb_urb_reconfig()
282 stream->props.u.bulk.buffersize) in usb_urb_reconfig()
326 stream->props.u.bulk.buffersize); in usb_urb_initv2()
/linux/tools/testing/shared/
linux.c
139 pr_debug("Bulk free %p[0-%zu]\n", list, size - 1); in kmem_cache_free_locked()
163 pr_debug("Bulk alloc %zu\n", size); in kmem_cache_shrink()
338 * Test the test infrastructure for kem_cache_alloc/free and bulk counterparts.
347 * Testing the bulk allocators without aligned kmem_cache to force the
348 * bulk alloc/free to reuse
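
The shim above mimics the kernel's slab bulk interfaces so the radix/maple-tree test suite can run in userspace. For reference, a hedged sketch of how the real bulk counterparts are used in kernel code; the cache, object type and batch size are made up.

    #include <linux/kernel.h>
    #include <linux/slab.h>

    struct my_obj { int x; };

    /* Allocate a batch of objects in one call and free them in one call.
     * kmem_cache_alloc_bulk() returns the number of objects allocated
     * (0 on failure); kmem_cache_free_bulk() releases the whole array. */
    static int my_bulk_roundtrip(struct kmem_cache *cache)
    {
            void *objs[16];
            int n;

            n = kmem_cache_alloc_bulk(cache, GFP_KERNEL, ARRAY_SIZE(objs), objs);
            if (!n)
                    return -ENOMEM;

            /* ... use objs[0 .. n-1] ... */

            kmem_cache_free_bulk(cache, n, objs);
            return 0;
    }
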
