Searched refs:bulk (Results 1 – 25 of 101) sorted by relevance

/linux/drivers/net/ethernet/mellanox/mlx5/core/lib/
crypto.c
20 #define MLX5_CRYPTO_DEK_BULK_CALC_FREED(bulk) MLX5_CRYPTO_DEK_CALC_FREED(bulk) argument
22 #define MLX5_CRYPTO_DEK_BULK_IDLE(bulk) \ argument
23 ({ typeof(bulk) _bulk = (bulk); \
80 struct mlx5_crypto_dek_bulk *bulk; member
292 struct mlx5_crypto_dek_bulk *bulk; in mlx5_crypto_dek_bulk_create() local
296 bulk = kzalloc(sizeof(*bulk), GFP_KERNEL); in mlx5_crypto_dek_bulk_create()
297 if (!bulk) in mlx5_crypto_dek_bulk_create()
301 bulk->need_sync = bitmap_zalloc(num_deks, GFP_KERNEL); in mlx5_crypto_dek_bulk_create()
302 if (!bulk->need_sync) { in mlx5_crypto_dek_bulk_create()
307 bulk->in_use = bitmap_zalloc(num_deks, GFP_KERNEL); in mlx5_crypto_dek_bulk_create()
[all …]
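The crypto.c hits sketch the shape of the mlx5 DEK bulk allocator: one kzalloc'ed container plus two bitmaps (need_sync, in_use) sized to the number of keys, with staged unwind on allocation failure. A minimal sketch of that pattern in generic kernel C (the demo_* names are hypothetical, not the mlx5 API):

    #include <linux/bitmap.h>
    #include <linux/err.h>
    #include <linux/slab.h>

    struct demo_bulk {
            unsigned long *need_sync;   /* keys that must be resynced */
            unsigned long *in_use;      /* keys currently handed out */
            int num_deks;
    };

    static struct demo_bulk *demo_bulk_create(int num_deks)
    {
            struct demo_bulk *bulk;

            bulk = kzalloc(sizeof(*bulk), GFP_KERNEL);
            if (!bulk)
                    return ERR_PTR(-ENOMEM);

            bulk->need_sync = bitmap_zalloc(num_deks, GFP_KERNEL);
            if (!bulk->need_sync)
                    goto err_free_bulk;

            bulk->in_use = bitmap_zalloc(num_deks, GFP_KERNEL);
            if (!bulk->in_use)
                    goto err_free_sync;

            bulk->num_deks = num_deks;
            return bulk;

    err_free_sync:
            bitmap_free(bulk->need_sync);
    err_free_bulk:
            kfree(bulk);
            return ERR_PTR(-ENOMEM);
    }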
/linux/drivers/gpu/drm/ttm/
ttm_resource.c
46 cursor->bulk = NULL; in ttm_resource_cursor_clear_bulk()
51 static void ttm_resource_cursor_move_bulk_tail(struct ttm_lru_bulk_move *bulk, in ttm_resource_cursor_move_bulk_tail() argument
58 if (WARN_ON_ONCE(bulk != cursor->bulk)) { in ttm_resource_cursor_move_bulk_tail()
63 pos = &bulk->pos[cursor->mem_type][cursor->priority]; in ttm_resource_cursor_move_bulk_tail()
70 static void ttm_bulk_move_adjust_cursors(struct ttm_lru_bulk_move *bulk) in ttm_bulk_move_adjust_cursors() argument
74 list_for_each_entry_safe(cursor, next, &bulk->cursor_list, bulk_link) in ttm_bulk_move_adjust_cursors()
75 ttm_resource_cursor_move_bulk_tail(bulk, cursor); in ttm_bulk_move_adjust_cursors()
79 static void ttm_bulk_move_drop_cursors(struct ttm_lru_bulk_move *bulk) in ttm_bulk_move_drop_cursors() argument
83 list_for_each_entry_safe(cursor, next, &bulk->cursor_list, bulk_link) in ttm_bulk_move_drop_cursors()
125 void ttm_lru_bulk_move_init(struct ttm_lru_bulk_move *bulk) in ttm_lru_bulk_move_init() argument
[all …]
/linux/drivers/platform/raspberrypi/vchiq-interface/
TESTING
49 Testing bulk transfer for alignment.
50 Testing bulk transfer at PAGE_SIZE.
61 vchi bulk (size 0, 0 async, 0 oneway) -> 546.000000us
62 vchi bulk (size 0, 0 oneway) -> 230.000000us
65 vchi bulk (size 0, 0 async, 0 oneway) -> 296.000000us
66 vchi bulk (size 0, 0 oneway) -> 266.000000us
68 vchi bulk (size 0, 0 oneway) -> 456.000000us
70 vchi bulk (size 0, 0 oneway) -> 640.000000us
72 vchi bulk (size 0, 0 oneway) -> 2309.000000us
78 vchi bulk (size 0, 0 oneway) -> nanus
[all …]
vchiq_core.c
458 struct vchiq_header *header, struct vchiq_bulk *bulk) in make_service_callback() argument
468 if (bulk) { in make_service_callback()
469 cb_data = bulk->cb_data; in make_service_callback()
470 cb_userdata = bulk->cb_userdata; in make_service_callback()
1308 get_bulk_reason(struct vchiq_bulk *bulk) in get_bulk_reason() argument
1310 if (bulk->dir == VCHIQ_BULK_TRANSMIT) { in get_bulk_reason()
1311 if (bulk->actual == VCHIQ_BULK_ACTUAL_ABORTED) in get_bulk_reason()
1317 if (bulk->actual == VCHIQ_BULK_ACTUAL_ABORTED) in get_bulk_reason()
1324 struct vchiq_bulk *bulk) in service_notify_bulk() argument
1326 if (bulk->actual != VCHIQ_BULK_ACTUAL_ABORTED) { in service_notify_bulk()
[all …]
vchiq_arm.c
604 struct vchiq_bulk *bulk = waiter->bulk_waiter.bulk; in vchiq_blocking_bulk_transfer() local
606 if (bulk) { in vchiq_blocking_bulk_transfer()
609 if ((bulk->dma_addr != (dma_addr_t)(uintptr_t)bulk_params->dma_addr) || in vchiq_blocking_bulk_transfer()
610 (bulk->size != bulk_params->size)) { in vchiq_blocking_bulk_transfer()
616 bulk->waiter = NULL; in vchiq_blocking_bulk_transfer()
629 if ((ret != -EAGAIN) || fatal_signal_pending(current) || !waiter->bulk_waiter.bulk) { in vchiq_blocking_bulk_transfer()
630 struct vchiq_bulk *bulk = waiter->bulk_waiter.bulk; in vchiq_blocking_bulk_transfer() local
632 if (bulk) { in vchiq_blocking_bulk_transfer()
635 bulk->waiter = NULL; in vchiq_blocking_bulk_transfer()
/linux/drivers/platform/raspberrypi/vchiq-mmal/
mmal-vchiq.c
151 } bulk; /* bulk data */ member
255 container_of(work, struct mmal_msg_context, u.bulk.work); in buffer_work_cb()
256 struct mmal_buffer *buffer = msg_context->u.bulk.buffer; in buffer_work_cb()
264 buffer->length = msg_context->u.bulk.buffer_used; in buffer_work_cb()
265 buffer->mmal_flags = msg_context->u.bulk.mmal_flags; in buffer_work_cb()
266 buffer->dts = msg_context->u.bulk.dts; in buffer_work_cb()
267 buffer->pts = msg_context->u.bulk.pts; in buffer_work_cb()
269 atomic_dec(&msg_context->u.bulk.port->buffers_with_vpu); in buffer_work_cb()
271 msg_context->u.bulk.port->buffer_cb(msg_context->u.bulk.instance, in buffer_work_cb()
272 msg_context->u.bulk.port, in buffer_work_cb()
[all …]
/linux/drivers/net/ethernet/intel/libeth/
tx.c
12 DEFINE_STATIC_CALL_NULL(bulk, libeth_xdp_return_buff_bulk);
27 __libeth_xdp_complete_tx(sqe, cp, static_call(bulk), in libeth_tx_complete_any()
38 static_call_update(bulk, ops ? ops->bulk : NULL); in libeth_attach_xdp()
xdp.c
126 const struct libeth_xdp_tx_frame *pos = &bq->bulk[sent]; in libeth_xdp_tx_exception()
133 memmove(bq->bulk, pos, left * sizeof(*bq->bulk)); in libeth_xdp_tx_exception()
431 .bulk = libeth_xdp_return_buff_bulk,
priv.h
25 void (*bulk)(const struct skb_shared_info *sinfo, member
/linux/include/net/libeth/
xsk.h
27 bq->bulk[bq->count++] = (typeof(*bq->bulk)){ in libeth_xsk_tx_queue_head()
36 bq->bulk[bq->count - 1].flags |= LIBETH_XDP_TX_MULTI; in libeth_xsk_tx_queue_head()
49 bq->bulk[bq->count++] = (typeof(*bq->bulk)){ in libeth_xsk_tx_queue_frag()
96 bq->bulk[bq->count - 1].flags |= LIBETH_XDP_TX_LAST; in libeth_xsk_tx_queue_bulk()
292 const struct libeth_xdp_tx_frame *bulk; in libeth_xsk_xmit_do_bulk() local
301 bulk = container_of(&pool->tx_descs[0], typeof(*bulk), desc); in libeth_xsk_xmit_do_bulk()
303 libeth_xdp_tx_xmit_bulk(bulk, xdpsq, n, true, in libeth_xsk_xmit_do_bulk()
xdp.h
391 struct libeth_xdp_tx_frame bulk[LIBETH_XDP_TX_BULK]; member
517 libeth_xdp_tx_xmit_bulk(const struct libeth_xdp_tx_frame *bulk, void *xdpsq, in libeth_xdp_tx_xmit_bulk() argument
551 xmit(fill(bulk[i + j], base + j, &sq, priv), in libeth_xdp_tx_xmit_bulk()
558 xmit(fill(bulk[i], ntu + i - off, &sq, priv), in libeth_xdp_tx_xmit_bulk()
603 bq->bulk[bq->count++] = (typeof(*bq->bulk)){ in libeth_xdp_tx_queue_head()
612 bq->bulk[bq->count - 1].len_fl |= LIBETH_XDP_TX_MULTI; in libeth_xdp_tx_queue_head()
625 bq->bulk[bq->count++].frag = *frag; in libeth_xdp_tx_queue_frag()
669 bq->bulk[bq->count - 1].len_fl |= LIBETH_XDP_TX_LAST; in libeth_xdp_tx_queue_bulk()
783 sent = libeth_xdp_tx_xmit_bulk(bq->bulk, bq->xdpsq, in __libeth_xdp_tx_flush_bulk()
886 bq->bulk[bq->count++] = (typeof(*bq->bulk)){ in libeth_xdp_xmit_queue_head()
[all …]
/linux/Documentation/w1/masters/
ds2490.rst
49 - The ds2490 specification doesn't cover short bulk in reads in
51 available, the bulk read will return an error and the hardware will
52 clear the entire bulk in buffer. It would be possible to read the
63 most of the time one of the bulk out or in, and usually the bulk in
64 would fail. qemu sets a 50ms timeout and the bulk in would timeout
65 even when the status shows data available. A bulk out write would
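The note above describes a device quirk rather than an API: a bulk IN read that asks for more bytes than are buffered errors out, and the hardware clears the whole bulk IN buffer. A hedged sketch of a tolerant read using the stock synchronous helper (endpoint address and timeout are assumptions):

    #include <linux/usb.h>

    /* Hypothetical helper: drain whatever the device has queued on its
     * bulk IN endpoint.  A short read is normal; an error may mean the
     * device just cleared its bulk IN buffer, as described above. */
    static int demo_bulk_in_read(struct usb_device *udev, void *buf, int len)
    {
            int actual = 0;
            int ret;

            ret = usb_bulk_msg(udev, usb_rcvbulkpipe(udev, 0x81 /* assumed */),
                               buf, len, &actual, 1000 /* ms */);
            if (ret)
                    return ret;

            return actual;  /* may legitimately be shorter than len */
    }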
/linux/drivers/net/ethernet/mellanox/mlx5/core/
fs_counters.c
166 if (counter->bulk) in mlx5_fc_release()
417 static void mlx5_fc_init(struct mlx5_fc *counter, struct mlx5_fc_bulk *bulk, in mlx5_fc_init() argument
420 counter->bulk = bulk; in mlx5_fc_init()
426 return counter->bulk->base_id; in mlx5_fc_get_base_id()
526 struct mlx5_fs_bulk *fs_bulk = &fc->bulk->fs_bulk; in mlx5_fc_pool_release_counter()
531 pool_index.index = fc->id - fc->bulk->base_id; in mlx5_fc_pool_release_counter()
567 counter->bulk = fc_bulk; in mlx5_fc_local_create()
575 kfree(counter->bulk); in mlx5_fc_local_destroy()
/linux/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/
fs_hws_pools.c
128 pr_bulk->prs_data[i].bulk = pr_bulk; in mlx5_fs_hws_pr_bulk_create()
225 struct mlx5_fs_bulk *fs_bulk = &pr_data->bulk->fs_bulk; in mlx5_fs_hws_pr_pool_release_pr()
237 return pr_data->bulk->hws_action; in mlx5_fs_hws_pr_get_action()
282 mh_bulk->mhs_data[i].bulk = mh_bulk; in mlx5_fs_hws_mh_bulk_create()
374 struct mlx5_fs_bulk *fs_bulk = &mh_data->bulk->fs_bulk; in mlx5_fs_hws_mh_pool_release_mh()
409 struct mlx5_fc_bulk *fc_bulk = counter->bulk; in mlx5_fc_get_hws_action()
425 mlx5_fs_put_hws_action(&counter->bulk->hws_data); in mlx5_fc_put_hws_action()
/linux/Documentation/ABI/testing/
sysfs-driver-w1_therm
75 * If a bulk read has been triggered, it will directly
76 return the temperature computed when the bulk read
80 * If no bulk read has been triggered, it will trigger
115 (RW) trigger a bulk read conversion. read the status
124 no bulk operation. Reading temperature will
128 'trigger': trigger a bulk read on all supporting
131 Note that if a bulk read is sent but one sensor is not read
134 of the bulk read command (not the current temperature).
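In practice the attribute above is driven from user space: writing 'trigger' to the bus master's therm_bulk_read file starts a conversion on all supporting slaves, and reading it back reports the status. A minimal user-space sketch in C (the sysfs path assumes a typical single-master setup):

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            /* Assumed location of the attribute under the w1 bus master. */
            const char *path = "/sys/devices/w1_bus_master1/therm_bulk_read";
            int fd = open(path, O_WRONLY);

            if (fd < 0) {
                    perror("open");
                    return 1;
            }
            /* Start a bulk temperature conversion on all supporting slaves. */
            if (write(fd, "trigger", 7) != 7)
                    perror("write");
            close(fd);
            return 0;
    }

Per the excerpt, a sensor read after the trigger returns the temperature computed by the bulk conversion, not a freshly measured one.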
/linux/drivers/gpu/drm/msm/
msm_mdss.c
372 struct clk_bulk_data *bulk; in mdp5_mdss_parse_clock() local
379 bulk = devm_kcalloc(&pdev->dev, MDP5_MDSS_NUM_CLOCKS, sizeof(struct clk_bulk_data), GFP_KERNEL); in mdp5_mdss_parse_clock()
380 if (!bulk) in mdp5_mdss_parse_clock()
383 bulk[num_clocks++].id = "iface"; in mdp5_mdss_parse_clock()
384 bulk[num_clocks++].id = "bus"; in mdp5_mdss_parse_clock()
385 bulk[num_clocks++].id = "vsync"; in mdp5_mdss_parse_clock()
387 ret = devm_clk_bulk_get_optional(&pdev->dev, num_clocks, bulk); in mdp5_mdss_parse_clock()
391 *clocks = bulk; in mdp5_mdss_parse_clock()
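The msm_mdss.c hits show the stock clk_bulk consumer pattern: allocate a clk_bulk_data array, fill in the .id names, and fetch everything with one call. A condensed sketch of the same pattern (clock names copied from the excerpt; the rest is generic):

    #include <linux/clk.h>
    #include <linux/device.h>
    #include <linux/platform_device.h>

    static int demo_clocks_on(struct platform_device *pdev)
    {
            static const char * const names[] = { "iface", "bus", "vsync" };
            struct clk_bulk_data *bulk;
            int i, ret;

            bulk = devm_kcalloc(&pdev->dev, ARRAY_SIZE(names), sizeof(*bulk),
                                GFP_KERNEL);
            if (!bulk)
                    return -ENOMEM;

            for (i = 0; i < ARRAY_SIZE(names); i++)
                    bulk[i].id = names[i];

            /* The _optional variant leaves missing clocks as NULL consumers
             * instead of failing the whole lookup. */
            ret = devm_clk_bulk_get_optional(&pdev->dev, ARRAY_SIZE(names), bulk);
            if (ret)
                    return ret;

            return clk_bulk_prepare_enable(ARRAY_SIZE(names), bulk);
    }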
/linux/include/drm/ttm/
ttm_resource.h
342 struct ttm_lru_bulk_move *bulk; member
442 void ttm_lru_bulk_move_init(struct ttm_lru_bulk_move *bulk);
443 void ttm_lru_bulk_move_tail(struct ttm_lru_bulk_move *bulk);
445 struct ttm_lru_bulk_move *bulk);
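ttm_resource.h carries the driver-facing half of the LRU bulk-move API: a ttm_lru_bulk_move keeps a set of resources contiguous on the LRU so the whole range can be bumped to the tail in one step. A rough usage sketch, hedged because lock scope and where the tracker lives (typically per VM) vary by driver:

    #include <drm/ttm/ttm_device.h>
    #include <drm/ttm/ttm_resource.h>

    /* Done once when the VM (or other owner) is created. */
    static void demo_vm_lru_init(struct ttm_lru_bulk_move *bulk)
    {
            ttm_lru_bulk_move_init(bulk);
    }

    /* Later, e.g. at command submission: bump every tracked resource to
     * the LRU tail at once instead of walking each buffer. */
    static void demo_vm_move_to_lru_tail(struct ttm_device *bdev,
                                         struct ttm_lru_bulk_move *bulk)
    {
            spin_lock(&bdev->lru_lock);
            ttm_lru_bulk_move_tail(bulk);
            spin_unlock(&bdev->lru_lock);
    }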
/linux/drivers/remoteproc/
qcom_wcnss.c
447 struct regulator_bulk_data *bulk; in wcnss_init_regulators() local
465 bulk = devm_kcalloc(wcnss->dev, in wcnss_init_regulators()
468 if (!bulk) in wcnss_init_regulators()
472 bulk[i].supply = info[i].name; in wcnss_init_regulators()
474 ret = devm_regulator_bulk_get(wcnss->dev, num_vregs, bulk); in wcnss_init_regulators()
480 regulator_set_voltage(bulk[i].consumer, in wcnss_init_regulators()
485 regulator_set_load(bulk[i].consumer, info[i].load_uA); in wcnss_init_regulators()
488 wcnss->vregs = bulk; in wcnss_init_regulators()
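qcom_wcnss.c shows the matching regulator pattern: one regulator_bulk_data array, one devm_regulator_bulk_get() call, then per-supply voltage/load tuning. A condensed sketch (a real driver keeps the array in its private data, as wcnss does with wcnss->vregs, so it can disable the supplies later):

    #include <linux/regulator/consumer.h>

    static int demo_power_on(struct device *dev,
                             struct regulator_bulk_data *bulk, int num)
    {
            int ret;

            /* bulk[i].supply must already name each regulator, as the
             * wcnss loop above does with info[i].name. */
            ret = devm_regulator_bulk_get(dev, num, bulk);
            if (ret)
                    return ret;

            /* Enables every supply, unwinding the already-enabled ones
             * if a later one fails. */
            return regulator_bulk_enable(num, bulk);
    }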
/linux/drivers/media/usb/dvb-usb-v2/
usb_urb.c
155 stream->props.u.bulk.buffersize, in usb_urb_alloc_bulk_urbs()
260 buf_size = stream->props.u.bulk.buffersize; in usb_urb_reconfig()
281 props->u.bulk.buffersize == in usb_urb_reconfig()
282 stream->props.u.bulk.buffersize) in usb_urb_reconfig()
326 stream->props.u.bulk.buffersize); in usb_urb_initv2()
/linux/Documentation/driver-api/usb/
bulk-streams.rst
1 USB bulk streams
8 device driver to overload a bulk endpoint so that multiple transfers can be
41 ID for the bulk IN and OUT endpoints used in a Bi-directional command sequence.
46 declares how many stream IDs it can support, and each bulk endpoint on a
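The excerpt describes the allocation handshake: the device advertises how many stream IDs each SuperSpeed bulk endpoint supports, and the driver asks the host controller to set up some number of them across the endpoints it uses together. A hedged sketch of that request (stream count and endpoint pairing are assumptions):

    #include <linux/usb.h>

    /* Ask the HCD for stream IDs on a bulk IN/OUT pair, e.g. for the
     * bi-directional command sequence mentioned above.  The core may
     * grant fewer streams than requested. */
    static int demo_alloc_streams(struct usb_interface *intf,
                                  struct usb_host_endpoint *ep_in,
                                  struct usb_host_endpoint *ep_out)
    {
            struct usb_host_endpoint *eps[] = { ep_in, ep_out };

            return usb_alloc_streams(intf, eps, ARRAY_SIZE(eps),
                                     16 /* requested streams, assumed */,
                                     GFP_KERNEL);
    }

Each URB submitted afterwards selects its stream by setting urb->stream_id.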
/linux/net/core/
page_pool.c
593 const int bulk = PP_ALLOC_CACHE_REFILL; in __page_pool_alloc_netmems_slow()
605 /* Don't support bulk alloc for high-order pages */ in __page_pool_alloc_netmems_slow()
614 memset(&pool->alloc.cache, 0, sizeof(void *) * bulk); in __page_pool_alloc_netmems_slow()
616 nr_pages = alloc_pages_bulk_node(gfp, pool->p.nid, bulk, in __page_pool_alloc_netmems_slow()
924 netmem_ref *bulk, in page_pool_recycle_ring_bulk()
934 if (__ptr_ring_produce(&pool->ring, (__force void *)bulk[i])) { in page_pool_recycle_ring_bulk()
953 page_pool_return_netmem(pool, bulk[i]);
983 netmem_ref bulk[XDP_BULK_QUEUE_SIZE]; in page_pool_put_netmem_bulk()
1009 /* Approved for bulk recycling in ptr_ring cache */ in page_pool_put_netmem_bulk()
1011 bulk[bulk_le in page_pool_put_netmem_bulk()
[all …]
/linux/lib/
test_objpool.c
72 int bulk[2]; /* for thread and irq */ member
207 item->bulk[0] = test->bulk_normal; in ot_init_cpu_item()
208 item->bulk[1] = test->bulk_irq; in ot_init_cpu_item()
329 for (i = 0; i < item->bulk[irq]; i++) in ot_bulk_sync()
506 for (i = 0; i < item->bulk[irq]; i++) in ot_bulk_async()
/linux/Documentation/usb/
ehci.rst
58 At this writing the driver should comfortably handle all control, bulk,
125 and bulk transfers. Shows each active qh and the qtds
161 good to keep in mind that bulk transfers are always in 512 byte packets,
165 So more than 50 MByte/sec is available for bulk transfers, when both
195 you issue a control or bulk request you can often expect to learn that
203 or using bulk queuing if a series of small requests needs to be issued.
213 I/O be efficient, it's better to just queue up several (bulk) requests
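The advice above (queue several bulk requests rather than issuing one at a time) keeps the host controller busy across completion latency. A minimal sketch of queuing a batch of bulk OUT URBs (endpoint address is an assumption; URBs and buffers are allocated by the caller):

    #include <linux/usb.h>

    static int demo_queue_bulk(struct usb_device *udev, struct urb **urbs,
                               void **bufs, int n, int len,
                               usb_complete_t done, void *ctx)
    {
            int i, ret;

            for (i = 0; i < n; i++) {
                    usb_fill_bulk_urb(urbs[i], udev,
                                      usb_sndbulkpipe(udev, 0x02 /* assumed */),
                                      bufs[i], len, done, ctx);
                    /* Each submit adds to the endpoint's queue; the HC
                     * advances to the next URB without driver involvement. */
                    ret = usb_submit_urb(urbs[i], GFP_KERNEL);
                    if (ret)
                            return ret;
            }
            return 0;
    }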
/linux/include/linux/raspberrypi/
vchiq_core.h
495 struct vchiq_bulk *bulk; member
556 struct vchiq_bulk *bulk);
560 struct vchiq_bulk *bulk);
/linux/drivers/interconnect/
Makefile
4 icc-core-objs := core.o bulk.o debugfs-client.o
