| /linux/sound/soc/intel/catpt/ |
| messages.c |
     17  struct catpt_ipc_msg request = {{0}}, reply;    in catpt_ipc_get_fw_version() (local)
     20  request.header = msg.val;    in catpt_ipc_get_fw_version()
     24  ret = catpt_dsp_send_msg(cdev, request, &reply);    in catpt_ipc_get_fw_version()
     58  struct catpt_ipc_msg request, reply;    in catpt_ipc_alloc_stream() (local)
     91  request.header = msg.val;    in catpt_ipc_alloc_stream()
     92  request.size = size;    in catpt_ipc_alloc_stream()
     93  request.data = payload;    in catpt_ipc_alloc_stream()
     97  ret = catpt_dsp_send_msg(cdev, request, &reply);    in catpt_ipc_alloc_stream()
    109  struct catpt_ipc_msg request;    in catpt_ipc_free_stream() (local)
    112  request.header = msg.val;    in catpt_ipc_free_stream()
    [all …]
|
| /linux/include/linux/ |
| blk-mq.h |
     25  typedef enum rq_end_io_ret (rq_end_io_fn)(struct request *, blk_status_t);
    103  struct request {    (struct)
    125  struct request *rq_next;    (argument)
    222  static inline unsigned long req_phys_gap_mask(const struct request *req)    in req_phys_gap_mask() (argument)
    227  static inline enum req_op req_op(const struct request *req)    in req_op()
    232  static inline bool blk_rq_is_passthrough(struct request *rq)    in blk_rq_is_passthrough()
    237  static inline unsigned short req_get_ioprio(struct request *req)    in req_get_ioprio()
    260  static inline void rq_list_add_tail(struct rq_list *rl, struct request *rq)    in rq_list_add_tail()
    270  static inline void rq_list_add_head(struct rq_list *rl, struct request *rq)    in rq_list_add_head()
    278  static inline struct request *rq_list_pop(struct rq_list *rl)    in rq_list_pop()
    [all …]
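These inline helpers are what a blk-mq driver calls from its ->queue_rq() handler to classify and complete a request. A minimal sketch of such a handler, assuming a hypothetical driver (the mydrv_* names are illustrative and not from blk-mq.h; a real driver would also hand the data described by blk_rq_pos()/blk_rq_bytes() to hardware):

```c
#include <linux/blk-mq.h>

/* Illustrative ->queue_rq() handler; mydrv_* names are made up. */
static blk_status_t mydrv_queue_rq(struct blk_mq_hw_ctx *hctx,
				   const struct blk_mq_queue_data *bd)
{
	struct request *rq = bd->rq;

	blk_mq_start_request(rq);

	/* Passthrough commands carry a driver-private payload, not a bio. */
	if (blk_rq_is_passthrough(rq))
		return BLK_STS_NOTSUPP;

	switch (req_op(rq)) {
	case REQ_OP_READ:
	case REQ_OP_WRITE:
		/* A real driver would submit blk_rq_pos()/blk_rq_bytes() here. */
		blk_mq_end_request(rq, BLK_STS_OK);
		return BLK_STS_OK;
	default:
		return BLK_STS_NOTSUPP;
	}
}

static const struct blk_mq_ops mydrv_mq_ops = {
	.queue_rq = mydrv_queue_rq,
};
```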
|
| /linux/drivers/usb/musb/ |
| musb_gadget.c |
     33  static inline void map_dma_buffer(struct musb_request *request,    in map_dma_buffer() (argument)
     39  request->map_state = UN_MAPPED;    in map_dma_buffer()
     50  musb_ep->packet_sz, request->request.buf,    in map_dma_buffer()
     51  request->request.length);    in map_dma_buffer()
     55  if (request->request.dma == DMA_ADDR_INVALID) {    in map_dma_buffer()
     61  request->request.buf,    in map_dma_buffer()
     62  request->request.length,    in map_dma_buffer()
     63  request->tx    in map_dma_buffer()
     70  request->request.dma = dma_addr;    in map_dma_buffer()
     71  request->map_state = MUSB_MAPPED;    in map_dma_buffer()
    [all …]
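map_dma_buffer() records whether the gadget request buffer was mapped by the driver itself (MUSB_MAPPED) or left alone (UN_MAPPED). The mapping step it performs is the standard streaming-DMA pattern; a minimal, musb-independent sketch (example_map_tx_buffer is a made-up name, dev stands for the controller's struct device):

```c
#include <linux/dma-mapping.h>

/*
 * Hand a driver-owned buffer to the DMA API for a single device-bound
 * transfer. The caller keeps *dma for the controller and later undoes
 * the mapping with dma_unmap_single().
 */
static int example_map_tx_buffer(struct device *dev, void *buf, size_t len,
				 dma_addr_t *dma)
{
	*dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, *dma))
		return -ENOMEM;	/* leave the buffer unmapped, fall back to PIO */

	return 0;
}
```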
|
| /linux/drivers/md/dm-vdo/indexer/ |
| index.c |
     89  struct uds_request *request;    in launch_zone_message() (local)
     91  result = vdo_allocate(1, struct uds_request, __func__, &request);    in launch_zone_message()
     95  request->index = index;    in launch_zone_message()
     96  request->unbatched = true;    in launch_zone_message()
     97  request->zone_number = zone;    in launch_zone_message()
     98  request->zone_message = message;    in launch_zone_message()
    100  uds_enqueue_request(request, STAGE_MESSAGE);    in launch_zone_message()
    124  static u64 triage_index_request(struct uds_index *index, struct uds_request *request)    in triage_index_request() (argument)
    130  &request->record_name);    in triage_index_request()
    134  zone = index->zones[request->zone_number];    in triage_index_request()
    [all …]
|
| funnel-requestqueue.c |
     97  struct uds_request *request = poll_queues(queue);    in dequeue_request() (local)
     99  if (request != NULL) {    in dequeue_request()
    100  *request_ptr = request;    in dequeue_request()
    116  unsigned long timeout, struct uds_request **request,    in wait_for_request() (argument)
    121  (dequeue_request(queue, request, waited) ||    in wait_for_request()
    127  dequeue_request(queue, request, waited),    in wait_for_request()
    134  struct uds_request *request = NULL;    in request_queue_worker() (local)
    141  wait_for_request(queue, dormant, time_batch, &request, &waited);    in request_queue_worker()
    142  if (likely(request != NULL)) {    in request_queue_worker()
    144  queue->processor(request);    in request_queue_worker()
    [all …]
|
| /linux/block/ |
| blk-crypto-internal.h |
     32  bool bio_crypt_rq_ctx_compatible(struct request *rq, struct bio *bio);
     37  static inline bool bio_crypt_ctx_back_mergeable(struct request *req,    in bio_crypt_ctx_back_mergeable()
     44  static inline bool bio_crypt_ctx_front_mergeable(struct request *req,    in bio_crypt_ctx_front_mergeable()
     51  static inline bool bio_crypt_ctx_merge_rq(struct request *req,    in bio_crypt_ctx_merge_rq()
     52  struct request *next)    in bio_crypt_ctx_merge_rq()
     58  static inline void blk_crypto_rq_set_defaults(struct request *rq)    in blk_crypto_rq_set_defaults()
     64  static inline bool blk_crypto_rq_is_encrypted(struct request *rq)    in blk_crypto_rq_is_encrypted()
     69  static inline bool blk_crypto_rq_has_keyslot(struct request *rq)    in blk_crypto_rq_has_keyslot()
    100  static inline bool bio_crypt_rq_ctx_compatible(struct request *rq,    in bio_crypt_rq_ctx_compatible()
    106  static inline bool bio_crypt_ctx_front_mergeable(struct request *req,    in bio_crypt_ctx_front_mergeable()
    [all …]
|
| /linux/drivers/platform/chrome/wilco_ec/ |
| keyboard_leds.c |
     56  struct wilco_keyboard_leds_msg *request,    in send_kbbl_msg() (argument)
     64  msg.request_data = request;    in send_kbbl_msg()
     65  msg.request_size = sizeof(*request);    in send_kbbl_msg()
     81  struct wilco_keyboard_leds_msg request;    in set_kbbl() (local)
     85  memset(&request, 0, sizeof(request));    in set_kbbl()
     86  request.command = WILCO_EC_COMMAND_KBBL;    in set_kbbl()
     87  request.subcmd = WILCO_KBBL_SUBCMD_SET_STATE;    in set_kbbl()
     88  request.mode = WILCO_KBBL_MODE_FLAG_PWM;    in set_kbbl()
     89  request.percent = brightness;    in set_kbbl()
     91  ret = send_kbbl_msg(ec, &request, &response);    in set_kbbl()
    [all …]
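set_kbbl() builds the WILCO_EC_COMMAND_KBBL request on the stack and passes it to send_kbbl_msg(), which only points the mailbox message at it. A condensed sketch of such a caller, reusing the names visible in the snippet and assuming it lives in keyboard_leds.c next to those definitions (the brightness parameter type is simplified, and the response is assumed to share the request's struct type):

```c
/* Sketch of a set_kbbl()-style caller; types and helpers are driver-local. */
static int example_set_backlight(struct wilco_ec_device *ec, u8 brightness)
{
	struct wilco_keyboard_leds_msg request;
	struct wilco_keyboard_leds_msg response;

	/* Zero the request so unused fields go out to the EC as 0. */
	memset(&request, 0, sizeof(request));
	request.command = WILCO_EC_COMMAND_KBBL;
	request.subcmd = WILCO_KBBL_SUBCMD_SET_STATE;
	request.mode = WILCO_KBBL_MODE_FLAG_PWM;
	request.percent = brightness;

	return send_kbbl_msg(ec, &request, &response);
}
```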
|
| /linux/net/ethtool/ |
| eeprom.c |
     29  static int fallback_set_params(struct eeprom_req_info *request,    in fallback_set_params() (argument)
     33  u32 offset = request->offset;    in fallback_set_params()
     34  u32 length = request->length;    in fallback_set_params()
     36  if (request->page)    in fallback_set_params()
     37  offset = request->page * ETH_MODULE_EEPROM_PAGE_LEN + offset;    in fallback_set_params()
     40  request->i2c_address == 0x51)    in fallback_set_params()
     53  static int eeprom_fallback(struct eeprom_req_info *request,    in eeprom_fallback() (argument)
     67  err = fallback_set_params(request, &modinfo, &eeprom);    in eeprom_fallback()
    114  struct eeprom_req_info *request = MODULE_EEPROM_REQINFO(req_base);    in eeprom_prepare_data() (local)
    119  page_data.offset = request->offset;    in eeprom_prepare_data()
    [all …]
|
| /linux/drivers/staging/greybus/ |
| gpio.c |
     64  struct gb_gpio_activate_request request;    in gb_gpio_activate_operation() (local)
     72  request.which = which;    in gb_gpio_activate_operation()
     74  &request, sizeof(request), NULL, 0);    in gb_gpio_activate_operation()
     90  struct gb_gpio_deactivate_request request;    in gb_gpio_deactivate_operation() (local)
     93  request.which = which;    in gb_gpio_deactivate_operation()
     95  &request, sizeof(request), NULL, 0);    in gb_gpio_deactivate_operation()
    111  struct gb_gpio_get_direction_request request;    in gb_gpio_get_direction_operation() (local)
    116  request.which = which;    in gb_gpio_get_direction_operation()
    118  &request, sizeof(request),    in gb_gpio_get_direction_operation()
    135  struct gb_gpio_direction_in_request request;    in gb_gpio_direction_in_operation() (local)
    [all …]
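Every operation in this file follows the same Greybus shape: fill a small wire-format request struct and hand it to gb_operation_sync() on the bundle's connection, passing NULL when no response payload is expected. A minimal sketch of the activate case, assuming the request struct and operation type definitions from greybus_protocols.h:

```c
#include <linux/greybus.h>

/* Activate one GPIO line on the remote controller (sketch of the pattern). */
static int example_gpio_activate(struct gb_connection *connection, u8 which)
{
	struct gb_gpio_activate_request request;

	request.which = which;

	/* No response payload is defined for this operation. */
	return gb_operation_sync(connection, GB_GPIO_TYPE_ACTIVATE,
				 &request, sizeof(request), NULL, 0);
}
```

The pwm.c, fw-management.c and control.c entries below show the same request-fill-then-gb_operation_sync pattern with their own wire structs.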
|
| pwm.c |
     47  struct gb_pwm_activate_request request;    in gb_pwm_activate_operation() (local)
     51  request.which = which;    in gb_pwm_activate_operation()
     59  &request, sizeof(request), NULL, 0);    in gb_pwm_activate_operation()
     69  struct gb_pwm_deactivate_request request;    in gb_pwm_deactivate_operation() (local)
     73  request.which = which;    in gb_pwm_deactivate_operation()
     81  &request, sizeof(request), NULL, 0);    in gb_pwm_deactivate_operation()
     92  struct gb_pwm_config_request request;    in gb_pwm_config_operation() (local)
     96  request.which = which;    in gb_pwm_config_operation()
     97  request.duty = cpu_to_le32(duty);    in gb_pwm_config_operation()
     98  request.period = cpu_to_le32(period);    in gb_pwm_config_operation()
    [all …]
|
| fw-management.c |
    138  struct gb_fw_mgmt_load_and_validate_fw_request request;    in fw_mgmt_load_and_validate_operation() (local)
    148  request.load_method = load_method;    in fw_mgmt_load_and_validate_operation()
    150  ret = strscpy_pad(request.firmware_tag, tag);    in fw_mgmt_load_and_validate_operation()
    154  request.firmware_tag);    in fw_mgmt_load_and_validate_operation()
    168  request.request_id = ret;    in fw_mgmt_load_and_validate_operation()
    171  GB_FW_MGMT_TYPE_LOAD_AND_VALIDATE_FW, &request,    in fw_mgmt_load_and_validate_operation()
    172  sizeof(request), NULL, 0);    in fw_mgmt_load_and_validate_operation()
    189  struct gb_fw_mgmt_loaded_fw_request *request;    in fw_mgmt_interface_fw_loaded_operation() (local)
    198  if (op->request->payload_size != sizeof(*request)) {    in fw_mgmt_interface_fw_loaded_operation()
    200  op->request->payload_size, sizeof(*request));    in fw_mgmt_interface_fw_loaded_operation()
    [all …]
|
| /linux/drivers/greybus/ |
| control.c |
     21  struct gb_control_version_request request;    in gb_control_get_version() (local)
     25  request.major = GB_CONTROL_VERSION_MAJOR;    in gb_control_get_version()
     26  request.minor = GB_CONTROL_VERSION_MINOR;    in gb_control_get_version()
     30  &request, sizeof(request), &response,    in gb_control_get_version()
     39  if (response.major > request.major) {    in gb_control_get_version()
     42  response.major, request.major);    in gb_control_get_version()
     59  struct gb_control_bundle_version_request request;    in gb_control_get_bundle_version() (local)
     63  request.bundle_id = bundle->id;    in gb_control_get_bundle_version()
     67  &request, sizeof(request),    in gb_control_get_bundle_version()
    133  struct gb_control_connected_request request;    in gb_control_connected_operation() (local)
    [all …]
|
| /linux/drivers/usb/serial/ |
| upd78f0730.c |
    209  struct upd78f0730_set_dtr_rts request;    in upd78f0730_tiocmset() (local)
    232  request.opcode = UPD78F0730_CMD_SET_DTR_RTS;    in upd78f0730_tiocmset()
    233  request.params = private->line_signals;    in upd78f0730_tiocmset()
    235  res = upd78f0730_send_ctl(port, &request, sizeof(request));    in upd78f0730_tiocmset()
    245  struct upd78f0730_set_dtr_rts request;    in upd78f0730_break_ctl() (local)
    259  request.opcode = UPD78F0730_CMD_SET_DTR_RTS;    in upd78f0730_break_ctl()
    260  request.params = private->line_signals;    in upd78f0730_break_ctl()
    262  res = upd78f0730_send_ctl(port, &request, sizeof(request));    in upd78f0730_break_ctl()
    306  struct upd78f0730_line_control request;    in upd78f0730_set_termios() (local)
    318  request.opcode = UPD78F0730_CMD_LINE_CONTROL;    in upd78f0730_set_termios()
    [all …]
|
| /linux/drivers/s390/char/ |
| sclp_vt220.c |
     96  static int __sclp_vt220_emit(struct sclp_vt220_request *request);
    116  sclp_vt220_process_queue(struct sclp_vt220_request *request)    in sclp_vt220_process_queue() (argument)
    123  page = request->sclp_req.sccb;    in sclp_vt220_process_queue()
    126  list_del(&request->list);    in sclp_vt220_process_queue()
    129  request = NULL;    in sclp_vt220_process_queue()
    131  request = list_entry(sclp_vt220_outqueue.next,    in sclp_vt220_process_queue()
    133  if (!request) {    in sclp_vt220_process_queue()
    139  } while (__sclp_vt220_emit(request));    in sclp_vt220_process_queue()
    140  if (request == NULL && sclp_vt220_flush_later)    in sclp_vt220_process_queue()
    152  sclp_vt220_callback(struct sclp_req *request, void *data)    in sclp_vt220_callback() (argument)
    [all …]
|
| /linux/sound/pci/mixart/ |
| mixart_hwdep.c |
    133  struct mixart_msg request;    in mixart_enum_connectors() (local)
    150  request.message_id = MSG_SYSTEM_ENUM_PLAY_CONNECTOR;    in mixart_enum_connectors()
    151  request.uid = (struct mixart_uid){0,0}; /* board num = 0 */    in mixart_enum_connectors()
    152  request.data = NULL;    in mixart_enum_connectors()
    153  request.size = 0;    in mixart_enum_connectors()
    155  err = snd_mixart_send_msg(mgr, &request, sizeof(*connector), connector);    in mixart_enum_connectors()
    180  request.message_id = MSG_CONNECTOR_GET_AUDIO_INFO;    in mixart_enum_connectors()
    181  request.uid = connector->uid[k];    in mixart_enum_connectors()
    182  request.data = audio_info_req;    in mixart_enum_connectors()
    183  request.size = sizeof(*audio_info_req);    in mixart_enum_connectors()
    [all …]
|
| /linux/drivers/gpu/drm/i915/selftests/ |
| mock_request.c |
     33  struct i915_request *request;    in mock_request() (local)
     36  request = intel_context_create_request(ce);    in mock_request()
     37  if (IS_ERR(request))    in mock_request()
     38  return request;    in mock_request()
     40  request->mock.delay = delay;    in mock_request()
     41  return request;    in mock_request()
     44  bool mock_cancel_request(struct i915_request *request)    in mock_cancel_request() (argument)
     47  container_of(request->engine, typeof(*engine), base);    in mock_cancel_request()
     51  was_queued = !list_empty(&request->mock.link);    in mock_cancel_request()
     52  list_del_init(&request->mock.link);    in mock_cancel_request()
    [all …]
|
| /linux/drivers/thunderbolt/ |
| icm.c |
     61  struct icm_usb4_switch_op request;    (member)
    276  const struct icm_pkg_header *req_hdr = req->request;    in icm_match()
    299  static int icm_request(struct tb *tb, const void *request, size_t request_size,    in icm_request() (argument)
    315  req->request = request;    in icm_request()
    423  struct icm_fr_pkg_get_topology request = {    in icm_fr_get_route() (local)
    434  ret = icm_request(tb, &request, sizeof(request), switches,    in icm_fr_get_route()
    481  struct icm_pkg_driver_ready request = {    in icm_fr_driver_ready() (local)
    487  ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),    in icm_fr_driver_ready()
    500  struct icm_fr_pkg_approve_device request;    in icm_fr_approve_switch() (local)
    504  memset(&request, 0, sizeof(request));    in icm_fr_approve_switch()
    [all …]
|
| /linux/security/apparmor/ |
| file.c |
     52  if (ad->request & AA_AUDIT_FILE_MASK) {    in file_audit_cb()
     54  map_mask_to_chr_mask(ad->request));    in file_audit_cb()
     62  if (ad->request & AA_AUDIT_FILE_MASK) {    in file_audit_cb()
     97  const char *op, u32 request, const char *name,    in aa_audit_file() (argument)
    105  ad.request = request;    in aa_audit_file()
    121  ad.request &= mask;    in aa_audit_file()
    123  if (likely(!ad.request))    in aa_audit_file()
    128  ad.request = ad.request & ~perms->allow;    in aa_audit_file()
    129  AA_BUG(!ad.request);    in aa_audit_file()
    131  if (ad.request & perms->kill)    in aa_audit_file()
    [all …]
|
| /linux/Documentation/userspace-api/media/mediactl/ |
| media-request-ioc-queue.rst |
     13  MEDIA_REQUEST_IOC_QUEUE - Queue a request
     31  If the media device supports :ref:`requests <media-request-api>`, then
     32  this request ioctl can be used to queue a previously allocated request.
     34  If the request was successfully queued, then the file descriptor can be
     35  :ref:`polled <request-func-poll>` to wait for the request to complete.
     37  If the request was already queued before, then ``EBUSY`` is returned.
     38  Other errors can be returned if the contents of the request contained
     40  common error codes. On error both the request and driver state are unchanged.
     42  Once a request is queued, then the driver is required to gracefully handle
     43  errors that occur when the request is applied to the hardware. The
    [all …]
|
| request-api.rst |
      4  .. _media-request-api:
     31  of request completion are also available for reading.
     37  subsystem-specific APIs to support request usage. At the Media Controller
     39  node. Their life cycle is then managed through the request file descriptors in
     42  request support, such as V4L2 APIs that take an explicit ``request_fd``
     50  request. Typically, several such requests will be allocated.
     55  Standard V4L2 ioctls can then receive a request file descriptor to express the
     56  fact that the ioctl is part of said request, and is not to be applied
     59  instead of being immediately applied, and buffers queued to a request do not
     60  enter the regular buffer queue until the request itself is queued.
    [all …]
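From user space, the life cycle described in request-api.rst comes down to a handful of ioctls. A minimal example with error handling omitted; the device paths, buffer type and control ID are placeholders, and the buffer is assumed to have been set up with VIDIOC_REQBUFS (and streaming started) beforehand:

```c
#include <fcntl.h>
#include <poll.h>
#include <sys/ioctl.h>
#include <linux/media.h>
#include <linux/videodev2.h>

int main(void)
{
	int media_fd = open("/dev/media0", O_RDWR);	/* placeholder path */
	int video_fd = open("/dev/video0", O_RDWR);	/* placeholder path */
	int req_fd;

	/* Allocate a request on the media device; req_fd refers to it. */
	ioctl(media_fd, MEDIA_IOC_REQUEST_ALLOC, &req_fd);

	/* Set a control as part of the request instead of immediately. */
	struct v4l2_ext_control ctrl = {
		.id = V4L2_CID_CONTRAST,	/* placeholder control */
		.value = 128,
	};
	struct v4l2_ext_controls ctrls = {
		.which = V4L2_CTRL_WHICH_REQUEST_VAL,
		.request_fd = req_fd,
		.count = 1,
		.controls = &ctrl,
	};
	ioctl(video_fd, VIDIOC_S_EXT_CTRLS, &ctrls);

	/* Queue an already prepared buffer to the same request. */
	struct v4l2_buffer buf = {
		.type = V4L2_BUF_TYPE_VIDEO_OUTPUT,
		.memory = V4L2_MEMORY_MMAP,
		.index = 0,
		.flags = V4L2_BUF_FLAG_REQUEST_FD,
		.request_fd = req_fd,
	};
	ioctl(video_fd, VIDIOC_QBUF, &buf);

	/* Queue the request itself and wait for it to complete. */
	ioctl(req_fd, MEDIA_REQUEST_IOC_QUEUE);

	struct pollfd pfd = { .fd = req_fd, .events = POLLPRI };
	poll(&pfd, 1, -1);

	/* Reinit lets the same request fd be reused for the next frame. */
	ioctl(req_fd, MEDIA_REQUEST_IOC_REINIT);
	return 0;
}
```

Recycling the request fd with MEDIA_REQUEST_IOC_REINIT for each frame is how per-frame parameters are typically driven, for example by stateless codec drivers.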
|
| /linux/drivers/firewire/ |
| core-transaction.c |
    717  void fw_request_get(struct fw_request *request)    in fw_request_get() (argument)
    719  kref_get(&request->kref);    in fw_request_get()
    724  struct fw_request *request = container_of(kref, struct fw_request, kref);    in release_request() (local)
    726  kfree(request);    in release_request()
    729  void fw_request_put(struct fw_request *request)    in fw_request_put() (argument)
    731  kref_put(&request->kref, release_request);    in fw_request_put()
    737  struct fw_request *request = container_of(packet, struct fw_request, response);    in free_response_callback() (local)
    739  trace_async_response_outbound_complete((uintptr_t)request, card->index, packet->generation,    in free_response_callback()
    743  fw_request_put(request);    in free_response_callback()
    746  fw_request_put(request);    in free_response_callback()
    [all …]
|
| /linux/drivers/pmdomain/tegra/ |
| powergate-bpmp.c |
     34  struct mrq_pg_request request;    in tegra_bpmp_powergate_set_state() (local)
     38  memset(&request, 0, sizeof(request));    in tegra_bpmp_powergate_set_state()
     39  request.cmd = CMD_PG_SET_STATE;    in tegra_bpmp_powergate_set_state()
     40  request.id = id;    in tegra_bpmp_powergate_set_state()
     41  request.set_state.state = state;    in tegra_bpmp_powergate_set_state()
     45  msg.tx.data = &request;    in tegra_bpmp_powergate_set_state()
     46  msg.tx.size = sizeof(request);    in tegra_bpmp_powergate_set_state()
     61  struct mrq_pg_request request;    in tegra_bpmp_powergate_get_state() (local)
     65  memset(&request, 0, sizeof(request));    in tegra_bpmp_powergate_get_state()
     66  request.cmd = CMD_PG_GET_STATE;    in tegra_bpmp_powergate_get_state()
    [all …]
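Both helpers shown here wrap the same MRQ_PG exchange with the BPMP firmware: fill a struct mrq_pg_request, point the message's tx descriptor at it, and fire the transfer. A minimal sketch of the set-state path, assuming the tegra_bpmp_transfer() interface and the ABI definitions under soc/tegra/ (example_powergate_set_state is a made-up name):

```c
#include <soc/tegra/bpmp.h>
#include <soc/tegra/bpmp-abi.h>

/* Power a BPMP-managed partition on or off (sketch of the pattern above). */
static int example_powergate_set_state(struct tegra_bpmp *bpmp, u32 id,
				       u32 state)
{
	struct mrq_pg_request request;
	struct tegra_bpmp_message msg;

	memset(&request, 0, sizeof(request));
	request.cmd = CMD_PG_SET_STATE;
	request.id = id;
	request.set_state.state = state;

	memset(&msg, 0, sizeof(msg));
	msg.mrq = MRQ_PG;
	msg.tx.data = &request;
	msg.tx.size = sizeof(request);

	return tegra_bpmp_transfer(bpmp, &msg);
}
```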
|
| /linux/drivers/gpu/drm/i915/gt/ |
| mock_engine.c |
    100  static void advance(struct i915_request *request)    in advance() (argument)
    102  list_del_init(&request->mock.link);    in advance()
    103  i915_request_mark_complete(request);    in advance()
    104  GEM_BUG_ON(!i915_request_completed(request));    in advance()
    106  intel_engine_signal_breadcrumbs(request->engine);    in advance()
    112  struct i915_request *request;    in hw_delay_complete() (local)
    118  request = first_request(engine);    in hw_delay_complete()
    119  if (request)    in hw_delay_complete()
    120  advance(request);    in hw_delay_complete()
    126  while ((request = first_request(engine))) {    in hw_delay_complete()
    [all …]
|
| /linux/include/trace/events/ |
| nbd.h |
     12  TP_PROTO(struct request *req, u64 handle),
     17  __field(struct request *, req)
     27  "nbd transport event: request %p, handle 0x%016llx",
     35  TP_PROTO(struct request *req, u64 handle),
     42  TP_PROTO(struct request *req, u64 handle),
     49  TP_PROTO(struct request *req, u64 handle),
     56  TP_PROTO(struct request *req, u64 handle),
     64  struct request *rq),
     71  __field(struct request *, request)
    [all …]
|
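These entries come from a tracepoint header: TP_PROTO() declares the probe's C signature, __field() reserves space in the trace ring buffer, and a printk-style format string turns the stored fields back into text. A self-contained sketch of one such event for a hypothetical driver (mydrv and mydrv_send are invented names; nbd.h itself factors the shared parts into a DECLARE_EVENT_CLASS/DEFINE_EVENT split):

```c
#undef TRACE_SYSTEM
#define TRACE_SYSTEM mydrv

#if !defined(_TRACE_MYDRV_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_MYDRV_H

#include <linux/tracepoint.h>

struct request;

TRACE_EVENT(mydrv_send,
	TP_PROTO(struct request *req, u64 handle),

	TP_ARGS(req, handle),

	/* Layout of one trace record in the ring buffer. */
	TP_STRUCT__entry(
		__field(struct request *, req)
		__field(u64, handle)
	),

	/* Copy the probe arguments into that record. */
	TP_fast_assign(
		__entry->req = req;
		__entry->handle = handle;
	),

	TP_printk("mydrv transport event: request %p, handle 0x%016llx",
		  __entry->req, __entry->handle)
);

#endif /* _TRACE_MYDRV_H */

/* This part must be outside the include guard. */
#include <trace/define_trace.h>
```

Once compiled in, the event appears under the tracing events directory like any other tracepoint and can be enabled per event or per system.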
| /linux/security/landlock/ |
| audit.c |
    355  static bool is_valid_request(const struct landlock_request *const request)    in is_valid_request() (argument)
    357  if (WARN_ON_ONCE(request->layer_plus_one > LANDLOCK_MAX_NUM_LAYERS))    in is_valid_request()
    360  if (WARN_ON_ONCE(!(!!request->layer_plus_one ^ !!request->access)))    in is_valid_request()
    363  if (request->access) {    in is_valid_request()
    364  if (WARN_ON_ONCE(!(!!request->layer_masks ^    in is_valid_request()
    365  !!request->all_existing_optional_access)))    in is_valid_request()
    368  if (WARN_ON_ONCE(request->layer_masks ||    in is_valid_request()
    369  request->all_existing_optional_access))    in is_valid_request()
    373  if (WARN_ON_ONCE(!!request->layer_masks ^ !!request->layer_masks_size))    in is_valid_request()
    376  if (request->deny_masks) {    in is_valid_request()
    [all …]
|