/linux/drivers/s390/char/

tape_std.c
   38  struct tape_request * request = from_timer(request, t, timer);  in tape_std_assign_timeout() local
   39  struct tape_device * device = request->device;  in tape_std_assign_timeout()
   46  rc = tape_cancel_io(device, request);  in tape_std_assign_timeout()
   56  struct tape_request *request;  in tape_std_assign() local
   58  request = tape_alloc_request(2, 11);  in tape_std_assign()
   59  if (IS_ERR(request))  in tape_std_assign()
   60  return PTR_ERR(request);  in tape_std_assign()
   62  request->op = TO_ASSIGN;  in tape_std_assign()
   63  tape_ccw_cc(request->cpaddr, ASSIGN, 11, request->cpdata);  in tape_std_assign()
   64  tape_ccw_end(request->cpaddr + 1, NOP, 0, NULL);  in tape_std_assign()
  [all …]

tape_core.c
  292  __tape_cancel_io(struct tape_device *device, struct tape_request *request)  in __tape_cancel_io() argument
  298  if (request->callback == NULL)  in __tape_cancel_io()
  303  rc = ccw_device_clear(device->cdev, (long) request);  in __tape_cancel_io()
  307  request->status = TAPE_REQUEST_DONE;  in __tape_cancel_io()
  310  request->status = TAPE_REQUEST_CANCEL;  in __tape_cancel_io()
  434  * request. We may prevent this by returning an error.
  592  struct tape_request * request;  in __tape_discard_requests() local
  596  request = list_entry(l, struct tape_request, list);  in __tape_discard_requests()
  597  if (request->status == TAPE_REQUEST_IN_IO)  in __tape_discard_requests()
  598  request->status = TAPE_REQUEST_DONE;  in __tape_discard_requests()
  [all …]

tape_34xx.c
   56  static void __tape_34xx_medium_sense(struct tape_request *request)  in __tape_34xx_medium_sense() argument
   58  struct tape_device *device = request->device;  in __tape_34xx_medium_sense()
   61  if (request->rc == 0) {  in __tape_34xx_medium_sense()
   62  sense = request->cpdata;  in __tape_34xx_medium_sense()
   81  request->rc);  in __tape_34xx_medium_sense()
   82  tape_free_request(request);  in __tape_34xx_medium_sense()
   87  struct tape_request *request;  in tape_34xx_medium_sense() local
   90  request = tape_alloc_request(1, 32);  in tape_34xx_medium_sense()
   91  if (IS_ERR(request)) {  in tape_34xx_medium_sense()
   93  return PTR_ERR(request);  in tape_34xx_medium_sense()
  [all …]

tape_3590.c
  204  struct tape_request *request;  in tape_3592_kekl_query() local
  213  request = tape_alloc_request(2, sizeof(*order));  in tape_3592_kekl_query()
  214  if (IS_ERR(request)) {  in tape_3592_kekl_query()
  215  rc = PTR_ERR(request);  in tape_3592_kekl_query()
  218  order = request->cpdata;  in tape_3592_kekl_query()
  222  request->op = TO_KEKL_QUERY;  in tape_3592_kekl_query()
  223  tape_ccw_cc(request->cpaddr, PERF_SUBSYS_FUNC, sizeof(*order), order);  in tape_3592_kekl_query()
  224  tape_ccw_end(request->cpaddr + 1, READ_SS_DATA, sizeof(*int_kekls),  in tape_3592_kekl_query()
  226  rc = tape_do_io(device, request);  in tape_3592_kekl_query()
  233  tape_free_request(request);  in tape_3592_kekl_query()
  [all …]

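The four s390 tape files above all follow the same channel-program pattern: allocate a tape_request, set its opcode, build the CCW chain with tape_ccw_cc()/tape_ccw_end(), start the I/O, and free the request. A minimal sketch of that flow, assuming only the helpers visible in the hits (tape_alloc_request(), tape_ccw_end(), tape_do_io(), tape_free_request()) and using TO_NOP/NOP as an illustrative command; it also assumes the driver-private tape.h/tape_std.h headers:

    /* Sketch only: mirrors the allocate/build/run/free pattern visible in the
     * tape_std.c and tape_3590.c hits above. Helper signatures are assumed
     * from those snippets, not verified against a specific kernel version. */
    static int example_tape_nop(struct tape_device *device)
    {
        struct tape_request *request;
        int rc;

        request = tape_alloc_request(1, 0);     /* 1 CCW, no data area */
        if (IS_ERR(request))
            return PTR_ERR(request);

        request->op = TO_NOP;
        tape_ccw_end(request->cpaddr, NOP, 0, NULL);    /* single, final CCW */

        rc = tape_do_io(device, request);       /* run synchronously */
        tape_free_request(request);
        return rc;
    }
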
/linux/drivers/greybus/

svc.c
  176  struct gb_svc_pwrmon_sample_get_request request;  in gb_svc_pwrmon_sample_get() local
  180  request.rail_id = rail_id;  in gb_svc_pwrmon_sample_get()
  181  request.measurement_type = measurement_type;  in gb_svc_pwrmon_sample_get()
  184  &request, sizeof(request),  in gb_svc_pwrmon_sample_get()
  213  struct gb_svc_pwrmon_intf_sample_get_request request;  in gb_svc_pwrmon_intf_sample_get() local
  217  request.intf_id = intf_id;  in gb_svc_pwrmon_intf_sample_get()
  218  request.measurement_type = measurement_type;  in gb_svc_pwrmon_intf_sample_get()
  222  &request, sizeof(request),  in gb_svc_pwrmon_intf_sample_get()
  260  struct gb_svc_intf_device_id_request request;  in gb_svc_intf_device_id() local
  262  request.intf_id = intf_id;  in gb_svc_intf_device_id()
  [all …]

/linux/include/linux/surface_aggregator/

controller.h
   54  * Specifies that the request expects a response. If not set, the request
   56  * transmitted. If set, the request transport system waits for a response
   57  * of the request.
   60  * Specifies that the request should be transmitted via an unsequenced
   61  * packet. If set, the request must not have a response, meaning that this
   70  * struct ssam_request - SAM request description.
   71  * @target_category: Category of the request's target. See &enum ssam_ssh_tc.
   72  * @target_id: ID of the request's target.
   73  * @command_id: Command ID of the request.
   74  * @instance_id: Instance ID of the request's target.
  [all …]

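The controller.h hits above list the fields of struct ssam_request and the meaning of its flags. A minimal sketch of filling and submitting such a request, assuming the field names from the kernel-doc above, the SSAM_REQUEST_HAS_RESPONSE flag, and a synchronous ssam_request_sync()/struct ssam_response pair; the target category, target/instance/command IDs, and buffer handling are placeholders:

    #include <linux/surface_aggregator/controller.h>

    /* Sketch, not verified: field names follow the kernel-doc lines above;
     * ssam_request_sync(), struct ssam_response and SSAM_SSH_TC_TMP are
     * assumptions used only for illustration. */
    static int example_ssam_query(struct ssam_controller *ctrl, u8 *buf, size_t len)
    {
        struct ssam_request rqst = {
            .target_category = SSAM_SSH_TC_TMP,            /* see enum ssam_ssh_tc */
            .target_id       = 0x01,                       /* placeholder target */
            .command_id      = 0x01,                       /* placeholder command */
            .instance_id     = 0x00,
            .flags           = SSAM_REQUEST_HAS_RESPONSE,  /* wait for a response */
            .length          = 0,                          /* no payload */
            .payload         = NULL,
        };
        struct ssam_response rsp = {
            .capacity = len,
            .length   = 0,
            .pointer  = buf,
        };

        return ssam_request_sync(ctrl, &rqst, &rsp);
    }
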
/linux/include/media/

media-request.h
    3  * Media device request objects
   23  * enum media_request_state - media request state
   26  * @MEDIA_REQUEST_STATE_VALIDATING: Validating the request, no state changes
   29  * @MEDIA_REQUEST_STATE_COMPLETE: Completed, the request is done
   30  * @MEDIA_REQUEST_STATE_CLEANING: Cleaning, the request is being re-inited
   31  * @MEDIA_REQUEST_STATE_UPDATING: The request is being updated, i.e.
   32  * request objects are being added,
   34  * @NR_OF_MEDIA_REQUEST_STATE: The number of media request states, used
   50  * struct media_request - Media device request
   51  * @mdev: Media device this request belongs to
  [all …]

/linux/Documentation/userspace-api/media/mediactl/

request-api.rst
    4  .. _media-request-api:
    6  Request API
    9  The Request API has been designed to allow V4L2 to deal with requirements of
   19  Supporting these features without the Request API is not always possible and if
   26  The Request API allows a specific configuration of the pipeline (media
   31  of request completion are also available for reading.
   36  The Request API extends the Media Controller API and cooperates with
   37  subsystem-specific APIs to support request usage. At the Media Controller
   39  node. Their life cycle is then managed through the request file descriptors in
   42  request support, such as V4L2 APIs that take an explicit ``request_fd``
  [all …]

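The request-api.rst hits describe requests being allocated on the media device node and then driven through per-request file descriptors. A userspace sketch of the first half of that life cycle, allocating a request with MEDIA_IOC_REQUEST_ALLOC and staging a control change against it via the ``request_fd`` field; the control ID and the assumption that the driver supports requests for this control are illustrative only:

    #include <sys/ioctl.h>
    #include <linux/media.h>
    #include <linux/videodev2.h>

    /* Userspace sketch, assuming a V4L2 driver with request support; the
     * control ID is a placeholder. Returns the request fd or -1 on error. */
    int example_alloc_and_fill_request(int media_fd, int video_fd)
    {
        int req_fd;
        struct v4l2_ext_control ctrl = { .id = V4L2_CID_BRIGHTNESS, .value = 128 };
        struct v4l2_ext_controls ctrls = {
            .which    = V4L2_CTRL_WHICH_REQUEST_VAL,
            .count    = 1,
            .controls = &ctrl,
        };

        /* Allocate a request on the media device; its lifetime is now tied
         * to the returned file descriptor. */
        if (ioctl(media_fd, MEDIA_IOC_REQUEST_ALLOC, &req_fd) < 0)
            return -1;

        /* Stage the control change in the request instead of applying it now. */
        ctrls.request_fd = req_fd;
        if (ioctl(video_fd, VIDIOC_S_EXT_CTRLS, &ctrls) < 0)
            return -1;

        return req_fd;
    }
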
media-request-ioc-queue.rst
   13  MEDIA_REQUEST_IOC_QUEUE - Queue a request
   31  If the media device supports :ref:`requests <media-request-api>`, then
   32  this request ioctl can be used to queue a previously allocated request.
   34  If the request was successfully queued, then the file descriptor can be
   35  :ref:`polled <request-func-poll>` to wait for the request to complete.
   37  If the request was already queued before, then ``EBUSY`` is returned.
   38  Other errors can be returned if the contents of the request contained
   40  common error codes. On error both the request and driver state are unchanged.
   42  Once a request is queued, then the driver is required to gracefully handle
   43  errors that occur when the request is applied to the hardware. The
  [all …]

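Following the MEDIA_REQUEST_IOC_QUEUE description above, a sketch of queueing an already-filled request and waiting for it to complete by polling its file descriptor; the use of MEDIA_REQUEST_IOC_REINIT to reuse the fd afterwards is an assumption for this example:

    #include <poll.h>
    #include <sys/ioctl.h>
    #include <linux/media.h>

    /* Sketch: queue a previously allocated and filled request, then block
     * until the driver marks it complete. */
    int example_queue_and_wait(int req_fd)
    {
        struct pollfd pfd = { .fd = req_fd, .events = POLLPRI };

        /* Hand the request to the driver; EBUSY means it was already queued. */
        if (ioctl(req_fd, MEDIA_REQUEST_IOC_QUEUE) < 0)
            return -1;

        /* The request fd signals POLLPRI once the request completes. */
        if (poll(&pfd, 1, -1) < 0)
            return -1;

        /* Re-initialize the fd so it can be reused for another request. */
        return ioctl(req_fd, MEDIA_REQUEST_IOC_REINIT);
    }
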
/linux/sound/soc/intel/catpt/

messages.c
   17  struct catpt_ipc_msg request = {{0}}, reply;  in catpt_ipc_get_fw_version() local
   20  request.header = msg.val;  in catpt_ipc_get_fw_version()
   24  ret = catpt_dsp_send_msg(cdev, request, &reply);  in catpt_ipc_get_fw_version()
   58  struct catpt_ipc_msg request, reply;  in catpt_ipc_alloc_stream() local
   91  request.header = msg.val;  in catpt_ipc_alloc_stream()
   92  request.size = size;  in catpt_ipc_alloc_stream()
   93  request.data = payload;  in catpt_ipc_alloc_stream()
   97  ret = catpt_dsp_send_msg(cdev, request, &reply);  in catpt_ipc_alloc_stream()
  109  struct catpt_ipc_msg request;  in catpt_ipc_free_stream() local
  112  request.header = msg.val;  in catpt_ipc_free_stream()
  [all …]

/linux/include/linux/

blk-mq.h
   24  typedef enum rq_end_io_ret (rq_end_io_fn)(struct request *, blk_status_t);
   27  * request flags */
   34  /* request for flush sequence */
   42  /* use an I/O scheduler for this request */
   50  /* runtime pm request */
   59  /* request completion needs to be signaled to zone write plugging. */
  102  struct request {  struct
  124  struct request *rq_next;  argument
  129  /* Time that the first bio started allocating this request. */  argument
  132  /* Time that this request was allocated for this IO. */  argument
  [all …]

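Line 24 of blk-mq.h shows the completion-callback type for a request. A sketch of a driver-side callback matching that typedef; the RQ_END_IO_NONE return value, blk_status_to_errno(), and the rq->end_io_data field are assumed from recent kernels and may differ on older ones:

    #include <linux/blk-mq.h>
    #include <linux/completion.h>
    #include <linux/printk.h>

    /* Sketch: a completion callback matching the rq_end_io_fn typedef shown
     * above. The submitter is assumed to have stored a struct completion in
     * rq->end_io_data before starting the request. */
    static enum rq_end_io_ret example_end_io(struct request *rq, blk_status_t error)
    {
        struct completion *done = rq->end_io_data;

        if (error)
            pr_warn("request failed: %d\n", blk_status_to_errno(error));

        complete(done);         /* wake the submitter */
        return RQ_END_IO_NONE;  /* caller keeps ownership of the request */
    }
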
/linux/fs/nfsd/

xdr4.h
  212  u32 ac_req_access;  /* request */
  218  u32 cl_seqid;  /* request */
  219  stateid_t cl_stateid;  /* request+response */
  223  u64 co_offset;  /* request */
  224  u32 co_count;  /* request */
  229  u32 cr_namelen;  /* request */
  230  char * cr_name;  /* request */
  231  u32 cr_type;  /* request */
  232  union {  /* request */
  243  u32 cr_bmval[3];  /* request */
  [all …]

/linux/block/

elevator.h
   33  bool (*allow_merge)(struct request_queue *, struct request *, struct bio *);
   35  int (*request_merge)(struct request_queue *q, struct request **, struct bio *);
   36  void (*request_merged)(struct request_queue *, struct request *, enum elv_merge);
   37  void (*requests_merged)(struct request_queue *, struct request *, struct request *);
   39  void (*prepare_request)(struct request *);
   40  void (*finish_request)(struct request *);
   43  struct request *(*dispatch_request)(struct blk_mq_hw_ctx *);
   45  void (*completed_request)(struct request *, u64);
   46  void (*requeue_request)(struct request *);
   47  struct request *(*former_request)(struct request_queue *, struct request *);
  [all …]

blk-crypto-internal.h
   31  bool bio_crypt_rq_ctx_compatible(struct request *rq, struct bio *bio);
   36  static inline bool bio_crypt_ctx_back_mergeable(struct request *req,  in bio_crypt_ctx_back_mergeable()
   43  static inline bool bio_crypt_ctx_front_mergeable(struct request *req,  in bio_crypt_ctx_front_mergeable()
   50  static inline bool bio_crypt_ctx_merge_rq(struct request *req,  in bio_crypt_ctx_merge_rq()
   51  struct request *next)  in bio_crypt_ctx_merge_rq()
   57  static inline void blk_crypto_rq_set_defaults(struct request *rq)  in blk_crypto_rq_set_defaults()
   63  static inline bool blk_crypto_rq_is_encrypted(struct request *rq)  in blk_crypto_rq_is_encrypted()
   68  static inline bool blk_crypto_rq_has_keyslot(struct request *rq)  in blk_crypto_rq_has_keyslot()
   96  static inline bool bio_crypt_rq_ctx_compatible(struct request *rq,  in bio_crypt_rq_ctx_compatible()
  102  static inline bool bio_crypt_ctx_front_mergeable(struct request *req,  in bio_crypt_ctx_front_mergeable()
  [all …]

/linux/rust/kernel/block/mq/

request.rs
    3  //! This module provides a wrapper for the C `struct request` type.
   19  /// A wrapper around a blk-mq `struct request`. This represents an IO request.
   23  /// There are four states for a request that the Rust bindings care about:
   25  /// A) Request is owned by block layer (refcount 0)
   26  /// B) Request is owned by driver but with zero `ARef`s in existence
   28  /// C) Request is owned by driver with exactly one `ARef` in existence
   30  /// D) Request is owned by driver with more than one `ARef` in existence
   34  /// We need to track A and B to ensure we fail tag to request conversions for
   37  /// We need to track C and D to ensure that it is safe to end the request and hand
   42  /// `struct request`.
  [all …]

/linux/tools/perf/pmu-events/arch/riscv/

riscv-sbi-firmware.json
   51  "PublicDescription": "Sent FENCE.I request to other HART event",
   54  "BriefDescription": "Sent FENCE.I request to other HART event"
   57  "PublicDescription": "Received FENCE.I request from other HART event",
   60  "BriefDescription": "Received FENCE.I request from other HART event"
   63  "PublicDescription": "Sent SFENCE.VMA request to other HART event",
   66  "BriefDescription": "Sent SFENCE.VMA request to other HART event"
   69  "PublicDescription": "Received SFENCE.VMA request from other HART event",
   72  "BriefDescription": "Received SFENCE.VMA request from other HART event"
   75  "PublicDescription": "Sent SFENCE.VMA with ASID request to other HART event",
   78  "BriefDescription": "Sent SFENCE.VMA with ASID request to other HART event"
  [all …]

/linux/drivers/gpu/drm/i915/

i915_request.h
   73  * I915_FENCE_FLAG_ACTIVE - this request is currently submitted to HW.
   76  * by __i915_request_unsubmit() if we preempt this request.
   78  * Finally cleared for consistency on retiring the request, when
   79  * we know the HW is no longer running this request.
   86  * I915_FENCE_FLAG_PQUEUE - this request is ready for execution
   88  * Using the scheduler, when a request is ready for execution it is put
   98  * I915_FENCE_FLAG_HOLD - this request is currently on hold
  100  * This request has been suspended, pending an ongoing investigation.
  105  * I915_FENCE_FLAG_INITIAL_BREADCRUMB - this request has the initial
  112  * I915_FENCE_FLAG_SIGNAL - this request is currently on signal_list
  [all …]

/linux/tools/perf/pmu-events/arch/x86/westmereep-dp/

memory.json
   11  "BriefDescription": "REQUEST = ANY_DATA read and RESPONSE = ANY_DRAM AND REMOTE_FWD",
   21  "BriefDescription": "REQUEST = ANY_DATA read and RESPONSE = ANY_LLC_MISS",
   31  "BriefDescription": "REQUEST = ANY_DATA read and RESPONSE = OTHER_LOCAL_DRAM",
   41  "BriefDescription": "REQUEST = ANY_DATA read and RESPONSE = REMOTE_DRAM",
   51  "BriefDescription": "REQUEST = ANY IFETCH and RESPONSE = ANY_DRAM AND REMOTE_FWD",
   61  "BriefDescription": "REQUEST = ANY IFETCH and RESPONSE = ANY_LLC_MISS",
   71  "BriefDescription": "REQUEST = ANY IFETCH and RESPONSE = OTHER_LOCAL_DRAM",
   81  "BriefDescription": "REQUEST = ANY IFETCH and RESPONSE = REMOTE_DRAM",
   91  "BriefDescription": "REQUEST = ANY_REQUEST and RESPONSE = ANY_DRAM AND REMOTE_FWD",
  101  "BriefDescription": "REQUEST = ANY_REQUEST and RESPONSE = ANY_LLC_MISS",
  [all …]

/linux/net/ethtool/

eeprom.c
   29  static int fallback_set_params(struct eeprom_req_info *request,  in fallback_set_params() argument
   33  u32 offset = request->offset;  in fallback_set_params()
   34  u32 length = request->length;  in fallback_set_params()
   36  if (request->page)  in fallback_set_params()
   37  offset = request->page * ETH_MODULE_EEPROM_PAGE_LEN + offset;  in fallback_set_params()
   40  request->i2c_address == 0x51)  in fallback_set_params()
   53  static int eeprom_fallback(struct eeprom_req_info *request,  in eeprom_fallback() argument
   67  err = fallback_set_params(request, &modinfo, &eeprom);  in eeprom_fallback()
  114  struct eeprom_req_info *request = MODULE_EEPROM_REQINFO(req_base);  in eeprom_prepare_data() local
  119  page_data.offset = request->offset;  in eeprom_prepare_data()
  [all …]

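The fallback_set_params() hits show how a paged module-EEPROM read is flattened into a single linear offset when a driver only implements the older get_module_eeprom interface. A small sketch of that arithmetic, assuming ETH_MODULE_EEPROM_PAGE_LEN (128 bytes per page) as used at line 37; the special handling of i2c address 0x51 visible at line 40 is deliberately left out:

    #include <linux/ethtool.h>    /* ETH_MODULE_EEPROM_PAGE_LEN */

    /* Sketch of the page flattening done by fallback_set_params() above:
     * page N, byte "offset" becomes one linear offset into the module map. */
    static u32 example_flat_eeprom_offset(u32 page, u32 offset)
    {
        if (page)
            offset = page * ETH_MODULE_EEPROM_PAGE_LEN + offset;
        return offset;
    }
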
/linux/drivers/staging/greybus/

gpio.c
   69  struct gb_gpio_activate_request request;  in gb_gpio_activate_operation() local
   77  request.which = which;  in gb_gpio_activate_operation()
   79  &request, sizeof(request), NULL, 0);  in gb_gpio_activate_operation()
   95  struct gb_gpio_deactivate_request request;  in gb_gpio_deactivate_operation() local
   98  request.which = which;  in gb_gpio_deactivate_operation()
  100  &request, sizeof(request), NULL, 0);  in gb_gpio_deactivate_operation()
  116  struct gb_gpio_get_direction_request request;  in gb_gpio_get_direction_operation() local
  121  request.which = which;  in gb_gpio_get_direction_operation()
  123  &request, sizeof(request),  in gb_gpio_get_direction_operation()
  140  struct gb_gpio_direction_in_request request;  in gb_gpio_direction_in_operation() local
  [all …]

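Each gb_gpio_*_operation() above (and the svc.c hits earlier) follows the same Greybus idiom: fill a small wire-format request structure and send it with a synchronous operation. A sketch of that idiom for the activate case, assuming the gb_operation_sync() helper and the GB_GPIO_TYPE_ACTIVATE / struct gb_gpio_activate_request definitions that the snippet implies:

    #include <linux/greybus.h>

    /* Sketch of the Greybus request idiom used by gpio.c above. The helper
     * gb_operation_sync() and the GB_GPIO_TYPE_ACTIVATE constant are assumed
     * from the Greybus GPIO protocol and not verified here. */
    static int example_gpio_activate(struct gb_connection *connection, u8 which)
    {
        struct gb_gpio_activate_request request;

        request.which = which;    /* line number of the GPIO to activate */
        return gb_operation_sync(connection, GB_GPIO_TYPE_ACTIVATE,
                                 &request, sizeof(request), NULL, 0);
    }
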
/linux/drivers/firewire/

core-transaction.c
   97  * If the request packet has already been sent, we need to see  in fw_cancel_transaction()
  279  * __fw_send_request() - submit a request packet for transmission to generate callback for response
  281  * @card: interface to send the request at
  282  * @t: transaction instance to which the request belongs
  285  * @generation: bus generation in which request and response are valid
  288  * @payload: data payload for the request subaction
  295  * Submit a request packet into the asynchronous request transmission queue.
  303  * @generation. Otherwise the request is in danger to be sent to a wrong node.
  314  * In case of request types without payload, @data is NULL and @length is 0.
  321  * %RCODE_NO_ACK) denote transaction timeout, busy responder, stale request
  [all …]

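The core-transaction.c hits document the asynchronous request-submission path around __fw_send_request(). As a simpler illustration of the same transaction machinery, a sketch that reads one quadlet from a remote node using the synchronous fw_run_transaction() wrapper; its signature, the fw_device fields used here, and the return convention (an RCODE_* value) are assumptions, not verified against a particular kernel version:

    #include <linux/firewire.h>
    #include <linux/firewire-constants.h>

    /* Sketch: read a single quadlet from a node's address space through the
     * request machinery documented above. Returns an RCODE_* value;
     * RCODE_COMPLETE indicates success. */
    static int example_read_quadlet(struct fw_device *device, u64 addr, __be32 *value)
    {
        return fw_run_transaction(device->card, TCODE_READ_QUADLET_REQUEST,
                                  device->node_id, device->generation,
                                  device->max_speed, addr,
                                  value, sizeof(*value));
    }
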
/linux/drivers/md/dm-vdo/indexer/

index.c
   43  * request wants to add a chapter to the sparse cache, it sends a barrier message to each zone
   89  struct uds_request *request;  in launch_zone_message() local
   91  result = vdo_allocate(1, struct uds_request, __func__, &request);  in launch_zone_message()
   95  request->index = index;  in launch_zone_message()
   96  request->unbatched = true;  in launch_zone_message()
   97  request->zone_number = zone;  in launch_zone_message()
   98  request->zone_message = message;  in launch_zone_message()
  100  uds_enqueue_request(request, STAGE_MESSAGE);  in launch_zone_message()
  120  * Determine whether this request should trigger a sparse cache barrier message to change the
  124  static u64 triage_index_request(struct uds_index *index, struct uds_request *request)  in triage_index_request() argument
  [all …]

/linux/tools/testing/kunit/

kunit.py
   78  request: KunitConfigRequest) -> KunitResult:
   82  success = linux.build_reconfig(request.build_dir, request.make_options)
   88  request: KunitBuildRequest) -> KunitResult:
   92  success = linux.build_kernel(request.jobs,
   93  request.build_dir,
   94  request.make_options)
  100  request: KunitBuildRequest) -> KunitResult:
  101  config_result = config_tests(linux, request)
  105  return build_tests(linux, request)
  107  def _list_tests(linux: kunit_kernel.LinuxSourceTree, request: KunitExecRequest) -> List[str]:
  [all …]

/linux/drivers/media/usb/as102/

as10x_cmd.h
   26  /* context request types */
   83  /* request */
   85  /* request identifier */
   98  /* request */
  100  /* request identifier */
  113  /* request */
  115  /* request identifier */
  130  /* request */
  132  /* request identifier */
  147  /* request */
  [all …]

/linux/sound/soc/intel/avs/

messages.c
   18  struct avs_ipc_msg request = {{0}};  in avs_ipc_set_boot_config() local
   23  request.header = msg.val;  in avs_ipc_set_boot_config()
   25  return avs_dsp_send_rom_msg(adev, &request, "set boot config");  in avs_ipc_set_boot_config()
   31  struct avs_ipc_msg request;  in avs_ipc_load_modules() local
   34  request.header = msg.val;  in avs_ipc_load_modules()
   35  request.data = mod_ids;  in avs_ipc_load_modules()
   36  request.size = sizeof(*mod_ids) * num_mod_ids;  in avs_ipc_load_modules()
   38  return avs_dsp_send_msg_timeout(adev, &request, NULL, AVS_CL_TIMEOUT_MS,  in avs_ipc_load_modules()
   45  struct avs_ipc_msg request;  in avs_ipc_unload_modules() local
   48  request.header = msg.val;  in avs_ipc_unload_modules()
  [all …]