Lines Matching full:req
269 struct scmi_req *req; in scmi_reqs_pool_allocate() local
275 req = malloc(sizeof(*req) + max_payld_sz, in scmi_reqs_pool_allocate()
278 req->dev = dev; in scmi_reqs_pool_allocate()
279 req->tsk.ta_context = &req->tsk; in scmi_reqs_pool_allocate()
280 req->tsk.ta_func = scmi_req_async_waiter; in scmi_reqs_pool_allocate()
282 mtx_init(&req->mtx, "req", "SCMI", MTX_SPIN); in scmi_reqs_pool_allocate()
283 LIST_INSERT_HEAD(&rp->head, req, next); in scmi_reqs_pool_allocate()
294 struct scmi_req *req, *tmp; in scmi_reqs_pool_free() local
296 LIST_FOREACH_SAFE(req, &rp->head, next, tmp) { in scmi_reqs_pool_free()
297 mtx_destroy(&req->mtx); in scmi_reqs_pool_free()
298 free(req, M_DEVBUF); in scmi_reqs_pool_free()
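Taken together, the scmi_reqs_pool_allocate() and scmi_reqs_pool_free() matches above show the request pool being built as a linked free list: each request is allocated once with its maximum payload appended, given its own spin mutex and its async-waiter task wiring, then torn down with a safe-iteration walk. A minimal userspace sketch of the same pattern, assuming a pthread mutex in place of mtx(9) and hypothetical *_demo names:

#include <sys/queue.h>
#include <pthread.h>
#include <stdlib.h>

struct req_demo {
        LIST_ENTRY(req_demo) next;
        pthread_mutex_t mtx;
        unsigned char payld[];          /* max_payld_sz bytes appended */
};

LIST_HEAD(reqs_pool_demo, req_demo);    /* caller runs LIST_INIT() first */

static int
reqs_pool_allocate_demo(struct reqs_pool_demo *rp, int nreqs,
    size_t max_payld_sz)
{
        struct req_demo *req;
        int i;

        for (i = 0; i < nreqs; i++) {
                /* One allocation covers the request and its payload. */
                req = calloc(1, sizeof(*req) + max_payld_sz);
                if (req == NULL)
                        return (-1);
                pthread_mutex_init(&req->mtx, NULL);
                LIST_INSERT_HEAD(rp, req, next);
        }
        return (0);
}

static void
reqs_pool_free_demo(struct reqs_pool_demo *rp)
{
        struct req_demo *req;

        /* The driver uses LIST_FOREACH_SAFE; popping the head works
         * the same way and is portable to older sys/queue.h copies. */
        while ((req = LIST_FIRST(rp)) != NULL) {
                LIST_REMOVE(req, next);
                pthread_mutex_destroy(&req->mtx);
                free(req);
        }
}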
379 struct scmi_req *req; in scmi_req_initialized_alloc() local
390 req = scmi_req_alloc(sc, SCMI_CHAN_A2P); in scmi_req_initialized_alloc()
391 if (req == NULL) in scmi_req_initialized_alloc()
394 req->msg.tx_len = sizeof(req->msg.hdr) + tx_payld_sz; in scmi_req_initialized_alloc()
395 req->msg.rx_len = rx_payld_sz ? in scmi_req_initialized_alloc()
398 return (req); in scmi_req_initialized_alloc()
405 struct scmi_req *req = NULL; in scmi_req_alloc() local
410 req = LIST_FIRST(&rp->head); in scmi_req_alloc()
415 if (req != NULL) { in scmi_req_alloc()
416 refcount_init(&req->cnt, 1); in scmi_req_alloc()
418 req, refcount_load(&req->cnt), -1); in scmi_req_alloc()
421 return (req); in scmi_req_alloc()
426 struct scmi_req *req) in scmi_req_free_unlocked() argument
432 req->timed_out = false; in scmi_req_free_unlocked()
433 req->done = false; in scmi_req_free_unlocked()
434 req->is_raw = false; in scmi_req_free_unlocked()
435 refcount_init(&req->cnt, 0); in scmi_req_free_unlocked()
436 LIST_INSERT_HEAD(&rp->head, req, next); in scmi_req_free_unlocked()
440 req, refcount_load(&req->cnt), -1); in scmi_req_free_unlocked()
444 scmi_req_get(struct scmi_softc *sc, struct scmi_req *req) in scmi_req_get() argument
448 mtx_lock_spin(&req->mtx); in scmi_req_get()
449 ok = refcount_acquire_if_not_zero(&req->cnt); in scmi_req_get()
450 mtx_unlock_spin(&req->mtx); in scmi_req_get()
456 req, refcount_load(&req->cnt), SCMI_MSG_TOKEN(req->msg.hdr)); in scmi_req_get()
462 scmi_req_put(struct scmi_softc *sc, struct scmi_req *req) in scmi_req_put() argument
464 mtx_lock_spin(&req->mtx); in scmi_req_put()
465 if (!refcount_release_if_not_last(&req->cnt)) { in scmi_req_put()
466 req->protocol_id = 0; in scmi_req_put()
467 req->message_id = 0; in scmi_req_put()
468 req->token = 0; in scmi_req_put()
469 req->header = 0; in scmi_req_put()
470 bzero(&req->msg, sizeof(req->msg) + SCMI_MAX_MSG_PAYLD_SIZE(sc)); in scmi_req_put()
471 scmi_req_free_unlocked(sc, SCMI_CHAN_A2P, req); in scmi_req_put()
474 req, refcount_load(&req->cnt), SCMI_MSG_TOKEN(req->msg.hdr)); in scmi_req_put()
476 mtx_unlock_spin(&req->mtx); in scmi_req_put()
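The alloc/get/put matches trace a small reference-count lifecycle: a request leaves the free list with its count initialized to 1, scmi_req_get() only takes a reference while the count is still nonzero, and scmi_req_put() either drops one reference or, on the last one, scrubs the request and recycles it onto the free list with the count reset to 0. The two conditional operations come from refcount(9); a sketch of their semantics with C11 atomics, using hypothetical *_demo names:

#include <stdatomic.h>
#include <stdbool.h>

/* Mirrors refcount_acquire_if_not_zero(9): take a reference only if
 * at least one is already held, so a recycled request (count 0)
 * cannot be revived by a racing scmi_req_get(). */
static bool
acquire_if_not_zero_demo(atomic_uint *cnt)
{
        unsigned int old = atomic_load(cnt);

        while (old != 0) {
                if (atomic_compare_exchange_weak(cnt, &old, old + 1))
                        return (true);
        }
        return (false);
}

/* Mirrors refcount_release_if_not_last(9): drop a reference unless it
 * is the final one; returning false tells the caller, as in
 * scmi_req_put() above, to reset and recycle the request itself. */
static bool
release_if_not_last_demo(atomic_uint *cnt)
{
        unsigned int old = atomic_load(cnt);

        while (old > 1) {
                if (atomic_compare_exchange_weak(cnt, &old, old - 1))
                        return (true);
        }
        return (false);
}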
537 scmi_finalize_req(struct scmi_softc *sc, struct scmi_req *req) in scmi_finalize_req() argument
539 if (!req->is_raw) in scmi_finalize_req()
540 req->token = scmi_token_pick(sc); in scmi_finalize_req()
542 req->token = scmi_token_reserve(sc, SCMI_MSG_TOKEN(req->msg.hdr)); in scmi_finalize_req()
544 if (req->token < 0) in scmi_finalize_req()
547 if (!req->is_raw) { in scmi_finalize_req()
548 req->msg.hdr = req->message_id; in scmi_finalize_req()
549 req->msg.hdr |= SCMI_MSG_TYPE_CMD << SCMI_HDR_MESSAGE_TYPE_S; in scmi_finalize_req()
550 req->msg.hdr |= req->protocol_id << SCMI_HDR_PROTOCOL_ID_S; in scmi_finalize_req()
551 req->msg.hdr |= req->token << SCMI_HDR_TOKEN_S; in scmi_finalize_req()
555 req->header = req->msg.hdr; in scmi_finalize_req()
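scmi_finalize_req() packs the transmit header on the non-raw path; the raw path instead reserves the token already present in the caller-supplied header. Per the ARM SCMI specification, the 32-bit header carries the message id in bits [7:0], the message type in [9:8], the protocol id in [17:10], and the token in [27:18]; the constants below are hypothetical stand-ins for the SCMI_HDR_*_S shifts used above:

#include <stdint.h>

#define HDR_MESSAGE_TYPE_S_DEMO 8       /* message type, bits [9:8] */
#define HDR_PROTOCOL_ID_S_DEMO  10      /* protocol id, bits [17:10] */
#define HDR_TOKEN_S_DEMO        18      /* token, bits [27:18] */
#define MSG_TYPE_CMD_DEMO       0       /* synchronous command */

static uint32_t
finalize_hdr_demo(uint32_t message_id, uint32_t protocol_id,
    uint32_t token)
{
        uint32_t hdr;

        hdr = message_id;                       /* bits [7:0] */
        hdr |= MSG_TYPE_CMD_DEMO << HDR_MESSAGE_TYPE_S_DEMO;
        hdr |= protocol_id << HDR_PROTOCOL_ID_S_DEMO;
        hdr |= token << HDR_TOKEN_S_DEMO;
        return (hdr);
}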
561 scmi_req_track_inflight(struct scmi_softc *sc, struct scmi_req *req) in scmi_req_track_inflight() argument
566 error = scmi_finalize_req(sc, req); in scmi_req_track_inflight()
571 scmi_req_get(sc, req); in scmi_req_track_inflight()
574 LIST_INSERT_HEAD(REQHASH(sc, req->token), req, next); in scmi_req_track_inflight()
581 scmi_req_drop_inflight(struct scmi_softc *sc, struct scmi_req *req) in scmi_req_drop_inflight() argument
586 LIST_REMOVE(req, next); in scmi_req_drop_inflight()
587 scmi_token_release_unlocked(sc, req->token); in scmi_req_drop_inflight()
589 /* ...and drop the refcount, potentially releasing *req */ in scmi_req_drop_inflight()
590 scmi_req_put(sc, req); in scmi_req_drop_inflight()
598 struct scmi_req *req = NULL; in scmi_req_lookup_inflight() local
603 LIST_FOREACH(req, REQHASH(sc, token), next) { in scmi_req_lookup_inflight()
604 if (req->token == token) in scmi_req_lookup_inflight()
609 return (req); in scmi_req_lookup_inflight()
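Once finalized, a request is published in a token-keyed hash of in-flight requests: track takes an extra reference and inserts into the bucket REQHASH() selects, drop removes it, releases the token, and puts the reference, and lookup walks a single bucket comparing tokens. A sketch with a power-of-two bucket array (REQHASH_DEMO and the bucket count are assumptions, not the driver's values):

#include <sys/queue.h>

#define NBUCKETS_DEMO           64      /* must stay a power of two */
#define REQHASH_DEMO(ht, tok)   (&(ht)[(tok) & (NBUCKETS_DEMO - 1)])

struct inflight_demo {
        LIST_ENTRY(inflight_demo) next;
        int token;
};
LIST_HEAD(bucket_demo, inflight_demo);

static struct inflight_demo *
lookup_inflight_demo(struct bucket_demo *ht, int token)
{
        struct inflight_demo *req;

        /* Only the bucket the token hashes to needs scanning. */
        LIST_FOREACH(req, REQHASH_DEMO(ht, token), next) {
                if (req->token == token)
                        return (req);
        }
        return (NULL);
}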
616 struct scmi_req *req; in scmi_process_response() local
618 req = scmi_req_lookup_inflight(sc, hdr); in scmi_process_response()
619 if (req == NULL) { in scmi_process_response()
626 SDT_PROBE5(scmi, func, scmi_process_response, xfer_track, req, in scmi_process_response()
627 SCMI_MSG_PROTOCOL_ID(req->msg.hdr), SCMI_MSG_MESSAGE_ID(req->msg.hdr), in scmi_process_response()
628 SCMI_MSG_TOKEN(req->msg.hdr), req->timed_out); in scmi_process_response()
630 mtx_lock_spin(&req->mtx); in scmi_process_response()
631 req->done = true; in scmi_process_response()
632 req->msg.rx_len = rx_len; in scmi_process_response()
633 if (!req->timed_out) { in scmi_process_response()
639 if (!req->msg.polling) in scmi_process_response()
640 wakeup(req); in scmi_process_response()
642 atomic_store_rel_int(&req->msg.poll_done, 1); in scmi_process_response()
646 mtx_unlock_spin(&req->mtx); in scmi_process_response()
651 req->token); in scmi_process_response()
657 scmi_req_drop_inflight(sc, req); in scmi_process_response()
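scmi_process_response() is the completion half of the rendezvous: under the request lock it marks the request done, records the received length, and, unless the waiter already timed out, either wakes a sleeper or flags poll completion, before dropping the request from the in-flight table. A userspace analogue with a condition variable standing in for wakeup(9); the wait_demo fields are hypothetical:

#include <pthread.h>
#include <stdbool.h>

struct wait_demo {
        pthread_mutex_t mtx;
        pthread_cond_t cv;
        bool done;
        bool timed_out;
};

static void
process_response_demo(struct wait_demo *w)
{
        pthread_mutex_lock(&w->mtx);
        w->done = true;
        /* A waiter that already gave up must not be woken: its
         * request is on its way out of the in-flight table. */
        if (!w->timed_out)
                pthread_cond_signal(&w->cv);
        pthread_mutex_unlock(&w->mtx);
}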
679 scmi_wait_for_response(struct scmi_softc *sc, struct scmi_req *req, void **out) in scmi_wait_for_response() argument
684 SDT_PROBE5(scmi, entry, scmi_wait_for_response, xfer_track, req, in scmi_wait_for_response()
685 SCMI_MSG_PROTOCOL_ID(req->msg.hdr), SCMI_MSG_MESSAGE_ID(req->msg.hdr), in scmi_wait_for_response()
686 SCMI_MSG_TOKEN(req->msg.hdr), reply_timo_ms); in scmi_wait_for_response()
688 if (req->msg.polling) { in scmi_wait_for_response()
691 ret = SCMI_POLL_MSG(sc->dev, &req->msg, reply_timo_ms); in scmi_wait_for_response()
693 * Drop reference to successfully polled req unless it had in scmi_wait_for_response()
698 mtx_lock_spin(&req->mtx); in scmi_wait_for_response()
699 needs_drop = (ret == 0) && !req->done; in scmi_wait_for_response()
700 req->timed_out = ret != 0; in scmi_wait_for_response()
701 mtx_unlock_spin(&req->mtx); in scmi_wait_for_response()
703 scmi_req_drop_inflight(sc, req); in scmi_wait_for_response()
704 if (ret == 0 && req->msg.hdr != req->header) { in scmi_wait_for_response()
707 le32toh(req->msg.hdr), le32toh(req->header)); in scmi_wait_for_response()
710 ret = tsleep(req, 0, "scmi_wait4", (reply_timo_ms * hz) / 1000); in scmi_wait_for_response()
712 mtx_lock_spin(&req->mtx); in scmi_wait_for_response()
713 if (ret != 0 && req->done) in scmi_wait_for_response()
715 req->timed_out = ret != 0; in scmi_wait_for_response()
716 mtx_unlock_spin(&req->mtx); in scmi_wait_for_response()
720 SCMI_COLLECT_REPLY(sc->dev, &req->msg); in scmi_wait_for_response()
721 if (req->msg.payld[0] != 0) in scmi_wait_for_response()
722 ret = req->msg.payld[0]; in scmi_wait_for_response()
724 *out = &req->msg.payld[SCMI_MSG_HDR_SIZE]; in scmi_wait_for_response()
727 "Request for token 0x%X timed-out.\n", req->token); in scmi_wait_for_response()
732 SDT_PROBE5(scmi, exit, scmi_wait_for_response, xfer_track, req, in scmi_wait_for_response()
733 SCMI_MSG_PROTOCOL_ID(req->msg.hdr), SCMI_MSG_MESSAGE_ID(req->msg.hdr), in scmi_wait_for_response()
734 SCMI_MSG_TOKEN(req->msg.hdr), req->timed_out); in scmi_wait_for_response()
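The sleeping branch of scmi_wait_for_response() pairs with that completion: it sleeps with a deadline, treats a response that raced the timeout as success (the ret != 0 && req->done check above), and otherwise marks the request timed out so a late reply is discarded. The same shape with pthread_cond_timedwait(), continuing the wait_demo sketch:

#include <errno.h>
#include <time.h>

static int
wait_for_response_demo(struct wait_demo *w, unsigned int timo_ms)
{
        struct timespec ts;
        int ret = 0;

        /* Absolute deadline, as pthread_cond_timedwait() requires. */
        clock_gettime(CLOCK_REALTIME, &ts);
        ts.tv_sec += timo_ms / 1000;
        ts.tv_nsec += (long)(timo_ms % 1000) * 1000000L;
        if (ts.tv_nsec >= 1000000000L) {
                ts.tv_sec++;
                ts.tv_nsec -= 1000000000L;
        }

        pthread_mutex_lock(&w->mtx);
        while (!w->done && ret == 0)
                ret = pthread_cond_timedwait(&w->cv, &w->mtx, &ts);
        /* A completion that raced the timeout still wins. */
        if (ret == ETIMEDOUT && w->done)
                ret = 0;
        w->timed_out = (ret != 0);
        pthread_mutex_unlock(&w->mtx);
        return (ret);
}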
743 struct scmi_req *req; in scmi_buf_get() local
745 /* Pick a pre-built req */ in scmi_buf_get()
746 req = scmi_req_initialized_alloc(dev, tx_payld_sz, rx_payld_sz); in scmi_buf_get()
747 if (req == NULL) in scmi_buf_get()
750 req->protocol_id = protocol_id & SCMI_HDR_PROTOCOL_ID_BF; in scmi_buf_get()
751 req->message_id = message_id & SCMI_HDR_MESSAGE_ID_BF; in scmi_buf_get()
753 return (&req->msg.payld[0]); in scmi_buf_get()
760 struct scmi_req *req; in scmi_buf_put() local
764 req = buf_to_req(buf); in scmi_buf_put()
765 scmi_req_put(sc, req); in scmi_buf_put()
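scmi_buf_get() hands callers a pointer to the payload rather than to the request, so scmi_buf_put() (and msg_to_req()/tsk_to_req() later in the listing) must map that pointer back to the enclosing struct. That is the classic container-of idiom; a generic sketch with hypothetical types and sizes:

#include <stddef.h>
#include <stdint.h>

struct msg_demo {
        uint32_t hdr;
        unsigned char payld[16];
};

struct owner_demo {
        int token;
        struct msg_demo msg;
};

/* Recover the enclosing request from the payload pointer handed out
 * to the caller, by subtracting the member's offset. */
static struct owner_demo *
buf_to_owner_demo(void *buf)
{
        return ((struct owner_demo *)((char *)buf -
            offsetof(struct owner_demo, msg.payld)));
}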
771 struct scmi_req *req; in scmi_msg_get() local
773 /* Pick a pre-built req */ in scmi_msg_get()
774 req = scmi_req_initialized_alloc(dev, tx_payld_sz, rx_payld_sz); in scmi_msg_get()
775 if (req == NULL) in scmi_msg_get()
778 req->is_raw = true; in scmi_msg_get()
780 return (&req->msg); in scmi_msg_get()
788 struct scmi_req *req; in scmi_req_async_waiter() local
790 req = tsk_to_req(ta); in scmi_req_async_waiter()
791 sc = device_get_softc(req->dev); in scmi_req_async_waiter()
792 scmi_wait_for_response(sc, req, NULL); in scmi_req_async_waiter()
794 scmi_msg_put(req->dev, &req->msg); in scmi_req_async_waiter()
801 struct scmi_req *req; in scmi_msg_put() local
805 req = msg_to_req(msg); in scmi_msg_put()
807 scmi_req_put(sc, req); in scmi_msg_put()
814 struct scmi_req *req; in scmi_request_tx() local
819 req = buf_to_req(in); in scmi_request_tx()
821 req->msg.polling = in scmi_request_tx()
822 (cold || sc->trs_desc.no_completion_irq || req->use_polling); in scmi_request_tx()
825 error = scmi_req_track_inflight(sc, req); in scmi_request_tx()
827 device_printf(dev, "Failed to build req with HDR |%0X|\n", in scmi_request_tx()
828 req->msg.hdr); in scmi_request_tx()
832 error = SCMI_XFER_MSG(sc->dev, &req->msg); in scmi_request_tx()
834 scmi_req_drop_inflight(sc, req); in scmi_request_tx()
838 SDT_PROBE5(scmi, func, scmi_request_tx, xfer_track, req, in scmi_request_tx()
839 SCMI_MSG_PROTOCOL_ID(req->msg.hdr), SCMI_MSG_MESSAGE_ID(req->msg.hdr), in scmi_request_tx()
840 SCMI_MSG_TOKEN(req->msg.hdr), req->msg.polling); in scmi_request_tx()
849 struct scmi_req *req; in scmi_request() local
857 req = buf_to_req(in); in scmi_request()
859 return (scmi_wait_for_response(sc, req, out)); in scmi_request()
865 struct scmi_req *req; in scmi_msg_async_enqueue() local
867 req = msg_to_req(msg); in scmi_msg_async_enqueue()
869 return taskqueue_enqueue_flags(taskqueue_thread, &req->tsk, in scmi_msg_async_enqueue()
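Finally, the asynchronous path defers the blocking wait to a worker: scmi_msg_async_enqueue() queues req->tsk, which the pool setup wired to scmi_req_async_waiter(), and the waiter calls scmi_wait_for_response() and then scmi_msg_put() from task context. A minimal sketch of that callback shape, continuing the wait_demo sketches above (the worker loop that drains the queue is omitted, and the timeout value is arbitrary):

struct task_demo {
        void (*ta_func)(void *);        /* analogue of tsk.ta_func */
        void *ta_context;               /* analogue of tsk.ta_context */
};

/* Runs in a worker thread, never in the submitter: block for the
 * reply, then drop the reference the submitter left behind. */
static void
async_waiter_demo(void *arg)
{
        struct wait_demo *w = arg;

        (void)wait_for_response_demo(w, 100 /* ms, demo value */);
        /* ...release the request here, as scmi_msg_put() does above. */
}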