1 /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
3 * Copyright(c) 2015 - 2018 Intel Corporation.
22 #define MAX_SDMA_PKT_SIZE ((16 * 1024) - 1)
61 ((1ULL << SDMA_DESC0_BYTE_COUNT_WIDTH) - 1)
67 ((1ULL << SDMA_DESC0_PHY_ADDR_WIDTH) - 1)
74 ((1ULL << SDMA_DESC1_HEADER_UPDATE1_WIDTH) - 1)
80 ((1ULL << SDMA_DESC1_HEADER_MODE_WIDTH) - 1)
86 ((1ULL << SDMA_DESC1_HEADER_INDEX_WIDTH) - 1)
92 ((1ULL << SDMA_DESC1_HEADER_DWS_WIDTH) - 1)
98 ((1ULL << SDMA_DESC1_GENERATION_WIDTH) - 1)
160 * - The SDMA API for building and submitting packets
163 * - Initialization and tear down routines to build up
166 * - ISR entrances to handle interrupts, state changes
174 * and verbs to supply packets to the SDMA ring.
183 * for their version of the txreq. Slabs, pre-allocated lists,
205 * tx. An example of a use case would be a pre-allocated
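
As a concrete illustration of the caller-owned txreq allocation described above, here is a minimal sketch in which a hypothetical caller embeds struct sdma_txreq in its own per-packet structure and recovers it in the completion callback; the my_pkt type and my_pkt_complete() name are invented for the example.

#include <linux/slab.h>
#include "sdma.h"                       /* struct sdma_txreq, callback signature */

/* hypothetical per-packet wrapper owned by the caller */
struct my_pkt {
        struct sdma_txreq txreq;        /* must stay valid until the callback runs */
        void *caller_state;             /* any caller-private bookkeeping */
};

/* completion callback: the engine hands back the txreq and a status */
static void my_pkt_complete(struct sdma_txreq *tx, int status)
{
        struct my_pkt *pkt = container_of(tx, struct my_pkt, txreq);

        kfree(pkt);                     /* or return it to a slab / pre-allocated list */
}
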
249 * struct hw_sdma_desc - raw 128 bit SDMA descriptor
259 * struct sdma_engine - Data pertaining to each SDMA engine.
260 * @dd: a back-pointer to the device data
261 * @ppd: per port back-pointer
304 u64 p_senddmactrl; /* shadow per-engine SendDmaCtrl */
383 * sdma_empty() - idle engine test
389 * 1 - empty, 0 - non-empty
393 return sde->descq_tail == sde->descq_head; in sdma_empty()
398 return sde->descq_cnt - in sdma_descq_freecnt()
399 (sde->descq_tail - in sdma_descq_freecnt()
400 READ_ONCE(sde->descq_head)) - 1; in sdma_descq_freecnt()
405 return sde->descq_cnt - sdma_descq_freecnt(sde); in sdma_descq_inprocess()
414 return engine->state.current_state == sdma_state_s99_running; in __sdma_running()
418 * sdma_running() - state suitability test
425 * 1 - ok to submit, 0 - not ok to submit
433 spin_lock_irqsave(&engine->tail_lock, flags); in sdma_running()
435 spin_unlock_irqrestore(&engine->tail_lock, flags); in sdma_running()
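
Before building descriptors, a submitter is expected to gate on the state test above; a minimal sketch follows (the -EBUSY return and the idea of deferring the packet are placeholders for the caller's own wait logic).

/* only attempt a submission while the engine reports the running state */
static int my_try_send(struct sdma_engine *sde)
{
        if (!sdma_running(sde))
                return -EBUSY;          /* hypothetical: caller queues and waits */

        /* safe to build and submit a txreq here */
        return 0;
}
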
447 * sdma_txinit_ahg() - initialize an sdma_txreq struct with AHG
451 * @ahg_entry: ahg entry to use (0 - 31)
452 * @num_ahg: ahg descriptor for first descriptor (0 - 9)
488 * will pad with a descriptor referencing 1 - 3 bytes when the number of bytes
491 * ahg_hlen is used to determine the number of on-chip entry bytes to
509 return -ENODATA; in sdma_txinit_ahg()
511 return -EMSGSIZE; in sdma_txinit_ahg()
512 tx->desc_limit = ARRAY_SIZE(tx->descs); in sdma_txinit_ahg()
513 tx->descp = &tx->descs[0]; in sdma_txinit_ahg()
514 INIT_LIST_HEAD(&tx->list); in sdma_txinit_ahg()
515 tx->num_desc = 0; in sdma_txinit_ahg()
516 tx->flags = flags; in sdma_txinit_ahg()
517 tx->complete = cb; in sdma_txinit_ahg()
518 tx->coalesce_buf = NULL; in sdma_txinit_ahg()
519 tx->wait = NULL; in sdma_txinit_ahg()
520 tx->packet_len = tlen; in sdma_txinit_ahg()
521 tx->tlen = tx->packet_len; in sdma_txinit_ahg()
522 tx->descs[0].qw[0] = SDMA_DESC0_FIRST_DESC_FLAG; in sdma_txinit_ahg()
523 tx->descs[0].qw[1] = 0; in sdma_txinit_ahg()
525 tx->descs[0].qw[1] |= in sdma_txinit_ahg()
536 * sdma_txinit() - initialize an sdma_txreq struct (no AHG)
561 * The callback, if non-NULL, will be provided this tx and a status. The
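
A hedged sketch of the build flow the two init routines document: initialize the txreq without AHG, attach kernel-virtual fragments, and clean up on failure. It reuses the hypothetical my_pkt/my_pkt_complete wrapper from the earlier sketch; the header/data buffers are placeholders, and handing the finished txreq to an engine is not shown.

static int my_build_tx(struct hfi1_devdata *dd, struct my_pkt *pkt,
                       void *hdr, u16 hdrlen, void *data, u16 datalen)
{
        int ret;

        /* tlen covers every byte added below; cb runs on completion */
        ret = sdma_txinit(&pkt->txreq, 0 /* flags */, hdrlen + datalen,
                          my_pkt_complete);
        if (ret)
                return ret;

        /* each add maps the buffer and consumes a descriptor */
        ret = sdma_txadd_kvaddr(dd, &pkt->txreq, hdr, hdrlen);
        if (ret)
                goto bail;
        ret = sdma_txadd_kvaddr(dd, &pkt->txreq, data, datalen);
        if (ret)
                goto bail;
        return 0;
bail:
        sdma_txclean(dd, &pkt->txreq);  /* unmaps anything added so far */
        return ret;
}
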
575 /* helpers - don't use */
578 return (d->qw[1] & SDMA_DESC1_GENERATION_SMASK) in sdma_mapping_type()
584 return (d->qw[0] & SDMA_DESC0_BYTE_COUNT_SMASK) in sdma_mapping_len()
590 return (d->qw[0] & SDMA_DESC0_PHY_ADDR_SMASK) in sdma_mapping_addr()
603 struct sdma_desc *desc = &tx->descp[tx->num_desc]; in make_tx_sdma_desc()
605 if (!tx->num_desc) { in make_tx_sdma_desc()
607 desc->qw[1] |= ((u64)type & SDMA_DESC1_GENERATION_MASK) in make_tx_sdma_desc()
610 desc->qw[0] = 0; in make_tx_sdma_desc()
611 desc->qw[1] = ((u64)type & SDMA_DESC1_GENERATION_MASK) in make_tx_sdma_desc()
614 desc->qw[0] |= (((u64)addr & SDMA_DESC0_PHY_ADDR_MASK) in make_tx_sdma_desc()
619 desc->pinning_ctx = pinning_ctx; in make_tx_sdma_desc()
620 desc->ctx_put = ctx_put; in make_tx_sdma_desc()
634 if (tx->num_desc) in sdma_txclean()
642 u16 last_desc = tx->num_desc - 1; in _sdma_close_tx()
644 tx->descp[last_desc].qw[0] |= SDMA_DESC0_LAST_DESC_FLAG; in _sdma_close_tx()
645 tx->descp[last_desc].qw[1] |= dd->default_desc1; in _sdma_close_tx()
646 if (tx->flags & SDMA_TXREQ_F_URGENT) in _sdma_close_tx()
647 tx->descp[last_desc].qw[1] |= (SDMA_DESC1_HEAD_TO_HOST_FLAG | in _sdma_close_tx()
668 WARN_ON(len > tx->tlen); in _sdma_txadd_daddr()
669 tx->num_desc++; in _sdma_txadd_daddr()
670 tx->tlen -= len; in _sdma_txadd_daddr()
672 if (!tx->tlen) { in _sdma_txadd_daddr()
673 if (tx->packet_len & (sizeof(u32) - 1)) { in _sdma_txadd_daddr()
685 * sdma_txadd_page() - add a page to the sdma_txreq
692 * added if coalesce buffer is used. E.g. pointer to pinned-page
705 * 0 - success, -ENOSPC - mapping fail, -ENOMEM - couldn't
721 if (unlikely(tx->num_desc == tx->desc_limit)) { in sdma_txadd_page()
729 &dd->pcidev->dev, in sdma_txadd_page()
735 if (unlikely(dma_mapping_error(&dd->pcidev->dev, addr))) { in sdma_txadd_page()
737 return -ENOSPC; in sdma_txadd_page()
745 * sdma_txadd_daddr() - add a dma address to the sdma_txreq
757 * 0 - success, -ENOMEM - couldn't extend descriptor array
768 if (unlikely(tx->num_desc == tx->desc_limit)) { in sdma_txadd_daddr()
780 * sdma_txadd_kvaddr() - add a kernel virtual address to sdma_txreq
792 * 0 - success, -ENOSPC - mapping fail, -ENOMEM - couldn't extend/coalesce
804 if (unlikely(tx->num_desc == tx->desc_limit)) { in sdma_txadd_kvaddr()
812 &dd->pcidev->dev, in sdma_txadd_kvaddr()
817 if (unlikely(dma_mapping_error(&dd->pcidev->dev, addr))) { in sdma_txadd_kvaddr()
819 return -ENOSPC; in sdma_txadd_kvaddr()
841 * sdma_build_ahg - build ahg descriptor
867 * sdma_progress - use seq number to detect head progress
877 * re-submission is detected by checking whether the descriptor
883 if (read_seqretry(&sde->head_lock, seq)) { in sdma_progress()
884 sde->desc_avail = sdma_descq_freecnt(sde); in sdma_progress()
885 if (tx->num_desc > sde->desc_avail) in sdma_progress()
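
A hedged sketch of how the progress test is paired with the engine's head_lock seqlock: capture the sequence count before deciding to block, and if the head has advanced since then (and room for this tx appeared), retry instead of sleeping. The surrounding queueing logic belongs to the caller and is omitted; the helper name is hypothetical.

static bool my_should_retry(struct sdma_engine *sde, struct sdma_txreq *tx)
{
        unsigned seq = read_seqbegin(&sde->head_lock);

        /* ... conclude the ring is full and prepare to wait ... */

        return sdma_progress(sde, seq, tx);     /* true: retry the submit */
}
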
900 * Since the mapping now allows for non-uniform engines per vl, the
901 * number of engines for a vl is either the vl_engines[vl] or
905 * nactual = vl_engines ? vl_engines[vl] : num_sdma/num_vls
910 * evenly, the extras are added from the last vl downward.
914 * for a particular vl.
916 * dd->sdma_map
918 *    |                                         +--------------------+
920 * sdma_vl_map                                  |--------------------|
921 * +--------------------------+                 | sde[0] -> eng 1    |
922 * |    list (RCU)            |                 |--------------------|
923 * |--------------------------|               ->| sde[1] -> eng 2    |
924 * |    mask                  |          --/    |--------------------|
925 * |--------------------------|        -/       |         *          |
926 * |    actual_vls (max 8)    |      -/         |--------------------|
927 * |--------------------------|    --/          | sde[n-1] -> eng n  |
928 * |    vls (max 8)           |  -/             +--------------------+
929 * |--------------------------| --/
930 * |    map[0]                |-/
931 * |--------------------------|                 +---------------------+
932 * |    map[1]                |---              |       mask          |
933 * |--------------------------|   \----         |---------------------|
934 * |       *                  |       \--       | sde[0] -> eng 1+n   |
935 * |       *                  |         \----   |---------------------|
936 * |       *                  |              \->| sde[1] -> eng 2+n   |
937 * |--------------------------|                 |---------------------|
938 * |   map[vls - 1]           |-                |          *          |
939 * +--------------------------+ \-              |---------------------|
940 *                                \-            | sde[m-1] -> eng m+n |
941 *                                  \           +---------------------+
942 *                                   \-
944 *                                     \-       +----------------------+
945 *                                       \-     |       mask           |
946 *                                         \    |----------------------|
947 *                                          \-  | sde[0] -> eng 1+m+n  |
948 *                                           \- |----------------------|
949 *                                             >| sde[1] -> eng 2+m+n  |
950 *                                              |----------------------|
952 *                                              |----------------------|
953 *                                              | sde[o-1] -> eng o+m+n|
954 *                                              +----------------------+
959 * struct sdma_map_elem - mapping for a vl
960 * @mask - selector mask
961 * @sde - array of engines for this vl
973 * struct sdma_vl_map - vl-to-engine mapping table
974 * @engine_to_vl - map of an engine to a vl
975 * @list - rcu head for free callback
976 * @mask - vl mask to "mod" the vl to produce an index to map array
977 * @actual_vls - number of vls
978 * @vls - number of vls rounded to next power of 2
979 * @map - array of sdma_map_elem entries
983 * in turn point to an array of sde's for that vl.
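
Putting the two mapping structures together, here is a hedged sketch of the lookup the masks above imply; it mirrors what the driver's engine-selection helpers do internally, but the function itself is illustrative, not driver code.

/* "mod" the vl by the outer mask to pick a map[] entry, then "mod"
 * the selector by that entry's mask to pick one of its engines;
 * must run under rcu_read_lock() since dd->sdma_map is RCU-managed.
 */
static struct sdma_engine *my_engine_for_vl(struct hfi1_devdata *dd,
                                            u32 selector, u8 vl)
{
        struct sdma_vl_map *m = rcu_dereference(dd->sdma_map);
        struct sdma_map_elem *e;

        if (unlikely(!m))
                return NULL;    /* map not populated yet; caller must cope */
        e = m->map[vl & m->mask];
        return e->sde[selector & e->mask];
}
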
1004 * sdma_engine_progress_schedule() - schedule progress on engine
1013 if (!sde || sdma_descq_inprocess(sde) < (sde->descq_cnt / 8)) in sdma_engine_progress_schedule()
1026 u8 vl);
1029 u32 selector, u8 vl);
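
Finally, a hedged usage sketch for the selection prototypes whose tails appear above: pick an engine for a vl, then gate on its state before building a txreq. The selector argument and the NULL fallback are placeholders for the caller's own policy.

static struct sdma_engine *my_pick_engine(struct hfi1_devdata *dd,
                                          u32 selector, u8 vl)
{
        struct sdma_engine *sde = sdma_select_engine_vl(dd, selector, vl);

        if (sde && sdma_running(sde))
                return sde;
        return NULL;            /* hypothetical: defer the send until an engine runs */
}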