/linux/kernel/bpf/

queue_stack_maps.c
     31  static bool queue_stack_map_is_empty(struct bpf_queue_stack *qs)  in queue_stack_map_is_empty() argument
     33  return qs->head == qs->tail;  in queue_stack_map_is_empty()
     36  static bool queue_stack_map_is_full(struct bpf_queue_stack *qs)  in queue_stack_map_is_full() argument
     38  u32 head = qs->head + 1;  in queue_stack_map_is_full()
     40  if (unlikely(head >= qs->size))  in queue_stack_map_is_full()
     43  return head == qs->tail;  in queue_stack_map_is_full()
     68  struct bpf_queue_stack *qs;  in queue_stack_map_alloc() local
     72  queue_size = sizeof(*qs) + size * attr->value_size;  in queue_stack_map_alloc()
     74  qs = bpf_map_area_alloc(queue_size, numa_node);  in queue_stack_map_alloc()
     75  if (!qs)  in queue_stack_map_alloc()
    [all …]
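
The head/tail tests in the hits above are the classic one-slot-reserved ring buffer: head == tail can only mean "empty" because the producer never advances head onto tail, and head wraps to 0 when it reaches size. A minimal user-space sketch of the same logic (hypothetical names, not the kernel implementation):

    #include <stdbool.h>
    #include <stdint.h>

    struct ring {
            uint32_t head;  /* next slot to write */
            uint32_t tail;  /* next slot to read */
            uint32_t size;  /* total slots; one is kept unused */
    };

    static bool ring_is_empty(const struct ring *r)
    {
            return r->head == r->tail;
    }

    static bool ring_is_full(const struct ring *r)
    {
            uint32_t head = r->head + 1;

            if (head >= r->size)            /* wrap around */
                    head = 0;
            return head == r->tail;         /* would catch the tail: full */
    }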

/linux/drivers/net/ethernet/chelsio/cxgb3/

sge.c
    721  * @qs: the queue set
    726  static void init_qset_cntxt(struct sge_qset *qs, unsigned int id)  in init_qset_cntxt() argument
    728  qs->rspq.cntxt_id = id;  in init_qset_cntxt()
    729  qs->fl[0].cntxt_id = 2 * id;  in init_qset_cntxt()
    730  qs->fl[1].cntxt_id = 2 * id + 1;  in init_qset_cntxt()
    731  qs->txq[TXQ_ETH].cntxt_id = FW_TUNNEL_SGEEC_START + id;  in init_qset_cntxt()
    732  qs->txq[TXQ_ETH].token = FW_TUNNEL_TID_START + id;  in init_qset_cntxt()
    733  qs->txq[TXQ_OFLD].cntxt_id = FW_OFLD_SGEEC_START + id;  in init_qset_cntxt()
    734  qs->txq[TXQ_CTRL].cntxt_id = FW_CTRL_SGEEC_START + id;  in init_qset_cntxt()
    735  qs->txq[TXQ_CTRL].token = FW_CTRL_TID_START + id;  in init_qset_cntxt()
    [all …]

cxgb3_main.c
    409  adap->sge.qs[qidx].  in request_msix_data_irqs()
    412  &adap->sge.qs[qidx]);  in request_msix_data_irqs()
    416  &adap->sge.qs[qidx]);  in request_msix_data_irqs()
    436  &adapter->sge.qs[i]);  in free_irq_resources()
    446  while (adap->sge.qs[0].rspq.offload_pkts < init_cnt + n) {  in await_mgmt_replies()
    459  unsigned long cnt = adap->sge.qs[0].rspq.offload_pkts;  in init_tp_parity()
    595  struct sge_qset *qs = &adap->sge.qs[i];  in ring_dbs() local
    597  if (qs->adap)  in ring_dbs()
    599  t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX | V_EGRCNTX(qs->txq[j].cntxt_id));  in ring_dbs()
    608  struct sge_qset *qs = &adap->sge.qs[i];  in init_napi() local
    [all …]

adapter.h
     68  struct sge_qset *qs;  member
    216  struct sge_qset qs[SGE_QSETS];  member
    325  void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p);

/linux/drivers/net/ethernet/intel/idpf/

idpf_virtchnl.c
    737  qp = kzalloc(struct_size(qp, qs, num), GFP_KERNEL);  in idpf_alloc_queue_set()
    806  * @qs: set of the Tx queues
    810  static int idpf_wait_for_marker_event_set(const struct idpf_queue_set *qs)  in idpf_wait_for_marker_event_set() argument
    815  for (u32 i = 0; i < qs->num; i++) {  in idpf_wait_for_marker_event_set()
    816  switch (qs->qs[i].type) {  in idpf_wait_for_marker_event_set()
    818  txq = qs->qs[i].txq;  in idpf_wait_for_marker_event_set()
    830  netdev_warn(qs->vport->netdev,  in idpf_wait_for_marker_event_set()
    846  struct idpf_queue_set *qs __free(kfree) = NULL;  in idpf_wait_for_marker_event()
    848  qs = idpf_alloc_queue_set(vport, vport->num_txq);  in idpf_wait_for_marker_event()
    849  if (!qs)  in idpf_wait_for_marker_event()
    [all …]
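
The kzalloc(struct_size(qp, qs, num), GFP_KERNEL) hit at line 737 is the kernel's standard single allocation of a header plus a flexible array: struct_size() from <linux/overflow.h> computes sizeof(*qp) + num * sizeof(qp->qs[0]) with overflow checking. A hedged sketch of the pattern; the struct layout here is illustrative, not the real idpf_queue_set:

    #include <linux/overflow.h>
    #include <linux/slab.h>
    #include <linux/types.h>

    struct example_queue_set {
            u32 num;
            void *qs[];                     /* flexible array member */
    };

    static struct example_queue_set *example_alloc_queue_set(u32 num)
    {
            struct example_queue_set *qp;

            /* struct_size() saturates to SIZE_MAX on overflow, so a
             * bogus 'num' makes kzalloc() fail instead of
             * under-allocating.
             */
            qp = kzalloc(struct_size(qp, qs, num), GFP_KERNEL);
            if (!qp)
                    return NULL;

            qp->num = num;
            return qp;
    }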

idpf_txrx.c
    957  static int idpf_init_queue_set(const struct idpf_queue_set *qs)  in idpf_init_queue_set() argument
    959  const struct idpf_vport *vport = qs->vport;  in idpf_init_queue_set()
    965  for (u32 i = 0; i < qs->num; i++) {  in idpf_init_queue_set()
    966  const struct idpf_queue_ptr *q = &qs->qs[i];  in idpf_init_queue_set()
   1031  static void idpf_clean_queue_set(const struct idpf_queue_set *qs)  in idpf_clean_queue_set() argument
   1033  const struct idpf_vport *vport = qs->vport;  in idpf_clean_queue_set()
   1036  for (u32 i = 0; i < qs->num; i++) {  in idpf_clean_queue_set()
   1037  const struct idpf_queue_ptr *q = &qs->qs[i];  in idpf_clean_queue_set()
   1114  struct idpf_queue_set *qs;  in idpf_vector_to_queue_set() local
   1122  qs = idpf_alloc_queue_set(vport, num);  in idpf_vector_to_queue_set()
    [all …]

/linux/drivers/net/ethernet/cavium/thunder/

nicvf_ethtool.c
    217  for (qidx = 0; qidx < nic->qs->rq_cnt; qidx++) {  in nicvf_get_qset_strings()
    225  for (qidx = 0; qidx < nic->qs->sq_cnt; qidx++) {  in nicvf_get_qset_strings()
    282  (nic->qs->rq_cnt + nic->qs->sq_cnt);  in nicvf_get_sset_count()
    290  (snic->qs->rq_cnt + snic->qs->sq_cnt);  in nicvf_get_sset_count()
    306  for (qidx = 0; qidx < nic->qs->rq_cnt; qidx++) {  in nicvf_get_qset_stats()
    309  *((*data)++) = ((u64 *)&nic->qs->rq[qidx].stats)  in nicvf_get_qset_stats()
    313  for (qidx = 0; qidx < nic->qs->sq_cnt; qidx++) {  in nicvf_get_qset_stats()
    316  *((*data)++) = ((u64 *)&nic->qs->sq[qidx].stats)  in nicvf_get_qset_stats()
    475  struct queue_set *qs = nic->qs;  in nicvf_get_ringparam() local
    478  ring->rx_pending = qs->cq_len;  in nicvf_get_ringparam()
    [all …]

nicvf_main.c
    319  mbx.cpi_cfg.rq_cnt = nic->qs->rq_cnt;  in nicvf_config_cpi()
    441  nic->snicvf[sqs]->qs->rq_cnt = MAX_RCV_QUEUES_PER_QS;  in nicvf_request_sqs()
    444  nic->snicvf[sqs]->qs->rq_cnt = rx_queues;  in nicvf_request_sqs()
    449  nic->snicvf[sqs]->qs->sq_cnt = MAX_SND_QUEUES_PER_QS;  in nicvf_request_sqs()
    452  nic->snicvf[sqs]->qs->sq_cnt = tx_queues;  in nicvf_request_sqs()
    456  nic->snicvf[sqs]->qs->cq_cnt =  in nicvf_request_sqs()
    457  max(nic->snicvf[sqs]->qs->rq_cnt, nic->snicvf[sqs]->qs->sq_cnt);  in nicvf_request_sqs()
    667  sq = &nic->qs->sq[cqe_tx->sq_idx];  in nicvf_snd_pkt_handler()
    852  struct queue_set *qs = nic->qs;  in nicvf_cq_intr_handler() local
    853  struct cmp_queue *cq = &qs->cq[cq_idx];  in nicvf_cq_intr_handler()
    [all …]

nicvf_queues.h
     58  /* Default queue count per QS, its lengths and threshold values */
    244  u8 cq_qs;  /* CQ's QS to which this RQ is assigned */
    245  u8 cq_idx;  /* CQ index (0 to 7) in the QS */
    246  u8 cont_rbdr_qs;  /* Continue buffer ptrs - QS num */
    247  u8 cont_qs_rbdr_idx;  /* RBDR idx in the cont QS */
    248  u8 start_rbdr_qs;  /* First buffer ptrs - QS num */
    249  u8 start_qs_rbdr_idx;  /* RBDR idx in the above QS */
    266  u8 cq_qs;  /* CQ's QS to which this SQ is pointing */
    267  u8 cq_idx;  /* CQ index (0 to 7) in the above QS */
    335  void nicvf_cmp_queue_config(struct nicvf *nic, struct queue_set *qs,

/linux/drivers/soc/qcom/

socinfo.c
    788  struct qcom_socinfo *qs;  in qcom_socinfo_probe() local
    799  qs = devm_kzalloc(&pdev->dev, sizeof(*qs), GFP_KERNEL);  in qcom_socinfo_probe()
    800  if (!qs)  in qcom_socinfo_probe()
    803  qs->attr.family = "Snapdragon";  in qcom_socinfo_probe()
    804  qs->attr.machine = socinfo_machine(&pdev->dev,  in qcom_socinfo_probe()
    806  qs->attr.soc_id = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%u",  in qcom_socinfo_probe()
    808  qs->attr.revision = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%u.%u",  in qcom_socinfo_probe()
    811  if (!qs->attr.soc_id || !qs->attr.revision)  in qcom_socinfo_probe()
    815  qs->attr.serial_number = devm_kasprintf(&pdev->dev, GFP_KERNEL,  in qcom_socinfo_probe()
    818  if (!qs->attr.serial_number)  in qcom_socinfo_probe()
    [all …]
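
The probe hits above follow the usual devm_* idiom: every buffer and formatted string is tied to the device's lifetime, so each error path can simply return without freeing anything. A minimal sketch against the generic soc_device_attribute API (function name and values here are made up):

    #include <linux/device.h>
    #include <linux/platform_device.h>
    #include <linux/slab.h>
    #include <linux/sys_soc.h>

    static int example_socinfo_probe(struct platform_device *pdev)
    {
            struct soc_device_attribute *attr;

            attr = devm_kzalloc(&pdev->dev, sizeof(*attr), GFP_KERNEL);
            if (!attr)
                    return -ENOMEM;

            attr->family = "Snapdragon";
            attr->soc_id = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%u", 0x123u);
            attr->revision = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%u.%u", 1u, 0u);
            if (!attr->soc_id || !attr->revision)
                    return -ENOMEM; /* devm memory is released automatically */

            return 0;
    }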

/linux/fs/qnx4/

inode.c
     47  struct qnx4_sb_info *qs;  in qnx4_reconfigure() local
     50  qs = qnx4_sb(sb);  in qnx4_reconfigure()
     51  qs->Version = QNX4_VERSION;  in qnx4_reconfigure()
    197  struct qnx4_sb_info *qs;  in qnx4_fill_super() local
    200  qs = kzalloc(sizeof(struct qnx4_sb_info), GFP_KERNEL);  in qnx4_fill_super()
    201  if (!qs)  in qnx4_fill_super()
    203  s->s_fs_info = qs;  in qnx4_fill_super()
    259  struct qnx4_sb_info *qs = qnx4_sb(sb);  in qnx4_kill_sb() local
    261  if (qs) {  in qnx4_kill_sb()
    262  kfree(qs->BitMap);  in qnx4_kill_sb()
    [all …]

/linux/drivers/net/ethernet/chelsio/cxgb4vf/

cxgb4vf_main.c
    367  int qs, msi;  in name_msix_vecs() local
    369  for (qs = 0, msi = MSIX_IQFLINT; qs < pi->nqsets; qs++, msi++) {  in name_msix_vecs()
    371  "%s-%d", dev->name, qs);  in name_msix_vecs()
    634  int qs;  in setup_sge_queues() local
    636  for (qs = 0; qs < pi->nqsets; qs++, rxq++, txq++) {  in setup_sge_queues()
    644  netdev_get_tx_queue(dev, qs),  in setup_sge_queues()
    649  rxq->rspq.idx = qs;  in setup_sge_queues()
    665  int qs;  in setup_sge_queues() local
    667  for (qs = 0; qs < pi->nqsets; qs++, rxq++, txq++) {  in setup_sge_queues()
    707  int qs, err;  in setup_rss() local
    [all …]

t4vf_common.h
    246  unsigned int neq;  /* N egress Qs */
    247  unsigned int nethctrl;  /* N egress ETH or CTRL Qs */
    248  unsigned int niqflint;  /* N ingress Qs/w free list(s) & intr */
    249  unsigned int niq;  /* N ingress Qs */

/linux/fs/qnx6/

inode.c
    299  struct qnx6_sb_info *qs;  in qnx6_fill_super() local
    305  qs = kzalloc(sizeof(struct qnx6_sb_info), GFP_KERNEL);  in qnx6_fill_super()
    306  if (!qs)  in qnx6_fill_super()
    308  s->s_fs_info = qs;  in qnx6_fill_super()
    309  qs->s_mount_opt = ctx->s_mount_opts;  in qnx6_fill_super()
    317  if (qs->s_mount_opt == QNX6_MOUNT_MMI_FS) {  in qnx6_fill_super()
    469  kfree(qs);  in qnx6_fill_super()
    476  struct qnx6_sb_info *qs = QNX6_SB(sb);  in qnx6_put_super() local
    477  brelse(qs->sb_buf);  in qnx6_put_super()
    478  iput(qs->longfile);  in qnx6_put_super()
    [all …]

/linux/arch/s390/include/asm/

atomic_ops.h
     71  : [old] "=d" (old), [ptr] "+QS" (*ptr) \
     99  : [ptr] "+QS" (*ptr) : [val] "i" (val) : "cc", "memory");\
    150  : [old] "=d" (old), [new] "=&d" (new), [ptr] "+QS" (*ptr)\
    184  : "=@cc" (cc), [tmp] "=d" (tmp), [ptr] "+QS" (*ptr) \
    208  : "=@cc" (cc), [ptr] "+QS" (*ptr) \
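
A note for anyone grepping: in these s390 headers "QS" is not an identifier at all. It is a pair of GCC machine constraints on the inline-asm memory operand ("Q": memory reference without index register and with short displacement; "S": without index register and with long displacement), letting the compiler pick either addressing form. An illustrative, non-kernel use of the syntax:

    /* s390 only: "lg" is a 64-bit load; "QS" constrains the memory
     * operand to the displacement forms the instruction accepts.
     */
    static inline long example_load64(const long *ptr)
    {
            long val;

            asm volatile("lg %[val],%[ptr]"
                         : [val] "=d" (val)
                         : [ptr] "QS" (*ptr));
            return val;
    }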

access-regs.h
     22  : [regs] "=QS" (*regs)  in save_access_regs()
     34  : [regs] "QS" (*regs)  in restore_access_regs()

cmpxchg.h
     32  : [old] "+d" (old), [ptr] "+QS" (*(u64 *)ptr)  in __csg_asm()
    141  [__ptr] "+QS" (*(ptr)), \
    245  : [old] "+d" (old), [ptr] "+QS" (*ptr)  in arch_cmpxchg128()
    262  : [old] "+d" (*oldp), [ptr] "+QS" (*ptr), "=@cc" (cc)  in arch_try_cmpxchg128()
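
arch_cmpxchg128()/arch_try_cmpxchg128() above implement the usual compare-and-swap contract: the swap succeeds only if *ptr still holds the expected old value, and the try_ variant also hands back what was actually observed. The same contract, sketched portably with the GCC/Clang builtin instead of s390 asm (illustrative, 64-bit for brevity):

    #include <stdbool.h>
    #include <stdint.h>

    /* try_cmpxchg-style retry loop: on failure the builtin refreshes
     * 'old' with the value actually seen, so the loop just retries.
     */
    static void atomic_add_sketch(uint64_t *ptr, uint64_t inc)
    {
            uint64_t old = __atomic_load_n(ptr, __ATOMIC_RELAXED);

            while (!__atomic_compare_exchange_n(ptr, &old, old + inc,
                                                false, __ATOMIC_SEQ_CST,
                                                __ATOMIC_SEQ_CST))
                    ;               /* 'old' was refreshed; retry */
    }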

/linux/Documentation/devicetree/bindings/net/

mscc,vsc7514-switch.yaml
     60  - description: qs target
     85  - const: qs
    153  reg-names = "sys", "rew", "qs", "ptp", "port0", "port1",
    200  reg-names = "sys", "rew", "qs", "ptp", "port0", "port1",

/linux/drivers/net/ethernet/hisilicon/hns/

hnae.c
    299  hnae_fini_queue(handle->qs[i]);  in hnae_reinit_handle()
    305  ret = hnae_init_queue(handle, handle->qs[i], handle->dev);  in hnae_reinit_handle()
    312  hnae_fini_queue(handle->qs[j]);  in hnae_reinit_handle()
    351  ret = hnae_init_queue(handle, handle->qs[i], dev);  in hnae_get_handle()
    364  hnae_fini_queue(handle->qs[j]);  in hnae_get_handle()
    378  hnae_fini_queue(h->qs[i]);  in hnae_put_handle()
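
The hnae hits show the standard partial-failure unwind: queues are initialized in order and, on error, only the ones already initialized (index j < i) are torn down. A sketch with hypothetical helpers (not the hnae API):

    struct queue;                   /* opaque; hypothetical */
    int queue_init(struct queue *q);
    void queue_fini(struct queue *q);

    static int init_all_queues(struct queue **qs, int n)
    {
            int i, ret;

            for (i = 0; i < n; i++) {
                    ret = queue_init(qs[i]);
                    if (ret)
                            goto unwind;
            }
            return 0;

    unwind:
            while (--i >= 0)        /* fini only what was inited */
                    queue_fini(qs[i]);
            return ret;
    }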

/linux/drivers/scsi/elx/efct/

efct_hw_queues.c
    199  struct sli4_queue *qs[SLI4_MAX_CQ_SET_COUNT];  in efct_hw_new_cq_set() local
    217  qs[i] = cq->queue;  in efct_hw_new_cq_set()
    222  if (sli_cq_alloc_set(sli4, qs, num_cqs, entry_count, assefct)) {  in efct_hw_new_cq_set()
    315  struct sli4_queue *qs[SLI4_MAX_RQ_SET_COUNT * 2] = { NULL };  in efct_hw_new_rq_set() local
    342  qs[q_count] = rq->hdr;  in efct_hw_new_rq_set()
    349  qs[q_count + 1] = rq->data;  in efct_hw_new_rq_set()
    354  if (sli_fc_rq_set_alloc(&hw->sli, num_rq_pairs, qs,  in efct_hw_new_rq_set()

/linux/drivers/net/dsa/ocelot/

ocelot_ext.c
     47  [QS] = "qs",

/linux/net/sched/

sch_fq_codel.c
    648  struct gnet_stats_queue qs = { 0 };  in fq_codel_dump_class_stats() local
    675  qs.qlen++;  in fq_codel_dump_class_stats()
    680  qs.backlog = q->backlogs[idx];  in fq_codel_dump_class_stats()
    681  qs.drops = 0;  in fq_codel_dump_class_stats()
    683  if (gnet_stats_copy_queue(d, NULL, &qs, qs.qlen) < 0)  in fq_codel_dump_class_stats()

/linux/include/uapi/sound/

hdspm.h
     27  qs  enumerator
     41  __u8 speed;  /* enum {ss, ds, qs} */

/linux/tools/testing/selftests/drivers/net/

stats.py
     98  for qs in stats:
     99  if qs["ifindex"]== test.ifindex:
    100  return qs

/linux/Documentation/RCU/Design/Requirements/

ReadersPartitionGP1.svg
    342  y="331.66351">QS</tspan></text>
    366  y="523.77856">QS</tspan></text>
    403  y="336.96619">QS</tspan></text>
    499  y="427.29443">QS</tspan></text>
    554  y="422.79153">QS</tspan></text>
    578  y="609.59003">QS</tspan></text>
    602  y="586.99133">QS</tspan></text>