Lines matching refs:scrq in drivers/net/ethernet/ibm/ibmvnic.c (cross-reference output: each line shows the source line number, the matching code, and the enclosing function)
3911 struct ibmvnic_sub_crq_queue *scrq) in reset_one_sub_crq_queue() argument
3915 if (!scrq) { in reset_one_sub_crq_queue()
3920 if (scrq->irq) { in reset_one_sub_crq_queue()
3921 free_irq(scrq->irq, scrq); in reset_one_sub_crq_queue()
3922 irq_dispose_mapping(scrq->irq); in reset_one_sub_crq_queue()
3923 scrq->irq = 0; in reset_one_sub_crq_queue()
3926 if (scrq->msgs) { in reset_one_sub_crq_queue()
3927 memset(scrq->msgs, 0, 4 * PAGE_SIZE); in reset_one_sub_crq_queue()
3928 atomic_set(&scrq->used, 0); in reset_one_sub_crq_queue()
3929 scrq->cur = 0; in reset_one_sub_crq_queue()
3930 scrq->ind_buf.index = 0; in reset_one_sub_crq_queue()
3936 rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token, in reset_one_sub_crq_queue()
3937 4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq); in reset_one_sub_crq_queue()
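Read together, the reset_one_sub_crq_queue() matches above show the reset pattern: release the Linux IRQ and its mapping, zero the 4 * PAGE_SIZE message ring in place, rewind the consumer state, and re-register the same DMA token with the hypervisor via h_reg_sub_crq(). A minimal user-space model of the zero-and-rewind step follows; the struct, the mock_* names, and the 4096-byte page size are simplified stand-ins, not the driver's types:

    #include <string.h>
    #include <stdatomic.h>

    #define RING_BYTES (4 * 4096)   /* mirrors 4 * PAGE_SIZE in the driver */

    struct mock_scrq {
        unsigned char msgs[RING_BYTES]; /* message ring, zeroed on reset */
        atomic_int used;                /* in-flight entries */
        unsigned int cur;               /* consumer cursor */
        unsigned int ind_index;         /* indirect-buffer fill index */
    };

    /* Rewind a queue without reallocating: same memory, clean state. */
    static void mock_reset(struct mock_scrq *q)
    {
        memset(q->msgs, 0, sizeof(q->msgs));
        atomic_store(&q->used, 0);
        q->cur = 0;
        q->ind_index = 0;
        /* the real driver then re-registers msg_token via h_reg_sub_crq() */
    }

Keeping the buffer and DMA token across the reset is what lets the driver hand the hypervisor the same msg_token again instead of re-running the whole allocation path.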
3968 struct ibmvnic_sub_crq_queue *scrq, in release_sub_crq_queue() argument
3981 scrq->crq_num); in release_sub_crq_queue()
3987 scrq->crq_num, rc); in release_sub_crq_queue()
3993 scrq->ind_buf.indir_arr, in release_sub_crq_queue()
3994 scrq->ind_buf.indir_dma); in release_sub_crq_queue()
3996 dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE, in release_sub_crq_queue()
3998 free_pages((unsigned long)scrq->msgs, 2); in release_sub_crq_queue()
3999 free_cpumask_var(scrq->affinity_mask); in release_sub_crq_queue()
4000 kfree(scrq); in release_sub_crq_queue()
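The release_sub_crq_queue() matches show teardown in reverse order of construction: deregister the queue handle (crq_num) with the hypervisor, free the DMA-coherent indirect array, unmap the message-ring DMA mapping, free the order-2 page allocation behind msgs, free the affinity mask, and finally kfree() the structure. A schematic user-space sketch of that LIFO ordering; the mock_* names are hypothetical and the hypervisor/DMA steps are reduced to comments:

    #include <stdlib.h>

    struct mock_scrq2 {
        void *msgs;       /* 4-page message ring */
        void *indir_arr;  /* indirect descriptor array */
    };

    /* Teardown mirrors allocation in reverse: hypervisor handle first,
     * then DMA buffers, then the containing structure itself. */
    static void mock_release(struct mock_scrq2 *q)
    {
        /* 1. deregister crq_num with the hypervisor (H_FREE_SUB_CRQ) */
        /* 2. free the coherent indirect array (dma_free_coherent) */
        free(q->indir_arr);
        /* 3. unmap and free the message-ring pages */
        free(q->msgs);
        /* 4. free the queue structure */
        free(q);
    }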
4007 struct ibmvnic_sub_crq_queue *scrq; in init_sub_crq_queue() local
4010 scrq = kzalloc(sizeof(*scrq), GFP_KERNEL); in init_sub_crq_queue()
4011 if (!scrq) in init_sub_crq_queue()
4014 scrq->msgs = in init_sub_crq_queue()
4016 if (!scrq->msgs) { in init_sub_crq_queue()
4020 if (!zalloc_cpumask_var(&scrq->affinity_mask, GFP_KERNEL)) in init_sub_crq_queue()
4023 scrq->msg_token = dma_map_single(dev, scrq->msgs, 4 * PAGE_SIZE, in init_sub_crq_queue()
4025 if (dma_mapping_error(dev, scrq->msg_token)) { in init_sub_crq_queue()
4030 rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token, in init_sub_crq_queue()
4031 4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq); in init_sub_crq_queue()
4043 scrq->adapter = adapter; in init_sub_crq_queue()
4044 scrq->size = 4 * PAGE_SIZE / sizeof(*scrq->msgs); in init_sub_crq_queue()
4045 scrq->ind_buf.index = 0; in init_sub_crq_queue()
4047 scrq->ind_buf.indir_arr = in init_sub_crq_queue()
4050 &scrq->ind_buf.indir_dma, in init_sub_crq_queue()
4053 if (!scrq->ind_buf.indir_arr) in init_sub_crq_queue()
4056 spin_lock_init(&scrq->lock); in init_sub_crq_queue()
4060 scrq->crq_num, scrq->hw_irq, scrq->irq); in init_sub_crq_queue()
4062 return scrq; in init_sub_crq_queue()
4068 scrq->crq_num); in init_sub_crq_queue()
4071 dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE, in init_sub_crq_queue()
4074 free_cpumask_var(scrq->affinity_mask); in init_sub_crq_queue()
4076 free_pages((unsigned long)scrq->msgs, 2); in init_sub_crq_queue()
4078 kfree(scrq); in init_sub_crq_queue()
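init_sub_crq_queue() is a textbook example of the kernel's goto-based error unwind, visible in the cleanup tail above (lines 4068-4078): each successful allocation pushes one more label onto the exit path, so a failure at any step frees exactly what has been acquired so far and the caller sees a single NULL. A compilable user-space sketch of the same idiom, with illustrative sizes standing in for the driver's 4-page ring and indirect array:

    #include <stdlib.h>

    struct mock_queue {
        void *msgs;
        void *indir_arr;
    };

    static struct mock_queue *mock_init(void)
    {
        struct mock_queue *q = calloc(1, sizeof(*q));
        if (!q)
            return NULL;

        q->msgs = calloc(4, 4096);      /* stands in for the 4-page ring */
        if (!q->msgs)
            goto free_queue;

        q->indir_arr = calloc(128, 32); /* stands in for the indirect array */
        if (!q->indir_arr)
            goto free_msgs;

        return q;                       /* fully constructed */

    free_msgs:
        free(q->msgs);
    free_queue:
        free(q);
        return NULL;                    /* caller sees one failure value */
    }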
4143 struct ibmvnic_sub_crq_queue *scrq) in disable_scrq_irq() argument
4149 H_DISABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0); in disable_scrq_irq()
4152 scrq->hw_irq, rc); in disable_scrq_irq()
4159 static void ibmvnic_xics_eoi(struct device *dev, struct ibmvnic_sub_crq_queue *scrq) in ibmvnic_xics_eoi() argument
4161 u64 val = 0xff000000 | scrq->hw_irq; in ibmvnic_xics_eoi()
4174 struct ibmvnic_sub_crq_queue *scrq) in ibmvnic_clear_pending_interrupt() argument
4177 ibmvnic_xics_eoi(dev, scrq); in ibmvnic_clear_pending_interrupt()
4181 struct ibmvnic_sub_crq_queue *scrq) in enable_scrq_irq() argument
4186 if (scrq->hw_irq > 0x100000000ULL) { in enable_scrq_irq()
4187 dev_err(dev, "bad hw_irq = %lx\n", scrq->hw_irq); in enable_scrq_irq()
4193 ibmvnic_clear_pending_interrupt(dev, scrq); in enable_scrq_irq()
4197 H_ENABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0); in enable_scrq_irq()
4200 scrq->hw_irq, rc); in enable_scrq_irq()
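disable_scrq_irq() and enable_scrq_irq() never touch the interrupt controller directly: both go through an H_VIOCTL hypercall (H_DISABLE_VIO_INTERRUPT / H_ENABLE_VIO_INTERRUPT) keyed by the queue's hw_irq, and on the XICS controller the enable path first clears any interrupt that latched while the source was masked by writing an EOI value of 0xff000000 | hw_irq (priority 0xff in the top byte, source number below), as ibmvnic_xics_eoi() shows. A mock of that flow; the mock_* names and printf bodies are stand-ins for the real hypercalls:

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in for the H_VIOCTL hypercall: the driver traps to the
     * hypervisor here; this mock just records the request. */
    static long mock_h_vioctl(uint64_t hw_irq, int enable)
    {
        printf("vioctl: %s hw_irq %#llx\n", enable ? "enable" : "disable",
               (unsigned long long)hw_irq);
        return 0;
    }

    static long mock_disable_irq(uint64_t hw_irq)
    {
        return mock_h_vioctl(hw_irq, 0);
    }

    static long mock_enable_irq(uint64_t hw_irq)
    {
        /* XICS EOI: priority 0xff in the top byte, source number below;
         * clears anything that latched while the source was masked. */
        uint64_t eoi = 0xff000000ULL | hw_irq;

        printf("eoi write: %#llx\n", (unsigned long long)eoi);
        return mock_h_vioctl(hw_irq, 1);
    }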
4205 struct ibmvnic_sub_crq_queue *scrq) in ibmvnic_complete_tx() argument
4216 while (pending_scrq(adapter, scrq)) { in ibmvnic_complete_tx()
4217 unsigned int pool = scrq->pool_index; in ibmvnic_complete_tx()
4219 next = ibmvnic_next_scrq(adapter, scrq); in ibmvnic_complete_tx()
4255 if (atomic_sub_return(num_entries, &scrq->used) <= in ibmvnic_complete_tx()
4258 scrq->pool_index)) { in ibmvnic_complete_tx()
4262 scrq->pool_index); in ibmvnic_complete_tx()
4265 scrq->pool_index); in ibmvnic_complete_tx()
4271 enable_scrq_irq(adapter, scrq); in ibmvnic_complete_tx()
4273 if (pending_scrq(adapter, scrq)) { in ibmvnic_complete_tx()
4274 disable_scrq_irq(adapter, scrq); in ibmvnic_complete_tx()
4278 txq = netdev_get_tx_queue(adapter->netdev, scrq->pool_index); in ibmvnic_complete_tx()
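ibmvnic_complete_tx() drains completions while pending_scrq() reports work, wakes the stopped netdev TX queue once the used count falls to a threshold, then re-enables the interrupt and immediately re-checks the ring: a completion that arrived between the last poll and the enable would otherwise sit unserviced until the next interrupt, so if new work is visible the IRQ is disabled again and draining resumes. A self-contained model of that enable-then-recheck pattern; the counter and mock_* helpers stand in for real completions:

    #include <stdbool.h>
    #include <stdio.h>

    static int work = 3;                 /* pretend three completions are queued */

    static bool mock_pending(void)     { return work > 0; }
    static void mock_drain(void)       { work--; printf("drained, %d left\n", work); }
    static void mock_enable_irq(void)  { puts("irq enabled"); }
    static void mock_disable_irq(void) { puts("irq disabled"); }

    /* Enable-then-recheck: a completion landing after the final poll but
     * before the IRQ is armed would be lost, so look again after enabling
     * and resume draining if anything showed up. */
    static void mock_complete_tx(void)
    {
    restart_loop:
        while (mock_pending())
            mock_drain();

        mock_enable_irq();
        if (mock_pending()) {
            mock_disable_irq();
            goto restart_loop;
        }
    }

    int main(void)
    {
        mock_complete_tx();
        return 0;
    }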
4286 struct ibmvnic_sub_crq_queue *scrq = instance; in ibmvnic_interrupt_tx() local
4287 struct ibmvnic_adapter *adapter = scrq->adapter; in ibmvnic_interrupt_tx()
4289 disable_scrq_irq(adapter, scrq); in ibmvnic_interrupt_tx()
4290 ibmvnic_complete_tx(adapter, scrq); in ibmvnic_interrupt_tx()
4297 struct ibmvnic_sub_crq_queue *scrq = instance; in ibmvnic_interrupt_rx() local
4298 struct ibmvnic_adapter *adapter = scrq->adapter; in ibmvnic_interrupt_rx()
4306 adapter->rx_stats_buffers[scrq->scrq_num].interrupts++; in ibmvnic_interrupt_rx()
4308 if (napi_schedule_prep(&adapter->napi[scrq->scrq_num])) { in ibmvnic_interrupt_rx()
4309 disable_scrq_irq(adapter, scrq); in ibmvnic_interrupt_rx()
4310 __napi_schedule(&adapter->napi[scrq->scrq_num]); in ibmvnic_interrupt_rx()
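The two hard-IRQ handlers split the work: ibmvnic_interrupt_tx() disables the source and drains completions inline, while ibmvnic_interrupt_rx() defers to NAPI, where napi_schedule_prep() atomically claims the instance so only the winning caller disables the queue IRQ and schedules the poll. A runnable model using an atomic flag in place of the NAPI state bit; all names here are illustrative:

    #include <stdatomic.h>
    #include <stdio.h>

    /* One bit of NAPI-like state: "a poll is already scheduled". */
    static atomic_flag napi_scheduled = ATOMIC_FLAG_INIT;

    static void mock_disable_irq(void)   { puts("rx irq disabled"); }
    static void mock_schedule_poll(void) { puts("napi poll scheduled"); }

    /* RX hard-IRQ path: only the caller that claims the flag (the role
     * napi_schedule_prep() plays in the driver) disables the source and
     * schedules the poll; a racing interrupt is a no-op. */
    static void mock_interrupt_rx(void)
    {
        if (!atomic_flag_test_and_set(&napi_scheduled)) {
            mock_disable_irq();
            mock_schedule_poll();
        }
    }

    int main(void)
    {
        mock_interrupt_rx();  /* wins the flag: disables + schedules */
        mock_interrupt_rx();  /* already scheduled: does nothing */
        return 0;
    }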
4319 struct ibmvnic_sub_crq_queue *scrq; in init_sub_crq_irqs() local
4326 scrq = adapter->tx_scrq[i]; in init_sub_crq_irqs()
4327 scrq->irq = irq_create_mapping(NULL, scrq->hw_irq); in init_sub_crq_irqs()
4329 if (!scrq->irq) { in init_sub_crq_irqs()
4335 snprintf(scrq->name, sizeof(scrq->name), "ibmvnic-%x-tx%d", in init_sub_crq_irqs()
4337 rc = request_irq(scrq->irq, ibmvnic_interrupt_tx, in init_sub_crq_irqs()
4338 0, scrq->name, scrq); in init_sub_crq_irqs()
4342 scrq->irq, rc); in init_sub_crq_irqs()
4343 irq_dispose_mapping(scrq->irq); in init_sub_crq_irqs()
4351 scrq = adapter->rx_scrq[i]; in init_sub_crq_irqs()
4352 scrq->irq = irq_create_mapping(NULL, scrq->hw_irq); in init_sub_crq_irqs()
4353 if (!scrq->irq) { in init_sub_crq_irqs()
4358 snprintf(scrq->name, sizeof(scrq->name), "ibmvnic-%x-rx%d", in init_sub_crq_irqs()
4360 rc = request_irq(scrq->irq, ibmvnic_interrupt_rx, in init_sub_crq_irqs()
4361 0, scrq->name, scrq); in init_sub_crq_irqs()
4364 scrq->irq, rc); in init_sub_crq_irqs()
4365 irq_dispose_mapping(scrq->irq); in init_sub_crq_irqs()
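init_sub_crq_irqs() wires each queue in two steps: irq_create_mapping() translates the hypervisor-assigned hw_irq into a Linux virq, then request_irq() attaches the handler with the scrq pointer as the cookie (the same pointer reset_one_sub_crq_queue() later passes to free_irq()), and the name built into scrq->name is what shows up in /proc/interrupts. On a failed request the mapping is disposed so the virq does not leak. A user-space sketch of that map/request/unwind sequence with mock_* stand-ins:

    #include <stdio.h>

    static unsigned int mock_create_mapping(unsigned long hw_irq)
    {
        return (unsigned int)hw_irq + 100;   /* fake virq allocation */
    }

    static int mock_request(unsigned int virq, void *cookie)
    {
        printf("handler attached to virq %u (cookie %p)\n", virq, cookie);
        return 0;
    }

    static void mock_dispose_mapping(unsigned int virq)
    {
        printf("virq %u disposed\n", virq);
    }

    /* Two-step IRQ wiring with unwind: a failed request must also undo
     * the mapping created just before it, or the virq leaks. */
    static int mock_setup_queue_irq(unsigned long hw_irq, void *scrq)
    {
        unsigned int virq = mock_create_mapping(hw_irq);
        if (!virq)
            return -1;

        if (mock_request(virq, scrq)) {
            mock_dispose_mapping(virq);
            return -1;
        }
        return 0;
    }

    int main(void)
    {
        return mock_setup_queue_irq(5, (void *)0) ? 1 : 0;
    }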
4624 struct ibmvnic_sub_crq_queue *scrq) in pending_scrq() argument
4626 union sub_crq *entry = &scrq->msgs[scrq->cur]; in pending_scrq()
4640 struct ibmvnic_sub_crq_queue *scrq) in ibmvnic_next_scrq() argument
4645 spin_lock_irqsave(&scrq->lock, flags); in ibmvnic_next_scrq()
4646 entry = &scrq->msgs[scrq->cur]; in ibmvnic_next_scrq()
4648 if (++scrq->cur == scrq->size) in ibmvnic_next_scrq()
4649 scrq->cur = 0; in ibmvnic_next_scrq()
4653 spin_unlock_irqrestore(&scrq->lock, flags); in ibmvnic_next_scrq()
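pending_scrq() peeks at msgs[cur] to see whether the hardware has stamped the next entry valid, and ibmvnic_next_scrq() consumes it under the queue spinlock, advancing cur and wrapping to zero when it reaches size. A runnable user-space model of the cursor arithmetic, using a pthread mutex in place of the driver's spinlock and a nonzero word as the validity mark; the mock_* names and ring size are assumptions:

    #include <pthread.h>
    #include <stddef.h>

    #define MOCK_SIZE 16

    struct mock_ring {
        int msgs[MOCK_SIZE];    /* nonzero word == entry stamped valid */
        unsigned int cur;       /* consumer cursor */
        pthread_mutex_t lock;   /* stands in for scrq->lock */
    };

    /* Peek: is the entry at the cursor marked valid by the producer? */
    static int mock_pending(struct mock_ring *r)
    {
        return r->msgs[r->cur] != 0;
    }

    /* Consume: return the live entry and advance the cursor, wrapping
     * at the ring size, all under the lock as the driver does. */
    static int *mock_next(struct mock_ring *r)
    {
        int *entry = NULL;

        pthread_mutex_lock(&r->lock);
        if (mock_pending(r)) {
            entry = &r->msgs[r->cur];
            if (++r->cur == MOCK_SIZE)
                r->cur = 0;
        }
        pthread_mutex_unlock(&r->lock);
        return entry;
    }

    int main(void)
    {
        struct mock_ring r = { .lock = PTHREAD_MUTEX_INITIALIZER };
        r.msgs[0] = 42;                 /* producer stamps one entry */

        int *e = mock_next(&r);         /* consumes msgs[0], cur -> 1 */
        return e && *e == 42 ? 0 : 1;
    }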