/linux/arch/sh/kernel/cpu/sh2a/

  fpu.c
      96  unsigned int ix, iy;    in denormal_mulf() local
     100  ix = hx & 0x7fffffff;    in denormal_mulf()
     102  if (iy < 0x00800000 || ix == 0)    in denormal_mulf()
     106  ix &= 0x007fffff;    in denormal_mulf()
     108  m = (unsigned long long)ix * iy;    in denormal_mulf()
     116  ix = ((int) (m >> (w - 23)) & 0x007fffff) | (exp << 23);    in denormal_mulf()
     118  ix = (int) (m >> (w - 22 - exp)) & 0x007fffff;    in denormal_mulf()
     120  ix = 0;    in denormal_mulf()
     122  ix |= (hx ^ hy) & 0x80000000;    in denormal_mulf()
     123  return ix;    in denormal_mulf()
          [all …]
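The denormal_mulf() hits revolve around the IEEE 754 single-precision bit layout (sign in bit 31, 8 exponent bits, 23 fraction bits, implicit bit 0x00800000). A minimal, self-contained userspace sketch of that layout — not the kernel code — pulling the fields apart with the same masks:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            float f = 1.5f;
            uint32_t ix;

            memcpy(&ix, &f, sizeof(ix));               /* reinterpret the bits safely */

            unsigned int sign     = (ix & 0x80000000u) >> 31;
            unsigned int exponent = (ix >> 23) & 0xffu;
            unsigned int mantissa = ix & 0x007fffffu;  /* 23 fraction bits */

            printf("sign=%u exp=%u mant=0x%06x\n", sign, exponent, mantissa);
            return 0;
    }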
|
/linux/arch/mips/math-emu/

  sp_sqrt.c
      14  int ix, s, q, m, t, i;    in ieee754sp_sqrt() local
      56  ix = x.bits;    in ieee754sp_sqrt()
      59  m = (ix >> 23);    in ieee754sp_sqrt()
      61  for (i = 0; (ix & 0x00800000) == 0; i++)    in ieee754sp_sqrt()
      62  ix <<= 1;    in ieee754sp_sqrt()
      66  ix = (ix & 0x007fffff) | 0x00800000;    in ieee754sp_sqrt()
      68  ix += ix;    in ieee754sp_sqrt()
      72  ix += ix;    in ieee754sp_sqrt()
      79  if (t <= ix) {    in ieee754sp_sqrt()
      81  ix -= t;    in ieee754sp_sqrt()
          [all …]
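The loop at lines 61–62 of ieee754sp_sqrt() is the standard way to normalise a denormal: shift the fraction left until the implicit 1 (bit 23) appears, counting shifts so the exponent can be corrected. A small illustrative sketch of just that step:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t ix = 0x00000123;               /* fraction bits of a denormal */
            int shift = 0;

            while ((ix & 0x00800000u) == 0) {       /* until the implicit bit is set */
                    ix <<= 1;
                    shift++;
            }
            printf("normalised=0x%08x after %d shifts\n", ix, shift);
            return 0;
    }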
|
/linux/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/

  pool.c
      46  static int mlx5e_xsk_add_pool(struct mlx5e_xsk *xsk, struct xsk_buff_pool *pool, u16 ix)    in mlx5e_xsk_add_pool() argument
      54  xsk->pools[ix] = pool;    in mlx5e_xsk_add_pool()
      58  static void mlx5e_xsk_remove_pool(struct mlx5e_xsk *xsk, u16 ix)    in mlx5e_xsk_remove_pool() argument
      60  xsk->pools[ix] = NULL;    in mlx5e_xsk_remove_pool()
      79  struct xsk_buff_pool *pool, u16 ix)    in mlx5e_xsk_enable_locked() argument
      86  if (unlikely(mlx5e_xsk_get_pool(&priv->channels.params, &priv->xsk, ix)))    in mlx5e_xsk_enable_locked()
      92  err = mlx5e_xsk_map_pool(mlx5_sd_ch_ix_get_dev(priv->mdev, ix), pool);    in mlx5e_xsk_enable_locked()
      96  err = mlx5e_xsk_add_pool(&priv->xsk, pool, ix);    in mlx5e_xsk_enable_locked()
     123  c = priv->channels.c[ix];    in mlx5e_xsk_enable_locked()
     136  mlx5e_rx_res_xsk_update(priv->rx_res, &priv->channels, ix, true);    in mlx5e_xsk_enable_locked()
          [all …]

  pool.h
      10  struct mlx5e_xsk *xsk, u16 ix)    in mlx5e_xsk_get_pool() argument
      15  if (unlikely(ix >= params->num_channels))    in mlx5e_xsk_get_pool()
      18  return xsk->pools[ix];    in mlx5e_xsk_get_pool()

  rx.h
      11  int mlx5e_xsk_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix);
      12  int mlx5e_xsk_alloc_rx_wqes_batched(struct mlx5e_rq *rq, u16 ix, int wqe_bulk);
      13  int mlx5e_xsk_alloc_rx_wqes(struct mlx5e_rq *rq, u16 ix, int wqe_bulk);

  rx.c
      19  int mlx5e_xsk_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)    in mlx5e_xsk_alloc_rx_mpwqe() argument
      21  struct mlx5e_mpw_info *wi = mlx5e_get_mpw_info(rq, ix);    in mlx5e_xsk_alloc_rx_mpwqe()
     130  offset = ix * rq->mpwqe.mtts_per_wqe;    in mlx5e_xsk_alloc_rx_mpwqe()
     160  int mlx5e_xsk_alloc_rx_wqes_batched(struct mlx5e_rq *rq, u16 ix, int wqe_bulk)    in mlx5e_xsk_alloc_rx_wqes_batched() argument
     171  contig = mlx5_wq_cyc_get_size(wq) - ix;    in mlx5e_xsk_alloc_rx_wqes_batched()
     173  alloc = xsk_buff_alloc_batch(rq->xsk_pool, buffs + ix, wqe_bulk);    in mlx5e_xsk_alloc_rx_wqes_batched()
     175  alloc = xsk_buff_alloc_batch(rq->xsk_pool, buffs + ix, contig);    in mlx5e_xsk_alloc_rx_wqes_batched()
     181  int j = mlx5_wq_cyc_ctr2ix(wq, ix + i);    in mlx5e_xsk_alloc_rx_wqes_batched()
     198  int mlx5e_xsk_alloc_rx_wqes(struct mlx5e_rq *rq, u16 ix, int wqe_bulk)    in mlx5e_xsk_alloc_rx_wqes() argument
     204  int j = mlx5_wq_cyc_ctr2ix(wq, ix + i);    in mlx5e_xsk_alloc_rx_wqes()
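mlx5e_xsk_get_pool() in pool.h shows the guard pattern used throughout these files: the channel index ix is bounds-checked against num_channels before the per-channel array is dereferenced. A generic sketch of that pattern — the struct names here are made up, not the driver's types:

    #include <stddef.h>
    #include <stdio.h>

    struct pool { int id; };

    struct pool_table {
            struct pool **pools;
            unsigned int num_channels;
    };

    static struct pool *pool_get(const struct pool_table *t, unsigned int ix)
    {
            if (ix >= t->num_channels)      /* out-of-range channel: no pool */
                    return NULL;
            return t->pools[ix];            /* may still be NULL if nothing is attached */
    }

    int main(void)
    {
            struct pool p = { .id = 42 };
            struct pool *slots[4] = { NULL, &p, NULL, NULL };
            struct pool_table table = { .pools = slots, .num_channels = 4 };

            printf("%p %p\n", (void *)pool_get(&table, 1), (void *)pool_get(&table, 9));
            return 0;
    }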
|
/linux/drivers/net/ethernet/mellanox/mlx5/core/en/

  channels.c
      14  static struct mlx5e_channel *mlx5e_channels_get(struct mlx5e_channels *chs, unsigned int ix)    in mlx5e_channels_get() argument
      16  WARN_ON_ONCE(ix >= mlx5e_channels_get_num(chs));    in mlx5e_channels_get()
      17  return chs->c[ix];    in mlx5e_channels_get()
      20  bool mlx5e_channels_is_xsk(struct mlx5e_channels *chs, unsigned int ix)    in mlx5e_channels_is_xsk() argument
      22  struct mlx5e_channel *c = mlx5e_channels_get(chs, ix);    in mlx5e_channels_is_xsk()
      27  void mlx5e_channels_get_regular_rqn(struct mlx5e_channels *chs, unsigned int ix, u32 *rqn,    in mlx5e_channels_get_regular_rqn() argument
      30  struct mlx5e_channel *c = mlx5e_channels_get(chs, ix);    in mlx5e_channels_get_regular_rqn()
      37  void mlx5e_channels_get_xsk_rqn(struct mlx5e_channels *chs, unsigned int ix, u32 *rqn,    in mlx5e_channels_get_xsk_rqn() argument
      40  struct mlx5e_channel *c = mlx5e_channels_get(chs, ix);    in mlx5e_channels_get_xsk_rqn()

  channels.h
      12  bool mlx5e_channels_is_xsk(struct mlx5e_channels *chs, unsigned int ix);
      13  void mlx5e_channels_get_regular_rqn(struct mlx5e_channels *chs, unsigned int ix, u32 *rqn,
      15  void mlx5e_channels_get_xsk_rqn(struct mlx5e_channels *chs, unsigned int ix, u32 *rqn,

  rqt.c
     118  unsigned int ix = i;    in mlx5e_calc_indir_rqns() local
     121  ix = mlx5e_bits_invert(ix, ilog2(indir->actual_table_size));    in mlx5e_calc_indir_rqns()
     123  ix = indir->table[ix];    in mlx5e_calc_indir_rqns()
     125  if (WARN_ON(ix >= num_rqns))    in mlx5e_calc_indir_rqns()
     130  rss_rqns[i] = rqns[ix];    in mlx5e_calc_indir_rqns()
     132  rss_vhca_ids[i] = vhca_ids[ix];    in mlx5e_calc_indir_rqns()

  fs_tt_redirect.c
     148  int ix = 0;    in fs_udp_create_groups() local
     178  MLX5_SET_CFG(in, start_flow_index, ix);    in fs_udp_create_groups()
     179  ix += MLX5E_FS_UDP_GROUP1_SIZE;    in fs_udp_create_groups()
     180  MLX5_SET_CFG(in, end_flow_index, ix - 1);    in fs_udp_create_groups()
     188  MLX5_SET_CFG(in, start_flow_index, ix);    in fs_udp_create_groups()
     189  ix += MLX5E_FS_UDP_GROUP2_SIZE;    in fs_udp_create_groups()
     190  MLX5_SET_CFG(in, end_flow_index, ix - 1);    in fs_udp_create_groups()
     430  int ix = 0;    in fs_any_create_groups() local
     449  MLX5_SET_CFG(in, start_flow_index, ix);    in fs_any_create_groups()
     450  ix += MLX5E_FS_ANY_GROUP1_SIZE;    in fs_any_create_groups()
          [all …]

  qos.c
      59  int ix;    in mlx5e_get_qos_sq() local
      61  ix = qid % params->num_channels;    in mlx5e_get_qos_sq()
      63  c = priv->channels.c[ix];    in mlx5e_get_qos_sq()
      76  int txq_ix, ix, qid, err = 0;    in mlx5e_open_qos_sq() local
     112  ix = node_qid % params->num_channels;    in mlx5e_open_qos_sq()
     114  c = chs->c[ix];    in mlx5e_open_qos_sq()
     217  int ix;    in mlx5e_close_qos_sq() local
     221  ix = qid % params->num_channels;    in mlx5e_close_qos_sq()
     223  c = priv->channels.c[ix];    in mlx5e_close_qos_sq()
     341  u16 qid = params->num_channels * i + c->ix;    in mlx5e_qos_deactivate_queues()
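The qos.c hits show how a flat queue id maps onto a channel: qid % num_channels recovers the channel index, and num_channels * tc + channel rebuilds the flat id from a traffic class and a channel. A plain-C sketch of that arithmetic, with illustrative values:

    #include <stdio.h>

    int main(void)
    {
            unsigned int num_channels = 8;
            unsigned int tc = 2, ch = 5;

            unsigned int qid = num_channels * tc + ch;   /* flatten (tc, channel) */
            unsigned int ix  = qid % num_channels;       /* recover the channel */
            unsigned int cls = qid / num_channels;       /* recover the class */

            printf("qid=%u -> channel=%u class=%u\n", qid, ix, cls);
            return 0;
    }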
|
/linux/fs/qnx4/

  dir.c
      24  int ix, ino;    in qnx4_readdir() local
      37  ix = (ctx->pos >> QNX4_DIR_ENTRY_SIZE_BITS) % QNX4_INODES_PER_BLOCK;    in qnx4_readdir()
      38  for (; ix < QNX4_INODES_PER_BLOCK; ix++, ctx->pos += QNX4_DIR_ENTRY_SIZE) {    in qnx4_readdir()
      42  offset = ix * QNX4_DIR_ENTRY_SIZE;    in qnx4_readdir()
      50  ino = blknum * QNX4_INODES_PER_BLOCK + ix - 1;    in qnx4_readdir()
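qnx4_readdir() turns the directory position into an entry index within a block, a byte offset, and finally an inode number. A sketch of that arithmetic; the constants and the block number are illustrative stand-ins, not the real QNX4_* values:

    #include <stdio.h>

    #define DIR_ENTRY_SIZE_BITS 6                   /* hypothetical: 64-byte entries */
    #define DIR_ENTRY_SIZE      (1 << DIR_ENTRY_SIZE_BITS)
    #define INODES_PER_BLOCK    8                   /* hypothetical */

    int main(void)
    {
            unsigned long pos = 5 * DIR_ENTRY_SIZE; /* 6th entry of the directory */
            unsigned long blknum = 3;               /* block backing this position, looked up elsewhere */

            int ix = (pos >> DIR_ENTRY_SIZE_BITS) % INODES_PER_BLOCK;
            int offset = ix * DIR_ENTRY_SIZE;       /* byte offset inside the block */
            long ino = blknum * INODES_PER_BLOCK + ix - 1;

            printf("ix=%d offset=%d ino=%ld\n", ix, offset, ino);
            return 0;
    }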
|
/linux/arch/s390/lib/

  spinlock.c
      83  int ix;    in arch_spin_lock_setup() local
      86  for (ix = 0; ix < 4; ix++, node++) {    in arch_spin_lock_setup()
      89  (ix << _Q_TAIL_IDX_OFFSET);    in arch_spin_lock_setup()
     138  int ix, cpu;    in arch_spin_decode_tail() local
     140  ix = (lock & _Q_TAIL_IDX_MASK) >> _Q_TAIL_IDX_OFFSET;    in arch_spin_decode_tail()
     142  return per_cpu_ptr(&spin_wait[ix], cpu - 1);    in arch_spin_decode_tail()
     159  int lockval, ix, node_id, tail_id, old, new, owner, count;    in arch_spin_lock_queued() local
     161  ix = get_lowcore()->spinlock_index++;    in arch_spin_lock_queued()
     164  node = this_cpu_ptr(&spin_wait[ix]);    in arch_spin_lock_queued()
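arch_spin_lock_setup() and arch_spin_decode_tail() pack a small per-CPU nesting index (0..3) into the lock word with a shift and pull it back out with a mask. A toy sketch of that encode/decode round trip; the offset and mask are illustrative, not s390's _Q_TAIL_* values:

    #include <stdio.h>

    #define TAIL_IDX_OFFSET 16                          /* hypothetical bit position */
    #define TAIL_IDX_MASK   (3u << TAIL_IDX_OFFSET)     /* two bits: nesting level 0..3 */

    int main(void)
    {
            unsigned int ix;

            for (ix = 0; ix < 4; ix++) {
                    unsigned int tail = ix << TAIL_IDX_OFFSET;                      /* encode */
                    unsigned int back = (tail & TAIL_IDX_MASK) >> TAIL_IDX_OFFSET;  /* decode */

                    printf("ix=%u tail=0x%05x decoded=%u\n", ix, tail, back);
            }
            return 0;
    }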
|
/linux/drivers/media/dvb-frontends/

  mxl692.c
     196  u32 ix, div_size;    in mxl692_checksum() local
     203  for (ix = 0; ix < div_size; ix++)    in mxl692_checksum()
     204  cur_cksum += be32_to_cpu(buf[ix]);    in mxl692_checksum()
     215  u32 ix, temp;    in mxl692_validate_fw_header() local
     235  for (ix = 16; ix < buf_len; ix++)    in mxl692_validate_fw_header()
     236  temp_cksum += buffer[ix];    in mxl692_validate_fw_header()
     251  u32 ix = 0, total_len = 0, addr = 0, chunk_len = 0, prevchunk_len = 0;    in mxl692_write_fw_block() local
     255  ix = *index;    in mxl692_write_fw_block()
     257  if (buffer[ix] == 0x53) {    in mxl692_write_fw_block()
     258  total_len = buffer[ix + 1] << 16 | buffer[ix + 2] << 8 | buffer[ix + 3];    in mxl692_write_fw_block()
          [all …]
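mxl692_checksum() sums the firmware buffer as big-endian 32-bit words. A userspace version of the same loop, using ntohl() in place of the kernel's be32_to_cpu():

    #include <arpa/inet.h>
    #include <stdint.h>
    #include <stdio.h>

    static uint32_t sum_be32(const uint32_t *buf, size_t len_bytes)
    {
            uint32_t cksum = 0;
            size_t div_size = len_bytes / sizeof(uint32_t);
            size_t ix;

            for (ix = 0; ix < div_size; ix++)
                    cksum += ntohl(buf[ix]);        /* big-endian word -> host order, then add */
            return cksum;
    }

    int main(void)
    {
            uint32_t words[2] = { htonl(0x11111111u), htonl(0x22222222u) };

            printf("checksum=0x%08x\n", sum_be32(words, sizeof(words)));
            return 0;
    }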
|
/linux/drivers/input/misc/

  yealink.c
     284  int ix, len;    in yealink_set_ringtone() local
     300  ix = 0;    in yealink_set_ringtone()
     301  while (size != ix) {    in yealink_set_ringtone()
     302  len = size - ix;    in yealink_set_ringtone()
     306  p->offset = cpu_to_be16(ix);    in yealink_set_ringtone()
     307  memcpy(p->data, &buf[ix], len);    in yealink_set_ringtone()
     309  ix += len;    in yealink_set_ringtone()
     319  int i, ix, len;    in yealink_do_idle_tasks() local
     321  ix = yld->stat_ix;    in yealink_do_idle_tasks()
     329  if (ix >= sizeof(yld->master)) {    in yealink_do_idle_tasks()
          [all …]
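yealink_set_ringtone() walks the ringtone buffer in packet-sized chunks, recording the offset of each chunk before copying it out. A self-contained sketch of that loop; the chunk size and packet layout are illustrative, not the device protocol:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define CHUNK 11                                /* hypothetical per-packet payload */

    struct pkt {
            uint16_t offset;                        /* the driver byte-swaps this with cpu_to_be16() */
            uint8_t  data[CHUNK];
    };

    static void send_in_chunks(const uint8_t *buf, int size)
    {
            int ix = 0;

            while (size != ix) {
                    struct pkt p = { 0 };
                    int len = size - ix;

                    if (len > CHUNK)
                            len = CHUNK;
                    p.offset = (uint16_t)ix;
                    memcpy(p.data, &buf[ix], len);
                    /* ...hand p to the device here... */
                    ix += len;
                    printf("chunk at offset %u, %d bytes\n", p.offset, len);
            }
    }

    int main(void)
    {
            uint8_t tone[30] = { 0 };

            send_in_chunks(tone, sizeof(tone));
            return 0;
    }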
|
/linux/drivers/gpu/drm/amd/pm/powerplay/hwmgr/

  smu_helper.h
     155  PHM_GET_FIELD(cgs_read_ind_register(device, port, ix##reg), \
     159  PHM_GET_FIELD(cgs_read_ind_register(device, port, ix##reg), \
     167  cgs_write_ind_register(device, port, ix##reg, \
     168  PHM_SET_FIELD(cgs_read_ind_register(device, port, ix##reg), \
     172  cgs_write_ind_register(device, port, ix##reg, \
     173  PHM_SET_FIELD(cgs_read_ind_register(device, port, ix##reg), \
     181  PHM_WAIT_INDIRECT_REGISTER_GIVEN_INDEX(hwmgr, port, ix##reg, value, mask)
     192  PHM_WAIT_INDIRECT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, port, ix##reg, value, mask)
     206  PHM_WAIT_VFPF_INDIRECT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, port, ix##reg, value, mask)
     220  PHM_WAIT_VFPF_INDIRECT_REGISTER_GIVEN_INDEX(hwmgr, port, ix##reg, value, mask)
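Every one of these macros relies on the preprocessor's ## token-pasting operator: the register name passed as reg is glued to the prefix ix, so the macro ends up reading the constant ixFOO rather than a variable. A tiny sketch of the mechanism with made-up names and values:

    #include <stdio.h>

    #define ixFOO 0x1234                            /* hypothetical indirect-register index */

    /* "reg" is pasted onto the "ix" prefix at expansion time. */
    #define READ_IND_REGISTER(reg)  read_ind_register(ix##reg)

    static unsigned int read_ind_register(unsigned int index)
    {
            return index;                           /* stand-in for the real hardware access */
    }

    int main(void)
    {
            /* Expands to read_ind_register(ixFOO), i.e. read_ind_register(0x1234). */
            printf("0x%x\n", READ_IND_REGISTER(FOO));
            return 0;
    }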
|
/linux/fs/netfs/

  rolling_buffer.c
     122  int nr, ix, to;    in rolling_buffer_load_from_ra() local
     132  ix = fq->vec.nr;    in rolling_buffer_load_from_ra()
     133  to = ix + nr;    in rolling_buffer_load_from_ra()
     135  for (; ix < to; ix++) {    in rolling_buffer_load_from_ra()
     136  struct folio *folio = folioq_folio(fq, ix);    in rolling_buffer_load_from_ra()
     139  fq->orders[ix] = order;    in rolling_buffer_load_from_ra()

  iterator.c
     113  unsigned int nbv = iter->nr_segs, ix = 0, nsegs = 0;    in netfs_limit_bvec() local
     122  while (n && ix < nbv && skip) {    in netfs_limit_bvec()
     123  len = bvecs[ix].bv_len;    in netfs_limit_bvec()
     128  ix++;    in netfs_limit_bvec()
     131  while (n && ix < nbv) {    in netfs_limit_bvec()
     132  len = min3(n, bvecs[ix].bv_len - skip, max_size);    in netfs_limit_bvec()
     135  ix++;    in netfs_limit_bvec()
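netfs_limit_bvec() walks a segment array in two phases: first it consumes "skip" bytes of leading data, then it counts how much of what remains fits under a size limit. A generic sketch of the same walk; the segment type below is a stand-in, not struct bio_vec:

    #include <stddef.h>
    #include <stdio.h>

    struct seg { size_t len; };

    static size_t limit_segs(const struct seg *segs, unsigned int nsegs,
                             size_t skip, size_t max_size)
    {
            size_t taken = 0;
            unsigned int ix = 0;

            /* Phase 1: skip whole leading segments (stop inside a partly-skipped one). */
            while (ix < nsegs && skip) {
                    size_t len = segs[ix].len;

                    if (len > skip)
                            break;          /* the rest of this segment is usable */
                    skip -= len;
                    ix++;
            }

            /* Phase 2: take data until the size limit is reached. */
            while (ix < nsegs && taken < max_size) {
                    size_t len = segs[ix].len - skip;

                    if (len > max_size - taken)
                            len = max_size - taken;
                    taken += len;
                    skip = 0;
                    ix++;
            }
            return taken;
    }

    int main(void)
    {
            struct seg s[3] = { { 100 }, { 200 }, { 300 } };

            /* Skip 150 bytes, then take at most 250: expect 250. */
            printf("%zu\n", limit_segs(s, 3, 150, 250));
            return 0;
    }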
|
/linux/drivers/net/ethernet/mellanox/mlx5/core/fpga/

  conn.c
     103  unsigned int ix;    in mlx5_fpga_conn_post_recv() local
     115  ix = conn->qp.rq.pc & (conn->qp.rq.size - 1);    in mlx5_fpga_conn_post_recv()
     116  data = mlx5_wq_cyc_get_wqe(&conn->qp.wq.rq, ix);    in mlx5_fpga_conn_post_recv()
     122  conn->qp.rq.bufs[ix] = buf;    in mlx5_fpga_conn_post_recv()
     146  unsigned int ix, sgi;    in mlx5_fpga_conn_post_send() local
     149  ix = conn->qp.sq.pc & (conn->qp.sq.size - 1);    in mlx5_fpga_conn_post_send()
     151  ctrl = mlx5_wq_cyc_get_wqe(&conn->qp.wq.sq, ix);    in mlx5_fpga_conn_post_send()
     171  conn->qp.sq.bufs[ix] = buf;    in mlx5_fpga_conn_post_send()
     254  int ix, err;    in mlx5_fpga_conn_rq_cqe() local
     256  ix = be16_to_cpu(cqe->wqe_counter) & (conn->qp.rq.size - 1);    in mlx5_fpga_conn_rq_cqe()
          [all …]
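mlx5_fpga_conn_post_recv() and mlx5_fpga_conn_post_send() turn a monotonically growing producer counter into a ring slot with pc & (size - 1), which works only because the queue size is a power of two. A few lines showing the wrap-around behaviour:

    #include <stdio.h>

    int main(void)
    {
            unsigned int size = 8;                          /* must be a power of two */
            unsigned int pc;

            for (pc = 0; pc < 10; pc++) {
                    unsigned int ix = pc & (size - 1);      /* 0..7, then wraps to 0, 1, ... */

                    printf("pc=%u slot=%u\n", pc, ix);
            }
            return 0;
    }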
|
/linux/net/rxrpc/

  call_event.c
      64  unsigned int ix = req->seq & RXRPC_TXQ_MASK;    in rxrpc_retransmit_data() local
      65  struct rxrpc_txbuf *txb = tq->bufs[ix];    in rxrpc_retransmit_data()
      67  _enter("%x,%x,%x,%x", tq->qbase, req->seq, ix, txb->debug_id);    in rxrpc_retransmit_data()
     111  unsigned int ix = __ffs(lost);    in rxrpc_resend() local
     112  struct rxrpc_txbuf *txb = tq->bufs[ix];    in rxrpc_resend()
     114  __clear_bit(ix, &lost);    in rxrpc_resend()
     118  req.seq = tq->qbase + ix;    in rxrpc_resend()
     231  int ix;    in rxrpc_transmit_fresh_data() local
     234  ix = seq & RXRPC_TXQ_MASK;    in rxrpc_transmit_fresh_data()
     235  if (!ix) {    in rxrpc_transmit_fresh_data()
          [all …]

  input.c
     207  int ix)    in rxrpc_add_data_rtt_sample() argument
     209  ktime_t xmit_ts = ktime_add_us(tq->xmit_ts_base, tq->segment_xmit_ts[ix]);    in rxrpc_add_data_rtt_sample()
     214  __clear_bit(ix, &tq->rtt_samples); /* Prevent repeat RTT sample */    in rxrpc_add_data_rtt_sample()
     251  unsigned int ix = seq - call->tx_qbase;    in rxrpc_rotate_tx_window() local
     253  _debug("tq=%x seq=%x i=%d f=%x", tq->qbase, seq, ix, tq->bufs[ix]->flags);    in rxrpc_rotate_tx_window()
     254  if (tq->bufs[ix]->flags & RXRPC_LAST_PACKET) {    in rxrpc_rotate_tx_window()
     259  if (summary->acked_serial == tq->segment_serial[ix] &&    in rxrpc_rotate_tx_window()
     260  test_bit(ix, &tq->rtt_samples))    in rxrpc_rotate_tx_window()
     261  rxrpc_add_data_rtt_sample(call, summary, tq, ix);    in rxrpc_rotate_tx_window()
     263  if (ix == tq->nr_reported_acks) {    in rxrpc_rotate_tx_window()
          [all …]
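The rxrpc_resend() hits in call_event.c above iterate a bitmask of lost segments: find the lowest set bit, use it as the buffer index, clear it, repeat. A userspace sketch using __builtin_ctzl() in place of the kernel's __ffs() and a plain mask update in place of __clear_bit():

    #include <stdio.h>

    int main(void)
    {
            unsigned long lost = 0x2c;                      /* bits 2, 3 and 5 set */

            while (lost) {
                    unsigned int ix = __builtin_ctzl(lost); /* index of the lowest set bit */

                    lost &= ~(1UL << ix);                   /* clear it */
                    printf("retransmit segment %u\n", ix);
            }
            return 0;
    }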
|
/linux/drivers/net/wireless/marvell/mwifiex/

  util.c
     801  int ix;    in mwifiex_hist_data_reset() local
     805  for (ix = 0; ix < MWIFIEX_MAX_AC_RX_RATES; ix++)    in mwifiex_hist_data_reset()
     806  atomic_set(&phist_data->rx_rate[ix], 0);    in mwifiex_hist_data_reset()
     807  for (ix = 0; ix < MWIFIEX_MAX_SNR; ix++)    in mwifiex_hist_data_reset()
     808  atomic_set(&phist_data->snr[ix], 0);    in mwifiex_hist_data_reset()
     809  for (ix = 0; ix < MWIFIEX_MAX_NOISE_FLR; ix++)    in mwifiex_hist_data_reset()
     810  atomic_set(&phist_data->noise_flr[ix], 0);    in mwifiex_hist_data_reset()
     811  for (ix = 0; ix < MWIFIEX_MAX_SIG_STRENGTH; ix++)    in mwifiex_hist_data_reset()
     812  atomic_set(&phist_data->sig_str[ix], 0);    in mwifiex_hist_data_reset()
|
/linux/drivers/gpu/drm/radeon/

  trinity_dpm.c
     541  u32 ix = index * TRINITY_SIZEOF_DPM_STATE_TABLE;    in trinity_set_divider_value() local
     548  value = RREG32_SMC(SMU_SCLK_DPM_STATE_0_CNTL_0 + ix);    in trinity_set_divider_value()
     551  WREG32_SMC(SMU_SCLK_DPM_STATE_0_CNTL_0 + ix, value);    in trinity_set_divider_value()
     558  value = RREG32_SMC(SMU_SCLK_DPM_STATE_0_PG_CNTL + ix);    in trinity_set_divider_value()
     561  WREG32_SMC(SMU_SCLK_DPM_STATE_0_PG_CNTL + ix, value);    in trinity_set_divider_value()
     568  u32 ix = index * TRINITY_SIZEOF_DPM_STATE_TABLE;    in trinity_set_ds_dividers() local
     570  value = RREG32_SMC(SMU_SCLK_DPM_STATE_0_CNTL_1 + ix);    in trinity_set_ds_dividers()
     573  WREG32_SMC(SMU_SCLK_DPM_STATE_0_CNTL_1 + ix, value);    in trinity_set_ds_dividers()
     580  u32 ix = index * TRINITY_SIZEOF_DPM_STATE_TABLE;    in trinity_set_ss_dividers() local
     582  value = RREG32_SMC(SMU_SCLK_DPM_STATE_0_CNTL_1 + ix);    in trinity_set_ss_dividers()
          [all …]
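trinity_set_divider_value() and the related helpers address a bank of per-state registers: the register for DPM state N is the state-0 register plus N times a per-state stride. A sketch of that offset arithmetic with made-up base and stride values:

    #include <stdio.h>

    #define STATE_0_CNTL_0   0x600                  /* hypothetical state-0 register */
    #define SIZEOF_DPM_STATE 0x40                   /* hypothetical per-state stride */

    int main(void)
    {
            unsigned int index;

            for (index = 0; index < 4; index++) {
                    unsigned int ix = index * SIZEOF_DPM_STATE;

                    printf("state %u -> register 0x%x\n", index, STATE_0_CNTL_0 + ix);
            }
            return 0;
    }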
|
/linux/drivers/infiniband/core/

  cache.c
     383  struct ib_gid_table *table, int ix)    in del_gid() argument
     391  ix, table->data_vec[ix]->attr.gid.raw);    in del_gid()
     394  entry = table->data_vec[ix];    in del_gid()
     400  table->data_vec[ix] = NULL;    in del_gid()
     553  int ix;    in __ib_cache_gid_add() local
     566  ix = find_gid(table, gid, attr, default_gid, mask, &empty);    in __ib_cache_gid_add()
     567  if (ix >= 0)    in __ib_cache_gid_add()
     607  int ix;    in _ib_cache_gid_del() local
     613  ix = find_gid(table, gid, attr, default_gid, mask, NULL);    in _ib_cache_gid_del()
     614  if (ix < 0) {    in _ib_cache_gid_del()
          [all …]
|
/linux/drivers/s390/char/

  con3215.c
     176  int len, count, ix, lines;    in raw3215_mk_write_req() local
     201  ix = req->start;    in raw3215_mk_write_req()
     202  while (lines < RAW3215_MAX_NEWLINE && ix != raw->head) {    in raw3215_mk_write_req()
     203  if (raw->buffer[ix] == 0x15)    in raw3215_mk_write_req()
     205  ix = (ix + 1) & (RAW3215_BUFFER_SIZE - 1);    in raw3215_mk_write_req()
     207  len = ((ix - 1 - req->start) & (RAW3215_BUFFER_SIZE - 1)) + 1;    in raw3215_mk_write_req()
     214  req->delayable = (ix == raw->head) && (len < RAW3215_MIN_WRITE);    in raw3215_mk_write_req()
     216  ix = req->start;    in raw3215_mk_write_req()
     222  ccw->cda = virt_to_dma32(raw->buffer + ix);    in raw3215_mk_write_req()
     224  if (ix + count > RAW3215_BUFFER_SIZE)    in raw3215_mk_write_req()
          [all …]
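raw3215_mk_write_req() measures how much data sits between two indices of a power-of-two ring buffer with a masked subtraction, which stays correct even when the region wraps past the end of the buffer. A small sketch of the length calculation; the buffer size is illustrative:

    #include <stdio.h>

    #define BUFFER_SIZE 16                          /* must be a power of two */

    int main(void)
    {
            unsigned int start = 14, ix = 3;        /* region wraps around the end */

            /* Bytes from "start" up to, and including, index ix - 1. */
            unsigned int len = ((ix - 1 - start) & (BUFFER_SIZE - 1)) + 1;

            printf("len=%u\n", len);                /* indices 14, 15, 0, 1, 2 -> 5 */
            return 0;
    }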
|