/linux/drivers/usb/mtu3/

mtu3_gadget.c
    15  __releases(mep->mtu->lock)    in mtu3_req_complete()
    16  __acquires(mep->mtu->lock)    in mtu3_req_complete()
    19  struct mtu3 *mtu = mreq->mtu;    in mtu3_req_complete()  local
    29  usb_gadget_unmap_request(&mtu->g, req, mep->is_in);    in mtu3_req_complete()
    31  dev_dbg(mtu->dev, "%s complete req: %p, sts %d, %d/%d\n",    in mtu3_req_complete()
    34  spin_unlock(&mtu->lock);    in mtu3_req_complete()
    36  spin_lock(&mtu->lock);    in mtu3_req_complete()
    46  dev_dbg(mep->mtu->dev, "abort %s's req: sts %d\n", mep->name, status);    in nuke()
    63  struct mtu3 *mtu = mep->mtu;    in mtu3_ep_enable()  local
    74  switch (mtu->g.speed) {    in mtu3_ep_enable()
    [all …]
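
The mtu3_req_complete() hits above show a common gadget-driver pattern: the controller lock (mtu->lock) is dropped around the request-completion callback and re-acquired afterwards, with __releases()/__acquires() annotations so sparse can track the lock state across the call. A minimal userspace sketch of the same "call out without holding the lock" idea follows; struct ctrl, struct request and complete_request() are illustrative stand-ins, not mtu3 API.

/*
 * Sketch only: a userspace analogue of dropping a controller lock around a
 * completion callback, as mtu3_req_complete() does with mtu->lock.
 */
#include <pthread.h>
#include <stdio.h>

struct request {
    int status;
    void (*complete)(struct request *req);  /* user-supplied callback */
};

struct ctrl {
    pthread_mutex_t lock;                   /* plays the role of mtu->lock */
};

/* Called with c->lock held; returns with it held again. */
static void complete_request(struct ctrl *c, struct request *req, int status)
{
    req->status = status;

    /*
     * The callback may block or resubmit requests, so it must not run
     * under the controller lock: drop it, call out, then re-take it.
     */
    pthread_mutex_unlock(&c->lock);
    req->complete(req);
    pthread_mutex_lock(&c->lock);
}

static void on_done(struct request *req)
{
    printf("request done, status %d\n", req->status);
}

int main(void)
{
    struct ctrl c = { .lock = PTHREAD_MUTEX_INITIALIZER };
    struct request req = { .status = -1, .complete = on_done };

    pthread_mutex_lock(&c.lock);
    complete_request(&c, &req, 0);
    pthread_mutex_unlock(&c.lock);
    return 0;
}

Holding the lock across the callback would deadlock as soon as the callback tried to re-queue a request through a path that takes the same lock, which is why the driver annotates and drops it here.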

mtu3_core.c
    45  dev_dbg(mep->mtu->dev, "%s fifo:%#x/%#x, start_bit: %d\n",    in ep_fifo_alloc()
    66  dev_dbg(mep->mtu->dev, "%s size:%#x/%#x, start_bit: %d\n",    in ep_fifo_free()
    71  static inline void mtu3_ss_func_set(struct mtu3 *mtu, bool enable)    in mtu3_ss_func_set()  argument
    75  mtu3_setbits(mtu->mac_base, U3D_USB3_CONFIG, USB3_EN);    in mtu3_ss_func_set()
    77  mtu3_clrbits(mtu->mac_base, U3D_USB3_CONFIG, USB3_EN);    in mtu3_ss_func_set()
    79  dev_dbg(mtu->dev, "USB3_EN = %d\n", !!enable);    in mtu3_ss_func_set()
    83  static inline void mtu3_hs_softconn_set(struct mtu3 *mtu, bool enable)    in mtu3_hs_softconn_set()  argument
    86  mtu3_setbits(mtu->mac_base, U3D_POWER_MANAGEMENT,    in mtu3_hs_softconn_set()
    89  mtu3_clrbits(mtu->mac_base, U3D_POWER_MANAGEMENT,    in mtu3_hs_softconn_set()
    92  dev_dbg(mtu->dev, "SOFTCONN = %d\n", !!enable);    in mtu3_hs_softconn_set()
    [all …]

mtu3_gadget_ep0.c
    18  #define next_ep0_request(mtu) next_request((mtu)->ep0)  argument
    39  static char *decode_ep0_state(struct mtu3 *mtu)    in decode_ep0_state()  argument
    41  switch (mtu->ep0_state) {    in decode_ep0_state()
    57  static void ep0_req_giveback(struct mtu3 *mtu, struct usb_request *req)    in ep0_req_giveback()  argument
    59  mtu3_req_complete(mtu->ep0, req, 0);    in ep0_req_giveback()
    63  forward_to_driver(struct mtu3 *mtu, const struct usb_ctrlrequest *setup)    in forward_to_driver()  argument
    64  __releases(mtu->lock)    in forward_to_driver()
    65  __acquires(mtu->lock)    in forward_to_driver()
    69  if (!mtu->gadget_driver || !mtu->async_callbacks)    in forward_to_driver()
    72  spin_unlock(&mtu->lock);    in forward_to_driver()
    [all …]

mtu3_qmu.c
    38  #define GPD_RX_BUF_LEN(mtu, x) \  argument
    41  ((mtu)->gen2cp) ? GPD_RX_BUF_LEN_EL(x_) : GPD_RX_BUF_LEN_OG(x_); \
    46  #define GPD_DATA_LEN(mtu, x) \  argument
    49  ((mtu)->gen2cp) ? GPD_DATA_LEN_EL(x_) : GPD_DATA_LEN_OG(x_); \
    57  #define GPD_EXT_NGP(mtu, x) \  argument
    60  ((mtu)->gen2cp) ? GPD_EXT_NGP_EL(x_) : GPD_EXT_NGP_OG(x_); \
    63  #define GPD_EXT_BUF(mtu, x) \  argument
    66  ((mtu)->gen2cp) ? GPD_EXT_BUF_EL(x_) : GPD_EXT_BUF_OG(x_); \
    171  gpd = dma_pool_zalloc(mep->mtu->qmu_gpd_pool, GFP_ATOMIC, &ring->dma);    in mtu3_gpd_ring_alloc()
    184  dma_pool_free(mep->mtu->qmu_gpd_pool,    in mtu3_gpd_ring_free()
    [all …]

mtu3_debugfs.c
    81  struct mtu3 *mtu = sf->private;    in mtu3_link_state_show()  local
    82  void __iomem *mbase = mtu->mac_base;    in mtu3_link_state_show()
    93  struct mtu3 *mtu = sf->private;    in mtu3_ep_used_show()  local
    99  spin_lock_irqsave(&mtu->lock, flags);    in mtu3_ep_used_show()
    101  for (i = 0; i < mtu->num_eps; i++) {    in mtu3_ep_used_show()
    102  mep = mtu->in_eps + i;    in mtu3_ep_used_show()
    108  mep = mtu->out_eps + i;    in mtu3_ep_used_show()
    116  spin_unlock_irqrestore(&mtu->lock, flags);    in mtu3_ep_used_show()
    124  static void mtu3_debugfs_regset(struct mtu3 *mtu, void __iomem *base,    in mtu3_debugfs_regset()  argument
    131  mregs = devm_kzalloc(mtu->dev, sizeof(*mregs), GFP_KERNEL);    in mtu3_debugfs_regset()
    [all …]

mtu3.h
    277  struct mtu3 *mtu;  member
    300  struct mtu3 *mtu;  member
    423  int mtu3_config_ep(struct mtu3 *mtu, struct mtu3_ep *mep,
    425  void mtu3_deconfig_ep(struct mtu3 *mtu, struct mtu3_ep *mep);
    427  void mtu3_start(struct mtu3 *mtu);
    428  void mtu3_stop(struct mtu3 *mtu);
    429  void mtu3_dev_on_off(struct mtu3 *mtu, int is_on);
    431  int mtu3_gadget_setup(struct mtu3 *mtu);
    432  void mtu3_gadget_cleanup(struct mtu3 *mtu);
    433  void mtu3_gadget_reset(struct mtu3 *mtu);
    [all …]

/linux/drivers/clocksource/

sh_mtu2.c
    33  struct sh_mtu2_device *mtu;  member
    161  return ioread8(ch->mtu->mapbase + 0x280);    in sh_mtu2_read()
    177  return iowrite8(value, ch->mtu->mapbase + 0x280);    in sh_mtu2_write()
    192  raw_spin_lock_irqsave(&ch->mtu->lock, flags);    in sh_mtu2_start_stop_ch()
    201  raw_spin_unlock_irqrestore(&ch->mtu->lock, flags);    in sh_mtu2_start_stop_ch()
    210  pm_runtime_get_sync(&ch->mtu->pdev->dev);    in sh_mtu2_enable()
    211  dev_pm_syscore_device(&ch->mtu->pdev->dev, true);    in sh_mtu2_enable()
    214  ret = clk_enable(ch->mtu->clk);    in sh_mtu2_enable()
    216  dev_err(&ch->mtu->pdev->dev, "ch%u: cannot enable clock\n",    in sh_mtu2_enable()
    224  rate = clk_get_rate(ch->mtu->clk) / 64;    in sh_mtu2_enable()
    [all …]

/linux/tools/testing/selftests/net/

pmtu.sh
    1100  mtu() {  function
    1103  mtu="${3}"
    1105  ${ns_cmd} ip link set dev ${dev} mtu ${mtu}
    1179  mtu "${ns_a}" veth_A-R1 2000
    1180  mtu "${ns_r1}" veth_R1-A 2000
    1181  mtu "${ns_r1}" veth_R1-B 1400
    1182  mtu "${ns_b}" veth_B-R1 1400
    1184  mtu "${ns_a}" veth_A-R2 2000
    1185  mtu "${ns_r2}" veth_R2-A 2000
    1186  mtu "${ns_r2}" veth_R2-B 1500
    [all …]

fib_nexthop_multiprefix.sh
    154  local mtu=$2
    156  run_cmd ip -netns h${hostid} li set eth0 mtu ${mtu}
    157  run_cmd ip -netns ${r1} li set eth${hostid} mtu ${mtu}
    166  local mtu=$2
    181  echo " cache .* mtu ${mtu}"
    186  grep -q "cache .* mtu ${mtu}"
    189  log_test $rc 0 "IPv4: host 0 to host ${i}, mtu ${mtu}"
    195  local mtu=$2
    210  echo " ${dst}.* via ${r1_ip} dev eth0 src ${h0_ip} .* mtu ${mtu}"
    215  grep -q "${dst}.* via ${r1_ip} dev eth0 src ${h0_ip} .* mtu ${mtu}"
    [all …]

/linux/tools/testing/selftests/net/forwarding/

ipip_lib.sh
    319  local mtu=$1
    321  ip link set mtu $mtu dev $h1
    322  ip link set mtu $mtu dev $ol1
    323  ip link set mtu $mtu dev g1a
    324  ip link set mtu $mtu dev $ul1
    325  ip link set mtu $mtu dev $ul1.111
    326  ip link set mtu $mtu dev $h2
    327  ip link set mtu $mtu dev $ol2
    328  ip link set mtu $mtu dev g2a
    329  ip link set mtu $mtu dev $ul2
    [all …]

min_max_mtu.sh
    110  local mtu=$(min_max_mtu_get_if ${NETIFS[p1]} $min_max)
    116  if [ $current_mtu -ne $mtu ]; then
    125  local mtu=$1; shift
    128  mtu_set $dev $mtu 2>/dev/null
    129  check_err_fail $should_fail $? "Set MTU $mtu for $dev"
    134  local mtu=$1; shift
    138  mtu_set_if ${NETIFS[p$i]} $mtu
    139  mtu_set_if ${NETIFS[p$i]}.10 $mtu
    155  local mtu=$1; shift
    160  local pkt_size=$((mtu - ping_headers_len))
    [all …]

ip6gre_lib.sh
    411  local mtu=$1
    413  ip link set mtu $mtu dev $h1
    414  ip link set mtu $mtu dev $ol1
    415  ip link set mtu $mtu dev g1a
    416  ip link set mtu $mtu dev $ul1
    417  ip link set mtu $mtu dev $ul1.111
    418  ip link set mtu $mtu dev $h2
    419  ip link set mtu $mtu dev $ol2
    420  ip link set mtu $mtu dev g2a
    421  ip link set mtu $mtu dev $ul2
    [all …]

/linux/tools/testing/selftests/bpf/prog_tests/

check_mtu.c
    104  static void test_check_mtu_xdp(__u32 mtu, __u32 ifindex)    in test_check_mtu_xdp()  argument
    114  skel->rodata->GLOBAL_USER_MTU = mtu;    in test_check_mtu_xdp()
    121  test_check_mtu_run_xdp(skel, skel->progs.xdp_use_helper, mtu);    in test_check_mtu_xdp()
    122  test_check_mtu_run_xdp(skel, skel->progs.xdp_exceed_mtu, mtu);    in test_check_mtu_xdp()
    123  test_check_mtu_run_xdp(skel, skel->progs.xdp_minus_delta, mtu);    in test_check_mtu_xdp()
    124  test_check_mtu_run_xdp(skel, skel->progs.xdp_input_len, mtu);    in test_check_mtu_xdp()
    125  test_check_mtu_run_xdp(skel, skel->progs.xdp_input_len_exceed, mtu);    in test_check_mtu_xdp()
    157  static void test_check_mtu_tc(__u32 mtu, __u32 ifindex)    in test_check_mtu_tc()  argument
    167  skel->rodata->GLOBAL_USER_MTU = mtu;    in test_check_mtu_tc()
    174  test_check_mtu_run_tc(skel, skel->progs.tc_use_helper, mtu);    in test_check_mtu_tc()
    [all …]

/linux/drivers/mfd/

rz-mtu3.c
    67  struct rz_mtu3 *mtu = dev_get_drvdata(ch->dev->parent);    in rz_mtu3_shared_reg_read()  local
    68  struct rz_mtu3_priv *priv = mtu->priv_data;    in rz_mtu3_shared_reg_read()
    79  struct rz_mtu3 *mtu = dev_get_drvdata(ch->dev->parent);    in rz_mtu3_8bit_ch_read()  local
    80  struct rz_mtu3_priv *priv = mtu->priv_data;    in rz_mtu3_8bit_ch_read()
    91  struct rz_mtu3 *mtu = dev_get_drvdata(ch->dev->parent);    in rz_mtu3_16bit_ch_read()  local
    92  struct rz_mtu3_priv *priv = mtu->priv_data;    in rz_mtu3_16bit_ch_read()
    107  struct rz_mtu3 *mtu = dev_get_drvdata(ch->dev->parent);    in rz_mtu3_32bit_ch_read()  local
    108  struct rz_mtu3_priv *priv = mtu->priv_data;    in rz_mtu3_32bit_ch_read()
    122  struct rz_mtu3 *mtu = dev_get_drvdata(ch->dev->parent);    in rz_mtu3_8bit_ch_write()  local
    123  struct rz_mtu3_priv *priv = mtu->priv_data;    in rz_mtu3_8bit_ch_write()
    [all …]

/linux/drivers/infiniband/sw/rxe/

rxe_param.h
    14  static inline enum ib_mtu rxe_mtu_int_to_enum(int mtu)    in rxe_mtu_int_to_enum()  argument
    16  if (mtu < 256)    in rxe_mtu_int_to_enum()
    18  else if (mtu < 512)    in rxe_mtu_int_to_enum()
    20  else if (mtu < 1024)    in rxe_mtu_int_to_enum()
    22  else if (mtu < 2048)    in rxe_mtu_int_to_enum()
    24  else if (mtu < 4096)    in rxe_mtu_int_to_enum()
    31  static inline enum ib_mtu eth_mtu_int_to_enum(int mtu)    in eth_mtu_int_to_enum()  argument
    33  mtu -= RXE_MAX_HDR_LENGTH;    in eth_mtu_int_to_enum()
    35  return rxe_mtu_int_to_enum(mtu);    in eth_mtu_int_to_enum()
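
The return statements of rxe_mtu_int_to_enum() are truncated in the listing, but the visible comparisons form a threshold ladder: each power-of-two bucket of byte counts corresponds to one IB MTU enum value, and eth_mtu_int_to_enum() first subtracts RXE_MAX_HDR_LENGTH before doing the lookup. The sketch below reproduces only the ladder shape under that assumption; the enum is a local stand-in, not enum ib_mtu, and the kernel's actual return values are not copied from the elided lines.

/*
 * Illustrative only: a threshold ladder mapping a byte MTU to the largest
 * power-of-two IB payload size that fits, mirroring the comparisons visible
 * in rxe_mtu_int_to_enum(). The enum below is a local stand-in.
 */
#include <stdio.h>

enum example_ib_mtu {
    EX_MTU_INVALID = 0,
    EX_MTU_256,
    EX_MTU_512,
    EX_MTU_1024,
    EX_MTU_2048,
    EX_MTU_4096,
};

static enum example_ib_mtu mtu_int_to_enum(int mtu)
{
    if (mtu < 256)
        return EX_MTU_INVALID;      /* too small for any IB MTU */
    else if (mtu < 512)
        return EX_MTU_256;
    else if (mtu < 1024)
        return EX_MTU_512;
    else if (mtu < 2048)
        return EX_MTU_1024;
    else if (mtu < 4096)
        return EX_MTU_2048;
    else
        return EX_MTU_4096;
}

int main(void)
{
    printf("1500 -> bucket %d\n", mtu_int_to_enum(1500));   /* EX_MTU_1024 */
    printf("9000 -> bucket %d\n", mtu_int_to_enum(9000));   /* EX_MTU_4096 */
    return 0;
}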

/linux/net/rxrpc/

peer_event.c
    103  static void rxrpc_adjust_mtu(struct rxrpc_peer *peer, unsigned int mtu)    in rxrpc_adjust_mtu()  argument
    106  if (mtu > 0 && peer->if_mtu == 65535 && mtu < peer->if_mtu)    in rxrpc_adjust_mtu()
    107  peer->if_mtu = mtu;    in rxrpc_adjust_mtu()
    109  if (mtu == 0) {    in rxrpc_adjust_mtu()
    111  mtu = peer->if_mtu;    in rxrpc_adjust_mtu()
    112  if (mtu > 1500) {    in rxrpc_adjust_mtu()
    113  mtu >>= 1;    in rxrpc_adjust_mtu()
    114  if (mtu < 1500)    in rxrpc_adjust_mtu()
    115  mtu = 1500;    in rxrpc_adjust_mtu()
    117  mtu -= 100;    in rxrpc_adjust_mtu()
    [all …]
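
The rxrpc_adjust_mtu() hits show the fallback taken when an ICMP report arrives without a usable MTU value (mtu == 0): start from the peer's current interface MTU, halve it if it is jumbo-sized but never go below 1500, otherwise shave off 100 bytes. A self-contained sketch of that back-off step, assuming only what is visible in the fragment (the elided branches are not reconstructed):

/*
 * Sketch of the back-off visible in rxrpc_adjust_mtu(): when no MTU is
 * reported, derive a smaller guess from the current value. Standalone
 * illustration, not the rxrpc peer structure or API.
 */
#include <stdio.h>

static unsigned int backoff_mtu(unsigned int reported, unsigned int current_mtu)
{
    unsigned int mtu = reported;

    if (mtu == 0) {
        mtu = current_mtu;
        if (mtu > 1500) {
            mtu >>= 1;          /* halve an oversized MTU ... */
            if (mtu < 1500)
                mtu = 1500;     /* ... but not below an Ethernet-sized one */
        } else {
            mtu -= 100;         /* otherwise step down modestly */
        }
    }
    return mtu;
}

int main(void)
{
    printf("%u\n", backoff_mtu(0, 9000));   /* 4500 */
    printf("%u\n", backoff_mtu(0, 1600));   /* 1500, clamped */
    printf("%u\n", backoff_mtu(0, 1500));   /* 1400 */
    return 0;
}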

/linux/net/ipv6/

xfrm6_output.c
    19  void xfrm6_local_rxpmtu(struct sk_buff *skb, u32 mtu)    in xfrm6_local_rxpmtu()  argument
    27  ipv6_local_rxpmtu(sk, &fl6, mtu);    in xfrm6_local_rxpmtu()
    30  void xfrm6_local_error(struct sk_buff *skb, u32 mtu)    in xfrm6_local_error()  argument
    40  ipv6_local_error(sk, EMSGSIZE, &fl6, mtu);    in xfrm6_local_error()
    65  unsigned int mtu;    in __xfrm6_output()  local
    79  mtu = ip6_skb_dst_mtu(skb);    in __xfrm6_output()
    81  mtu = dst_mtu(skb_dst(skb));    in __xfrm6_output()
    83  toobig = skb->len > mtu && !skb_is_gso(skb);    in __xfrm6_output()
    86  xfrm6_local_rxpmtu(skb, mtu);    in __xfrm6_output()
    93  xfrm_local_error(skb, mtu);    in __xfrm6_output()

ip6_output.c
    148  struct sk_buff *skb, unsigned int mtu)    in ip6_finish_output_gso_slowpath_drop()  argument
    175  err = segs->len > mtu ?    in ip6_finish_output_gso_slowpath_drop()
    186  struct sk_buff *skb, unsigned int mtu)    in ip6_finish_output_gso()  argument
    189  !skb_gso_validate_network_len(skb, mtu))    in ip6_finish_output_gso()
    190  return ip6_finish_output_gso_slowpath_drop(net, sk, skb, mtu);    in ip6_finish_output_gso()
    197  unsigned int mtu;    in __ip6_finish_output()  local
    207  mtu = ip6_skb_dst_mtu(skb);    in __ip6_finish_output()
    209  return ip6_finish_output_gso(net, sk, skb, mtu);    in __ip6_finish_output()
    211  if (skb->len > mtu ||    in __ip6_finish_output()
    283  u32 mtu;    in ip6_xmit()  local
    [all …]

netfilter.c
    134  unsigned int mtu, hlen;    in br_ip6_fragment()  local
    144  mtu = skb->dev->mtu;    in br_ip6_fragment()
    145  if (frag_max_size > mtu ||    in br_ip6_fragment()
    149  mtu = frag_max_size;    in br_ip6_fragment()
    150  if (mtu < hlen + sizeof(struct frag_hdr) + 8)    in br_ip6_fragment()
    152  mtu -= hlen + sizeof(struct frag_hdr);    in br_ip6_fragment()
    167  if (first_len - hlen > mtu ||    in br_ip6_fragment()
    175  if (frag2->len > mtu ||    in br_ip6_fragment()
    216  ip6_frag_init(skb, hlen, mtu, skb->dev->needed_tailroom,    in br_ip6_fragment()
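
The br_ip6_fragment() hits outline a payload-budget calculation: the working MTU starts at the device MTU, the recorded frag_max_size must not exceed it, there must be room for the unfragmentable part (hlen) plus the 8-byte fragment header plus at least 8 bytes of data, and what remains is the per-fragment payload. A hedged, standalone rendering of that arithmetic; frag_payload_budget() is my own helper and the error paths elided in the listing are guessed at:

/*
 * Illustrative arithmetic only, not the kernel fragmentation code.
 * FRAG_HDR_LEN stands in for sizeof(struct frag_hdr), which is 8 bytes.
 */
#include <stdio.h>

#define FRAG_HDR_LEN 8

/* Per-fragment payload budget, or -1 if fragmentation cannot work at all. */
static int frag_payload_budget(unsigned int dev_mtu, unsigned int frag_max_size,
                               unsigned int hlen)
{
    unsigned int mtu = dev_mtu;

    if (frag_max_size > mtu)
        return -1;      /* recorded fragment size exceeds the device MTU */
    mtu = frag_max_size;
    if (mtu < hlen + FRAG_HDR_LEN + 8)
        return -1;      /* no room for even 8 bytes of payload */
    return mtu - (hlen + FRAG_HDR_LEN);
}

int main(void)
{
    /* 1500-byte device, recorded max fragment size 1280, 40-byte IPv6 header */
    printf("budget = %d\n", frag_payload_budget(1500, 1280, 40));   /* 1232 */
    return 0;
}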

/linux/drivers/pwm/

pwm-rz-mtu3.c
    57  struct rz_mtu3_channel *mtu;  member
    100  *pv_val = rz_mtu3_16bit_ch_read(priv->mtu, reg_pv_offset);    in rz_mtu3_pwm_read_tgr_registers()
    101  *dc_val = rz_mtu3_16bit_ch_read(priv->mtu, reg_dc_offset);    in rz_mtu3_pwm_read_tgr_registers()
    108  rz_mtu3_16bit_ch_write(priv->mtu, reg_pv_offset, pv_val);    in rz_mtu3_pwm_write_tgr_registers()
    109  rz_mtu3_16bit_ch_write(priv->mtu, reg_dc_offset, dc_val);    in rz_mtu3_pwm_write_tgr_registers()
    153  is_channel_en = rz_mtu3_is_enabled(priv->mtu);    in rz_mtu3_pwm_is_ch_enabled()
    158  val = rz_mtu3_8bit_ch_read(priv->mtu, RZ_MTU3_TIORH);    in rz_mtu3_pwm_is_ch_enabled()
    160  val = rz_mtu3_8bit_ch_read(priv->mtu, RZ_MTU3_TIORL);    in rz_mtu3_pwm_is_ch_enabled()
    182  is_mtu3_channel_available = rz_mtu3_request_channel(priv->mtu);    in rz_mtu3_pwm_request()
    207  rz_mtu3_release_channel(priv->mtu);    in rz_mtu3_pwm_free()
    [all …]

/linux/include/net/

ip6_route.h
    189  void ip6_update_pmtu(struct sk_buff *skb, struct net *net, __be32 mtu, int oif,
    191  void ip6_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, __be32 mtu);
    207  void rt6_mtu_change(struct net_device *dev, unsigned int mtu);
    274  unsigned int mtu;    in ip6_skb_dst_mtu()  local
    277  mtu = READ_ONCE(dst->dev->mtu);    in ip6_skb_dst_mtu()
    278  mtu -= lwtunnel_headroom(dst->lwtstate, mtu);    in ip6_skb_dst_mtu()
    280  mtu = dst_mtu(dst);    in ip6_skb_dst_mtu()
    282  return mtu;    in ip6_skb_dst_mtu()
    330  unsigned int mtu;    in ip6_dst_mtu_maybe_forward()  local
    333  mtu = dst_metric_raw(dst, RTAX_MTU);    in ip6_dst_mtu_maybe_forward()
    [all …]

/linux/net/sched/

sch_teql.c
    193  dev->mtu < m->dev->mtu)    in teql_qdisc_init()
    202  if (dev->mtu < m->dev->mtu)    in teql_qdisc_init()
    203  m->dev->mtu = dev->mtu;    in teql_qdisc_init()
    210  m->dev->mtu = dev->mtu;    in teql_qdisc_init()
    359  int mtu = 0xFFFE;    in teql_master_open()  local
    374  if (slave->mtu < mtu)    in teql_master_open()
    375  mtu = slave->mtu;    in teql_master_open()
    391  m->dev->mtu = mtu;    in teql_master_open()
    422  if (new_mtu > qdisc_dev(q)->mtu)    in teql_master_mtu()
    427  WRITE_ONCE(dev->mtu, new_mtu);    in teql_master_mtu()
    [all …]
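
The sch_teql hits show the master device's MTU being tied to the smallest slave: enslaving a device with a smaller MTU lowers the master's, teql_master_open() recomputes the minimum over all slaves starting from 0xFFFE, and teql_master_mtu() rejects a new MTU larger than a slave allows. A standalone sketch of that minimum computation (struct slave_dev and master_mtu() are illustrative names, not the teql code):

/*
 * Sketch: a master device's MTU as the minimum of its slaves', echoing the
 * loop in teql_master_open(). Standalone illustration only.
 */
#include <stdio.h>
#include <stddef.h>

struct slave_dev {
    const char *name;
    unsigned int mtu;
};

static unsigned int master_mtu(const struct slave_dev *slaves, size_t n)
{
    unsigned int mtu = 0xFFFE;      /* same "no limit yet" starting point */
    size_t i;

    for (i = 0; i < n; i++)
        if (slaves[i].mtu < mtu)
            mtu = slaves[i].mtu;
    return mtu;
}

int main(void)
{
    struct slave_dev slaves[] = {
        { "eth0", 1500 },
        { "eth1", 9000 },
        { "eth2", 1400 },
    };

    printf("master mtu = %u\n",
           master_mtu(slaves, sizeof(slaves) / sizeof(slaves[0])));    /* 1400 */
    return 0;
}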

/linux/net/ipv4/

ip_forward.c
    43  static bool ip_exceeds_mtu(const struct sk_buff *skb, unsigned int mtu)    in ip_exceeds_mtu()  argument
    45  if (skb->len <= mtu)    in ip_exceeds_mtu()
    52  if (unlikely(IPCB(skb)->frag_max_size > mtu))    in ip_exceeds_mtu()
    58  if (skb_is_gso(skb) && skb_gso_validate_network_len(skb, mtu))    in ip_exceeds_mtu()
    85  u32 mtu;    in ip_forward()  local
    134  mtu = ip_dst_mtu_maybe_forward(&rt->dst, true);    in ip_forward()
    135  if (ip_exceeds_mtu(skb, mtu)) {    in ip_forward()
    138  htonl(mtu));    in ip_forward()
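
ip_exceeds_mtu() decides whether a forwarded packet needs fragmentation handling: anything no longer than the egress MTU passes, and a GSO aggregate passes as long as each resegmented packet would fit (skb_gso_validate_network_len()); only when the check fails does ip_forward() emit the ICMP "fragmentation needed" error carrying htonl(mtu). Below is a deliberately simplified model of that decision; the DF-bit and frag_max_size handling is not reproduced, and struct pkt is not an sk_buff:

/*
 * Deliberately simplified model of ip_exceeds_mtu(): a packet only "exceeds"
 * the egress MTU if neither its own length nor, for a GSO aggregate, its
 * per-segment length fits.
 */
#include <stdbool.h>
#include <stdio.h>

struct pkt {
    unsigned int len;           /* aggregate length as queued */
    bool is_gso;                /* bundle of several segments? */
    unsigned int gso_seg_len;   /* network-layer length of one segment */
};

static bool exceeds_mtu(const struct pkt *p, unsigned int mtu)
{
    if (p->len <= mtu)
        return false;           /* fits as-is */

    /* A GSO aggregate is fine as long as each resegmented packet fits. */
    if (p->is_gso && p->gso_seg_len <= mtu)
        return false;

    return true;                /* needs fragmentation or an ICMP error */
}

int main(void)
{
    struct pkt tso = { .len = 64000, .is_gso = true, .gso_seg_len = 1500 };
    struct pkt big = { .len = 1600, .is_gso = false };

    printf("tso exceeds 1500? %d\n", exceeds_mtu(&tso, 1500));  /* 0 */
    printf("big exceeds 1500? %d\n", exceeds_mtu(&big, 1500));  /* 1 */
    return 0;
}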

/linux/drivers/infiniband/hw/irdma/

main.c
    55  static void irdma_log_invalid_mtu(u16 mtu, struct irdma_sc_dev *dev)    in irdma_log_invalid_mtu()  argument
    57  if (mtu < IRDMA_MIN_MTU_IPV4)    in irdma_log_invalid_mtu()
    58  …rn(to_ibdev(dev), "MTU setting [%d] too low for RDMA traffic. Minimum MTU is 576 for IPv4\n", mtu);    in irdma_log_invalid_mtu()
    59  else if (mtu < IRDMA_MIN_MTU_IPV6)    in irdma_log_invalid_mtu()
    60  …(to_ibdev(dev), "MTU setting [%d] too low for RDMA traffic. Minimum MTU is 1280 for IPv6\n", mtu);    in irdma_log_invalid_mtu()
    94  ibdev_dbg(&iwdev->ibdev, "CLNT: new MTU = %d\n", iwdev->netdev->mtu);    in irdma_iidc_event_handler()
    95  if (iwdev->vsi.mtu != iwdev->netdev->mtu) {    in irdma_iidc_event_handler()
    96  l2params.mtu = iwdev->netdev->mtu;    in irdma_iidc_event_handler()
    98  irdma_log_invalid_mtu(l2params.mtu, &iwdev->rf->sc_dev);    in irdma_iidc_event_handler()
    288  l2params.mtu = iwdev->netdev->mtu;    in irdma_probe()

/linux/include/rdma/

ib_addr.h
    174  static inline enum ib_mtu iboe_get_mtu(int mtu)    in iboe_get_mtu()  argument
    179  mtu = mtu - (IB_GRH_BYTES + IB_UDP_BYTES + IB_BTH_BYTES +    in iboe_get_mtu()
    183  if (mtu >= ib_mtu_enum_to_int(IB_MTU_4096))    in iboe_get_mtu()
    185  else if (mtu >= ib_mtu_enum_to_int(IB_MTU_2048))    in iboe_get_mtu()
    187  else if (mtu >= ib_mtu_enum_to_int(IB_MTU_1024))    in iboe_get_mtu()
    189  else if (mtu >= ib_mtu_enum_to_int(IB_MTU_512))    in iboe_get_mtu()
    191  else if (mtu >= ib_mtu_enum_to_int(IB_MTU_256))    in iboe_get_mtu()
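
iboe_get_mtu() subtracts the RoCE transport overhead from the netdev MTU (the expression is cut off after IB_GRH_BYTES + IB_UDP_BYTES + IB_BTH_BYTES) and then picks the largest IB MTU enum that still fits. As a worked example assuming only the standard header sizes (GRH 40, UDP 8, BTH 12 bytes; the kernel subtracts further terms that are not visible here), a plain 1500-byte Ethernet link ends up at the 1024-byte IB MTU:

/*
 * Worked example under assumptions: how much IB payload fits in a 1500-byte
 * Ethernet MTU once RoCE headers are subtracted. 40 (GRH), 8 (UDP) and
 * 12 (BTH) bytes are the standard header sizes; iboe_get_mtu() subtracts
 * additional terms that are cut off in the listing above.
 */
#include <stdio.h>

#define GRH_BYTES   40
#define UDP_BYTES    8
#define BTH_BYTES   12

/* Largest power-of-two IB MTU (256..4096) not exceeding the payload. */
static int largest_ib_mtu(int payload)
{
    int ib_mtu = 0;
    int candidate;

    for (candidate = 256; candidate <= 4096; candidate <<= 1)
        if (payload >= candidate)
            ib_mtu = candidate;
    return ib_mtu;
}

int main(void)
{
    int netdev_mtu = 1500;
    int payload = netdev_mtu - (GRH_BYTES + UDP_BYTES + BTH_BYTES);

    printf("payload %d -> IB MTU %d\n", payload, largest_ib_mtu(payload));
    /* 1440 -> 1024: a plain 1500-byte link maps to the 1024-byte IB MTU */
    return 0;
}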