Lines matching refs: cf_sk — every line in the Linux kernel's CAIF socket implementation (net/caif/caif_socket.c) that references the caifsock pointer cf_sk. Each entry gives the source line number, the code on that line, and the enclosing function; "argument" marks functions that receive cf_sk as a parameter, "local" marks functions that derive it locally (typically via container_of()).
58 static int rx_flow_is_on(struct caifsock *cf_sk) in rx_flow_is_on() argument
60 return test_bit(RX_FLOW_ON_BIT, &cf_sk->flow_state); in rx_flow_is_on()
63 static int tx_flow_is_on(struct caifsock *cf_sk) in tx_flow_is_on() argument
65 return test_bit(TX_FLOW_ON_BIT, &cf_sk->flow_state); in tx_flow_is_on()
68 static void set_rx_flow_off(struct caifsock *cf_sk) in set_rx_flow_off() argument
70 clear_bit(RX_FLOW_ON_BIT, &cf_sk->flow_state); in set_rx_flow_off()
73 static void set_rx_flow_on(struct caifsock *cf_sk) in set_rx_flow_on() argument
75 set_bit(RX_FLOW_ON_BIT, &cf_sk->flow_state); in set_rx_flow_on()
78 static void set_tx_flow_off(struct caifsock *cf_sk) in set_tx_flow_off() argument
80 clear_bit(TX_FLOW_ON_BIT, &cf_sk->flow_state); in set_tx_flow_off()
83 static void set_tx_flow_on(struct caifsock *cf_sk) in set_tx_flow_on() argument
85 set_bit(TX_FLOW_ON_BIT, &cf_sk->flow_state); in set_tx_flow_on()
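The six fragments above are complete one-line helpers: the socket's RX and TX flow-control states are single bits in cf_sk->flow_state, toggled with the kernel's atomic bitops so no extra lock is needed. A reconstruction sketch follows; the bit values, the trimmed struct layout, and the unsigned long type of flow_state are assumptions (test_bit() and friends operate on unsigned long bitmaps). Only the RX trio is shown; the TX trio is symmetric.

#include <linux/bitops.h>
#include <net/sock.h>
#include <net/caif/caif_layer.h>

/* Trimmed, assumed layout -- the real struct has more fields. */
struct caifsock {
        struct sock sk;           /* must be first member (container_of) */
        struct cflayer layer;
        unsigned long flow_state; /* RX/TX flow bits, assumed type */
        struct mutex readlock;
        int headroom, tailroom, maxframe;
};

#define RX_FLOW_ON_BIT  1         /* assumed values; the real ones are */
#define TX_FLOW_ON_BIT  2         /* defined near the top of the file  */

static int rx_flow_is_on(struct caifsock *cf_sk)
{
        return test_bit(RX_FLOW_ON_BIT, &cf_sk->flow_state);
}

static void set_rx_flow_on(struct caifsock *cf_sk)
{
        set_bit(RX_FLOW_ON_BIT, &cf_sk->flow_state);
}

static void set_rx_flow_off(struct caifsock *cf_sk)
{
        clear_bit(RX_FLOW_ON_BIT, &cf_sk->flow_state);
}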
90 struct caifsock *cf_sk; in caif_read_lock() local
91 cf_sk = container_of(sk, struct caifsock, sk); in caif_read_lock()
92 mutex_lock(&cf_sk->readlock); in caif_read_lock()
97 struct caifsock *cf_sk; in caif_read_unlock() local
98 cf_sk = container_of(sk, struct caifsock, sk); in caif_read_unlock()
99 mutex_unlock(&cf_sk->readlock); in caif_read_unlock()
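Both read-lock helpers recover the caifsock from its embedded struct sock with container_of() and serialize readers on cf_sk->readlock (initialized in caif_create(), line 1082 below). The reconstructed pair; the struct sock * parameter is an assumption consistent with the container_of() calls:

static void caif_read_lock(struct sock *sk)
{
        struct caifsock *cf_sk;

        cf_sk = container_of(sk, struct caifsock, sk);
        mutex_lock(&cf_sk->readlock);
}

static void caif_read_unlock(struct sock *sk)
{
        struct caifsock *cf_sk;

        cf_sk = container_of(sk, struct caifsock, sk);
        mutex_unlock(&cf_sk->readlock);
}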
102 static int sk_rcvbuf_lowwater(struct caifsock *cf_sk) in sk_rcvbuf_lowwater() argument
105 return cf_sk->sk.sk_rcvbuf / 4; in sk_rcvbuf_lowwater()
110 struct caifsock *cf_sk; in caif_flow_ctrl() local
111 cf_sk = container_of(sk, struct caifsock, sk); in caif_flow_ctrl()
112 if (cf_sk->layer.dn && cf_sk->layer.dn->modemcmd) in caif_flow_ctrl()
113 cf_sk->layer.dn->modemcmd(cf_sk->layer.dn, mode); in caif_flow_ctrl()
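caif_flow_ctrl() pushes a modem flow command down the CAIF layer stack, guarding against a missing lower layer (dn) or a layer without a modemcmd operation. A sketch; the void return type and the int mode parameter are assumptions:

static void caif_flow_ctrl(struct sock *sk, int mode)
{
        struct caifsock *cf_sk;

        cf_sk = container_of(sk, struct caifsock, sk);
        if (cf_sk->layer.dn && cf_sk->layer.dn->modemcmd)
                cf_sk->layer.dn->modemcmd(cf_sk->layer.dn, mode);
}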
125 struct caifsock *cf_sk = container_of(sk, struct caifsock, sk); in caif_queue_rcv_skb() local
129 (unsigned int)sk->sk_rcvbuf && rx_flow_is_on(cf_sk)) { in caif_queue_rcv_skb()
131 atomic_read(&cf_sk->sk.sk_rmem_alloc), in caif_queue_rcv_skb()
132 sk_rcvbuf_lowwater(cf_sk)); in caif_queue_rcv_skb()
133 set_rx_flow_off(cf_sk); in caif_queue_rcv_skb()
141 if (!sk_rmem_schedule(sk, skb, skb->truesize) && rx_flow_is_on(cf_sk)) { in caif_queue_rcv_skb()
142 set_rx_flow_off(cf_sk); in caif_queue_rcv_skb()
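caif_queue_rcv_skb() is where RX backpressure starts: if queuing the skb would reach sk_rcvbuf, or sk_rmem_schedule() refuses to account the memory, the RX flow bit is cleared so the modem can be throttled; sk_rcvbuf_lowwater() above (a quarter of sk_rcvbuf) is the level at which caif_check_flow_release() later turns flow back on. A condensed sketch of the two checks; the CAIF_MODEMCMD_FLOW_OFF_REQ signalling and the queueing tail are assumptions about the elided lines:

static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
        struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);

        /* Hard limit: queued receive memory would hit sk_rcvbuf. */
        if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
            (unsigned int)sk->sk_rcvbuf && rx_flow_is_on(cf_sk)) {
                net_dbg_ratelimited("sending flow OFF (queue len = %d %d)\n",
                                    atomic_read(&cf_sk->sk.sk_rmem_alloc),
                                    sk_rcvbuf_lowwater(cf_sk));
                set_rx_flow_off(cf_sk);
                caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
        }

        /* Soft limit: memory accounting failed while flow was still on. */
        if (!sk_rmem_schedule(sk, skb, skb->truesize) && rx_flow_is_on(cf_sk)) {
                set_rx_flow_off(cf_sk);
                caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
        }

        skb->dev = NULL;
        skb_set_owner_r(skb, sk);               /* charge skb to the socket */
        skb_queue_tail(&sk->sk_receive_queue, skb);     /* assumed tail */
        sk->sk_data_ready(sk);
        return 0;
}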
163 struct caifsock *cf_sk; in caif_sktrecv_cb() local
166 cf_sk = container_of(layr, struct caifsock, layer); in caif_sktrecv_cb()
169 if (unlikely(cf_sk->sk.sk_state != CAIF_CONNECTED)) { in caif_sktrecv_cb()
173 caif_queue_rcv_skb(&cf_sk->sk, skb); in caif_sktrecv_cb()
179 struct caifsock *cf_sk = container_of(layr, struct caifsock, layer); in cfsk_hold() local
180 sock_hold(&cf_sk->sk); in cfsk_hold()
185 struct caifsock *cf_sk = container_of(layr, struct caifsock, layer); in cfsk_put() local
186 sock_put(&cf_sk->sk); in cfsk_put()
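From a cflayer callback the socket is recovered by container_of() on the embedded layer member; cfsk_hold()/cfsk_put() wrap sock_hold()/sock_put() so the CAIF core (see caif_client_register_refcnt() at line 210 below) can pin the socket without knowing about struct sock. A reconstruction sketch; cfpkt_tonative() and the kfree_skb() on the not-connected path are assumptions based on the usual pattern:

static int caif_sktrecv_cb(struct cflayer *layr, struct cfpkt *pkt)
{
        struct caifsock *cf_sk;
        struct sk_buff *skb;

        cf_sk = container_of(layr, struct caifsock, layer);
        skb = cfpkt_tonative(pkt);

        if (unlikely(cf_sk->sk.sk_state != CAIF_CONNECTED)) {
                kfree_skb(skb);
                return 0;
        }
        caif_queue_rcv_skb(&cf_sk->sk, skb);
        return 0;
}

static void cfsk_hold(struct cflayer *layr)
{
        struct caifsock *cf_sk = container_of(layr, struct caifsock, layer);
        sock_hold(&cf_sk->sk);
}

static void cfsk_put(struct cflayer *layr)
{
        struct caifsock *cf_sk = container_of(layr, struct caifsock, layer);
        sock_put(&cf_sk->sk);
}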
194 struct caifsock *cf_sk = container_of(layr, struct caifsock, layer); in caif_ctrl_cb() local
198 set_tx_flow_on(cf_sk); in caif_ctrl_cb()
199 cf_sk->sk.sk_state_change(&cf_sk->sk); in caif_ctrl_cb()
204 set_tx_flow_off(cf_sk); in caif_ctrl_cb()
205 cf_sk->sk.sk_state_change(&cf_sk->sk); in caif_ctrl_cb()
210 caif_client_register_refcnt(&cf_sk->layer, in caif_ctrl_cb()
212 cf_sk->sk.sk_state = CAIF_CONNECTED; in caif_ctrl_cb()
213 set_tx_flow_on(cf_sk); in caif_ctrl_cb()
214 cf_sk->sk.sk_shutdown = 0; in caif_ctrl_cb()
215 cf_sk->sk.sk_state_change(&cf_sk->sk); in caif_ctrl_cb()
220 cf_sk->sk.sk_state = CAIF_DISCONNECTED; in caif_ctrl_cb()
221 cf_sk->sk.sk_state_change(&cf_sk->sk); in caif_ctrl_cb()
226 cf_sk->sk.sk_err = ECONNREFUSED; in caif_ctrl_cb()
227 cf_sk->sk.sk_state = CAIF_DISCONNECTED; in caif_ctrl_cb()
228 cf_sk->sk.sk_shutdown = SHUTDOWN_MASK; in caif_ctrl_cb()
233 set_tx_flow_on(cf_sk); in caif_ctrl_cb()
234 cf_sk->sk.sk_state_change(&cf_sk->sk); in caif_ctrl_cb()
239 cf_sk->sk.sk_shutdown = SHUTDOWN_MASK; in caif_ctrl_cb()
240 cf_sk->sk.sk_err = ECONNRESET; in caif_ctrl_cb()
241 set_rx_flow_on(cf_sk); in caif_ctrl_cb()
242 sk_error_report(&cf_sk->sk); in caif_ctrl_cb()
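caif_ctrl_cb() is the state machine driven by CAIF control indications (it is registered as cf_sk->layer.ctrlcmd in caif_create(), line 1083): flow on/off toggles the TX flag, INIT_RSP completes a connect, DEINIT_RSP and INIT_FAIL_RSP tear the socket down, and REMOTE_SHUTDOWN_IND reports ECONNRESET. A skeleton matching the listed lines; the CAIF_CTRLCMD_* case labels are assumed from include/net/caif/caif_layer.h:

static void caif_ctrl_cb(struct cflayer *layr, enum caif_ctrlcmd flow,
                         int phyid)
{
        struct caifsock *cf_sk = container_of(layr, struct caifsock, layer);

        switch (flow) {
        case CAIF_CTRLCMD_FLOW_ON_IND:
                /* Modem says we may transmit again */
                set_tx_flow_on(cf_sk);
                cf_sk->sk.sk_state_change(&cf_sk->sk);
                break;

        case CAIF_CTRLCMD_FLOW_OFF_IND:
                /* Modem asks us to stop transmitting */
                set_tx_flow_off(cf_sk);
                cf_sk->sk.sk_state_change(&cf_sk->sk);
                break;

        case CAIF_CTRLCMD_INIT_RSP:
                /* Connect request completed: we are now connected */
                caif_client_register_refcnt(&cf_sk->layer,
                                            cfsk_hold, cfsk_put);
                cf_sk->sk.sk_state = CAIF_CONNECTED;
                set_tx_flow_on(cf_sk);
                cf_sk->sk.sk_shutdown = 0;
                cf_sk->sk.sk_state_change(&cf_sk->sk);
                break;

        case CAIF_CTRLCMD_DEINIT_RSP:
                /* Disconnect completed */
                cf_sk->sk.sk_state = CAIF_DISCONNECTED;
                cf_sk->sk.sk_state_change(&cf_sk->sk);
                break;

        case CAIF_CTRLCMD_INIT_FAIL_RSP:
                /* Connect request failed */
                cf_sk->sk.sk_err = ECONNREFUSED;
                cf_sk->sk.sk_state = CAIF_DISCONNECTED;
                cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
                /* Flow-on wakes anyone blocked in caif_wait_for_flow_on() */
                set_tx_flow_on(cf_sk);
                cf_sk->sk.sk_state_change(&cf_sk->sk);
                break;

        case CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND:
                /* Peer shut the channel down: error out the socket */
                cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
                cf_sk->sk.sk_err = ECONNRESET;
                set_rx_flow_on(cf_sk);
                sk_error_report(&cf_sk->sk);
                break;

        default:
                pr_debug("Unexpected flow command %d\n", flow);
        }
}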
252 struct caifsock *cf_sk = container_of(sk, struct caifsock, sk); in caif_check_flow_release() local
254 if (rx_flow_is_on(cf_sk)) in caif_check_flow_release()
257 if (atomic_read(&sk->sk_rmem_alloc) <= sk_rcvbuf_lowwater(cf_sk)) { in caif_check_flow_release()
258 set_rx_flow_on(cf_sk); in caif_check_flow_release()
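This is the release side of the backpressure in caif_queue_rcv_skb(): called from the receive path after data has been consumed, it re-opens RX flow once queued memory drops to the low-water mark. A sketch; the CAIF_MODEMCMD_FLOW_ON_REQ request is an assumption about the elided line:

static void caif_check_flow_release(struct sock *sk)
{
        struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);

        if (rx_flow_is_on(cf_sk))
                return;

        if (atomic_read(&sk->sk_rmem_alloc) <= sk_rcvbuf_lowwater(cf_sk)) {
                set_rx_flow_on(cf_sk);
                caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_ON_REQ);
        }
}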
460 static long caif_wait_for_flow_on(struct caifsock *cf_sk, in caif_wait_for_flow_on() argument
463 struct sock *sk = &cf_sk->sk; in caif_wait_for_flow_on()
467 if (tx_flow_is_on(cf_sk) && in caif_wait_for_flow_on()
468 (!wait_writeable || sock_writeable(&cf_sk->sk))) in caif_wait_for_flow_on()
484 if (cf_sk->sk.sk_state != CAIF_CONNECTED) in caif_wait_for_flow_on()
496 static int transmit_skb(struct sk_buff *skb, struct caifsock *cf_sk, in transmit_skb() argument
503 cfpkt_set_prio(pkt, cf_sk->sk.sk_priority); in transmit_skb()
505 if (cf_sk->layer.dn == NULL) { in transmit_skb()
510 return cf_sk->layer.dn->transmit(cf_sk->layer.dn, pkt); in transmit_skb()
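caif_wait_for_flow_on() (lines 460-484) blocks the sender until the TX flow bit is set and, if wait_writeable is requested, the socket is writeable, giving up if the socket leaves CAIF_CONNECTED. transmit_skb() then stamps the socket priority on the packet and hands it to the next-lower layer; a NULL dn means the stack under the socket is gone. A sketch of the latter; cfpkt_fromnative() and the error return are assumptions consistent with the listing:

static int transmit_skb(struct sk_buff *skb, struct caifsock *cf_sk,
                        int noblock, long timeo)
{
        struct cfpkt *pkt;

        pkt = cfpkt_fromnative(CAIF_DIR_OUT, skb);      /* wrap, don't copy */
        cfpkt_set_prio(pkt, cf_sk->sk.sk_priority);

        if (cf_sk->layer.dn == NULL) {
                kfree_skb(skb);
                return -EINVAL;                 /* no stack below us */
        }

        return cf_sk->layer.dn->transmit(cf_sk->layer.dn, pkt);
}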
518 struct caifsock *cf_sk = container_of(sk, struct caifsock, sk); in caif_seqpkt_sendmsg() local
524 caif_assert(cf_sk); in caif_seqpkt_sendmsg()
546 if (cf_sk->sk.sk_state != CAIF_CONNECTED || in caif_seqpkt_sendmsg()
553 if (len > cf_sk->maxframe && cf_sk->sk.sk_protocol != CAIFPROTO_RFM) in caif_seqpkt_sendmsg()
556 buffer_size = len + cf_sk->headroom + cf_sk->tailroom; in caif_seqpkt_sendmsg()
564 skb_reserve(skb, cf_sk->headroom); in caif_seqpkt_sendmsg()
570 ret = transmit_skb(skb, cf_sk, noblock, timeo); in caif_seqpkt_sendmsg()
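For SOCK_SEQPACKET the message must fit a single frame: anything over cf_sk->maxframe is rejected (except for CAIFPROTO_RFM, which fragments at a lower layer), and the skb is sized with the connection's headroom and tailroom so lower layers never reallocate. A condensed sketch of the send path around the listed lines; the flow-on wait and most error handling are trimmed, and the SOCK_DEAD check and error codes are assumptions:

static int caif_seqpkt_sendmsg(struct socket *sock, struct msghdr *msg,
                               size_t len)
{
        struct sock *sk = sock->sk;
        struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
        int noblock = msg->msg_flags & MSG_DONTWAIT;
        long timeo = sock_sndtimeo(sk, noblock);
        struct sk_buff *skb;
        int buffer_size;
        int ret = 0;

        if (cf_sk->sk.sk_state != CAIF_CONNECTED ||
            sock_flag(sk, SOCK_DEAD))           /* assumed second condition */
                return -EPIPE;

        /* One frame per message; only RFM fragments below the socket. */
        if (len > cf_sk->maxframe && cf_sk->sk.sk_protocol != CAIFPROTO_RFM)
                return -EMSGSIZE;

        buffer_size = len + cf_sk->headroom + cf_sk->tailroom;
        skb = sock_alloc_send_skb(sk, buffer_size, noblock, &ret);
        if (!skb)
                return ret;

        skb_reserve(skb, cf_sk->headroom);
        if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
                kfree_skb(skb);
                return -EFAULT;
        }
        return transmit_skb(skb, cf_sk, noblock, timeo);
}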
590 struct caifsock *cf_sk = container_of(sk, struct caifsock, sk); in caif_stream_sendmsg() local
604 timeo = caif_wait_for_flow_on(cf_sk, 1, timeo, &err); in caif_stream_sendmsg()
613 if (size > cf_sk->maxframe) in caif_stream_sendmsg()
614 size = cf_sk->maxframe; in caif_stream_sendmsg()
624 size + cf_sk->headroom + in caif_stream_sendmsg()
625 cf_sk->tailroom, in caif_stream_sendmsg()
631 skb_reserve(skb, cf_sk->headroom); in caif_stream_sendmsg()
646 err = transmit_skb(skb, cf_sk, in caif_stream_sendmsg()
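The stream variant differs in that it may fragment: it first waits for TX flow (line 604), then loops over the payload, capping each skb at cf_sk->maxframe. A minimal sketch of that loop shape; everything except the listed lines (the sent/err/timeo locals and the error labels) is assumed:

        /* inside caif_stream_sendmsg(), per-chunk loop (assumed shape) */
        while (sent < len) {
                size_t size = len - sent;

                if (size > cf_sk->maxframe)
                        size = cf_sk->maxframe;

                skb = sock_alloc_send_skb(sk,
                                          size + cf_sk->headroom +
                                          cf_sk->tailroom,
                                          msg->msg_flags & MSG_DONTWAIT,
                                          &err);
                if (!skb)
                        goto out_err;

                skb_reserve(skb, cf_sk->headroom);
                if (memcpy_from_msg(skb_put(skb, size), msg, size)) {
                        kfree_skb(skb);
                        goto out_err;
                }
                err = transmit_skb(skb, cf_sk,
                                   msg->msg_flags & MSG_DONTWAIT, timeo);
                if (err < 0)
                        goto out_err;   /* assumed: transmit_skb consumed skb */
                sent += size;
        }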
669 struct caifsock *cf_sk = container_of(sk, struct caifsock, sk); in setsockopt() local
672 if (cf_sk->sk.sk_socket->state != SS_UNCONNECTED) in setsockopt()
683 lock_sock(&(cf_sk->sk)); in setsockopt()
684 cf_sk->conn_req.link_selector = linksel; in setsockopt()
685 release_sock(&cf_sk->sk); in setsockopt()
691 if (cf_sk->sk.sk_protocol != CAIFPROTO_UTIL) in setsockopt()
693 lock_sock(&(cf_sk->sk)); in setsockopt()
694 if (ol > sizeof(cf_sk->conn_req.param.data) || in setsockopt()
695 copy_from_sockptr(&cf_sk->conn_req.param.data, ov, ol)) { in setsockopt()
696 release_sock(&cf_sk->sk); in setsockopt()
699 cf_sk->conn_req.param.size = ol; in setsockopt()
700 release_sock(&cf_sk->sk); in setsockopt()
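Two CAIF-specific socket options are handled, both only while the socket is still SS_UNCONNECTED: CAIFSO_LINK_SELECT stores the link selector for the later connect request, and CAIFSO_REQ_PARAM (valid for CAIFPROTO_UTIL only) copies an opaque parameter blob into conn_req. A condensed sketch; the option names, size check, and error codes are assumptions consistent with the listing:

static int setsockopt(struct socket *sock, int lvl, int opt,
                      sockptr_t ov, unsigned int ol)
{
        struct sock *sk = sock->sk;
        struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
        int linksel;

        if (cf_sk->sk.sk_socket->state != SS_UNCONNECTED)
                return -ENOPROTOOPT;

        switch (opt) {
        case CAIFSO_LINK_SELECT:
                if (ol < sizeof(int) ||
                    copy_from_sockptr(&linksel, ov, sizeof(int)))
                        return -EINVAL;
                lock_sock(&(cf_sk->sk));
                cf_sk->conn_req.link_selector = linksel;
                release_sock(&cf_sk->sk);
                return 0;

        case CAIFSO_REQ_PARAM:
                if (cf_sk->sk.sk_protocol != CAIFPROTO_UTIL)
                        return -ENOPROTOOPT;
                lock_sock(&(cf_sk->sk));
                if (ol > sizeof(cf_sk->conn_req.param.data) ||
                    copy_from_sockptr(&cf_sk->conn_req.param.data, ov, ol)) {
                        release_sock(&cf_sk->sk);
                        return -EINVAL;
                }
                cf_sk->conn_req.param.size = ol;
                release_sock(&cf_sk->sk);
                return 0;

        default:
                return -ENOPROTOOPT;
        }
}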
741 struct caifsock *cf_sk = container_of(sk, struct caifsock, sk); in caif_connect() local
784 caif_disconnect_client(sock_net(sk), &cf_sk->layer); in caif_connect()
785 caif_free_client(&cf_sk->layer); in caif_connect()
798 sk_stream_kill_queues(&cf_sk->sk); in caif_connect()
804 memcpy(&cf_sk->conn_req.sockaddr, uaddr, in caif_connect()
813 if (cf_sk->sk.sk_priority > CAIF_PRIO_MAX) in caif_connect()
814 cf_sk->conn_req.priority = CAIF_PRIO_MAX; in caif_connect()
815 else if (cf_sk->sk.sk_priority < CAIF_PRIO_MIN) in caif_connect()
816 cf_sk->conn_req.priority = CAIF_PRIO_MIN; in caif_connect()
818 cf_sk->conn_req.priority = cf_sk->sk.sk_priority; in caif_connect()
821 cf_sk->conn_req.ifindex = cf_sk->sk.sk_bound_dev_if; in caif_connect()
823 cf_sk->layer.receive = caif_sktrecv_cb; in caif_connect()
825 err = caif_connect_client(sock_net(sk), &cf_sk->conn_req, in caif_connect()
826 &cf_sk->layer, &ifindex, &headroom, &tailroom); in caif_connect()
829 cf_sk->sk.sk_socket->state = SS_UNCONNECTED; in caif_connect()
830 cf_sk->sk.sk_state = CAIF_DISCONNECTED; in caif_connect()
841 cf_sk->headroom = LL_RESERVED_SPACE_EXTRA(dev, headroom); in caif_connect()
845 cf_sk->tailroom = tailroom; in caif_connect()
846 cf_sk->maxframe = mtu - (headroom + tailroom); in caif_connect()
847 if (cf_sk->maxframe < 1) { in caif_connect()
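Two details of caif_connect() are worth spelling out: sk_priority is clamped into the CAIF range before being copied into the connect request, and maxframe is derived from the device MTU minus the head- and tailroom the stack reserves, then sanity-checked. A fragment-level sketch of just those computations; the dev/mtu/headroom/tailroom/err locals and the error path are assumptions:

        /* inside caif_connect(), after caif_connect_client() succeeded */

        /* Clamp the socket priority into CAIF's valid range. */
        if (cf_sk->sk.sk_priority > CAIF_PRIO_MAX)
                cf_sk->conn_req.priority = CAIF_PRIO_MAX;
        else if (cf_sk->sk.sk_priority < CAIF_PRIO_MIN)
                cf_sk->conn_req.priority = CAIF_PRIO_MIN;
        else
                cf_sk->conn_req.priority = cf_sk->sk.sk_priority;

        /* Largest payload that fits one link-layer frame. */
        cf_sk->headroom = LL_RESERVED_SPACE_EXTRA(dev, headroom);
        cf_sk->tailroom = tailroom;
        cf_sk->maxframe = mtu - (headroom + tailroom);
        if (cf_sk->maxframe < 1) {
                pr_warn("CAIF interface MTU (%d) too small\n", mtu);
                err = -ENODEV;          /* assumed error path */
                goto out;
        }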
894 struct caifsock *cf_sk = container_of(sk, struct caifsock, sk); in caif_release() local
899 set_tx_flow_off(cf_sk); in caif_release()
911 WARN_ON(IS_ERR(cf_sk->debugfs_socket_dir)); in caif_release()
912 debugfs_remove_recursive(cf_sk->debugfs_socket_dir); in caif_release()
914 lock_sock(&(cf_sk->sk)); in caif_release()
918 caif_disconnect_client(sock_net(sk), &cf_sk->layer); in caif_release()
919 cf_sk->sk.sk_socket->state = SS_DISCONNECTING; in caif_release()
923 sk_stream_kill_queues(&cf_sk->sk); in caif_release()
935 struct caifsock *cf_sk = container_of(sk, struct caifsock, sk); in caif_poll() local
957 if (sock_writeable(sk) && tx_flow_is_on(cf_sk)) in caif_poll()
1004 struct caifsock *cf_sk = container_of(sk, struct caifsock, sk); in caif_sock_destructor() local
1012 sk_stream_kill_queues(&cf_sk->sk); in caif_sock_destructor()
1014 caif_free_client(&cf_sk->layer); in caif_sock_destructor()
1021 struct caifsock *cf_sk = NULL; in caif_create() local
1055 cf_sk = container_of(sk, struct caifsock, sk); in caif_create()
1076 lock_sock(&(cf_sk->sk)); in caif_create()
1082 mutex_init(&cf_sk->readlock); /* single task reading lock */ in caif_create()
1083 cf_sk->layer.ctrlcmd = caif_ctrl_cb; in caif_create()
1084 cf_sk->sk.sk_socket->state = SS_UNCONNECTED; in caif_create()
1085 cf_sk->sk.sk_state = CAIF_DISCONNECTED; in caif_create()
1087 set_tx_flow_off(cf_sk); in caif_create()
1088 set_rx_flow_on(cf_sk); in caif_create()
1091 cf_sk->conn_req.link_selector = CAIF_LINK_LOW_LATENCY; in caif_create()
1092 cf_sk->conn_req.protocol = protocol; in caif_create()
1093 release_sock(&cf_sk->sk); in caif_create()
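Socket creation sets the defaults everything above relies on: the single-reader mutex, the control callback, SS_UNCONNECTED/CAIF_DISCONNECTED state, TX flow off until the channel comes up, RX flow on, and a low-latency link selector. A condensed sketch of that initialization; the surrounding allocation and protocol checks are omitted:

        /* inside caif_create(), after sk_alloc() and sock_init_data() */
        lock_sock(&(cf_sk->sk));

        mutex_init(&cf_sk->readlock);           /* single task reading lock */
        cf_sk->layer.ctrlcmd = caif_ctrl_cb;    /* state machine shown above */
        cf_sk->sk.sk_socket->state = SS_UNCONNECTED;
        cf_sk->sk.sk_state = CAIF_DISCONNECTED;

        /* No transmission until the channel is up; accept RX immediately. */
        set_tx_flow_off(cf_sk);
        set_rx_flow_on(cf_sk);

        /* Defaults for the later connect request. */
        cf_sk->conn_req.link_selector = CAIF_LINK_LOW_LATENCY;
        cf_sk->conn_req.protocol = protocol;

        release_sock(&cf_sk->sk);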