Lines matching references to `nl` — in every listed line, `nl` is the `struct net_local *` per-device private data of the PLIP (parallel-port IP) driver, obtained with netdev_priv(dev). Each entry shows the source line number, the line itself, and the function it appears in.
291 struct net_local *nl = netdev_priv(dev); in plip_init_netdev() local
302 nl->port_owner = 0; in plip_init_netdev()
305 nl->trigger = PLIP_TRIGGER_WAIT; in plip_init_netdev()
306 nl->nibble = PLIP_NIBBLE_WAIT; in plip_init_netdev()
309 INIT_WORK(&nl->immediate, plip_bh); in plip_init_netdev()
310 INIT_DELAYED_WORK(&nl->deferred, plip_kick_bh); in plip_init_netdev()
313 INIT_DELAYED_WORK(&nl->timer, plip_timer_bh); in plip_init_netdev()
315 spin_lock_init(&nl->lock); in plip_init_netdev()
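
Taken together, the plip_init_netdev() lines show the usual private-data setup: the driver state lives in the area netdev_priv() returns, and the work items, delayed work items and spinlock used throughout the rest of the listing are initialized here. Below is a minimal sketch of a struct that would match these initializers; the field types are inferred from the INIT_WORK/INIT_DELAYED_WORK/spin_lock_init calls, and driver-private types and constants used in the sketches that follow (struct plip_local, PLIP_CN_*, PLIP_PK_DONE, OK, and so on) come from plip.c and <linux/if_plip.h> and are not reproduced here.

    #include <linux/netdevice.h>
    #include <linux/workqueue.h>
    #include <linux/spinlock.h>
    #include <linux/atomic.h>
    #include <linux/completion.h>

    struct net_local {
        struct net_device *dev;              /* back-pointer for the work handlers */
        struct pardevice *pardev;            /* our registration with the parport layer */
        struct work_struct immediate;        /* run the protocol state machine now */
        struct delayed_work deferred;        /* re-run it one jiffy later */
        struct delayed_work timer;           /* software poll when no IRQ is used */
        spinlock_t lock;                     /* protects the connection state */
        atomic_t kill_timer;                 /* tells plip_timer_bh() to stop */
        struct completion killed_timer_cmp;  /* signalled once it has stopped */
        struct plip_local snd_data, rcv_data; /* per-direction transfer state (driver type) */
        unsigned int connection;             /* one of the PLIP_CN_* states */
        unsigned short trigger, nibble;      /* handshake timeouts, tunable via ioctl */
        int timeout_count;
        int port_owner, should_relinquish, is_deferred;
        /* remaining members of the real struct net_local are omitted in this sketch */
    };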
324 struct net_local *nl = in plip_kick_bh() local
327 if (nl->is_deferred) in plip_kick_bh()
328 schedule_work(&nl->immediate); in plip_kick_bh()
342 static int plip_bh_timeout_error(struct net_device *dev, struct net_local *nl,
352 typedef int (*plip_func)(struct net_device *dev, struct net_local *nl,
368 struct net_local *nl = container_of(work, struct net_local, immediate); in plip_bh() local
369 struct plip_local *snd = &nl->snd_data; in plip_bh()
370 struct plip_local *rcv = &nl->rcv_data; in plip_bh()
374 nl->is_deferred = 0; in plip_bh()
375 f = connection_state_table[nl->connection]; in plip_bh()
376 if ((r = (*f)(nl->dev, nl, snd, rcv)) != OK && in plip_bh()
377 (r = plip_bh_timeout_error(nl->dev, nl, snd, rcv, r)) != OK) { in plip_bh()
378 nl->is_deferred = 1; in plip_bh()
379 schedule_delayed_work(&nl->deferred, 1); in plip_bh()
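
The plip_bh() lines are the heart of the driver: plip_kick_bh() above merely re-queues the immediate work when a pass was deferred, and plip_bh() then dispatches on nl->connection through connection_state_table, whose entries have the plip_func signature from the typedef above. Reassembling the listed lines into context gives roughly the following; the table declaration is written here only to make the sketch read cleanly, its exact contents being an assumption based on the per-state handlers that appear further down in the listing.

    typedef int (*plip_func)(struct net_device *dev, struct net_local *nl,
                             struct plip_local *snd, struct plip_local *rcv);

    /* One handler per PLIP_CN_* state; plip_none, plip_receive_packet,
     * plip_send_packet, plip_connection_close and plip_error all show up
     * later in this listing and presumably fill this table. */
    extern const plip_func connection_state_table[];

    static void plip_bh(struct work_struct *work)
    {
        struct net_local *nl = container_of(work, struct net_local, immediate);
        struct plip_local *snd = &nl->snd_data;
        struct plip_local *rcv = &nl->rcv_data;
        plip_func f;
        int r;

        nl->is_deferred = 0;
        f = connection_state_table[nl->connection];
        if ((r = (*f)(nl->dev, nl, snd, rcv)) != OK &&
            (r = plip_bh_timeout_error(nl->dev, nl, snd, rcv, r)) != OK) {
            nl->is_deferred = 1;
            schedule_delayed_work(&nl->deferred, 1);   /* retry in one jiffy */
        }
    }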
386 struct net_local *nl = in plip_timer_bh() local
389 if (!(atomic_read (&nl->kill_timer))) { in plip_timer_bh()
390 plip_interrupt (nl->dev); in plip_timer_bh()
392 schedule_delayed_work(&nl->timer, 1); in plip_timer_bh()
395 complete(&nl->killed_timer_cmp); in plip_timer_bh()
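
plip_timer_bh() is the fallback for ports that run without a hardware interrupt: it calls plip_interrupt() by hand and re-arms itself every jiffy until someone sets nl->kill_timer, at which point it signals the completion that plip_close() waits on (lines 1139-1141 below). The atomic flag plus completion is the usual way to stop a self-rescheduling work item; the listed lines reassemble to roughly:

    static void plip_timer_bh(struct work_struct *work)
    {
        struct net_local *nl =
            container_of(work, struct net_local, timer.work);

        if (!atomic_read(&nl->kill_timer)) {
            plip_interrupt(nl->dev);               /* poll in place of a real IRQ */
            schedule_delayed_work(&nl->timer, 1);  /* re-arm for the next jiffy */
        } else {
            complete(&nl->killed_timer_cmp);       /* unblock plip_close() */
        }
    }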
400 plip_bh_timeout_error(struct net_device *dev, struct net_local *nl, in plip_bh_timeout_error() argument
415 spin_lock_irq(&nl->lock); in plip_bh_timeout_error()
416 if (nl->connection == PLIP_CN_SEND) { in plip_bh_timeout_error()
419 nl->timeout_count++; in plip_bh_timeout_error()
420 if ((error == HS_TIMEOUT && nl->timeout_count <= 10) || in plip_bh_timeout_error()
421 nl->timeout_count <= 3) { in plip_bh_timeout_error()
422 spin_unlock_irq(&nl->lock); in plip_bh_timeout_error()
433 } else if (nl->connection == PLIP_CN_RECEIVE) { in plip_bh_timeout_error()
436 spin_unlock_irq(&nl->lock); in plip_bh_timeout_error()
440 if (++nl->timeout_count <= 3) { in plip_bh_timeout_error()
441 spin_unlock_irq(&nl->lock); in plip_bh_timeout_error()
461 spin_unlock_irq(&nl->lock); in plip_bh_timeout_error()
468 nl->connection = PLIP_CN_ERROR; in plip_bh_timeout_error()
475 plip_none(struct net_device *dev, struct net_local *nl, in plip_none() argument
588 plip_receive_packet(struct net_device *dev, struct net_local *nl, in plip_receive_packet() argument
591 unsigned short nibble_timeout = nl->nibble; in plip_receive_packet()
608 if (plip_receive(nl->trigger, dev, in plip_receive_packet()
612 nl->is_deferred = 1; in plip_receive_packet()
613 nl->connection = PLIP_CN_SEND; in plip_receive_packet()
614 schedule_delayed_work(&nl->deferred, 1); in plip_receive_packet()
688 spin_lock_irq(&nl->lock); in plip_receive_packet()
690 nl->connection = PLIP_CN_SEND; in plip_receive_packet()
691 spin_unlock_irq(&nl->lock); in plip_receive_packet()
692 schedule_work(&nl->immediate); in plip_receive_packet()
697 nl->connection = PLIP_CN_NONE; in plip_receive_packet()
698 spin_unlock_irq(&nl->lock); in plip_receive_packet()
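
The receive handler runs with the two timeouts picked up from nl (nibble at line 591, trigger at line 608); if the initial trigger exchange fails it defers and flips back toward sending (lines 612-614). Once a whole packet has been delivered, the tail of the function (lines 688-698) hands the turn over under nl->lock: if a transmit is waiting it jumps straight to PLIP_CN_SEND and kicks the immediate work, otherwise it settles back to PLIP_CN_NONE. A sketch of that tail follows; the pending-transmit test is an assumption (the driver checks snd->state, which the listing does not show).

        /* End of plip_receive_packet(): decide who gets the wire next. */
        spin_lock_irq(&nl->lock);
        if (snd->state != PLIP_PK_DONE) {        /* assumed test for a queued transmit */
            nl->connection = PLIP_CN_SEND;
            spin_unlock_irq(&nl->lock);
            schedule_work(&nl->immediate);       /* send it without a fresh trigger */
        } else {
            nl->connection = PLIP_CN_NONE;
            spin_unlock_irq(&nl->lock);
        }
        return OK;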
756 plip_send_packet(struct net_device *dev, struct net_local *nl, in plip_send_packet() argument
759 unsigned short nibble_timeout = nl->nibble; in plip_send_packet()
778 cx = nl->trigger; in plip_send_packet()
781 spin_lock_irq(&nl->lock); in plip_send_packet()
782 if (nl->connection == PLIP_CN_RECEIVE) { in plip_send_packet()
783 spin_unlock_irq(&nl->lock); in plip_send_packet()
790 spin_unlock_irq(&nl->lock); in plip_send_packet()
793 if (nl->connection == PLIP_CN_RECEIVE) { in plip_send_packet()
809 nl->timeout_count = 0; in plip_send_packet()
812 spin_unlock_irq(&nl->lock); in plip_send_packet()
865 nl->connection = PLIP_CN_CLOSING; in plip_send_packet()
866 nl->is_deferred = 1; in plip_send_packet()
867 schedule_delayed_work(&nl->deferred, 1); in plip_send_packet()
876 plip_connection_close(struct net_device *dev, struct net_local *nl, in plip_connection_close() argument
879 spin_lock_irq(&nl->lock); in plip_connection_close()
880 if (nl->connection == PLIP_CN_CLOSING) { in plip_connection_close()
881 nl->connection = PLIP_CN_NONE; in plip_connection_close()
884 spin_unlock_irq(&nl->lock); in plip_connection_close()
885 if (nl->should_relinquish) { in plip_connection_close()
886 nl->should_relinquish = nl->port_owner = 0; in plip_connection_close()
887 parport_release(nl->pardev); in plip_connection_close()
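
After the last nibble of a frame has gone out, plip_send_packet() does not drop straight back to idle: it enters PLIP_CN_CLOSING and defers one jiffy (lines 865-867), and the deferred pass lands in plip_connection_close(). That handler also covers the case where another parport client asked for the port while a transfer was in flight (plip_preempt(), further down, only sets should_relinquish rather than releasing mid-frame): the release happens here, at a frame boundary. Sketched from the listed lines, with the return value assumed to follow the plip_func contract:

    static int plip_connection_close(struct net_device *dev, struct net_local *nl,
                                     struct plip_local *snd, struct plip_local *rcv)
    {
        spin_lock_irq(&nl->lock);
        if (nl->connection == PLIP_CN_CLOSING) {
            nl->connection = PLIP_CN_NONE;
            /* waking the transmit queue here is not part of the listing */
        }
        spin_unlock_irq(&nl->lock);

        /* A competing parport client asked for the port mid-transfer:
         * give it up now that the frame is finished. */
        if (nl->should_relinquish) {
            nl->should_relinquish = nl->port_owner = 0;
            parport_release(nl->pardev);
        }
        return OK;
    }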
894 plip_error(struct net_device *dev, struct net_local *nl, in plip_error() argument
903 nl->connection = PLIP_CN_NONE; in plip_error()
904 nl->should_relinquish = 0; in plip_error()
910 nl->is_deferred = 1; in plip_error()
911 schedule_delayed_work(&nl->deferred, 1); in plip_error()
922 struct net_local *nl; in plip_interrupt() local
927 nl = netdev_priv(dev); in plip_interrupt()
928 rcv = &nl->rcv_data; in plip_interrupt()
930 spin_lock_irqsave (&nl->lock, flags); in plip_interrupt()
936 spin_unlock_irqrestore (&nl->lock, flags); in plip_interrupt()
943 switch (nl->connection) { in plip_interrupt()
950 nl->connection = PLIP_CN_RECEIVE; in plip_interrupt()
951 nl->timeout_count = 0; in plip_interrupt()
952 schedule_work(&nl->immediate); in plip_interrupt()
966 spin_unlock_irqrestore(&nl->lock, flags); in plip_interrupt()
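
Note the locking difference: plip_interrupt() uses spin_lock_irqsave/irqrestore because it can run both from the parallel-port interrupt and, via plip_timer_bh() above, from workqueue context, whereas the state handlers use plain spin_lock_irq since they always run from the workqueue. On a valid trigger it switches the state machine to PLIP_CN_RECEIVE, resets the timeout budget and kicks the immediate work. A sketch under the assumption that the function has the parport irq-callback form (an opaque pointer that plip uses for the net_device) and that lines 950-952 sit in the idle/sending cases of the switch:

    static void plip_interrupt(void *dev_id)
    {
        struct net_device *dev = dev_id;          /* assumed: handle is the netdev */
        struct net_local *nl = netdev_priv(dev);
        struct plip_local *rcv = &nl->rcv_data;
        unsigned long flags;

        spin_lock_irqsave(&nl->lock, flags);

        /* status-line check for a genuine trigger elided (not in the listing) */

        switch (nl->connection) {
        case PLIP_CN_NONE:
        case PLIP_CN_SEND:                         /* assumed cases for lines 950-952 */
            nl->connection = PLIP_CN_RECEIVE;      /* peer wants to talk: go receive */
            nl->timeout_count = 0;
            schedule_work(&nl->immediate);
            break;
        default:
            /* other states ignore or log the interrupt */
            break;
        }

        spin_unlock_irqrestore(&nl->lock, flags);
    }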
972 struct net_local *nl = netdev_priv(dev); in plip_tx_packet() local
973 struct plip_local *snd = &nl->snd_data; in plip_tx_packet()
979 if (!nl->port_owner) { in plip_tx_packet()
980 if (parport_claim(nl->pardev)) in plip_tx_packet()
982 nl->port_owner = 1; in plip_tx_packet()
996 spin_lock_irq(&nl->lock); in plip_tx_packet()
1000 if (nl->connection == PLIP_CN_NONE) { in plip_tx_packet()
1001 nl->connection = PLIP_CN_SEND; in plip_tx_packet()
1002 nl->timeout_count = 0; in plip_tx_packet()
1004 schedule_work(&nl->immediate); in plip_tx_packet()
1005 spin_unlock_irq(&nl->lock); in plip_tx_packet()
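
plip_tx_packet() shows the transmit entry point: the parallel port is claimed lazily the first time a packet is sent (lines 979-982), and the transmit itself only queues the skb and wakes the state machine; if the link is idle the state flips to PLIP_CN_SEND and plip_bh() performs the actual nibble transfer. A hedged sketch of those steps (queue stop/start, length checks and the skb bookkeeping are elided, and the return values are assumptions):

    static netdev_tx_t plip_tx_packet(struct sk_buff *skb, struct net_device *dev)
    {
        struct net_local *nl = netdev_priv(dev);

        /* Claim the parallel port on first use; back off if another
         * parport client currently owns it. */
        if (!nl->port_owner) {
            if (parport_claim(nl->pardev))
                return NETDEV_TX_BUSY;
            nl->port_owner = 1;
        }

        spin_lock_irq(&nl->lock);
        /* hand the skb to nl->snd_data here (details not in the listing) */
        if (nl->connection == PLIP_CN_NONE) {
            nl->connection = PLIP_CN_SEND;
            nl->timeout_count = 0;
        }
        schedule_work(&nl->immediate);   /* let plip_bh() drive the transfer */
        spin_unlock_irq(&nl->lock);

        return NETDEV_TX_OK;
    }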
1069 struct net_local *nl = netdev_priv(dev); in plip_open() local
1073 if (!nl->port_owner) { in plip_open()
1074 if (parport_claim(nl->pardev)) return -EAGAIN; in plip_open()
1075 nl->port_owner = 1; in plip_open()
1078 nl->should_relinquish = 0; in plip_open()
1087 atomic_set (&nl->kill_timer, 0); in plip_open()
1088 schedule_delayed_work(&nl->timer, 1); in plip_open()
1092 nl->rcv_data.state = nl->snd_data.state = PLIP_PK_DONE; in plip_open()
1093 nl->rcv_data.skb = nl->snd_data.skb = NULL; in plip_open()
1094 nl->connection = PLIP_CN_NONE; in plip_open()
1095 nl->is_deferred = 0; in plip_open()
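
plip_open() performs the same port claim up front and fails the open with -EAGAIN if it cannot get the port; if the port has no usable IRQ it starts the plip_timer_bh() poll instead (lines 1087-1088), and it resets the protocol state machine to a clean idle state (lines 1092-1095). Roughly, with the IRQ guard and the final queue start marked as assumptions:

    static int plip_open(struct net_device *dev)
    {
        struct net_local *nl = netdev_priv(dev);

        if (!nl->port_owner) {
            if (parport_claim(nl->pardev))
                return -EAGAIN;
            nl->port_owner = 1;
        }
        nl->should_relinquish = 0;

        /* Assumed guard: poll via the delayed work only when the port
         * has no usable interrupt. */
        if (dev->irq == -1) {
            atomic_set(&nl->kill_timer, 0);
            schedule_delayed_work(&nl->timer, 1);
        }

        /* Start from a clean idle state. */
        nl->rcv_data.state = nl->snd_data.state = PLIP_PK_DONE;
        nl->rcv_data.skb = nl->snd_data.skb = NULL;
        nl->connection = PLIP_CN_NONE;
        nl->is_deferred = 0;

        netif_start_queue(dev);   /* assumed: allow transmits once open */
        return 0;
    }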
1129 struct net_local *nl = netdev_priv(dev); in plip_close() local
1130 struct plip_local *snd = &nl->snd_data; in plip_close()
1131 struct plip_local *rcv = &nl->rcv_data; in plip_close()
1139 init_completion(&nl->killed_timer_cmp); in plip_close()
1140 atomic_set (&nl->kill_timer, 1); in plip_close()
1141 wait_for_completion(&nl->killed_timer_cmp); in plip_close()
1147 nl->is_deferred = 0; in plip_close()
1148 nl->connection = PLIP_CN_NONE; in plip_close()
1149 if (nl->port_owner) { in plip_close()
1150 parport_release(nl->pardev); in plip_close()
1151 nl->port_owner = 0; in plip_close()
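
plip_close() is the mirror image: it stops the software poll by setting nl->kill_timer and then blocks on killed_timer_cmp until plip_timer_bh() has confirmed it will not re-arm itself (lines 1139-1141), after which it is safe to reset the state and hand the parallel port back (lines 1147-1151). A sketch, with queue shutdown and skb cleanup elided and the poll guard assumed to mirror the open path:

    static int plip_close(struct net_device *dev)
    {
        struct net_local *nl = netdev_priv(dev);

        /* Only when the software poll from plip_open() is running
         * (guard assumed): stop it and wait until it has really stopped. */
        if (dev->irq == -1) {
            init_completion(&nl->killed_timer_cmp);
            atomic_set(&nl->kill_timer, 1);
            wait_for_completion(&nl->killed_timer_cmp);
        }

        nl->is_deferred = 0;
        nl->connection = PLIP_CN_NONE;

        if (nl->port_owner) {
            parport_release(nl->pardev);
            nl->port_owner = 0;
        }
        return 0;
    }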
1176 struct net_local *nl = netdev_priv(dev); in plip_preempt() local
1179 if (nl->connection != PLIP_CN_NONE) { in plip_preempt()
1180 nl->should_relinquish = 1; in plip_preempt()
1184 nl->port_owner = 0; /* Remember that we released the bus */ in plip_preempt()
1192 struct net_local *nl = netdev_priv(dev); in plip_wakeup() local
1194 if (nl->port_owner) { in plip_wakeup()
1197 if (!parport_claim(nl->pardev)) in plip_wakeup()
1208 if (!parport_claim(nl->pardev)) { in plip_wakeup()
1209 nl->port_owner = 1; in plip_wakeup()
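
plip_preempt() and plip_wakeup() are the parport sharing callbacks registered in plip_attach() below. Preemption is refused (non-zero return) while a datagram is on the wire; should_relinquish is set instead so plip_connection_close() can hand the port over at the next frame boundary. When the driver is idle it accepts the preemption and clears port_owner, and plip_wakeup() later re-claims the port when it becomes free again. Sketched, assuming the usual parport callback signatures (an opaque handle that plip uses for the net_device); the real wakeup handler also checks the interface state and logs oddities, which is omitted here:

    static int plip_preempt(void *handle)
    {
        struct net_local *nl = netdev_priv((struct net_device *)handle);

        /* Refuse to give up the port mid-frame; ask plip_connection_close()
         * to release it once the current transfer is done. */
        if (nl->connection != PLIP_CN_NONE) {
            nl->should_relinquish = 1;
            return 1;              /* non-zero: preemption denied for now */
        }

        nl->port_owner = 0;        /* remember that we released the bus */
        return 0;
    }

    static void plip_wakeup(void *handle)
    {
        struct net_local *nl = netdev_priv((struct net_device *)handle);

        if (nl->port_owner)
            return;                /* we never actually gave the port up */

        if (!parport_claim(nl->pardev))
            nl->port_owner = 1;    /* got the port back; ready to transmit again */
    }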
1219 struct net_local *nl = netdev_priv(dev); in plip_siocdevprivate() local
1230 pc->trigger = nl->trigger; in plip_siocdevprivate()
1231 pc->nibble = nl->nibble; in plip_siocdevprivate()
1236 nl->trigger = pc->trigger; in plip_siocdevprivate()
1237 nl->nibble = pc->nibble; in plip_siocdevprivate()
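
plip_siocdevprivate() exposes the two tunables initialized in plip_init_netdev(): a private ioctl either reads back or overwrites nl->trigger and nl->nibble. A sketch under the assumption that the request carries a plipconf-style structure with a get/set sub-command (names as I recall them from <linux/if_plip.h>); command validation and permission checks on the set path are elided:

    static int plip_siocdevprivate(struct net_device *dev, struct ifreq *rq,
                                   void __user *data, int cmd)
    {
        struct net_local *nl = netdev_priv(dev);
        struct plipconf *pc = (struct plipconf *)&rq->ifr_ifru;  /* assumed layout */

        switch (pc->pcmd) {
        case PLIP_GET_TIMEOUT:                /* read back the current timeouts */
            pc->trigger = nl->trigger;
            pc->nibble  = nl->nibble;
            break;
        case PLIP_SET_TIMEOUT:                /* install new timeouts */
            nl->trigger = pc->trigger;
            nl->nibble  = pc->nibble;
            break;
        default:
            return -EOPNOTSUPP;
        }
        return 0;
    }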
1270 struct net_local *nl; in plip_attach() local
1295 nl = netdev_priv(dev); in plip_attach()
1296 nl->dev = dev; in plip_attach()
1304 nl->pardev = parport_register_dev_model(port, dev->name, in plip_attach()
1307 if (!nl->pardev) { in plip_attach()
1333 parport_unregister_device(nl->pardev); in plip_attach()
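
plip_attach() ties everything together: the net_device is allocated with room for struct net_local as its private area, nl->dev is set so the work handlers that only get a net_local back from container_of() can reach the netdev, and the parport device is registered with parport_register_dev_model(). The callback structure passed there is where plip_preempt, plip_wakeup and plip_interrupt get wired up. A hedged sketch of that registration (struct pardev_cb field names as I recall them from <linux/parport.h>; device naming, plip_init_netdev(), netdev registration and the full error unwinding via parport_unregister_device() at line 1333 are elided):

    #include <linux/parport.h>
    #include <linux/etherdevice.h>
    #include <linux/string.h>

    static void plip_attach(struct parport *port)
    {
        struct net_device *dev;
        struct net_local *nl;
        struct pardev_cb plip_cb;

        dev = alloc_etherdev(sizeof(struct net_local));  /* private area = struct net_local */
        if (!dev)
            return;

        nl = netdev_priv(dev);
        nl->dev = dev;            /* back-pointer used by the work handlers */

        memset(&plip_cb, 0, sizeof(plip_cb));
        plip_cb.private  = dev;              /* handle handed back to the callbacks */
        plip_cb.preempt  = plip_preempt;     /* another client wants the port */
        plip_cb.wakeup   = plip_wakeup;      /* the port became free again */
        plip_cb.irq_func = plip_interrupt;   /* parallel-port interrupt */

        nl->pardev = parport_register_dev_model(port, dev->name, &plip_cb, 0 /* unit */);
        if (!nl->pardev) {
            free_netdev(dev);
            return;
        }
    }

plip_cleanup_module() at the bottom of the listing undoes this per device: it releases the port if it is still owned and unregisters the pardevice.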
1370 struct net_local *nl = netdev_priv(dev); in plip_cleanup_module() local
1372 if (nl->port_owner) in plip_cleanup_module()
1373 parport_release(nl->pardev); in plip_cleanup_module()
1374 parport_unregister_device(nl->pardev); in plip_cleanup_module()