Lines Matching defs:dp83640 (identifier cross-reference for the DP83640 PHY driver, drivers/net/phy/dp83640.c; each entry shows the driver source line number followed by the matching line)
228 struct dp83640_private *dp83640 = phydev->priv;
231 if (dp83640->clock->page != page) {
233 dp83640->clock->page = page;
244 struct dp83640_private *dp83640 = phydev->priv;
246 if (dp83640->clock->page != page) {
248 dp83640->clock->page = page;
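The two hunks above (lines 228-248) are the paged extended-register helpers: the currently selected page is cached in the shared clock structure, so the page-select register is only written when the page actually changes. A minimal sketch of that pattern follows; the page-select address and the structure/helper names are illustrative assumptions, not copied from the driver.

/* Sketch of the cached page-select pattern used by the ext_read/ext_write helpers. */
#include <linux/phy.h>

#define EXAMPLE_PAGESEL 0x13                    /* assumed page-select register */

struct example_clock {
        int page;                               /* currently selected register page */
};

static void example_select_page(struct example_clock *clock,
                                struct phy_device *phydev, int page)
{
        if (clock->page != page) {
                phy_write(phydev, EXAMPLE_PAGESEL, page);
                clock->page = page;
        }
}

static int example_ext_read(struct example_clock *clock,
                            struct phy_device *phydev, int page, u32 regnum)
{
        example_select_page(clock, phydev, page);
        return phy_read(phydev, regnum);
}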
307 struct dp83640_private *dp83640 = clock->chosen;
308 struct phy_device *phydev = dp83640->phydev;
541 struct dp83640_private *dp83640 = phydev->priv;
542 struct dp83640_clock *clock = dp83640->clock;
589 static void prune_rx_ts(struct dp83640_private *dp83640)
594 list_for_each_safe(this, next, &dp83640->rxts) {
598 list_add(&rxts->list, &dp83640->rxpool);
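prune_rx_ts() (line 589) walks the active receive-timestamp list and moves entries whose timeout has passed back to the free pool. A sketch under the assumption of an entry type with a list_head and a jiffies expiry field:

#include <linux/jiffies.h>
#include <linux/list.h>

struct example_rxts {
        struct list_head list;
        unsigned long tmo;                      /* jiffies deadline (assumed field) */
};

/* Move every expired entry from the active list back to the free pool. */
static void example_prune(struct list_head *active, struct list_head *pool)
{
        struct list_head *this, *next;
        struct example_rxts *rxts;

        list_for_each_safe(this, next, active) {
                rxts = list_entry(this, struct example_rxts, list);
                if (time_after(jiffies, rxts->tmo)) {
                        list_del_init(&rxts->list);
                        list_add(&rxts->list, pool);
                }
        }
}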
732 static int decode_evnt(struct dp83640_private *dp83640,
760 dp83640->edata.sec_hi = phy_txts->sec_hi;
763 dp83640->edata.sec_lo = phy_txts->sec_lo;
766 dp83640->edata.ns_hi = phy_txts->ns_hi;
769 dp83640->edata.ns_lo = phy_txts->ns_lo;
778 event.timestamp = phy2txts(&dp83640->edata);
786 ptp_clock_event(dp83640->clock->ptp_clock, &event);
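decode_evnt() (lines 732-786) assembles the seconds/nanoseconds words from the status frame into dp83640->edata and then reports the result to the PTP core. The handoff at lines 778-786 boils down to the sketch below; the channel and ns parameters stand in for the driver's own phy2txts() conversion and event-trigger bookkeeping:

#include <linux/ptp_clock_kernel.h>

/* Report one external timestamp (EXTTS) event to the PTP class driver. */
static void example_report_extts(struct ptp_clock *ptp, int channel, u64 ns)
{
        struct ptp_clock_event event;

        event.type = PTP_CLOCK_EXTTS;
        event.index = channel;
        event.timestamp = ns;

        ptp_clock_event(ptp, &event);
}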
825 static void decode_rxts(struct dp83640_private *dp83640,
838 spin_lock_irqsave(&dp83640->rx_lock, flags);
840 prune_rx_ts(dp83640);
842 if (list_empty(&dp83640->rxpool)) {
846 rxts = list_first_entry(&dp83640->rxpool, struct rxts, list);
850 spin_lock(&dp83640->rx_queue.lock);
851 skb_queue_walk(&dp83640->rx_queue, skb) {
856 __skb_unlink(skb, &dp83640->rx_queue);
860 list_add(&rxts->list, &dp83640->rxpool);
864 spin_unlock(&dp83640->rx_queue.lock);
867 list_add_tail(&rxts->list, &dp83640->rxts);
869 spin_unlock_irqrestore(&dp83640->rx_lock, flags);
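The heart of decode_rxts() (lines 850-867) is a match against the queue of packets deferred by dp83640_rxtstamp(): under the queue lock it looks for a packet that corresponds to the freshly decoded timestamp, stamps and delivers it on a hit, and otherwise parks the timestamp on the rxts list for a packet that has not arrived yet. A sketch with the match predicate passed in, since the driver's own comparison (sequence id, message type, source hash) is not shown in the listing:

#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/ktime.h>
#include <linux/spinlock.h>

static bool example_deliver_rx_ts(struct sk_buff_head *rx_queue, u64 ns,
                                  bool (*match)(struct sk_buff *skb, u64 ns))
{
        struct sk_buff *skb, *found = NULL;

        spin_lock(&rx_queue->lock);
        skb_queue_walk(rx_queue, skb) {
                if (match(skb, ns)) {
                        __skb_unlink(skb, rx_queue);
                        found = skb;
                        break;
                }
        }
        spin_unlock(&rx_queue->lock);

        if (!found)
                return false;                   /* caller keeps the timestamp around */

        skb_hwtstamps(found)->hwtstamp = ns_to_ktime(ns);
        netif_rx(found);
        return true;
}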
875 static void decode_txts(struct dp83640_private *dp83640,
886 skb = skb_dequeue(&dp83640->tx_queue);
897 skb = skb_dequeue(&dp83640->tx_queue);
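decode_txts() (lines 886-897) pairs the decoded transmit timestamp with the oldest packet waiting on tx_queue and completes it back to the sending socket's error queue. A minimal sketch that completes a single packet:

#include <linux/skbuff.h>
#include <linux/ktime.h>
#include <linux/string.h>

/* Complete the oldest pending transmit skb with a hardware timestamp. */
static void example_complete_tx_ts(struct sk_buff_head *tx_queue, u64 ns)
{
        struct skb_shared_hwtstamps shhwtstamps;
        struct sk_buff *skb;

        skb = skb_dequeue(tx_queue);
        if (!skb)
                return;

        memset(&shhwtstamps, 0, sizeof(shhwtstamps));
        shhwtstamps.hwtstamp = ns_to_ktime(ns);
        skb_complete_tx_timestamp(skb, &shhwtstamps);
}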
913 static void decode_status_frame(struct dp83640_private *dp83640,
935 decode_rxts(dp83640, phy_rxts);
941 decode_txts(dp83640, phy_txts);
946 size = decode_evnt(dp83640, ptr, len, ests);
988 sprintf(clock->caps.name, "dp83640 timer");
1098 struct dp83640_private *dp83640 = phydev->priv;
1099 struct dp83640_clock *clock = dp83640->clock;
1207 struct dp83640_private *dp83640 =
1214 dp83640->hwts_tx_en = cfg->tx_type;
1218 dp83640->hwts_rx_en = 0;
1219 dp83640->layer = 0;
1220 dp83640->version = 0;
1225 dp83640->hwts_rx_en = 1;
1226 dp83640->layer = PTP_CLASS_L4;
1227 dp83640->version = PTP_CLASS_V1;
1233 dp83640->hwts_rx_en = 1;
1234 dp83640->layer = PTP_CLASS_L4;
1235 dp83640->version = PTP_CLASS_V2;
1241 dp83640->hwts_rx_en = 1;
1242 dp83640->layer = PTP_CLASS_L2;
1243 dp83640->version = PTP_CLASS_V2;
1249 dp83640->hwts_rx_en = 1;
1250 dp83640->layer = PTP_CLASS_L4 | PTP_CLASS_L2;
1251 dp83640->version = PTP_CLASS_V2;
1258 txcfg0 = (dp83640->version & TX_PTP_VER_MASK) << TX_PTP_VER_SHIFT;
1259 rxcfg0 = (dp83640->version & TX_PTP_VER_MASK) << TX_PTP_VER_SHIFT;
1261 if (dp83640->layer & PTP_CLASS_L2) {
1265 if (dp83640->layer & PTP_CLASS_L4) {
1270 if (dp83640->hwts_tx_en)
1273 if (dp83640->hwts_tx_en == HWTSTAMP_TX_ONESTEP_SYNC)
1276 if (dp83640->hwts_rx_en)
1279 mutex_lock(&dp83640->clock->extreg_lock);
1281 ext_write(0, dp83640->phydev, PAGE5, PTP_TXCFG0, txcfg0);
1282 ext_write(0, dp83640->phydev, PAGE5, PTP_RXCFG0, rxcfg0);
1284 mutex_unlock(&dp83640->clock->extreg_lock);
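The block from line 1207 to 1284 is the hardware-timestamping configuration handler: the requested tx_type and rx_filter are translated into the layer/version bits and then programmed into PTP_TXCFG0/PTP_RXCFG0 on page 5. From userspace the request is made with the SIOCSHWTSTAMP ioctl; a sketch follows, where the interface name "eth0" is an assumption, and a v2 event filter lands in the PTP_CLASS_L4 | PTP_CLASS_L2 case at lines 1249-1251:

#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

int main(void)
{
        struct hwtstamp_config cfg;
        struct ifreq ifr;
        int fd = socket(AF_INET, SOCK_DGRAM, 0);

        if (fd < 0) {
                perror("socket");
                return 1;
        }

        memset(&cfg, 0, sizeof(cfg));
        cfg.tx_type = HWTSTAMP_TX_ON;                   /* timestamp transmitted PTP frames */
        cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;   /* v2, layer 2 and layer 4 */

        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);    /* assumed interface name */
        ifr.ifr_data = (char *)&cfg;

        if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0)
                perror("SIOCSHWTSTAMP");

        return 0;
}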
1291 struct dp83640_private *dp83640 =
1296 while ((skb = skb_dequeue(&dp83640->rx_queue))) {
1301 skb_queue_head(&dp83640->rx_queue, skb);
1308 if (!skb_queue_empty(&dp83640->rx_queue))
1309 schedule_delayed_work(&dp83640->ts_work, SKB_TIMESTAMP_TIMEOUT);
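rx_timestamp_work() (lines 1291-1309) is the safety valve for received packets whose timestamp never arrives: it drains the deferral queue, puts back packets still within their waiting window, and rearms itself while the queue is non-empty. A sketch with the expiry test and timeout left as assumptions:

#include <linux/skbuff.h>
#include <linux/workqueue.h>
#include <linux/netdevice.h>
#include <linux/jiffies.h>

#define EXAMPLE_TS_TIMEOUT msecs_to_jiffies(2)          /* assumed timeout */

static void example_rx_work(struct sk_buff_head *rx_queue,
                            struct delayed_work *work,
                            bool (*expired)(struct sk_buff *skb))
{
        struct sk_buff *skb;

        /* Release packets that have waited long enough for a timestamp. */
        while ((skb = skb_dequeue(rx_queue)) != NULL) {
                if (!expired(skb)) {
                        /* The timestamp may still show up: put it back and stop. */
                        skb_queue_head(rx_queue, skb);
                        break;
                }
                netif_rx(skb);
        }

        if (!skb_queue_empty(rx_queue))
                schedule_delayed_work(work, EXAMPLE_TS_TIMEOUT);
}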
1315 struct dp83640_private *dp83640 =
1324 decode_status_frame(dp83640, skb);
1329 if (!dp83640->hwts_rx_en)
1332 if ((type & dp83640->version) == 0 || (type & dp83640->layer) == 0)
1335 spin_lock_irqsave(&dp83640->rx_lock, flags);
1336 prune_rx_ts(dp83640);
1337 list_for_each_safe(this, next, &dp83640->rxts) {
1344 list_add(&rxts->list, &dp83640->rxpool);
1348 spin_unlock_irqrestore(&dp83640->rx_lock, flags);
1353 skb_queue_tail(&dp83640->rx_queue, skb);
1354 schedule_delayed_work(&dp83640->ts_work, SKB_TIMESTAMP_TIMEOUT);
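dp83640_rxtstamp() (lines 1315-1354) either attaches a timestamp that has already been decoded from a status frame or defers the packet on rx_queue until one arrives. Once a packet has been stamped and delivered, userspace reads the value through SO_TIMESTAMPING. A sketch, assuming sock is a UDP socket already bound to the PTP event port:

#include <stdio.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <linux/net_tstamp.h>
#include <linux/errqueue.h>             /* struct scm_timestamping */

static void example_read_rx_tstamp(int sock)
{
        char data[256], ctrl[256];
        struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
        struct msghdr msg = {
                .msg_iov = &iov, .msg_iovlen = 1,
                .msg_control = ctrl, .msg_controllen = sizeof(ctrl),
        };
        int flags = SOF_TIMESTAMPING_RX_HARDWARE | SOF_TIMESTAMPING_RAW_HARDWARE;
        struct cmsghdr *cm;

        setsockopt(sock, SOL_SOCKET, SO_TIMESTAMPING, &flags, sizeof(flags));

        if (recvmsg(sock, &msg, 0) < 0)
                return;

        for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm)) {
                if (cm->cmsg_level == SOL_SOCKET && cm->cmsg_type == SCM_TIMESTAMPING) {
                        struct scm_timestamping *ts = (void *)CMSG_DATA(cm);

                        /* ts->ts[2] holds the raw hardware (PHC) timestamp. */
                        printf("hw rx timestamp: %lld.%09ld\n",
                               (long long)ts->ts[2].tv_sec, ts->ts[2].tv_nsec);
                }
        }
}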
1366 struct dp83640_private *dp83640 =
1369 switch (dp83640->hwts_tx_en) {
1380 skb_queue_tail(&dp83640->tx_queue, skb);
1393 struct dp83640_private *dp83640 =
1400 info->phc_index = ptp_clock_index(dp83640->clock->ptp_clock);
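dp83640_ts_info() (line 1400) is where the PHC index that tools such as ptp4l and phc2sys need comes from. Userspace can query it with ETHTOOL_GET_TS_INFO (the same data "ethtool -T" prints); the interface name below is an assumption:

#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
        struct ethtool_ts_info info = { .cmd = ETHTOOL_GET_TS_INFO };
        struct ifreq ifr;
        int fd = socket(AF_INET, SOCK_DGRAM, 0);

        if (fd < 0) {
                perror("socket");
                return 1;
        }

        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);    /* assumed interface name */
        ifr.ifr_data = (char *)&info;

        if (ioctl(fd, SIOCETHTOOL, &ifr) == 0)
                printf("PHC index: %d\n", info.phc_index);      /* /dev/ptp<N> */
        else
                perror("SIOCETHTOOL");

        return 0;
}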
1417 struct dp83640_private *dp83640;
1427 dp83640 = kzalloc(sizeof(struct dp83640_private), GFP_KERNEL);
1428 if (!dp83640)
1431 dp83640->phydev = phydev;
1432 dp83640->mii_ts.rxtstamp = dp83640_rxtstamp;
1433 dp83640->mii_ts.txtstamp = dp83640_txtstamp;
1434 dp83640->mii_ts.hwtstamp = dp83640_hwtstamp;
1435 dp83640->mii_ts.ts_info = dp83640_ts_info;
1437 INIT_DELAYED_WORK(&dp83640->ts_work, rx_timestamp_work);
1438 INIT_LIST_HEAD(&dp83640->rxts);
1439 INIT_LIST_HEAD(&dp83640->rxpool);
1441 list_add(&dp83640->rx_pool_data[i].list, &dp83640->rxpool);
1445 phydev->mii_ts = &dp83640->mii_ts;
1446 phydev->priv = dp83640;
1448 spin_lock_init(&dp83640->rx_lock);
1449 skb_queue_head_init(&dp83640->rx_queue);
1450 skb_queue_head_init(&dp83640->tx_queue);
1452 dp83640->clock = clock;
1455 clock->chosen = dp83640;
1463 list_add_tail(&dp83640->list, &clock->phylist);
1470 kfree(dp83640);
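The probe hunk (lines 1417-1463) embeds a struct mii_timestamper in the driver's private data, fills in the four callbacks, and publishes it via phydev->mii_ts; the callbacks listed earlier (lines 1207, 1315, 1366, 1393) then recover the private structure from the callback argument, while the worker at line 1291 does the same through its work struct. A sketch of the container_of() pattern, with the private structure abbreviated:

#include <linux/kernel.h>
#include <linux/mii_timestamper.h>

struct example_priv {
        struct mii_timestamper mii_ts;
        /* queues, timestamp lists, clock pointer, ... */
};

/* Each mii_timestamper callback recovers its private data like this. */
static struct example_priv *example_from_mii_ts(struct mii_timestamper *mii_ts)
{
        return container_of(mii_ts, struct example_priv, mii_ts);
}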
1481 struct dp83640_private *tmp, *dp83640 = phydev->priv;
1489 cancel_delayed_work_sync(&dp83640->ts_work);
1491 skb_queue_purge(&dp83640->rx_queue);
1492 skb_queue_purge(&dp83640->tx_queue);
1494 clock = dp83640_clock_get(dp83640->clock);
1496 if (dp83640 == clock->chosen) {
1502 if (tmp == dp83640) {
1510 kfree(dp83640);