/freebsd/sys/dev/gve/

gve_tx_dqo.c
    38  gve_unmap_packet(struct gve_tx_ring *tx,   in gve_unmap_packet() argument
    41  bus_dmamap_sync(tx->dqo.buf_dmatag, pending_pkt->dmamap,   in gve_unmap_packet()
    43  bus_dmamap_unload(tx->dqo.buf_dmatag, pending_pkt->dmamap);   in gve_unmap_packet()
    54  gve_free_tx_mbufs_dqo(struct gve_tx_ring *tx)   in gve_free_tx_mbufs_dqo() argument
    59  for (i = 0; i < tx->dqo.num_pending_pkts; i++) {   in gve_free_tx_mbufs_dqo()
    60  pending_pkt = &tx->dqo.pending_pkts[i];   in gve_free_tx_mbufs_dqo()
    64  if (gve_is_qpl(tx->com.priv))   in gve_free_tx_mbufs_dqo()
    67  gve_unmap_packet(tx, pending_pkt);   in gve_free_tx_mbufs_dqo()
    77  struct gve_tx_ring *tx = &priv->tx[i];   in gve_tx_free_ring_dqo() local
    80  if (tx->dqo.desc_ring != NULL) {   in gve_tx_free_ring_dqo()
    [all …]
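The gve_unmap_packet() hits pair bus_dmamap_sync() with bus_dmamap_unload() when a completed TX packet is reclaimed. A minimal sketch of that teardown follows; the BUS_DMASYNC_POSTWRITE op and the pending-packet struct name are assumptions, since the excerpt truncates them.

    /*
     * Sketch only: the sync op and the pending-packet type name are assumed;
     * the tag/map fields follow the excerpt above. Kernel busdma context.
     */
    static void
    gve_unmap_packet(struct gve_tx_ring *tx, struct gve_tx_pending_pkt_dqo *pkt)
    {
            /* Complete the device's view of the buffers before teardown. */
            bus_dmamap_sync(tx->dqo.buf_dmatag, pkt->dmamap, BUS_DMASYNC_POSTWRITE);
            /* Release the mapping so the map can be reloaded for the next packet. */
            bus_dmamap_unload(tx->dqo.buf_dmatag, pkt->dmamap);
    }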

gve_tx.c
    38  gve_tx_fifo_init(struct gve_priv *priv, struct gve_tx_ring *tx)   in gve_tx_fifo_init() argument
    40  struct gve_queue_page_list *qpl = tx->com.qpl;   in gve_tx_fifo_init()
    41  struct gve_tx_fifo *fifo = &tx->fifo;   in gve_tx_fifo_init()
    54  struct gve_tx_ring *tx = &priv->tx[i];   in gve_tx_free_ring_gqi() local
    56  if (tx->desc_ring != NULL) {   in gve_tx_free_ring_gqi()
    57  gve_dma_free_coherent(&tx->desc_ring_mem);   in gve_tx_free_ring_gqi()
    58  tx->desc_ring = NULL;   in gve_tx_free_ring_gqi()
    61  if (tx->info != NULL) {   in gve_tx_free_ring_gqi()
    62  free(tx->info, M_GVE);   in gve_tx_free_ring_gqi()
    63  tx->info = NULL;   in gve_tx_free_ring_gqi()
    [all …]

/freebsd/sys/contrib/openzfs/module/zfs/

txg.c
    122  tx_state_t *tx = &dp->dp_tx;   in txg_init() local
    124  memset(tx, 0, sizeof (tx_state_t));   in txg_init()
    126  tx->tx_cpu = vmem_zalloc(max_ncpus * sizeof (tx_cpu_t), KM_SLEEP);   in txg_init()
    131  mutex_init(&tx->tx_cpu[c].tc_lock, NULL, MUTEX_DEFAULT, NULL);   in txg_init()
    132  mutex_init(&tx->tx_cpu[c].tc_open_lock, NULL, MUTEX_NOLOCKDEP,   in txg_init()
    135  cv_init(&tx->tx_cpu[c].tc_cv[i], NULL, CV_DEFAULT,   in txg_init()
    137  list_create(&tx->tx_cpu[c].tc_callbacks[i],   in txg_init()
    143  mutex_init(&tx->tx_sync_lock, NULL, MUTEX_DEFAULT, NULL);   in txg_init()
    145  cv_init(&tx->tx_sync_more_cv, NULL, CV_DEFAULT, NULL);   in txg_init()
    146  cv_init(&tx …   in txg_init()
    160  tx_state_t *tx = &dp->dp_tx;   txg_fini() local
    198  tx_state_t *tx = &dp->dp_tx;   txg_sync_start() local
    223  txg_thread_enter(tx_state_t *tx, callb_cpr_t *cpr)   txg_thread_enter() argument
    230  txg_thread_exit(tx_state_t *tx, callb_cpr_t *cpr, kthread_t **tpp)   txg_thread_exit() argument
    241  txg_thread_wait(tx_state_t *tx, callb_cpr_t *cpr, kcondvar_t *cv, clock_t time)   txg_thread_wait() argument
    261  tx_state_t *tx = &dp->dp_tx;   txg_sync_stop() local
    319  tx_state_t *tx = &dp->dp_tx;   txg_hold_open() local
    388  tx_state_t *tx = &dp->dp_tx;   txg_quiesce() local
    451  tx_state_t *tx = &dp->dp_tx;   txg_dispatch_callbacks() local
    495  tx_state_t *tx = &dp->dp_tx;   txg_wait_callbacks() local
    504  tx_state_t *tx = &dp->dp_tx;   txg_is_quiescing() local
    512  tx_state_t *tx = &dp->dp_tx;   txg_has_quiesced_to_sync() local
    522  tx_state_t *tx = &dp->dp_tx;   txg_sync_thread() local
    612  tx_state_t *tx = &dp->dp_tx;   txg_quiesce_thread() local
    667  tx_state_t *tx = &dp->dp_tx;   txg_delay() local
    695  tx_state_t *tx = &dp->dp_tx;   txg_wait_synced_impl() local
    756  tx_state_t *tx = &dp->dp_tx;   txg_wait_open() local
    793  tx_state_t *tx = &dp->dp_tx;   txg_kick() local
    811  tx_state_t *tx = &dp->dp_tx;   txg_stalled() local
    818  tx_state_t *tx = &dp->dp_tx;   txg_sync_waiting() local
    [all …]

dmu_tx.c
    43  typedef void (*dmu_tx_hold_func_t)(dmu_tx_t *tx, struct dnode *dn,
    67  dmu_tx_t *tx = kmem_zalloc(sizeof (dmu_tx_t), KM_SLEEP);   in dmu_tx_create_dd() local
    68  tx->tx_dir = dd;   in dmu_tx_create_dd()
    70  tx->tx_pool = dd->dd_pool;   in dmu_tx_create_dd()
    71  list_create(&tx->tx_holds, sizeof (dmu_tx_hold_t),   in dmu_tx_create_dd()
    73  list_create(&tx->tx_callbacks, sizeof (dmu_tx_callback_t),   in dmu_tx_create_dd()
    75  tx->tx_start = gethrtime();   in dmu_tx_create_dd()
    76  return (tx);   in dmu_tx_create_dd()
    82  dmu_tx_t *tx = dmu_tx_create_dd(os->os_dsl_dataset->ds_dir);   in dmu_tx_create() local
    83  tx->tx_objset = os;   in dmu_tx_create()
    [all …]

dsl_destroy.c
    91  dsl_destroy_snapshot_check(void *arg, dmu_tx_t *tx)   in dsl_destroy_snapshot_check() argument
    97  dsl_pool_t *dp = dmu_tx_pool(tx);   in dsl_destroy_snapshot_check()
    128  process_old_cb(void *arg, const blkptr_t *bp, boolean_t bp_freed, dmu_tx_t *tx)   in process_old_cb() argument
    137  dsl_deadlist_insert(&poa->ds->ds_deadlist, bp, bp_freed, tx);   in process_old_cb()
    148  dsl_free_sync(poa->pio, dp, tx->tx_txg, bp);   in process_old_cb()
    155  dsl_dataset_t *ds_next, boolean_t after_branch_point, dmu_tx_t *tx)   in process_old_deadlist() argument
    170  process_old_cb, &poa, tx));   in process_old_deadlist()
    176  -poa.used, -poa.comp, -poa.uncomp, tx);   in process_old_deadlist()
    205  dsl_dir_remove_clones_key_impl(dsl_dir_t *dd, uint64_t mintxg, dmu_tx_t *tx,   in dsl_dir_remove_clones_key_impl() argument
    231  mintxg, tx);   in dsl_dir_remove_clones_key_impl()
    [all …]

dmu_object.c
    49  int dnodesize, dnode_t **allocated_dnode, const void *tag, dmu_tx_t *tx)   in dmu_object_alloc_impl() argument
    193  bonuslen, dn_slots, tx);   in dmu_object_alloc_impl()
    195  dmu_tx_add_new_object(tx, dn);   in dmu_object_alloc_impl()
    227  dmu_object_type_t bonustype, int bonuslen, dmu_tx_t *tx)   in dmu_object_alloc() argument
    230  bonuslen, 0, NULL, NULL, tx);   in dmu_object_alloc()
    236  dmu_tx_t *tx)   in dmu_object_alloc_ibs() argument
    239  bonustype, bonuslen, 0, NULL, NULL, tx);   in dmu_object_alloc_ibs()
    244  dmu_object_type_t bonustype, int bonuslen, int dnodesize, dmu_tx_t *tx)   in dmu_object_alloc_dnsize() argument
    247  bonuslen, dnodesize, NULL, NULL, tx));   in dmu_object_alloc_dnsize()
    258  int dnodesize, dnode_t **allocated_dnode, const void *tag, dmu_tx_t *tx)   in dmu_object_alloc_hold() argument
    [all …]

dsl_dataset.c
    104  uint64_t obj, dmu_tx_t *tx);
    106  dmu_tx_t *tx);
    137  dsl_dataset_block_born(dsl_dataset_t *ds, const blkptr_t *bp, dmu_tx_t *tx)   in dsl_dataset_block_born() argument
    139  spa_t *spa = dmu_tx_pool(tx)->dp_spa;   in dsl_dataset_block_born()
    148  ASSERT(dmu_tx_is_syncing(tx));   in dsl_dataset_block_born()
    155  dsl_pool_mos_diduse_space(tx->tx_pool,   in dsl_dataset_block_born()
    162  dmu_buf_will_dirty(ds->ds_dbuf, tx);   in dsl_dataset_block_born()
    206  DD_USED_REFRSRV, DD_USED_HEAD, tx);   in dsl_dataset_block_born()
    218  uint64_t size, uint64_t birth, dmu_tx_t *tx)   in dsl_dataset_block_remapped() argument
    222  ASSERT(dmu_tx_is_syncing(tx));   in dsl_dataset_block_remapped()
    [all …]

dsl_pool.c
    447  dsl_pool_create_obsolete_bpobj(dsl_pool_t *dp, dmu_tx_t *tx)   in dsl_pool_create_obsolete_bpobj() argument
    456  obj = bpobj_alloc(dp->dp_meta_objset, SPA_OLD_MAXBLOCKSIZE, tx);   in dsl_pool_create_obsolete_bpobj()
    459  DMU_POOL_OBSOLETE_BPOBJ, sizeof (uint64_t), 1, &obj, tx));   in dsl_pool_create_obsolete_bpobj()
    460  spa_feature_incr(dp->dp_spa, SPA_FEATURE_OBSOLETE_COUNTS, tx);   in dsl_pool_create_obsolete_bpobj()
    464  dsl_pool_destroy_obsolete_bpobj(dsl_pool_t *dp, dmu_tx_t *tx)   in dsl_pool_destroy_obsolete_bpobj() argument
    466  spa_feature_decr(dp->dp_spa, SPA_FEATURE_OBSOLETE_COUNTS, tx);   in dsl_pool_destroy_obsolete_bpobj()
    469  DMU_POOL_OBSOLETE_BPOBJ, tx));   in dsl_pool_destroy_obsolete_bpobj()
    471  dp->dp_obsolete_bpobj.bpo_object, tx);   in dsl_pool_destroy_obsolete_bpobj()
    481  dmu_tx_t *tx = dmu_tx_create_assigned(dp, txg);   in dsl_pool_create() local
    494  NULL, &dp->dp_meta_rootbp, DMU_OST_META, tx);   in dsl_pool_create()
    [all …]

spa_history.c
    87  spa_history_create_obj(spa_t *spa, dmu_tx_t *tx)   in spa_history_create_obj() argument
    96  sizeof (spa_history_phys_t), tx);   in spa_history_create_obj()
    100  &spa->spa_history, tx));   in spa_history_create_obj()
    106  dmu_buf_will_dirty(dbp, tx);   in spa_history_create_obj()
    152  dmu_tx_t *tx)   in spa_history_write() argument
    171  dmu_write(mos, spa->spa_history, phys_eof, firstwrite, buf, tx);   in spa_history_write()
    177  len, (char *)buf + firstwrite, tx);   in spa_history_write()
    252  spa_history_log_sync(void *arg, dmu_tx_t *tx)   in spa_history_log_sync() argument
    255  spa_t *spa = dmu_tx_pool(tx)->dp_spa;   in spa_history_log_sync()
    270  spa_history_create_obj(spa, tx);   in spa_history_log_sync()
    [all …]

dsl_synctask.c
    36  dsl_null_checkfunc(void *arg, dmu_tx_t *tx)   in dsl_null_checkfunc() argument
    38  (void) arg, (void) tx;   in dsl_null_checkfunc()
    48  dmu_tx_t *tx;   in dsl_sync_task_common() local
    59  tx = dmu_tx_create_dd(dp->dp_mos_dir);   in dsl_sync_task_common()
    60  VERIFY0(dmu_tx_assign(tx, TXG_WAIT));   in dsl_sync_task_common()
    63  dst.dst_txg = dmu_tx_get_txg(tx);   in dsl_sync_task_common()
    73  err = dst.dst_checkfunc(arg, tx);   in dsl_sync_task_common()
    77  dmu_tx_commit(tx);   in dsl_sync_task_common()
    86  dmu_tx_commit(tx);   in dsl_sync_task_common()
    90  sigfunc(arg, tx);   in dsl_sync_task_common()
    [all …]

zfeature.c
    305  dmu_tx_t *tx)   in feature_sync() argument
    311  sizeof (uint64_t), 1, &refcount, tx));   in feature_sync()
    330  spa_activate_mos_feature(spa, feature->fi_guid, tx);   in feature_sync()
    338  feature_enable_sync(spa_t *spa, zfeature_info_t *feature, dmu_tx_t *tx)   in feature_enable_sync() argument
    356  spa_feature_enable(spa, feature->fi_depends[i], tx);   in feature_enable_sync()
    360  feature->fi_desc, tx));   in feature_enable_sync()
    362  feature_sync(spa, feature, initial_refcount, tx);   in feature_enable_sync()
    365  uint64_t enabling_txg = dmu_tx_get_txg(tx);   in feature_enable_sync()
    371  DMU_POOL_FEATURE_ENABLED_TXG, tx);   in feature_enable_sync()
    373  spa_feature_incr(spa, SPA_FEATURE_ENABLED_TXG, tx);   in feature_enable_sync()
    [all …]

dsl_deadlist.c
    378  dsl_deadlist_alloc(objset_t *os, dmu_tx_t *tx)   in dsl_deadlist_alloc() argument
    381  return (bpobj_alloc(os, SPA_OLD_MAXBLOCKSIZE, tx));   in dsl_deadlist_alloc()
    383  sizeof (dsl_deadlist_phys_t), tx));   in dsl_deadlist_alloc()
    387  dsl_deadlist_free(objset_t *os, uint64_t dlobj, dmu_tx_t *tx)   in dsl_deadlist_free() argument
    396  bpobj_free(os, dlobj, tx);   in dsl_deadlist_free()
    406  bpobj_decr_empty(os, tx);   in dsl_deadlist_free()
    408  bpobj_free(os, obj, tx);   in dsl_deadlist_free()
    413  VERIFY0(dmu_object_free(os, dlobj, tx));   in dsl_deadlist_free()
    418  const blkptr_t *bp, boolean_t bp_freed, dmu_tx_t *tx)   in dle_enqueue() argument
    423  uint64_t obj = bpobj_alloc(dl->dl_os, SPA_OLD_MAXBLOCKSIZE, tx);   in dle_enqueue()
    [all …]

/freebsd/sys/dev/ioat/

ioat_test.c
    83  ioat_test_transaction_destroy(struct test_transaction *tx)   in ioat_test_transaction_destroy() argument
    88  if (tx->buf[i] != NULL) {   in ioat_test_transaction_destroy()
    89  free(tx->buf[i], M_IOAT_TEST);   in ioat_test_transaction_destroy()
    90  tx->buf[i] = NULL;   in ioat_test_transaction_destroy()
    94  free(tx, M_IOAT_TEST);   in ioat_test_transaction_destroy()
    101  struct test_transaction *tx;   in ioat_test_transaction_destroy()
    104  tx = malloc(sizeof(*tx), M_IOAT_TEST, M_NOWAIT | M_ZERO);   in ioat_test_transaction_create()
    105  if (tx == NULL)   in ioat_test_transaction_create()
    108  tx …   in ioat_test_transaction_create()
    107  struct test_transaction *tx;   ioat_test_transaction_create() local
    146  ioat_compare_ok(struct test_transaction *tx)   ioat_compare_ok() argument
    180  struct test_transaction *tx;   ioat_dma_test_callback() local
    206  struct test_transaction *tx;   ioat_test_prealloc_memory() local
    237  struct test_transaction *tx, *s;   ioat_test_release_memory() local
    251  struct test_transaction *tx;   ioat_test_submit_1_tx() local
    [all …]

/freebsd/sys/contrib/openzfs/include/sys/

zap.h
    124  dmu_object_type_t bonustype, int bonuslen, dmu_tx_t *tx);
    126  dmu_object_type_t bonustype, int bonuslen, int dnodesize, dmu_tx_t *tx);
    128  dmu_object_type_t bonustype, int bonuslen, dmu_tx_t *tx);
    131  int dnodesize, dmu_tx_t *tx);
    134  dmu_object_type_t bonustype, int bonuslen, dmu_tx_t *tx);
    138  int dnodesize, dmu_tx_t *tx);
    142  dnode_t **allocated_dnode, const void *tag, dmu_tx_t *tx);
    145  uint64_t parent_obj, const char *name, dmu_tx_t *tx);
    147  uint64_t parent_obj, const char *name, int dnodesize, dmu_tx_t *tx);
    153  dmu_tx_t *tx);
    [all …]

dmu_tx.h
    145  int dmu_tx_assign(dmu_tx_t *tx, uint64_t txg_how);
    146  void dmu_tx_commit(dmu_tx_t *tx);
    147  void dmu_tx_abort(dmu_tx_t *tx);
    148  uint64_t dmu_tx_get_txg(dmu_tx_t *tx);
    149  struct dsl_pool *dmu_tx_pool(dmu_tx_t *tx);
    150  void dmu_tx_wait(dmu_tx_t *tx);
    161  int dmu_tx_is_syncing(dmu_tx_t *tx);
    162  int dmu_tx_private_ok(dmu_tx_t *tx);
    163  void dmu_tx_add_new_object(dmu_tx_t *tx, dnode_t *dn);
    164  void dmu_tx_dirty_buf(dmu_tx_t *tx, struct dmu_buf_impl *db);
    [all …]
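The dmu_tx.h hits list the consumer-facing transaction calls. As a hedged illustration of how they compose in open context (mirroring the zfs_dir.c excerpts later in this listing; the helper name and the simplified error handling are illustrative assumptions), a typical update looks roughly like this:

    /*
     * Sketch of the usual open-context DMU transaction lifecycle. The
     * function and its arguments are hypothetical; the dmu_tx_* calls are
     * the ones declared above and used in the zfs_dir.c hits below.
     */
    static int
    update_znode_sketch(objset_t *os, znode_t *zp)
    {
            dmu_tx_t *tx;
            int error;

            tx = dmu_tx_create(os);                    /* start building the tx */
            dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE); /* declare what it will dirty */

            error = dmu_tx_assign(tx, TXG_WAIT);       /* bind it to an open txg */
            if (error != 0) {
                    dmu_tx_abort(tx);                  /* unassigned tx is aborted, not committed */
                    return (error);
            }

            /* ... sa_update()/dmu_write()-style changes covered by the hold ... */

            dmu_tx_commit(tx);                         /* the change ships with this txg */
            return (0);
    }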

dmu.h
    326  cred_t *cr, dmu_tx_t *tx);
    412  int blocksize, dmu_object_type_t bonus_type, int bonus_len, dmu_tx_t *tx);
    415  dmu_object_type_t bonustype, int bonuslen, dmu_tx_t *tx);
    418  int dnodesize, dmu_tx_t *tx);
    422  dmu_tx_t *tx);
    424  int blocksize, dmu_object_type_t bonus_type, int bonus_len, dmu_tx_t *tx);
    427  int dnodesize, dmu_tx_t *tx);
    432  int bonuslen, int dnodesize, boolean_t keep_spill, dmu_tx_t *tx);
    433  int dmu_object_rm_spill(objset_t *os, uint64_t object, dmu_tx_t *tx);
    451  int dmu_object_free(objset_t *os, uint64_t object, dmu_tx_t *tx);
    [all …]

/freebsd/sys/contrib/openzfs/module/os/freebsd/zfs/

zfs_dir.c
    270  zfs_unlinked_add(znode_t *zp, dmu_tx_t *tx)   in zfs_unlinked_add() argument
    277  VERIFY0(zap_add_int(zfsvfs->z_os, zfsvfs->z_unlinkedobj, zp->z_id, tx));   in zfs_unlinked_add()
    293  dmu_tx_t *tx;   in zfs_unlinked_drain() local
    337  tx = dmu_tx_create(zfsvfs->z_os);   in zfs_unlinked_drain()
    338  dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);   in zfs_unlinked_drain()
    339  error = dmu_tx_assign(tx, TXG_WAIT);   in zfs_unlinked_drain()
    341  dmu_tx_abort(tx);   in zfs_unlinked_drain()
    347  &zp->z_links, sizeof (zp->z_links), tx));   in zfs_unlinked_drain()
    348  dmu_tx_commit(tx);   in zfs_unlinked_drain()
    375  dmu_tx_t *tx;   in zfs_purgedir() local
    [all …]

/freebsd/tools/tools/iwn/iwnstats/

main.c
    147  iwn_stats_tx_print(struct iwn_tx_stats *tx)   in iwn_stats_tx_print() argument
    152  le32toh(tx->preamble),   in iwn_stats_tx_print()
    153  le32toh(tx->rx_detected),   in iwn_stats_tx_print()
    154  le32toh(tx->bt_defer),   in iwn_stats_tx_print()
    155  le32toh(tx->bt_kill),   in iwn_stats_tx_print()
    156  le32toh(tx->short_len));   in iwn_stats_tx_print()
    160  le32toh(tx->cts_timeout),   in iwn_stats_tx_print()
    161  le32toh(tx->ack_timeout),   in iwn_stats_tx_print()
    162  le32toh(tx->exp_ack),   in iwn_stats_tx_print()
    163  le32toh(tx->ack),   in iwn_stats_tx_print()
    [all …]

/freebsd/sys/contrib/openzfs/module/os/linux/zfs/

zfs_vnops_os.c
    609  dmu_tx_t *tx;   in zfs_create() local
    722  tx = dmu_tx_create(os);   in zfs_create()
    724  dmu_tx_hold_sa_create(tx, acl_ids.z_aclp->z_acl_bytes +   in zfs_create()
    729  zfs_fuid_txhold(zfsvfs, tx);   in zfs_create()
    730  dmu_tx_hold_zap(tx, dzp->z_id, TRUE, name);   in zfs_create()
    731  dmu_tx_hold_sa(tx, dzp->z_sa_hdl, B_FALSE);   in zfs_create()
    734  dmu_tx_hold_write(tx, DMU_NEW_OBJECT,   in zfs_create()
    738  error = dmu_tx_assign(tx,   in zfs_create()
    744  dmu_tx_wait(tx);   in zfs_create()
    745  dmu_tx_abort(tx);   in zfs_create()
    [all …]
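The zfs_create() hits show the other common assign pattern: a non-blocking dmu_tx_assign() followed by dmu_tx_wait() and dmu_tx_abort() when the write throttle pushes back. A condensed, hedged sketch of that retry loop follows; the exact assign flags are truncated in the excerpt, so TXG_NOWAIT here is an assumption, and the helper is hypothetical.

    /*
     * Sketch of the assign/wait/retry loop around zfs_create()-style holds.
     * ERESTART signals throttling; anything else is a hard failure (e.g. ENOSPC).
     */
    static int
    create_entry_sketch(objset_t *os, znode_t *dzp, const char *name)
    {
            dmu_tx_t *tx;
            int error;

    top:
            tx = dmu_tx_create(os);
            dmu_tx_hold_zap(tx, dzp->z_id, TRUE, name);  /* new directory entry */
            dmu_tx_hold_sa(tx, dzp->z_sa_hdl, B_FALSE);  /* parent attributes */

            error = dmu_tx_assign(tx, TXG_NOWAIT);
            if (error != 0) {
                    if (error == ERESTART) {
                            dmu_tx_wait(tx);   /* sleep until the throttle eases */
                            dmu_tx_abort(tx);  /* then rebuild the tx from scratch */
                            goto top;
                    }
                    dmu_tx_abort(tx);
                    return (error);
            }

            /* ... allocate and link the new object under this tx ... */

            dmu_tx_commit(tx);
            return (0);
    }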

zfs_dir.c
    460  zfs_unlinked_add(znode_t *zp, dmu_tx_t *tx)   in zfs_unlinked_add() argument
    468  zap_add_int(zfsvfs->z_os, zfsvfs->z_unlinkedobj, zp->z_id, tx));   in zfs_unlinked_add()
    598  dmu_tx_t *tx;   in zfs_purgedir() local
    617  tx = dmu_tx_create(zfsvfs->z_os);   in zfs_purgedir()
    618  dmu_tx_hold_sa(tx, dzp->z_sa_hdl, B_FALSE);   in zfs_purgedir()
    619  dmu_tx_hold_zap(tx, dzp->z_id, FALSE, zap->za_name);   in zfs_purgedir()
    620  dmu_tx_hold_sa(tx, xzp->z_sa_hdl, B_FALSE);   in zfs_purgedir()
    621  dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL);   in zfs_purgedir()
    623  zfs_sa_upgrade_txholds(tx, xzp);   in zfs_purgedir()
    624  dmu_tx_mark_netfree(tx);   in zfs_purgedir()
    [all …]

/freebsd/sys/dev/mlx5/mlx5_accel/

mlx5_ipsec_fs.c
    648  static int ipsec_counter_rule_tx(struct mlx5_core_dev *mdev, struct mlx5e_ipsec_tx *tx)   in ipsec_counter_rule_tx() argument
    660  dest.counter_id = mlx5_fc_id(tx->fc->cnt);   in ipsec_counter_rule_tx()
    661  fte = mlx5_add_flow_rules(tx->ft.status, NULL, &flow_act, &dest, 1);   in ipsec_counter_rule_tx()
    668  tx->status.rule = fte;   in ipsec_counter_rule_tx()
    675  static void tx_destroy_roce(struct mlx5e_ipsec_tx *tx) {   in tx_destroy_roce() argument
    676  if (!tx->roce.ft)   in tx_destroy_roce()
    679  mlx5_del_flow_rules(&tx->roce.rule);   in tx_destroy_roce()
    680  mlx5_destroy_flow_group(tx->roce.g);   in tx_destroy_roce()
    681  mlx5_destroy_flow_table(tx->roce.ft);   in tx_destroy_roce()
    682  tx->roce.ft = NULL;   in tx_destroy_roce()
    [all …]

/freebsd/tools/tools/ath/athalq/

ar5416_ds.c
    50  MF(txs.u.tx.status[9], AR_TxDone),   in ar5416_decode_txstatus()
    51  MF(txs.u.tx.status[1], AR_FrmXmitOK),   in ar5416_decode_txstatus()
    52  MF(txs.u.tx.status[1], AR_Filtered),   in ar5416_decode_txstatus()
    53  txs.u.tx.status[2]);   in ar5416_decode_txstatus()
    57  MS(txs.u.tx.status[0], AR_TxRSSIAnt00),   in ar5416_decode_txstatus()
    58  MS(txs.u.tx.status[0], AR_TxRSSIAnt01),   in ar5416_decode_txstatus()
    59  MS(txs.u.tx.status[0], AR_TxRSSIAnt02));   in ar5416_decode_txstatus()
    63  MS(txs.u.tx.status[5], AR_TxRSSIAnt10),   in ar5416_decode_txstatus()
    64  MS(txs.u.tx.status[5], AR_TxRSSIAnt11),   in ar5416_decode_txstatus()
    65  MS(txs.u.tx.status[5], AR_TxRSSIAnt12),   in ar5416_decode_txstatus()
    [all …]

ar5416_ds_tdma.c
    47  if (MF(txs.u.tx.status[9], AR_TxDone) == 0)   in ar5416_decode_txstatus()
    53  MF(txs.u.tx.status[9], AR_TxDone),   in ar5416_decode_txstatus()
    54  txs.u.tx.status[2],   in ar5416_decode_txstatus()
    55  txs.u.tx.status[2] - tx_tsf);   in ar5416_decode_txstatus()
    57  tx_tsf = txs.u.tx.status[2];   in ar5416_decode_txstatus()
    62  MS(txs.u.tx.status[0], AR_TxRSSIAnt00),   in ar5416_decode_txstatus()
    63  MS(txs.u.tx.status[0], AR_TxRSSIAnt01),   in ar5416_decode_txstatus()
    64  MS(txs.u.tx.status[0], AR_TxRSSIAnt02));   in ar5416_decode_txstatus()
    66  MF(txs.u.tx.status[0], AR_TxBaStatus));   in ar5416_decode_txstatus()
    70  MF(txs.u.tx.status[1], AR_FrmXmitOK),   in ar5416_decode_txstatus()
    [all …]

/freebsd/contrib/kyua/store/

write_transaction_test.cpp
    78  store::write_transaction tx = backend.start_write();   in do_put_result_ok_test() local
    83  tx.put_result(result, 312, start_time, end_time);   in do_put_result_ok_test()
    84  tx.commit();   in do_put_result_ok_test()
    114  store::write_transaction tx = backend.start_write();   in ATF_TEST_CASE_BODY() local
    117  tx.commit();   in ATF_TEST_CASE_BODY()
    135  store::write_transaction tx = backend.start_write();   in ATF_TEST_CASE_BODY() local
    136  tx.put_context(context);   in ATF_TEST_CASE_BODY()
    141  ATF_REQUIRE_THROW(store::error, tx.commit());   in ATF_TEST_CASE_BODY()
    146  store::write_transaction tx = backend.start_write();   in ATF_TEST_CASE_BODY() local
    147  tx.put_context(context);   in ATF_TEST_CASE_BODY()
    [all …]

read_transaction_test.cpp
    69  store::read_transaction tx = backend.start_read();   in ATF_TEST_CASE_BODY() local
    70  ATF_REQUIRE_THROW_RE(store::error, "context: no data", tx.get_context());   in ATF_TEST_CASE_BODY()
    95  store::read_transaction tx = backend.start_read();   in ATF_TEST_CASE_BODY() local
    97  tx.get_context());   in ATF_TEST_CASE_BODY()
    125  store::read_transaction tx = backend.start_read();   in ATF_TEST_CASE_BODY() local
    127  tx.get_context());   in ATF_TEST_CASE_BODY()
    146  store::read_transaction tx = backend.start_read();   in ATF_TEST_CASE_BODY() local
    148  tx.get_context());   in ATF_TEST_CASE_BODY()
    164  store::read_transaction tx = backend.start_read();   in ATF_TEST_CASE_BODY() local
    165  store::results_iterator iter = tx.get_results();   in ATF_TEST_CASE_BODY()
    [all …]