| /linux/drivers/gpu/drm/i915/gt/ |
| selftest_timeline.c |
    27  static struct page *hwsp_page(struct intel_timeline *tl)  in hwsp_page() argument
    29          struct drm_i915_gem_object *obj = tl->hwsp_ggtt->obj;  in hwsp_page()
    35  static unsigned long hwsp_cacheline(struct intel_timeline *tl)  in hwsp_cacheline() argument
    37          unsigned long address = (unsigned long)page_address(hwsp_page(tl));  in hwsp_cacheline()
    39          return (address + offset_in_page(tl->hwsp_offset)) / TIMELINE_SEQNO_BYTES;  in hwsp_cacheline()
    42  static int selftest_tl_pin(struct intel_timeline *tl)  in selftest_tl_pin() argument
    49          err = i915_gem_object_lock(tl->hwsp_ggtt->obj, &ww);  in selftest_tl_pin()
    51          err = intel_timeline_pin(tl, &ww);  in selftest_tl_pin()
    79                              struct intel_timeline *tl)  in __mock_hwsp_record() argument
    81          tl = xchg(&state->history[idx], tl);  in __mock_hwsp_record()
    [all …]
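Line 39 above turns a page's kernel address plus the in-page offset of the status word into a global slot index with a mask and a division. A userspace sketch of the same arithmetic (PAGE_SIZE and SEQNO_BYTES stand in for the kernel's page size and TIMELINE_SEQNO_BYTES; seqno_slot is a hypothetical name):

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE   4096u
    #define SEQNO_BYTES 8u     /* stand-in for TIMELINE_SEQNO_BYTES */

    /* Global index of the seqno slot, in SEQNO_BYTES units. */
    static unsigned long seqno_slot(uintptr_t page_base, unsigned int offset)
    {
        return (page_base + (offset & (PAGE_SIZE - 1))) / SEQNO_BYTES;
    }

    int main(void)
    {
        /* Two offsets in the same page land in distinct slots. */
        printf("%lu\n", seqno_slot(0x7f0000000000ull, 0x10));
        printf("%lu\n", seqno_slot(0x7f0000000000ull, 0x18));
        return 0;
    }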
|
| intel_context.h |
    251         struct intel_timeline *tl = ce->timeline;  in intel_context_timeline_lock() local
    255         err = mutex_lock_interruptible_nested(&tl->mutex, 0);  in intel_context_timeline_lock()
    257         err = mutex_lock_interruptible_nested(&tl->mutex,  in intel_context_timeline_lock()
    260         err = mutex_lock_interruptible(&tl->mutex);  in intel_context_timeline_lock()
    264         return tl;  in intel_context_timeline_lock()
    267 static inline void intel_context_timeline_unlock(struct intel_timeline *tl)  in intel_context_timeline_unlock() argument
    268         __releases(&tl->mutex)  in intel_context_timeline_unlock()
    270         mutex_unlock(&tl->mutex);  in intel_context_timeline_unlock()
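intel_context_timeline_lock() above returns the timeline with its mutex held, or an ERR_PTR when the interruptible lock is broken by a signal, so callers never unlock a mutex they failed to take. A minimal kernel-style sketch of that lock-or-ERR_PTR idiom (struct timeline and the function names are illustrative; the mutex and ERR_PTR helpers are real kernel API):

    #include <linux/mutex.h>
    #include <linux/err.h>

    struct timeline {
            struct mutex mutex;
    };

    /* Return @tl with its mutex held, or ERR_PTR(-EINTR) on signal. */
    static struct timeline *timeline_lock(struct timeline *tl)
    {
            int err = mutex_lock_interruptible(&tl->mutex);

            if (err)
                    return ERR_PTR(err);    /* caller must not unlock */
            return tl;
    }

    static void timeline_unlock(struct timeline *tl)
    {
            mutex_unlock(&tl->mutex);
    }

Returning the locked object rather than a bare error code lets an IS_ERR() check and the later unlock share one handle, which is why the snippet returns tl on success.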
|
| mock_engine.c |
    16  static int mock_timeline_pin(struct intel_timeline *tl)  in mock_timeline_pin() argument
    20          if (WARN_ON(!i915_gem_object_trylock(tl->hwsp_ggtt->obj, NULL)))  in mock_timeline_pin()
    23          err = intel_timeline_pin_map(tl);  in mock_timeline_pin()
    24          i915_gem_object_unlock(tl->hwsp_ggtt->obj);  in mock_timeline_pin()
    28          atomic_inc(&tl->pin_count);  in mock_timeline_pin()
    32  static void mock_timeline_unpin(struct intel_timeline *tl)  in mock_timeline_unpin() argument
    34          GEM_BUG_ON(!atomic_read(&tl->pin_count));  in mock_timeline_unpin()
    35          atomic_dec(&tl->pin_count);  in mock_timeline_unpin()
|
| intel_ring.c |
    194                         struct intel_timeline *tl,  in wait_for_space() argument
    203         GEM_BUG_ON(list_empty(&tl->requests));  in wait_for_space()
    204         list_for_each_entry(target, &tl->requests, link) {  in wait_for_space()
    214         if (GEM_WARN_ON(&target->link == &tl->requests))  in wait_for_space()
|
| /linux/fs/smb/client/ |
| dfs_cache.h |
    55  dfs_cache_get_next_tgt(struct dfs_cache_tgt_list *tl,  in dfs_cache_get_next_tgt() argument
    58          if (!tl || !tl->tl_numtgts || list_empty(&tl->tl_list) ||  in dfs_cache_get_next_tgt()
    59              !it || list_is_last(&it->it_list, &tl->tl_list))  in dfs_cache_get_next_tgt()
    65  dfs_cache_get_tgt_iterator(struct dfs_cache_tgt_list *tl)  in dfs_cache_get_tgt_iterator() argument
    67          if (!tl)  in dfs_cache_get_tgt_iterator()
    69          return list_first_entry_or_null(&tl->tl_list,  in dfs_cache_get_tgt_iterator()
    74  static inline void dfs_cache_free_tgts(struct dfs_cache_tgt_list *tl)  in dfs_cache_free_tgts() argument
    78          if (!tl || !tl->tl_numtgts || list_empty(&tl->tl_list))  in dfs_cache_free_tgts()
    80          list_for_each_entry_safe(it, nit, &tl->tl_list, it_list) {  in dfs_cache_free_tgts()
    85          tl->tl_numtgts = 0;  in dfs_cache_free_tgts()
    [all …]
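The three inline helpers above form a NULL-terminated forward iterator over a kernel list: first-or-NULL, next-unless-last, and a safe teardown walk. A hedged sketch of the same trio for an illustrative target list (struct tgt and the helper names are invented; the list.h and slab.h calls are real):

    #include <linux/list.h>
    #include <linux/slab.h>

    struct tgt {                     /* stand-in for dfs_cache_tgt_iterator */
            struct list_head link;
    };

    struct tgt_list {                /* stand-in for dfs_cache_tgt_list */
            int ntgts;
            struct list_head head;
    };

    static struct tgt *tgt_first(struct tgt_list *tl)
    {
            return tl ? list_first_entry_or_null(&tl->head, struct tgt, link) : NULL;
    }

    static struct tgt *tgt_next(struct tgt_list *tl, struct tgt *it)
    {
            if (!tl || !it || list_is_last(&it->link, &tl->head))
                    return NULL;
            return list_next_entry(it, link);
    }

    static void tgt_free_all(struct tgt_list *tl)
    {
            struct tgt *it, *n;

            if (!tl)
                    return;
            list_for_each_entry_safe(it, n, &tl->head, link) {
                    list_del(&it->link);
                    kfree(it);
            }
            tl->ntgts = 0;
    }

The _safe variant is required in the teardown walk because each node is freed while the cursor still points at it.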
|
| dfs.h |
    24          struct dfs_cache_tgt_list tl;  member
    42  #define ref_walk_tl(w) (&ref_walk_cur(w)->tl)
    67                  dfs_cache_free_tgts(&ref->tl);  in __ref_walk_free()
    106         tit = dfs_cache_get_tgt_iterator(&ref->tl);  in ref_walk_next_tgt()
    108                 tit = dfs_cache_get_next_tgt(&ref->tl, ref->tit);  in ref_walk_next_tgt()
    165                            struct dfs_cache_tgt_list *tl)  in dfs_get_referral() argument
    172                                cifs_remap(cifs_sb), path, NULL, tl);  in dfs_get_referral()
|
| dfs.c |
    340                                  struct dfs_cache_tgt_list *tl)  in tree_connect_dfs_target() argument
    350         for (tit = dfs_cache_get_tgt_iterator(tl);  in tree_connect_dfs_target()
    351              tit; tit = dfs_cache_get_next_tgt(tl, tit)) {  in tree_connect_dfs_target()
    382         dfs_cache_free_tgts(tl);  in tree_connect_dfs_target()
    391         DFS_CACHE_TGT_LIST(tl);  in cifs_tree_connect()
    439             dfs_cache_noreq_find(server->leaf_fullpath + 1, &ref, &tl)) {  in cifs_tree_connect()
    446                                            &tl);  in cifs_tree_connect()
|
| /linux/drivers/net/ethernet/netronome/nfp/ |
| nfp_net_debugdump.c |
    63          struct nfp_dump_tl_hdr tl;  member
    69          struct nfp_dump_tl_hdr tl;  member
    75          struct nfp_dump_tl_hdr tl;  member
    83          struct nfp_dump_tl_hdr tl;  member
    92          struct nfp_dump_tl_hdr tl;  member
    97          struct nfp_dump_tl_hdr tl;  member
    117 typedef int (*nfp_tlv_visit)(struct nfp_pf *pf, struct nfp_dump_tl *tl,
    125         struct nfp_dump_tl *tl;  in nfp_traverse_tlvs() local
    130         while (remaining >= sizeof(*tl)) {  in nfp_traverse_tlvs()
    131                 tl = p;  in nfp_traverse_tlvs()
    [all …]
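nfp_traverse_tlvs() above walks a packed type/length stream, never trusting a header or payload length that would run past the buffer. A self-contained userspace analog of that loop (header layout, names, and the visitor signature are illustrative, not the NFP wire format):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct tl_hdr {
        uint32_t type;
        uint32_t length;    /* payload bytes following this header */
    };

    typedef int (*tlv_visit)(const struct tl_hdr *tl, const uint8_t *payload);

    static int traverse_tlvs(const uint8_t *p, size_t remaining, tlv_visit cb)
    {
        while (remaining >= sizeof(struct tl_hdr)) {
            struct tl_hdr tl;

            memcpy(&tl, p, sizeof(tl));          /* avoids unaligned access */
            if (tl.length > remaining - sizeof(tl))
                return -1;                       /* truncated TLV */
            if (cb(&tl, p + sizeof(tl)))
                return -1;
            p += sizeof(tl) + tl.length;
            remaining -= sizeof(tl) + tl.length;
        }
        return 0;
    }

    static int print_tlv(const struct tl_hdr *tl, const uint8_t *payload)
    {
        (void)payload;
        printf("type=%u len=%u\n", tl->type, tl->length);
        return 0;
    }

    int main(void)
    {
        uint8_t buf[sizeof(struct tl_hdr) + 4];
        struct tl_hdr tl = { .type = 1, .length = 4 };

        memcpy(buf, &tl, sizeof(tl));
        memset(buf + sizeof(tl), 0xab, 4);
        return traverse_tlvs(buf, sizeof(buf), print_tlv);
    }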
|
| /linux/drivers/isdn/mISDN/ |
| fsm.c |
    98          struct FsmTimer *ft = timer_container_of(ft, t, tl);  in FsmExpireTimer()
    114         timer_setup(&ft->tl, FsmExpireTimer, 0);  in mISDN_FsmInitTimer()
    126         timer_delete(&ft->tl);  in mISDN_FsmDelTimer()
    141         if (timer_pending(&ft->tl)) {  in mISDN_FsmAddTimer()
    152         ft->tl.expires = jiffies + (millisec * HZ) / 1000;  in mISDN_FsmAddTimer()
    153         add_timer(&ft->tl);  in mISDN_FsmAddTimer()
    169         if (timer_pending(&ft->tl))  in mISDN_FsmRestartTimer()
    170                 timer_delete(&ft->tl);  in mISDN_FsmRestartTimer()
    173         ft->tl.expires = jiffies + (millisec * HZ) / 1000;  in mISDN_FsmRestartTimer()
    174         add_timer(&ft->tl);  in mISDN_FsmRestartTimer()
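The helpers above follow the standard embedded timer_list pattern: the timer lives inside the owning object, the callback recovers its owner with timer_container_of(), and (re)arming sets an absolute jiffies expiry. A minimal kernel-style sketch (struct my_fsm and its functions are invented; timer_setup(), add_timer(), timer_delete(), timer_container_of() and msecs_to_jiffies() are real API, the last being the modern spelling of the open-coded (millisec * HZ) / 1000 above):

    #include <linux/timer.h>
    #include <linux/jiffies.h>
    #include <linux/printk.h>

    struct my_fsm {
            struct timer_list tl;   /* embedded timer */
            int event;
    };

    static void my_fsm_expire(struct timer_list *t)
    {
            struct my_fsm *fsm = timer_container_of(fsm, t, tl);

            pr_debug("fsm event %d expired\n", fsm->event);  /* softirq context */
    }

    static void my_fsm_start(struct my_fsm *fsm, unsigned int ms)
    {
            timer_setup(&fsm->tl, my_fsm_expire, 0);
            fsm->tl.expires = jiffies + msecs_to_jiffies(ms);
            add_timer(&fsm->tl);
    }

    static void my_fsm_stop(struct my_fsm *fsm)
    {
            timer_delete(&fsm->tl);
    }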
|
| timerdev.c |
    39          struct timer_list tl;  member
    77                  timer_shutdown_sync(&timer->tl);  in mISDN_close()
    158         struct mISDNtimer *timer = timer_container_of(timer, t, tl);  in dev_expire_timer()
    183                 timer_setup(&timer->tl, dev_expire_timer, 0);  in misdn_add_timer()
    189                 timer->tl.expires = jiffies + ((HZ * (u_long)timeout) / 1000);  in misdn_add_timer()
    190                 add_timer(&timer->tl);  in misdn_add_timer()
    207                         timer_shutdown_sync(&timer->tl);  in misdn_del_timer()
|
| dsp_tones.c |
    462         struct dsp *dsp = timer_container_of(dsp, t, tone.tl);  in dsp_tone_timeout()
    481         tone->tl.expires = jiffies + (pat->seq[index] * HZ) / 8000;  in dsp_tone_timeout()
    482         add_timer(&tone->tl);  in dsp_tone_timeout()
    507         if (dsp->features.hfc_loops && timer_pending(&tonet->tl))  in dsp_tone()
    508                 timer_delete(&tonet->tl);  in dsp_tone()
    541         if (timer_pending(&tonet->tl))  in dsp_tone()
    542                 timer_delete(&tonet->tl);  in dsp_tone()
    543         tonet->tl.expires = jiffies + (pat->seq[0] * HZ) / 8000;  in dsp_tone()
    544         add_timer(&tonet->tl);  in dsp_tone()
|
| /linux/fs/ext4/ |
| fast_commit.c |
    695         struct ext4_fc_tl tl;  in ext4_fc_reserve_space() local
    733         tl.fc_tag = cpu_to_le16(EXT4_FC_TAG_PAD);  in ext4_fc_reserve_space()
    734         tl.fc_len = cpu_to_le16(remaining);  in ext4_fc_reserve_space()
    735         memcpy(dst, &tl, EXT4_FC_TAG_BASE_LEN);  in ext4_fc_reserve_space()
    760         struct ext4_fc_tl tl;  in ext4_fc_write_tail() local
    775         tl.fc_tag = cpu_to_le16(EXT4_FC_TAG_TAIL);  in ext4_fc_write_tail()
    776         tl.fc_len = cpu_to_le16(bsize - off + sizeof(struct ext4_fc_tail));  in ext4_fc_write_tail()
    779         memcpy(dst, &tl, EXT4_FC_TAG_BASE_LEN);  in ext4_fc_write_tail()
    803         struct ext4_fc_tl tl;  in ext4_fc_add_tlv() local
    810         tl.fc_tag = cpu_to_le16(tag);  in ext4_fc_add_tlv()
    [all …]
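The fast-commit writer above builds each header on the stack, byte-swaps tag and length with cpu_to_le16(), and memcpy()s exactly EXT4_FC_TAG_BASE_LEN bytes, so the on-disk format is little-endian on any host. A userspace sketch of the same encoding step (struct and names are illustrative, not ext4's layout; htole16() plays the role of cpu_to_le16()):

    #include <endian.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct fc_tl {
        uint16_t tag;
        uint16_t len;
    };

    /* Append one little-endian TLV; returns bytes written, 0 if it won't fit. */
    static size_t fc_add_tlv(uint8_t *dst, size_t space, uint16_t tag,
                             uint16_t len, const void *val)
    {
        struct fc_tl tl = {
            .tag = htole16(tag),
            .len = htole16(len),
        };

        if (space < sizeof(tl) + len)
            return 0;
        memcpy(dst, &tl, sizeof(tl));
        memcpy(dst + sizeof(tl), val, len);
        return sizeof(tl) + len;
    }

    int main(void)
    {
        uint8_t block[32];

        printf("wrote %zu bytes\n",
               fc_add_tlv(block, sizeof(block), 5, 4, "abcd"));
        return 0;
    }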
|
| /linux/drivers/s390/net/ |
| fsm.c |
    136         fsm_timer *this = timer_container_of(this, t, tl);  in fsm_expire_timer()
    152         timer_setup(&this->tl, fsm_expire_timer, 0);  in fsm_settimer()
    162         timer_delete(&this->tl);  in fsm_deltimer()
    174         timer_setup(&this->tl, fsm_expire_timer, 0);  in fsm_addtimer()
    177         this->tl.expires = jiffies + (millisec * HZ) / 1000;  in fsm_addtimer()
    178         add_timer(&this->tl);  in fsm_addtimer()
    192         timer_delete(&this->tl);  in fsm_modtimer()
    193         timer_setup(&this->tl, fsm_expire_timer, 0);  in fsm_modtimer()
    196         this->tl.expires = jiffies + (millisec * HZ) / 1000;  in fsm_modtimer()
    197         add_timer(&this->tl);  in fsm_modtimer()
|
| /linux/kernel/sched/ |
| topology.c |
    1632 sd_init(struct sched_domain_topology_level *tl,  in sd_init() argument
    1636         struct sd_data *sdd = &tl->data;  in sd_init()
    1641         sd_weight = cpumask_weight(tl->mask(tl, cpu));  in sd_init()
    1643         if (tl->sd_flags)  in sd_init()
    1644                 sd_flags = (*tl->sd_flags)();  in sd_init()
    1681                 .name = tl->name,  in sd_init()
    1685         cpumask_and(sd_span, cpu_map, tl->mask(tl, cpu));  in sd_init()
    1714         if (sched_domains_numa_distance[tl->numa_level] > node_reclaim_distance) {  in sd_init()
    1746 const struct cpumask *tl_smt_mask(struct sched_domain_topology_level *tl, int cpu)  in tl_smt_mask() argument
    1758 const struct cpumask *tl_cls_mask(struct sched_domain_topology_level *tl, int cpu)  in tl_cls_mask() argument
    [all …]
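sd_init() above treats every topology level as a record of callbacks: tl->mask(tl, cpu) yields the CPUs sharing that level with @cpu, and the optional tl->sd_flags() hook contributes per-level flags. A compact userspace sketch of that table-of-levels shape (all names are invented, and a uint64_t bitmap stands in for struct cpumask):

    #include <stdint.h>
    #include <stdio.h>

    struct topo_level;
    typedef uint64_t (*mask_fn)(const struct topo_level *tl, int cpu);
    typedef int (*flags_fn)(void);

    struct topo_level {
        const char *name;
        mask_fn mask;           /* CPUs sharing this level with @cpu */
        flags_fn sd_flags;      /* optional per-level flags */
    };

    static uint64_t smt_mask(const struct topo_level *tl, int cpu)
    {
        (void)tl;
        return 3ull << (cpu & ~1);      /* sibling pairs {0,1}, {2,3}, ... */
    }

    static uint64_t pkg_mask(const struct topo_level *tl, int cpu)
    {
        (void)tl; (void)cpu;
        return ~0ull;                   /* one package spanning all 64 CPUs */
    }

    static const struct topo_level levels[] = {
        { "SMT", smt_mask, NULL },
        { "PKG", pkg_mask, NULL },
    };

    int main(void)
    {
        for (unsigned int i = 0; i < sizeof(levels) / sizeof(levels[0]); i++)
            printf("%s weight=%d\n", levels[i].name,
                   __builtin_popcountll(levels[i].mask(&levels[i], 5)));
        return 0;
    }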
|
| /linux/arch/s390/include/asm/ |
| dat-bits.h |
    24          unsigned long tl : 2;  /* Region- or Segment-Table Length */  member
    46          unsigned long tl : 2;  /* Region-Second-Table Length */  member
    61          unsigned long tl : 2;  /* Region-Third-Table Length */  member
    75          unsigned long tl : 2;  /* Segment-Table Length */  member
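dat-bits.h maps hardware table formats onto C bitfields, with tl always a 2-bit length field at a fixed position. A userspace illustration of the technique (the layout below is invented, not the s390 format; note that C leaves bitfield ordering implementation-defined, so real headers like this one are tied to a specific ABI, and unsigned long bitfields are a compiler extension GCC accepts):

    #include <stdio.h>

    /* Illustrative 64-bit descriptor in the style of the s390 entries. */
    struct table_entry {
        unsigned long origin : 52;   /* table origin */
        unsigned long        : 8;    /* reserved */
        unsigned long tf     : 2;    /* table offset */
        unsigned long tl     : 2;    /* table length, in quarter-tables */
    };

    int main(void)
    {
        struct table_entry te = { .origin = 0x1234, .tf = 0, .tl = 3 };

        /* 52 + 8 + 2 + 2 bits pack into one 8-byte word on LP64. */
        printf("sizeof=%zu tl=%lu\n", sizeof(te), (unsigned long)te.tl);
        return 0;
    }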
|
| /linux/drivers/net/ethernet/qlogic/qed/ |
| qed_vf.h |
    55          struct channel_tlv tl;  member
    62          struct channel_tlv tl;  member
    74          struct channel_tlv tl;  member
    123         struct channel_tlv tl;  member
    232         struct channel_tlv tl;  member
    345         struct channel_tlv tl;  member
    353         struct channel_tlv tl;  member
    359         struct channel_tlv tl;  member
    365         struct channel_tlv tl;  member
    377         struct channel_tlv tl;  member
    [all …]
|
| /linux/drivers/net/wireless/intel/iwlegacy/ |
| 4965-rs.c |
    229 il4965_rs_tl_rm_old_stats(struct il_traffic_load *tl, u32 curr_time)  in il4965_rs_tl_rm_old_stats() argument
    234         while (tl->queue_count && tl->time_stamp < oldest_time) {  in il4965_rs_tl_rm_old_stats()
    235                 tl->total -= tl->packet_count[tl->head];  in il4965_rs_tl_rm_old_stats()
    236                 tl->packet_count[tl->head] = 0;  in il4965_rs_tl_rm_old_stats()
    237                 tl->time_stamp += TID_QUEUE_CELL_SPACING;  in il4965_rs_tl_rm_old_stats()
    238                 tl->queue_count--;  in il4965_rs_tl_rm_old_stats()
    239                 tl->head++;  in il4965_rs_tl_rm_old_stats()
    240                 if (tl->head >= TID_QUEUE_MAX_SIZE)  in il4965_rs_tl_rm_old_stats()
    241                         tl->head = 0;  in il4965_rs_tl_rm_old_stats()
    255         struct il_traffic_load *tl = NULL;  in il4965_rs_tl_add_packet() local
    [all …]
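il4965_rs_tl_rm_old_stats() above ages a per-TID traffic histogram kept as a ring buffer: each cell covers one fixed time slice, and cells that have slid out of the window are subtracted from the running total as the head advances. A runnable userspace version of the same aging loop (CELLS and CELL_MS stand in for TID_QUEUE_MAX_SIZE and TID_QUEUE_CELL_SPACING):

    #include <stdint.h>
    #include <stdio.h>

    #define CELLS     8        /* ring size */
    #define CELL_MS   1000     /* time covered by one cell */
    #define WINDOW_MS (CELLS * CELL_MS)

    struct traffic_load {
        uint32_t time_stamp;            /* start time of the cell at @head */
        uint32_t total;                 /* sum over all live cells */
        uint32_t packet_count[CELLS];
        int queue_count;                /* cells currently in use */
        int head;
    };

    static void rm_old_stats(struct traffic_load *tl, uint32_t now)
    {
        uint32_t oldest = now - WINDOW_MS;

        while (tl->queue_count && tl->time_stamp < oldest) {
            tl->total -= tl->packet_count[tl->head];
            tl->packet_count[tl->head] = 0;
            tl->time_stamp += CELL_MS;
            tl->queue_count--;
            if (++tl->head >= CELLS)
                tl->head = 0;
        }
    }

    int main(void)
    {
        struct traffic_load tl = {
            .time_stamp = 0, .total = 7,
            .packet_count = { 3, 4 }, .queue_count = 2,
        };

        rm_old_stats(&tl, 10000);   /* both cells fell out of the window */
        printf("total=%u live=%d\n", tl.total, tl.queue_count);
        return 0;
    }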
|
| /linux/drivers/gpu/drm/i915/pxp/ |
| intel_pxp_cmd.c |
    86          struct intel_timeline * const tl = i915_request_timeline(rq);  in pxp_request_commit() local
    88          lockdep_unpin_lock(&tl->mutex, rq->cookie);  in pxp_request_commit()
    94          mutex_unlock(&tl->mutex);  in pxp_request_commit()
|
| /linux/arch/sparc/kernel/ |
| etrap_64.S |
    221         rdpr    %tl, %g1
    223         wrpr    %g0, 1, %tl
    233         wrpr    %g0, 2, %tl
    248         wrpr    %g0, 3, %tl
    258         wrpr    %g0, 4, %tl
    271         wrpr    %g0, 1, %tl
|
| cherrs.S |
    182         rdpr    %tl, %g1        ! Save original trap level
    185 1:      wrpr    %g2, %tl        ! Set trap level to check
    189         wrpr    %g1, %tl        ! Restore original trap level
    194         wrpr    %g1, %tl        ! Restore original trap level
    233         rdpr    %tl, %g1        ! Save original trap level
    236 1:      wrpr    %g2, %tl        ! Set trap level to check
    240         wrpr    %g1, %tl        ! Restore original trap level
    245         wrpr    %g1, %tl        ! Restore original trap level
|
| /linux/drivers/net/ethernet/broadcom/bnx2x/ |
| bnx2x_vfpf.h |
    99          struct channel_tlv tl;  member
    105         struct channel_tlv tl;  member
    117         struct channel_tlv tl;  member
    213         struct channel_tlv tl;  member
    219         struct channel_tlv tl;  member
|
| bnx2x_vfpf.c |
    32          struct channel_tlv *tl =  in bnx2x_add_tlv() local
    35          tl->type = type;  in bnx2x_add_tlv()
    36          tl->length = length;  in bnx2x_add_tlv()
    52          bnx2x_add_tlv(bp, &first_tlv->tl, 0, type, length);  in bnx2x_vfpf_prep()
    63                  first_tlv->tl.type);  in bnx2x_vfpf_finalize()
    256         bnx2x_add_tlv(bp, req, req->first_tlv.tl.length,  in bnx2x_vfpf_acquire()
    266                       req->first_tlv.tl.length + sizeof(struct channel_tlv),  in bnx2x_vfpf_acquire()
    412         bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,  in bnx2x_vfpf_release()
    463         bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,  in bnx2x_vfpf_init()
    514         bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,  in bnx2x_vfpf_close_vf()
    [all …]
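bnx2x_add_tlv() above writes each header at a running offset inside the request, and every chain is closed with a CHANNEL_TLV_LIST_END record so the PF knows where to stop parsing. A userspace sketch of that append-then-terminate flow (type values and names are illustrative; a real implementation would also bounds-check the buffer):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define TLV_LIST_END 0      /* illustrative terminator type */

    struct channel_tlv {
        uint16_t type;
        uint16_t length;        /* header plus payload, in bytes */
    };

    /* Write one header at @off; return the offset of the next record. */
    static size_t add_tlv(uint8_t *buf, size_t off, uint16_t type, uint16_t length)
    {
        struct channel_tlv tl = { .type = type, .length = length };

        memcpy(buf + off, &tl, sizeof(tl));
        return off + length;
    }

    int main(void)
    {
        uint8_t msg[64] = { 0 };
        size_t off = 0;

        off = add_tlv(msg, off, 1, sizeof(struct channel_tlv) + 8); /* request */
        off = add_tlv(msg, off, TLV_LIST_END, sizeof(struct channel_tlv));
        printf("message ends at %zu bytes\n", off);
        return 0;
    }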
|
| /linux/drivers/net/wireless/intel/iwlwifi/dvm/ |
| rs.c |
    227 static void rs_tl_rm_old_stats(struct iwl_traffic_load *tl, u32 curr_time)  in rs_tl_rm_old_stats() argument
    232         while (tl->queue_count &&  in rs_tl_rm_old_stats()
    233                (tl->time_stamp < oldest_time)) {  in rs_tl_rm_old_stats()
    234                 tl->total -= tl->packet_count[tl->head];  in rs_tl_rm_old_stats()
    235                 tl->packet_count[tl->head] = 0;  in rs_tl_rm_old_stats()
    236                 tl->time_stamp += TID_QUEUE_CELL_SPACING;  in rs_tl_rm_old_stats()
    237                 tl->queue_count--;  in rs_tl_rm_old_stats()
    238                 tl->head++;  in rs_tl_rm_old_stats()
    239                 if (tl->head >= TID_QUEUE_MAX_SIZE)  in rs_tl_rm_old_stats()
    240                         tl->head = 0;  in rs_tl_rm_old_stats()
    [all …]
|
| /linux/drivers/nvme/target/ |
| fabrics-cmd-auth.c |
    231         return le32_to_cpu(req->cmd->auth_send.tl);  in nvmet_auth_send_data_len()
    239         u32 tl;  in nvmet_execute_auth_send() local
    261         tl = nvmet_auth_send_data_len(req);  in nvmet_execute_auth_send()
    262         if (!tl) {  in nvmet_execute_auth_send()
    265                         offsetof(struct nvmf_auth_send_command, tl);  in nvmet_execute_auth_send()
    268         if (!nvmet_check_transfer_len(req, tl)) {  in nvmet_execute_auth_send()
    269                 pr_debug("%s: transfer length mismatch (%u)\n", __func__, tl);  in nvmet_execute_auth_send()
    273         d = kmalloc(tl, GFP_KERNEL);  in nvmet_execute_auth_send()
    279         status = nvmet_copy_from_sgl(req, 0, d, tl);  in nvmet_execute_auth_send()
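nvmet_execute_auth_send() above takes its transfer length from the command itself (le32_to_cpu() of the tl field), rejects a zero or mismatched value before touching memory, and only then allocates and copies the payload. A userspace sketch of that trust-boundary check (names and the size cap are invented; le32toh() mirrors le32_to_cpu()):

    #include <endian.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define MAX_TL 4096     /* illustrative cap on one auth message */

    /* Copy a command-specified payload out of the receive buffer, or NULL. */
    static void *copy_auth_payload(const uint8_t *recv, size_t recv_len,
                                   uint32_t tl_le)
    {
        uint32_t tl = le32toh(tl_le);   /* wire field is little-endian */
        void *d;

        if (!tl || tl > MAX_TL || tl != recv_len)
            return NULL;                /* length mismatch: reject command */
        d = malloc(tl);
        if (!d)
            return NULL;
        memcpy(d, recv, tl);
        return d;
    }

    int main(void)
    {
        uint8_t wire[16] = "secret-material";
        void *d = copy_auth_payload(wire, sizeof(wire), htole32(16));

        printf("%s\n", d ? "accepted" : "rejected");
        free(d);
        return 0;
    }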
|
| /linux/arch/s390/kvm/ |
| gaccess.c |
    468                 if (vaddr.rfx01 > asce.tl)  in guest_translate()
    475                 if (vaddr.rsx01 > asce.tl)  in guest_translate()
    482                 if (vaddr.rtx01 > asce.tl)  in guest_translate()
    489                 if (vaddr.sx01 > asce.tl)  in guest_translate()
    506                 if (vaddr.rsx01 < rfte.tf || vaddr.rsx01 > rfte.tl)  in guest_translate()
    524                 if (vaddr.rtx01 < rste.tf || vaddr.rtx01 > rste.tl)  in guest_translate()
    552                 if (vaddr.sx01 > rtte.fc0.tl)  in guest_translate()
    1224                if (vaddr.rfx01 > asce.tl && !*fake)  in kvm_s390_shadow_tables()
    1230                if (vaddr.rsx01 > asce.tl)  in kvm_s390_shadow_tables()
    1236                if (vaddr.rtx01 > asce.tl)  in kvm_s390_shadow_tables()
    [all …]
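Each vaddr.xxx01 > table.tl test above compares the top two bits of an 11-bit table index against a 2-bit length field: s390 DAT tables are allocated in quarters of 512 entries, so an index is only addressable within the (tl + 1) quarters that actually exist. A small userspace model of the check (the helper name is invented; the field widths follow the snippet):

    #include <stdio.h>

    /*
     * index: 11-bit table index (0..2047)
     * tl:    2-bit table length; (tl + 1) quarters of 512 entries are valid
     */
    static int index_in_table(unsigned int index, unsigned int tl)
    {
        return (index >> 9) <= tl;      /* index / 512 is the quarter number */
    }

    int main(void)
    {
        printf("%d\n", index_in_table(511, 0));     /* 1: first quarter */
        printf("%d\n", index_in_table(512, 0));     /* 0: table too short */
        printf("%d\n", index_in_table(2047, 3));    /* 1: full-size table */
        return 0;
    }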
|