/linux/fs/quota/
  quota.c
    131   tstate = state.s_state + type;   in quota_getinfo()
    339   if (state->s_state[USRQUOTA].flags & QCI_ACCT_ENABLED)   in quota_state_to_flags()
    341   if (state->s_state[USRQUOTA].flags & QCI_LIMITS_ENFORCED)   in quota_state_to_flags()
    343   if (state->s_state[GRPQUOTA].flags & QCI_ACCT_ENABLED)   in quota_state_to_flags()
    345   if (state->s_state[GRPQUOTA].flags & QCI_LIMITS_ENFORCED)   in quota_state_to_flags()
    347   if (state->s_state[PRJQUOTA].flags & QCI_ACCT_ENABLED)   in quota_state_to_flags()
    349   if (state->s_state[PRJQUOTA].flags & QCI_LIMITS_ENFORCED)   in quota_state_to_flags()
    373   fqs->qs_btimelimit = state.s_state[type].spc_timelimit;   in quota_getstate()
    374   fqs->qs_itimelimit = state.s_state[type].ino_timelimit;   in quota_getstate()
    375   fqs->qs_rtbtimelimit = state.s_state[type].rt_spc_timelimit;   in quota_getstate()
    [all …]
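The quota.c hits above show the quotactl path reading the per-type entries of struct qc_state (one slot each for USRQUOTA, GRPQUOTA and PRJQUOTA) and folding their flags into a single flag word for userspace. A minimal standalone sketch of that translation follows; the struct layout and the numeric values of the QCI_*/FS_QUOTA_* constants are simplified stand-ins for this example, not the kernel's definitions.

    /* Illustrative stand-ins only; values and layout differ from the kernel's. */
    enum { USRQUOTA, GRPQUOTA, PRJQUOTA, MAXQUOTAS };

    #define QCI_ACCT_ENABLED     (1 << 0)
    #define QCI_LIMITS_ENFORCED  (1 << 1)

    #define FS_QUOTA_UDQ_ACCT  (1 << 0)
    #define FS_QUOTA_UDQ_ENFD  (1 << 1)
    #define FS_QUOTA_GDQ_ACCT  (1 << 2)
    #define FS_QUOTA_GDQ_ENFD  (1 << 3)
    #define FS_QUOTA_PDQ_ACCT  (1 << 4)
    #define FS_QUOTA_PDQ_ENFD  (1 << 5)

    struct qc_type_state { unsigned int flags; };
    struct qc_state { struct qc_type_state s_state[MAXQUOTAS]; };

    /* Same shape as the listed quota_state_to_flags(): fold per-type bits into one word. */
    static unsigned int quota_state_to_flags(const struct qc_state *state)
    {
        unsigned int flags = 0;

        if (state->s_state[USRQUOTA].flags & QCI_ACCT_ENABLED)
            flags |= FS_QUOTA_UDQ_ACCT;
        if (state->s_state[USRQUOTA].flags & QCI_LIMITS_ENFORCED)
            flags |= FS_QUOTA_UDQ_ENFD;
        if (state->s_state[GRPQUOTA].flags & QCI_ACCT_ENABLED)
            flags |= FS_QUOTA_GDQ_ACCT;
        if (state->s_state[GRPQUOTA].flags & QCI_LIMITS_ENFORCED)
            flags |= FS_QUOTA_GDQ_ENFD;
        if (state->s_state[PRJQUOTA].flags & QCI_ACCT_ENABLED)
            flags |= FS_QUOTA_PDQ_ACCT;
        if (state->s_state[PRJQUOTA].flags & QCI_LIMITS_ENFORCED)
            flags |= FS_QUOTA_PDQ_ENFD;
        return flags;
    }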
/linux/fs/xfs/
  xfs_quotaops.c
    70   state->s_state[USRQUOTA].flags |= QCI_ACCT_ENABLED;   in xfs_fs_get_quota_state()
    72   state->s_state[USRQUOTA].flags |= QCI_LIMITS_ENFORCED;   in xfs_fs_get_quota_state()
    74   state->s_state[GRPQUOTA].flags |= QCI_ACCT_ENABLED;   in xfs_fs_get_quota_state()
    76   state->s_state[GRPQUOTA].flags |= QCI_LIMITS_ENFORCED;   in xfs_fs_get_quota_state()
    78   state->s_state[PRJQUOTA].flags |= QCI_ACCT_ENABLED;   in xfs_fs_get_quota_state()
    80   state->s_state[PRJQUOTA].flags |= QCI_LIMITS_ENFORCED;   in xfs_fs_get_quota_state()
    82   error = xfs_qm_fill_state(&state->s_state[USRQUOTA], mp,   in xfs_fs_get_quota_state()
    86   error = xfs_qm_fill_state(&state->s_state[GRPQUOTA], mp,   in xfs_fs_get_quota_state()
    90   error = xfs_qm_fill_state(&state->s_state[PRJQUOTA], mp,   in xfs_fs_get_quota_state()
/linux/drivers/infiniband/hw/qib/
  qib_uc.c
    87    switch (qp->s_state) {   in qib_make_uc_req()
    109   qp->s_state = OP(SEND_FIRST);   in qib_make_uc_req()
    114   qp->s_state = OP(SEND_ONLY);   in qib_make_uc_req()
    116   qp->s_state =   in qib_make_uc_req()
    138   qp->s_state = OP(RDMA_WRITE_FIRST);   in qib_make_uc_req()
    143   qp->s_state = OP(RDMA_WRITE_ONLY);   in qib_make_uc_req()
    145   qp->s_state =   in qib_make_uc_req()
    164   qp->s_state = OP(SEND_MIDDLE);   in qib_make_uc_req()
    173   qp->s_state = OP(SEND_LAST);   in qib_make_uc_req()
    175   qp->s_state = OP(SEND_LAST_WITH_IMMEDIATE);   in qib_make_uc_req()
    [all …]
  qib_rc.c
    277   switch (qp->s_state) {   in qib_make_rc_req()
    320   qp->s_state = OP(SEND_FIRST);   in qib_make_rc_req()
    325   qp->s_state = OP(SEND_ONLY);   in qib_make_rc_req()
    327   qp->s_state = OP(SEND_ONLY_WITH_IMMEDIATE);   in qib_make_rc_req()
    355   qp->s_state = OP(RDMA_WRITE_FIRST);   in qib_make_rc_req()
    360   qp->s_state = OP(RDMA_WRITE_ONLY);   in qib_make_rc_req()
    362   qp->s_state = OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE);   in qib_make_rc_req()
    396   qp->s_state = OP(RDMA_READ_REQUEST);   in qib_make_rc_req()
    422   qp->s_state = OP(COMPARE_SWAP);   in qib_make_rc_req()
    428   qp->s_state = OP(FETCH_ADD);   in qib_make_rc_req()
    [all …]
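In qib_make_uc_req() and qib_make_rc_req(), qp->s_state is the send engine's packet state: the opcode stored in s_state depends on whether this is the first packet of a message and whether the remaining payload still exceeds one path MTU (the FIRST/MIDDLE/LAST forms versus the single-packet ONLY forms). The sketch below is an illustrative reduction of that choice, not the drivers' code; the enum and helper are invented for the example.

    #include <stddef.h>
    #include <stdint.h>

    /* Illustrative opcodes only; the drivers use the IB OP() macros instead. */
    enum send_opcode {
        OP_SEND_FIRST,
        OP_SEND_MIDDLE,
        OP_SEND_LAST,
        OP_SEND_ONLY,
    };

    /*
     * Pick the opcode for the next packet of a message, given how many bytes
     * remain and whether a FIRST/MIDDLE packet has already been emitted.
     * Mirrors the shape of the s_state transitions seen in the hits above.
     */
    static enum send_opcode next_send_opcode(size_t remaining, uint32_t pmtu,
                                             int started)
    {
        if (!started)
            return remaining > pmtu ? OP_SEND_FIRST : OP_SEND_ONLY;
        return remaining > pmtu ? OP_SEND_MIDDLE : OP_SEND_LAST;
    }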
/linux/drivers/infiniband/hw/hfi1/
  uc.c
    74    switch (qp->s_state) {   in hfi1_make_uc_req()
    122   qp->s_state = OP(SEND_FIRST);   in hfi1_make_uc_req()
    127   qp->s_state = OP(SEND_ONLY);   in hfi1_make_uc_req()
    129   qp->s_state =   in hfi1_make_uc_req()
    151   qp->s_state = OP(RDMA_WRITE_FIRST);   in hfi1_make_uc_req()
    156   qp->s_state = OP(RDMA_WRITE_ONLY);   in hfi1_make_uc_req()
    158   qp->s_state =   in hfi1_make_uc_req()
    177   qp->s_state = OP(SEND_MIDDLE);   in hfi1_make_uc_req()
    187   qp->s_state = OP(SEND_LAST);   in hfi1_make_uc_req()
    189   qp->s_state = OP(SEND_LAST_WITH_IMMEDIATE);   in hfi1_make_uc_req()
    [all …]
  rc.c
    471   switch (qp->s_state) {   in hfi1_make_rc_req()
    560   qp->s_state = OP(SEND_FIRST);   in hfi1_make_rc_req()
    565   qp->s_state = OP(SEND_ONLY);   in hfi1_make_rc_req()
    567   qp->s_state = OP(SEND_ONLY_WITH_IMMEDIATE);   in hfi1_make_rc_req()
    572   qp->s_state = OP(SEND_ONLY_WITH_INVALIDATE);   in hfi1_make_rc_req()
    602   qp->s_state = OP(RDMA_WRITE_FIRST);   in hfi1_make_rc_req()
    607   qp->s_state = OP(RDMA_WRITE_ONLY);   in hfi1_make_rc_req()
    609   qp->s_state =   in hfi1_make_rc_req()
    643   priv->s_state = TID_OP(WRITE_RESP);   in hfi1_make_rc_req()
    680   priv->s_state ==   in hfi1_make_rc_req()
    [all …]
  tid_rdma.c
    353    qpriv->s_state = TID_OP(WRITE_RESP);   in hfi1_qp_priv_init()
    1771   qp->s_state = TID_OP(READ_REQ);   in hfi1_build_tid_rdma_read_packet()
    3393   qp->s_state = TID_OP(WRITE_REQ);   in hfi1_build_tid_rdma_write_req()
    4613   qpriv->s_state = TID_OP(WRITE_DATA_LAST);   in hfi1_rc_rcv_tid_rdma_ack()
    4618   qpriv->s_state = TID_OP(WRITE_DATA);   in hfi1_rc_rcv_tid_rdma_ack()
    4709   qpriv->s_state = TID_OP(WRITE_REQ);   in hfi1_rc_rcv_tid_rdma_ack()
    4732   qpriv->s_state = TID_OP(WRITE_REQ);   in hfi1_rc_rcv_tid_rdma_ack()
    4834   priv->s_state = TID_OP(RESYNC);   in hfi1_tid_retry_timeout()
    5005   priv->s_state = TID_OP(WRITE_RESP);   in update_tid_tail()
    5072   switch (priv->s_state) {   in hfi1_make_tid_rdma_pkt()
    [all …]
/linux/fs/jfs/
  jfs_mount.c
    321   le32_to_cpu(j_sb->s_flag), le32_to_cpu(j_sb->s_state),   in chkSuper()
    348   if (j_sb->s_state != cpu_to_le32(FM_CLEAN) &&   in chkSuper()
    355   sbi->state = le32_to_cpu(j_sb->s_state);   in chkSuper()
    368   le32_to_cpu(j_sb->s_state) > FM_STATE_MAX) {   in chkSuper()
    428   j_sb->s_state = cpu_to_le32(state);   in updateSuper()
  jfs_superblock.h
    46   __le32 s_state; /* 4: mount/unmount/recovery state:   member
  resize.c
    229   j_sb->s_state |= cpu_to_le32(FM_EXTENDFS);   in jfs_extendfs()
    487   j_sb->s_state &= cpu_to_le32(~FM_EXTENDFS);   in jfs_extendfs()
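The JFS hits show the usual on-disk state discipline: s_state is stored little-endian in the superblock, so every test or update goes through cpu_to_le32()/le32_to_cpu(), and a transient condition such as an in-progress resize is flagged by setting FM_EXTENDFS and clearing it again afterwards. A small userspace sketch of the same set/clear pattern, using glibc's htole32()/le32toh() in place of the kernel helpers and an illustrative flag value:

    #include <stdint.h>
    #include <endian.h>   /* htole32()/le32toh(), userspace analogue of cpu_to_le32()/le32_to_cpu() */

    #define FM_EXTENDFS 0x00000008u   /* illustrative value for this sketch */

    struct jfs_like_sb {
        uint32_t s_state;   /* stored little-endian on disk */
    };

    static void mark_extendfs(struct jfs_like_sb *sb)
    {
        sb->s_state |= htole32(FM_EXTENDFS);    /* j_sb->s_state |= cpu_to_le32(FM_EXTENDFS) */
    }

    static void clear_extendfs(struct jfs_like_sb *sb)
    {
        sb->s_state &= htole32(~FM_EXTENDFS);   /* j_sb->s_state &= cpu_to_le32(~FM_EXTENDFS) */
    }

    static int is_extendfs(const struct jfs_like_sb *sb)
    {
        return (le32toh(sb->s_state) & FM_EXTENDFS) != 0;
    }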
/linux/fs/minix/
  inode.c
    49    sbi->s_ms->s_state = sbi->s_mount_state;   in minix_put_super()
    127   if (ms->s_state & MINIX_VALID_FS ||   in minix_reconfigure()
    132   ms->s_state = sbi->s_mount_state;   in minix_reconfigure()
    137   sbi->s_mount_state = ms->s_state;   in minix_reconfigure()
    138   ms->s_state &= ~MINIX_VALID_FS;   in minix_reconfigure()
    202   sbi->s_mount_state = ms->s_state;   in minix_fill_super()
    317   ms->s_state &= ~MINIX_VALID_FS;   in minix_fill_super()
/linux/fs/nilfs2/
  super.c
    97    sbp[0]->s_state |= cpu_to_le16(NILFS_ERROR_FS);   in nilfs_set_error()
    99    sbp[1]->s_state |= cpu_to_le16(NILFS_ERROR_FS);   in nilfs_set_error()
    322   sbp[0]->s_state = cpu_to_le16(nilfs->ns_mount_state);   in nilfs_cleanup_super()
    330   sbp[1]->s_state = sbp[0]->s_state;   in nilfs_cleanup_super()
    477   sbp[0]->s_state = cpu_to_le16(le16_to_cpu(sbp[0]->s_state) &   in nilfs_resize_fs()
    835   sbp[0]->s_state =   in nilfs_setup_super()
    836   cpu_to_le16(le16_to_cpu(sbp[0]->s_state) & ~NILFS_VALID_FS);   in nilfs_setup_super()
/linux/drivers/misc/
  tifm_7xx1.c
    80    unsigned int s_state;   in tifm_7xx1_toggle_sock_power()   local
    93    s_state = readl(sock_addr + SOCK_PRESENT_STATE);   in tifm_7xx1_toggle_sock_power()
    94    if (!(TIFM_SOCK_STATE_OCCUPIED & s_state))   in tifm_7xx1_toggle_sock_power()
    105   writel((s_state & TIFM_CTRL_POWER_MASK) | 0x0c00,   in tifm_7xx1_toggle_sock_power()
/linux/include/linux/
  sysv_fs.h
    92    __fs32 s_state; /* file system state: 0x7c269d38-s_time means clean */   member
    120   __fs32 s_state; /* file system state: 0xcb096f43 means clean */   member
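The two sysv_fs.h declarations record cleanliness in format-specific ways: in one superblock layout the filesystem is clean when s_state equals 0x7c269d38 minus s_time, in the other when it equals the fixed value 0xcb096f43 (per the comments above). A tiny sketch of checking both conventions, with hypothetical helper names and plain host-endian integers for brevity; the real fields are __fs32 and go through the per-superblock byte-order helpers.

    #include <stdint.h>

    /* Clean-state check for the "magic minus s_time" convention. */
    static int sysv_clean_time_keyed(uint32_t s_state, uint32_t s_time)
    {
        return s_state == 0x7c269d38u - s_time;
    }

    /* Clean-state check for the fixed-magic convention. */
    static int sysv_clean_fixed_magic(uint32_t s_state)
    {
        return s_state == 0xcb096f43u;
    }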
/linux/fs/ceph/
  mds_client.c
    999    s->s_state = CEPH_MDS_SESSION_NEW;   in register_session()
    1675   session->s_state = CEPH_MDS_SESSION_OPENING;   in __open_session()
    1704   if (session->s_state == CEPH_MDS_SESSION_NEW ||   in __open_export_target_session()
    1705   session->s_state == CEPH_MDS_SESSION_CLOSING) {   in __open_export_target_session()
    2052   ceph_session_state_name(session->s_state), seq);   in send_flushmsg_ack()
    2107   ceph_session_state_name(session->s_state), session->s_seq);   in request_close_session()
    2122   if (session->s_state >= CEPH_MDS_SESSION_CLOSING)   in __close_session()
    2124   session->s_state = CEPH_MDS_SESSION_CLOSING;   in __close_session()
    2408   if (session->s_state == CEPH_MDS_SESSION_OPEN ||   in ceph_cap_release_work()
    2409   session->s_state == CEPH_MDS_SESSION_HUNG)   in ceph_cap_release_work()
    [all …]
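In the Ceph MDS client, s_state tracks the session's position in its lifecycle, and later states compare greater than earlier ones, which is why __close_session() can refuse to act once s_state >= CEPH_MDS_SESSION_CLOSING. A compact standalone sketch of that ordered-enum idiom; the enum names and ordering here are illustrative, not the CEPH_MDS_SESSION_* definitions.

    /* Illustrative session states; the ordering matters for the >= guard below. */
    enum session_state {
        SESSION_NEW,
        SESSION_OPENING,
        SESSION_OPEN,
        SESSION_HUNG,
        SESSION_CLOSING,
        SESSION_CLOSED,
    };

    struct session {
        enum session_state s_state;
    };

    /* Begin closing unless a close is already in progress or finished. */
    static int close_session(struct session *s)
    {
        if (s->s_state >= SESSION_CLOSING)
            return 0;            /* nothing to do */
        s->s_state = SESSION_CLOSING;
        return 1;                /* caller should send the close message */
    }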
/linux/fs/ext2/
  super.c
    60     es->s_state |= cpu_to_le16(EXT2_ERROR_FS);   in ext2_error()
    159    es->s_state = cpu_to_le16(sbi->s_mount_state);   in ext2_put_super()
    1025   sbi->s_mount_state = le16_to_cpu(es->s_state);   in ext2_fill_super()
    1282   if (es->s_state & cpu_to_le16(EXT2_VALID_FS)) {   in ext2_sync_fs()
    1284   es->s_state &= cpu_to_le16(~EXT2_VALID_FS);   in ext2_sync_fs()
    1306   sbi->s_es->s_state = cpu_to_le16(sbi->s_mount_state);   in ext2_freeze()
    1355   if (le16_to_cpu(es->s_state) & EXT2_VALID_FS ||   in ext2_remount()
    1363   es->s_state = cpu_to_le16(sbi->s_mount_state);   in ext2_remount()
    1388   sbi->s_mount_state = le16_to_cpu(es->s_state);   in ext2_remount()
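The ext2 hits (like the minix and nilfs2 ones earlier) show the classic valid-bit dance: at mount time the in-memory s_mount_state caches the on-disk value and EXT2_VALID_FS is cleared on disk while the filesystem is in use, errors set EXT2_ERROR_FS, and a clean unmount writes the saved state back. A condensed userspace sketch of that lifecycle, with simplified types and htole16()/le16toh() standing in for cpu_to_le16()/le16_to_cpu():

    #include <stdint.h>
    #include <endian.h>

    #define EXT2_VALID_FS 0x0001u   /* cleanly unmounted */
    #define EXT2_ERROR_FS 0x0002u   /* errors detected   */

    struct ondisk_sb { uint16_t s_state; };        /* little-endian on disk  */
    struct sb_info   { uint16_t s_mount_state; };  /* host-endian, in memory */

    static void fill_super(struct sb_info *sbi, struct ondisk_sb *es)
    {
        sbi->s_mount_state = le16toh(es->s_state);         /* remember what we found   */
        es->s_state &= htole16((uint16_t)~EXT2_VALID_FS);  /* mark "in use" on disk    */
    }

    static void note_error(struct ondisk_sb *es)
    {
        es->s_state |= htole16(EXT2_ERROR_FS);             /* sticky error flag        */
    }

    static void put_super(const struct sb_info *sbi, struct ondisk_sb *es)
    {
        es->s_state = htole16(sbi->s_mount_state);         /* restore the saved state  */
    }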
/linux/drivers/gpu/drm/sun4i/
  sun4i_layer.c
    56   struct sun4i_layer_state *s_state = state_to_sun4i_layer_state(state);   in sun4i_backend_layer_destroy_state()   local
    60   kfree(s_state);   in sun4i_backend_layer_destroy_state()
  sun4i_backend.c
    580   struct sun4i_layer_state *s_state = state_to_sun4i_layer_state(p_state);   in sun4i_backend_atomic_check()   local
    589   s_state->pipe = current_pipe;   in sun4i_backend_atomic_check()
/linux/fs/bcachefs/
  quota.c
    678   state->s_state[i].flags |= QCI_SYSFILE;   in bch2_quota_get_state()
    683   state->s_state[i].flags |= QCI_ACCT_ENABLED;   in bch2_quota_get_state()
    685   state->s_state[i].spc_timelimit = c->quotas[i].limits[Q_SPC].timelimit;   in bch2_quota_get_state()
    686   state->s_state[i].spc_warnlimit = c->quotas[i].limits[Q_SPC].warnlimit;   in bch2_quota_get_state()
    688   state->s_state[i].ino_timelimit = c->quotas[i].limits[Q_INO].timelimit;   in bch2_quota_get_state()
    689   state->s_state[i].ino_warnlimit = c->quotas[i].limits[Q_INO].warnlimit;   in bch2_quota_get_state()
/linux/include/uapi/linux/
  minix_fs.h
    75   __u16 s_state;   member
  nilfs2_ondisk.h
    175   __le16 s_state; /* File system state */   member
/linux/drivers/net/ethernet/mellanox/mlx4/
  cmd.c
    2349   struct mlx4_slave_state *s_state;   in mlx4_multi_func_init()   local
    2396   s_state = &priv->mfunc.master.slave_state[i];   in mlx4_multi_func_init()
    2397   s_state->last_cmd = MLX4_COMM_CMD_RESET;   in mlx4_multi_func_init()
    2398   s_state->vst_qinq_supported = false;   in mlx4_multi_func_init()
    2401   s_state->event_eq[j].eqn = -1;   in mlx4_multi_func_init()
    2410   s_state->vlan_filter[port] =   in mlx4_multi_func_init()
    2413   if (!s_state->vlan_filter[port]) {   in mlx4_multi_func_init()
    2415   kfree(s_state->vlan_filter[port]);   in mlx4_multi_func_init()
    2421   INIT_LIST_HEAD(&s_state->mcast_filters[port]);   in mlx4_multi_func_init()
    2433   spin_lock_init(&s_state->lock);   in mlx4_multi_func_init()
  eq.c
    314   struct mlx4_slave_state *s_state = priv->mfunc.master.slave_state;   in mlx4_get_slave_port_state()   local
    323   return s_state[slave].port_state[port];   in mlx4_get_slave_port_state()
    331   struct mlx4_slave_state *s_state = priv->mfunc.master.slave_state;   in mlx4_set_slave_port_state()   local
    340   s_state[slave].port_state[port] = state;   in mlx4_set_slave_port_state()
/linux/fs/sysv/
  super.c
    102   sbi->s_sb_state = &sbd->s_state;   in detected_sysv4()
    130   sbi->s_sb_state = &sbd->s_state;   in detected_sysv2()
/linux/arch/x86/kvm/
  cpuid.c
    1117   bool s_state;   in __do_cpuid_func()   local
    1119   s_state = false;   in __do_cpuid_func()
    1121   s_state = true;   in __do_cpuid_func()
    1137   if (WARN_ON_ONCE(!entry->eax || (entry->ecx & 0x1) != s_state)) {   in __do_cpuid_func()