// SPDX-License-Identifier: GPL-2.0-only
/******************************************************************************
*******************************************************************************
**
**  Copyright (C) Sistina Software, Inc.  1997-2003  All rights reserved.
**  Copyright (C) 2004-2007 Red Hat, Inc.  All rights reserved.
**
**
*******************************************************************************
******************************************************************************/

#include "dlm_internal.h"
#include "midcomms.h"
#include "lowcomms.h"
#include "config.h"
#include "memory.h"
#include "ast.h"

static struct kmem_cache *writequeue_cache;
static struct kmem_cache *mhandle_cache;
static struct kmem_cache *msg_cache;
static struct kmem_cache *lkb_cache;
static struct kmem_cache *rsb_cache;
static struct kmem_cache *cb_cache;

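/*
 * Create the slab caches used by DLM.  The lowcomms and midcomms caches
 * are set up through their own helpers; the lkb, rsb and callback caches
 * are created here directly.  On any failure, the caches created so far
 * are destroyed in reverse order and -ENOMEM is returned.
 */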
int __init dlm_memory_init(void)
{
	writequeue_cache = dlm_lowcomms_writequeue_cache_create();
	if (!writequeue_cache)
		goto out;

	mhandle_cache = dlm_midcomms_cache_create();
	if (!mhandle_cache)
		goto mhandle;

	lkb_cache = kmem_cache_create("dlm_lkb", sizeof(struct dlm_lkb),
				      __alignof__(struct dlm_lkb), 0, NULL);
	if (!lkb_cache)
		goto lkb;

	msg_cache = dlm_lowcomms_msg_cache_create();
	if (!msg_cache)
		goto msg;

	rsb_cache = kmem_cache_create("dlm_rsb", sizeof(struct dlm_rsb),
				      __alignof__(struct dlm_rsb), 0, NULL);
	if (!rsb_cache)
		goto rsb;

	cb_cache = kmem_cache_create("dlm_cb", sizeof(struct dlm_callback),
				     __alignof__(struct dlm_callback), 0,
				     NULL);
	if (!cb_cache)
		goto cb;

	return 0;

cb:
	kmem_cache_destroy(rsb_cache);
rsb:
	kmem_cache_destroy(msg_cache);
msg:
	kmem_cache_destroy(lkb_cache);
lkb:
	kmem_cache_destroy(mhandle_cache);
mhandle:
	kmem_cache_destroy(writequeue_cache);
out:
	return -ENOMEM;
}

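/*
 * Tear down all caches.  rcu_barrier() waits for any RCU callbacks still
 * pending from dlm_free_rsb()/dlm_free_lkb(), so that __free_rsb_rcu() and
 * __free_lkb_rcu() have finished returning objects to rsb_cache and
 * lkb_cache before those caches are destroyed.
 */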
void dlm_memory_exit(void)
{
	rcu_barrier();

	kmem_cache_destroy(writequeue_cache);
	kmem_cache_destroy(mhandle_cache);
	kmem_cache_destroy(msg_cache);
	kmem_cache_destroy(lkb_cache);
	kmem_cache_destroy(rsb_cache);
	kmem_cache_destroy(cb_cache);
}

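/*
 * Lock value block (LVB) buffers are sized per lockspace (ls_lvblen),
 * zeroed on allocation, and allocated with GFP_ATOMIC like the other
 * allocators in this file.
 */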
char *dlm_allocate_lvb(struct dlm_ls *ls)
{
	return kzalloc(ls->ls_lvblen, GFP_ATOMIC);
}

void dlm_free_lvb(char *p)
{
	kfree(p);
}

struct dlm_rsb *dlm_allocate_rsb(void)
{
	return kmem_cache_zalloc(rsb_cache, GFP_ATOMIC);
}

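/*
 * rsb freeing is deferred with call_rcu(); the RCU callback below releases
 * the attached LVB (if any) and then returns the rsb to its cache.
 */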
static void __free_rsb_rcu(struct rcu_head *rcu)
{
	struct dlm_rsb *r = container_of(rcu, struct dlm_rsb, rcu);

	if (r->res_lvbptr)
		dlm_free_lvb(r->res_lvbptr);
	kmem_cache_free(rsb_cache, r);
}

void dlm_free_rsb(struct dlm_rsb *r)
{
	call_rcu(&r->rcu, __free_rsb_rcu);
}

struct dlm_lkb *dlm_allocate_lkb(void)
{
	return kmem_cache_zalloc(lkb_cache, GFP_ATOMIC);
}

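/*
 * lkb freeing is also RCU-deferred.  For user-space locks (DLM_DFL_USER_BIT)
 * the attached dlm_user_args and its lksb LVB buffer are freed along with
 * the lkb itself.
 */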
static void __free_lkb_rcu(struct rcu_head *rcu)
{
	struct dlm_lkb *lkb = container_of(rcu, struct dlm_lkb, rcu);

	if (test_bit(DLM_DFL_USER_BIT, &lkb->lkb_dflags)) {
		struct dlm_user_args *ua;

		ua = lkb->lkb_ua;
		if (ua) {
			kfree(ua->lksb.sb_lvbptr);
			kfree(ua);
		}
	}

	kmem_cache_free(lkb_cache, lkb);
}

void dlm_free_lkb(struct dlm_lkb *lkb)
{
	call_rcu(&lkb->rcu, __free_lkb_rcu);
}

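/* midcomms message handles, from the cache set up by dlm_midcomms_cache_create() */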
struct dlm_mhandle *dlm_allocate_mhandle(void)
{
	return kmem_cache_alloc(mhandle_cache, GFP_ATOMIC);
}

void dlm_free_mhandle(struct dlm_mhandle *mhandle)
{
	kmem_cache_free(mhandle_cache, mhandle);
}

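/* lowcomms writequeue entries (cache from dlm_lowcomms_writequeue_cache_create()) */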
struct writequeue_entry *dlm_allocate_writequeue(void)
{
	return kmem_cache_alloc(writequeue_cache, GFP_ATOMIC);
}

void dlm_free_writequeue(struct writequeue_entry *writequeue)
{
	kmem_cache_free(writequeue_cache, writequeue);
}

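/* lowcomms message entries (cache from dlm_lowcomms_msg_cache_create()) */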
struct dlm_msg *dlm_allocate_msg(void)
{
	return kmem_cache_alloc(msg_cache, GFP_ATOMIC);
}

void dlm_free_msg(struct dlm_msg *msg)
{
	kmem_cache_free(msg_cache, msg);
}

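/* dlm_callback entries from cb_cache (see ast.h for their use) */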
struct dlm_callback *dlm_allocate_cb(void)
{
	return kmem_cache_alloc(cb_cache, GFP_ATOMIC);
}

void dlm_free_cb(struct dlm_callback *cb)
{
	kmem_cache_free(cb_cache, cb);
}