/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/types.h>
#include <sys/ksynch.h>
#include <sys/kmem.h>
#include <sys/sdt.h>

#include <sys/varargs.h>
#include <sys/unistat/spcs_s.h>

#include "safestore.h"
#include "safestore_impl.h"
#include "sd_trace.h"

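/*
 * Registry of safestore modules.  Each module that calls
 * sst_register_mod() is linked onto the ss_modules list, which is
 * protected by safestore_mutex.
 */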
typedef struct safestore_modules_s {
	struct safestore_modules_s *ssm_next;
	safestore_ops_t *ssm_module;
} safestore_modules_t;

safestore_modules_t *ss_modules;
kmutex_t safestore_mutex;
int ss_initialized;

/* the safestore module init/deinit functions */

void ss_ram_init();
void ss_ram_deinit();

/* CSTYLED */
/**#
 * initialize the safestore subsystem and all safestore
 * modules by calling all safestore modules' initialization functions
 *
 * NOTE: This function must be called with the _sdbc_config_lock held
 *
 * @param none
 * @return void
 *
 */
void
sst_init()
{
	/*
	 * initialize the ss modules we know about
	 * this results in calls to sst_register_mod()
	 */
	if (ss_initialized != SS_INITTED) {
		mutex_init(&safestore_mutex, NULL, MUTEX_DRIVER, NULL);
		ss_ram_init();
		ss_initialized = SS_INITTED;
	}

}

/* CSTYLED */
/**#
 * deinitialize the safestore subsystem and all safestore modules
 * by calling all safestore modules' deinitialization functions
 *
 * NOTE: This function must be called with the _sdbc_config_lock held
 *
 * @param none
 * @return void
 *
 */
void
sst_deinit()
{
	if (ss_initialized == SS_INITTED) {
		ss_ram_deinit();
		mutex_destroy(&safestore_mutex);
		ss_initialized = 0;
	}
}

/* BEGIN CSTYLED */
/**#
 * called by a safestore module to register its ops table
 * for use by clients
 *
 * @param ss_ops structure of safestore functions
 * @return void
 *
 * @see safestore_ops_t{}
 */
void
sst_register_mod(safestore_ops_t *ss_ops) /* END CSTYLED */
{
	safestore_modules_t *new;

	new = kmem_alloc(sizeof (*new), KM_SLEEP);

	mutex_enter(&safestore_mutex);
	new->ssm_module = ss_ops;
	new->ssm_next = ss_modules;

	ss_modules = new;
	mutex_exit(&safestore_mutex);
}
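
/*
 * Illustrative registration by a safestore module, a sketch only.
 * The "ss_example_ops" table and "ss_example_init" function are
 * hypothetical; see ss_ram_init() for the real RAM module hook:
 *
 *	static safestore_ops_t ss_example_ops = { ... };
 *
 *	void
 *	ss_example_init()
 *	{
 *		sst_register_mod(&ss_example_ops);
 *	}
 */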

/* BEGIN CSTYLED */
/**#
 * called by a safestore module to unregister its ops table
 * @param ss_ops structure of safestore functions
 *
 * @return void
 *
 * @see safestore_ops_t{}
 */
void
sst_unregister_mod(safestore_ops_t *ss_ops) /* END CSTYLED */
{
	safestore_modules_t *ssm, *prev;
	int found = 0;

	mutex_enter(&safestore_mutex);
	prev = NULL;
	for (ssm = ss_modules; ssm; prev = ssm, ssm = ssm->ssm_next) {
		if (ssm->ssm_module == ss_ops) {
			if (!prev)
				ss_modules = ssm->ssm_next;
			else
				prev->ssm_next = ssm->ssm_next;

			kmem_free(ssm, sizeof (safestore_modules_t));
			++found;
			break;
		}
	}
	mutex_exit(&safestore_mutex);

	if (!found)
		cmn_err(CE_WARN, "ss(sst_unregister_mod) "
		    "ss module %p not found", (void *)ss_ops);
}

/* BEGIN CSTYLED */
/**#
 * open a safestore module for use by a client
 * @param ss_type specifies a valid media type and transport type.
 *          the first module found that supports the requested types
 *          is used. may contain more than one media type or transport
 *          type if the client has no preference among several types.
 *          more than one ss_type may be specified in the call if the
 *          client has an ordered preference; the list must be
 *          terminated with 0.
 *
 * @return safestore_ops_t * pointer to a valid safestore ops structure
 *                           if the request is satisfied.
 *         NULL otherwise
 *
 * @see safestore_ops_t{}
 * @see SS_M_RAM
 * @see SS_M_NV_SINGLENODE
 * @see SS_M_NV_DUALNODE_NOMIRROR
 * @see SS_M_NV_DUALNODE_MIRROR
 * @see SS_T_STE
 * @see SS_T_RPC
 * @see SS_T_NONE
 */
safestore_ops_t *
sst_open(uint_t ss_type, ...) /* END CSTYLED */
{
	va_list ap;
	uint_t ssop_type;
	safestore_modules_t *ssm;

	if ((ss_modules == NULL) || !ss_type)
		return (NULL);

	va_start(ap, ss_type);
	mutex_enter(&safestore_mutex);
	do {
		for (ssm = ss_modules; ssm; ssm = ssm->ssm_next) {
			ssop_type = ssm->ssm_module->ssop_type;
			if ((ssop_type & SS_MEDIA_MASK) & ss_type)
				if ((ssop_type & SS_TRANSPORT_MASK) & ss_type) {
					va_end(ap);
					mutex_exit(&safestore_mutex);
					return (ssm->ssm_module);
				}
		}
	} while ((ss_type = va_arg(ap, uint_t)) != 0);
	mutex_exit(&safestore_mutex);

	va_end(ap);
	return (NULL);
}
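
/*
 * Illustrative client usage of sst_open()/sst_close(), a sketch only.
 * It assumes the SS_M_RAM, SS_M_NV_SINGLENODE and SS_T_NONE flags
 * named in the @see tags above; the trailing 0 terminates the list of
 * preferences, and the first preference a registered module satisfies
 * wins:
 *
 *	safestore_ops_t *ssp;
 *
 *	ssp = sst_open(SS_M_NV_SINGLENODE | SS_T_NONE,
 *	    SS_M_RAM | SS_T_NONE, 0);
 *	if (ssp == NULL)
 *		return (SS_ERR);
 *	...
 *	(void) sst_close(ssp);
 */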

/* BEGIN CSTYLED */
/**#
 * close a safestore module. called when client no longer wishes to use
 * a safestore module
 *
 * @param ssp points to a safestore_ops_t obtained from a previous call
 *            to sst_open()
 *
 * @return SS_OK if successful
 *         SS_ERR otherwise
 */
/*ARGSUSED*/
int
sst_close(safestore_ops_t *ssp) /* END CSTYLED */
{
	return (SS_OK);
}


/*
 * _sdbc_writeq_configure - configure the given writeq
 * Allocate the lock and sv we need to maintain waiters
 *
 */
int
_sdbc_writeq_configure(_sd_writeq_t *wrq)
{
	int i;

	wrq->wq_inq = 0;
	mutex_init(&wrq->wq_qlock, NULL, MUTEX_DRIVER, NULL);
	wrq->wq_qtop = NULL;
	wrq->wq_slp_top = 0;
	wrq->wq_slp_index = 0;
	wrq->wq_slp_inq = 0;

	for (i = 0; i < SD_WR_SLP_Q_MAX; i++) {
		wrq->wq_slp[i].slp_wqneed = 0;
		cv_init(&wrq->wq_slp[i].slp_wqcv, NULL, CV_DRIVER, NULL);
	}

	return (0);
}

/*
 * _sdbc_writeq_deconfigure - deconfigure the given writeq
 * Deallocate the lock and sv if present.
 *
 */
void
_sdbc_writeq_deconfigure(_sd_writeq_t *wrq)
{
	int i;

	if (wrq) {
		mutex_destroy(&wrq->wq_qlock);
		for (i = 0; i < SD_WR_SLP_Q_MAX; i++) {
			cv_destroy(&wrq->wq_slp[i].slp_wqcv);
		}
		wrq->wq_inq = 0;
		wrq->wq_qtop = NULL;
	}

}


int _sd_wblk_sync = 1;

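/*
 * ss_alloc_write - allocate 'need' write control blocks from writeq 'q'.
 * Returns the head of a chain of 'need' blocks taken from the top of
 * the queue, or NULL if the caller should fall back to a synchronous
 * (write-through) write.  When the queue is short and _sd_wblk_sync is
 * not set, the caller is put to sleep (incrementing *stall) until
 * enough blocks are released; NULL is returned if the sleep queue is
 * already full.
 */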
ss_wr_cctl_t *
ss_alloc_write(int need, int *stall, _sd_writeq_t *q)
{
	ss_wr_cctl_t *wctl;
	ss_wr_cctl_t *ret;
	int i;
	int aged = 0;

	if (_sd_wblk_sync && (q->wq_inq == 0))
		return (NULL); /* do sync write if queue empty */

	SDTRACE(ST_ENTER|SDF_WR_ALLOC, SDT_INV_CD, need,
	    SDT_INV_BL, q->wq_inq, _SD_NO_NET);

	if (need <= 0) {
		cmn_err(CE_WARN, "ss_alloc_write: bogus need value! %d", need);
		return (NULL);
	}

	mutex_enter(&(q->wq_qlock));
retry_wr_get:
	if (q->wq_inq < need) {
		if (!_sd_wblk_sync) {
			unsigned stime;
			stime = nsc_usec();

			/*
			 * Try to keep requests ordered so large requests
			 * are not starved.  We can queue 255 write requests;
			 * after that we go into write-through.
			 */
			if (q->wq_slp_inq < SD_WR_SLP_Q_MAX) {
				q->wq_slp_inq++;
				/* give preference to aged requests */
				if (aged) {
					WQ_SVWAIT_TOP(q, need);
				} else {
					WQ_SVWAIT_BOTTOM(q, need);
				}
				aged++;
			} else {
				mutex_exit(&(q->wq_qlock));
				return (NULL);
			}

			SDTRACE(ST_INFO|SDF_WR_ALLOC,
			    SDT_INV_CD, need, SDT_INV_BL, q->wq_inq,
			    (nsc_usec()-stime));
			(void) (*stall)++;
			goto retry_wr_get;
		}
		ret = NULL;
	} else {
get_wctl:
		wctl = q->wq_qtop;
		ret = wctl;
		DTRACE_PROBE1(alloc_write,
		    ss_wr_cctl_t *, wctl);
		for (i = 1; i < need; ++i) {
			wctl = wctl->wc_next;
			DTRACE_PROBE1(alloc_write_cont,
			    ss_wr_cctl_t *, wctl);
		}

		q->wq_qtop = wctl->wc_next;
		wctl->wc_next = NULL;
		q->wq_inq -= need;
	}
	mutex_exit(&(q->wq_qlock));

	SDTRACE(ST_EXIT|SDF_WR_ALLOC, SDT_INV_CD, need,
	    SDT_INV_BL, q->wq_inq, _SD_NO_NET);
	return (ret);
}

/*
 * ss_release_write - put a write block back in the writeq.
 *
 * ARGUMENTS:
 *	wctl	- Write control block to be released.
 *	q	- write q to put the wctl
 *
 * RETURNS: NONE
 */

void
ss_release_write(ss_wr_cctl_t *wctl, _sd_writeq_t *q)
{

	SDTRACE(ST_ENTER|SDF_WR_FREE, SDT_INV_CD, 0, SDT_INV_BL, q->wq_inq,
	    _SD_NO_NET);

	DTRACE_PROBE1(release_write,
	    ss_wr_cctl_t *, wctl);

#if defined(_SD_DEBUG)
	if (wctl->wc_gl_info->sci_dirty) {
		SDALERT(SDF_WR_FREE, wctl->wc_gl_info->sci_cd,
		    0, wctl->wc_gl_info->sci_fpos,
		    wctl->wc_gl_info->sci_dirty, 0);
	}
#endif
	mutex_enter(&q->wq_qlock);

	wctl->wc_next = q->wq_qtop;
	q->wq_qtop = wctl;
	q->wq_inq++;
	if (WQ_NEED_SIG(q)) {
		q->wq_slp_inq--;
		WQ_SVSIG(q);
	}
	mutex_exit(&q->wq_qlock);
	SDTRACE(ST_EXIT|SDF_WR_FREE, SDT_INV_CD, 0, SDT_INV_BL, q->wq_inq,
	    _SD_NO_NET);
}